diff --git a/hack/.golint_failures b/hack/.golint_failures index c53673bb7b5..a9a1453759c 100644 --- a/hack/.golint_failures +++ b/hack/.golint_failures @@ -608,7 +608,6 @@ test/e2e/chaosmonkey test/e2e/common test/e2e/framework test/e2e/lifecycle/bootstrap -test/e2e/network test/e2e/node test/e2e/scalability test/e2e/scheduling diff --git a/test/e2e/network/dns.go b/test/e2e/network/dns.go index f14e6ad3c25..acd94976b63 100644 --- a/test/e2e/network/dns.go +++ b/test/e2e/network/dns.go @@ -27,8 +27,8 @@ import ( "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" ) const dnsTestPodHostName = "dns-querier-1" @@ -60,16 +60,16 @@ var _ = SIGDescribe("DNS", func() { } wheezyProbeCmd, wheezyFileNames := createProbeCommand(namesToResolve, nil, "", "wheezy", f.Namespace.Name, framework.TestContext.ClusterDNSDomain) jessieProbeCmd, jessieFileNames := createProbeCommand(namesToResolve, nil, "", "jessie", f.Namespace.Name, framework.TestContext.ClusterDNSDomain) - By("Running these commands on wheezy: " + wheezyProbeCmd + "\n") - By("Running these commands on jessie: " + jessieProbeCmd + "\n") + ginkgo.By("Running these commands on wheezy: " + wheezyProbeCmd + "\n") + ginkgo.By("Running these commands on jessie: " + jessieProbeCmd + "\n") // Run a pod which probes DNS and exposes the results by HTTP. - By("creating a pod to probe DNS") + ginkgo.By("creating a pod to probe DNS") pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) validateDNSResults(f, pod, append(wheezyFileNames, jessieFileNames...)) }) - It("should resolve DNS of partial qualified names for the cluster ", func() { + ginkgo.It("should resolve DNS of partial qualified names for the cluster ", func() { // All the names we need to be able to resolve. // TODO: Spin up a separate test service and test that dns works for that service. namesToResolve := []string{ @@ -89,11 +89,11 @@ var _ = SIGDescribe("DNS", func() { hostEntries := []string{hostFQDN, dnsTestPodHostName} wheezyProbeCmd, wheezyFileNames := createProbeCommand(namesToResolve, hostEntries, "", "wheezy", f.Namespace.Name, framework.TestContext.ClusterDNSDomain) jessieProbeCmd, jessieFileNames := createProbeCommand(namesToResolve, hostEntries, "", "jessie", f.Namespace.Name, framework.TestContext.ClusterDNSDomain) - By("Running these commands on wheezy: " + wheezyProbeCmd + "\n") - By("Running these commands on jessie: " + jessieProbeCmd + "\n") + ginkgo.By("Running these commands on wheezy: " + wheezyProbeCmd + "\n") + ginkgo.By("Running these commands on jessie: " + jessieProbeCmd + "\n") // Run a pod which probes DNS and exposes the results by HTTP. 
- By("creating a pod to probe DNS") + ginkgo.By("creating a pod to probe DNS") pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) validateDNSResults(f, pod, append(wheezyFileNames, jessieFileNames...)) }) @@ -108,11 +108,11 @@ var _ = SIGDescribe("DNS", func() { hostEntries := []string{hostFQDN, dnsTestPodHostName} wheezyProbeCmd, wheezyFileNames := createProbeCommand(nil, hostEntries, "", "wheezy", f.Namespace.Name, framework.TestContext.ClusterDNSDomain) jessieProbeCmd, jessieFileNames := createProbeCommand(nil, hostEntries, "", "jessie", f.Namespace.Name, framework.TestContext.ClusterDNSDomain) - By("Running these commands on wheezy: " + wheezyProbeCmd + "\n") - By("Running these commands on jessie: " + jessieProbeCmd + "\n") + ginkgo.By("Running these commands on wheezy: " + wheezyProbeCmd + "\n") + ginkgo.By("Running these commands on jessie: " + jessieProbeCmd + "\n") // Run a pod which probes /etc/hosts and exposes the results by HTTP. - By("creating a pod to probe /etc/hosts") + ginkgo.By("creating a pod to probe /etc/hosts") pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) validateDNSResults(f, pod, append(wheezyFileNames, jessieFileNames...)) }) @@ -125,27 +125,27 @@ var _ = SIGDescribe("DNS", func() { framework.ConformanceIt("should provide DNS for services ", func() { // NOTE: This only contains the FQDN and the Host name, for testing partial name, see the test below // Create a test headless service. - By("Creating a test headless service") + ginkgo.By("Creating a test headless service") testServiceSelector := map[string]string{ "dns-test": "true", } headlessService := framework.CreateServiceSpec(dnsTestServiceName, "", true, testServiceSelector) _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(headlessService) - Expect(err).NotTo(HaveOccurred(), "failed to create headless service: %s", dnsTestServiceName) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create headless service: %s", dnsTestServiceName) defer func() { - By("deleting the test headless service") - defer GinkgoRecover() + ginkgo.By("deleting the test headless service") + defer ginkgo.GinkgoRecover() f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(headlessService.Name, nil) }() regularServiceName := "test-service-2" regularService := framework.CreateServiceSpec(regularServiceName, "", false, testServiceSelector) regularService, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(regularService) - Expect(err).NotTo(HaveOccurred(), "failed to create regular service: %s", regularServiceName) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create regular service: %s", regularServiceName) defer func() { - By("deleting the test service") - defer GinkgoRecover() + ginkgo.By("deleting the test service") + defer ginkgo.GinkgoRecover() f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(regularService.Name, nil) }() @@ -160,39 +160,39 @@ var _ = SIGDescribe("DNS", func() { wheezyProbeCmd, wheezyFileNames := createProbeCommand(namesToResolve, nil, regularService.Spec.ClusterIP, "wheezy", f.Namespace.Name, framework.TestContext.ClusterDNSDomain) jessieProbeCmd, jessieFileNames := createProbeCommand(namesToResolve, nil, regularService.Spec.ClusterIP, "jessie", f.Namespace.Name, framework.TestContext.ClusterDNSDomain) - By("Running these commands on wheezy: " + wheezyProbeCmd + "\n") - By("Running these commands on jessie: " + jessieProbeCmd + "\n") + 
ginkgo.By("Running these commands on wheezy: " + wheezyProbeCmd + "\n") + ginkgo.By("Running these commands on jessie: " + jessieProbeCmd + "\n") // Run a pod which probes DNS and exposes the results by HTTP. - By("creating a pod to probe DNS") + ginkgo.By("creating a pod to probe DNS") pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) pod.ObjectMeta.Labels = testServiceSelector validateDNSResults(f, pod, append(wheezyFileNames, jessieFileNames...)) }) - It("should resolve DNS of partial qualified names for services ", func() { + ginkgo.It("should resolve DNS of partial qualified names for services ", func() { // Create a test headless service. - By("Creating a test headless service") + ginkgo.By("Creating a test headless service") testServiceSelector := map[string]string{ "dns-test": "true", } headlessService := framework.CreateServiceSpec(dnsTestServiceName, "", true, testServiceSelector) _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(headlessService) - Expect(err).NotTo(HaveOccurred(), "failed to create headless service: %s", dnsTestServiceName) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create headless service: %s", dnsTestServiceName) defer func() { - By("deleting the test headless service") - defer GinkgoRecover() + ginkgo.By("deleting the test headless service") + defer ginkgo.GinkgoRecover() f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(headlessService.Name, nil) }() regularServiceName := "test-service-2" regularService := framework.CreateServiceSpec(regularServiceName, "", false, testServiceSelector) regularService, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(regularService) - Expect(err).NotTo(HaveOccurred(), "failed to create regular service: %s", regularServiceName) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create regular service: %s", regularServiceName) defer func() { - By("deleting the test service") - defer GinkgoRecover() + ginkgo.By("deleting the test service") + defer ginkgo.GinkgoRecover() f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(regularService.Name, nil) }() @@ -209,20 +209,20 @@ var _ = SIGDescribe("DNS", func() { wheezyProbeCmd, wheezyFileNames := createProbeCommand(namesToResolve, nil, regularService.Spec.ClusterIP, "wheezy", f.Namespace.Name, framework.TestContext.ClusterDNSDomain) jessieProbeCmd, jessieFileNames := createProbeCommand(namesToResolve, nil, regularService.Spec.ClusterIP, "jessie", f.Namespace.Name, framework.TestContext.ClusterDNSDomain) - By("Running these commands on wheezy: " + wheezyProbeCmd + "\n") - By("Running these commands on jessie: " + jessieProbeCmd + "\n") + ginkgo.By("Running these commands on wheezy: " + wheezyProbeCmd + "\n") + ginkgo.By("Running these commands on jessie: " + jessieProbeCmd + "\n") // Run a pod which probes DNS and exposes the results by HTTP. - By("creating a pod to probe DNS") + ginkgo.By("creating a pod to probe DNS") pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) pod.ObjectMeta.Labels = testServiceSelector validateDNSResults(f, pod, append(wheezyFileNames, jessieFileNames...)) }) - It("should provide DNS for pods for Hostname [LinuxOnly]", func() { + ginkgo.It("should provide DNS for pods for Hostname [LinuxOnly]", func() { // Create a test headless service. 
- By("Creating a test headless service") + ginkgo.By("Creating a test headless service") testServiceSelector := map[string]string{ "dns-test-hostname-attribute": "true", } @@ -230,11 +230,11 @@ var _ = SIGDescribe("DNS", func() { podHostname := "dns-querier-2" headlessService := framework.CreateServiceSpec(serviceName, "", true, testServiceSelector) _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(headlessService) - Expect(err).NotTo(HaveOccurred(), "failed to create headless service: %s", serviceName) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create headless service: %s", serviceName) defer func() { - By("deleting the test headless service") - defer GinkgoRecover() + ginkgo.By("deleting the test headless service") + defer ginkgo.GinkgoRecover() f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(headlessService.Name, nil) }() @@ -242,11 +242,11 @@ var _ = SIGDescribe("DNS", func() { hostNames := []string{hostFQDN, podHostname} wheezyProbeCmd, wheezyFileNames := createProbeCommand(nil, hostNames, "", "wheezy", f.Namespace.Name, framework.TestContext.ClusterDNSDomain) jessieProbeCmd, jessieFileNames := createProbeCommand(nil, hostNames, "", "jessie", f.Namespace.Name, framework.TestContext.ClusterDNSDomain) - By("Running these commands on wheezy: " + wheezyProbeCmd + "\n") - By("Running these commands on jessie: " + jessieProbeCmd + "\n") + ginkgo.By("Running these commands on wheezy: " + wheezyProbeCmd + "\n") + ginkgo.By("Running these commands on jessie: " + jessieProbeCmd + "\n") // Run a pod which probes DNS and exposes the results by HTTP. - By("creating a pod to probe DNS") + ginkgo.By("creating a pod to probe DNS") pod1 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) pod1.ObjectMeta.Labels = testServiceSelector pod1.Spec.Hostname = podHostname @@ -255,9 +255,9 @@ var _ = SIGDescribe("DNS", func() { validateDNSResults(f, pod1, append(wheezyFileNames, jessieFileNames...)) }) - It("should provide DNS for pods for Subdomain", func() { + ginkgo.It("should provide DNS for pods for Subdomain", func() { // Create a test headless service. 
- By("Creating a test headless service") + ginkgo.By("Creating a test headless service") testServiceSelector := map[string]string{ "dns-test-hostname-attribute": "true", } @@ -265,11 +265,11 @@ var _ = SIGDescribe("DNS", func() { podHostname := "dns-querier-2" headlessService := framework.CreateServiceSpec(serviceName, "", true, testServiceSelector) _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(headlessService) - Expect(err).NotTo(HaveOccurred(), "failed to create headless service: %s", serviceName) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create headless service: %s", serviceName) defer func() { - By("deleting the test headless service") - defer GinkgoRecover() + ginkgo.By("deleting the test headless service") + defer ginkgo.GinkgoRecover() f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(headlessService.Name, nil) }() @@ -277,11 +277,11 @@ var _ = SIGDescribe("DNS", func() { namesToResolve := []string{hostFQDN} wheezyProbeCmd, wheezyFileNames := createProbeCommand(namesToResolve, nil, "", "wheezy", f.Namespace.Name, framework.TestContext.ClusterDNSDomain) jessieProbeCmd, jessieFileNames := createProbeCommand(namesToResolve, nil, "", "jessie", f.Namespace.Name, framework.TestContext.ClusterDNSDomain) - By("Running these commands on wheezy: " + wheezyProbeCmd + "\n") - By("Running these commands on jessie: " + jessieProbeCmd + "\n") + ginkgo.By("Running these commands on wheezy: " + wheezyProbeCmd + "\n") + ginkgo.By("Running these commands on jessie: " + jessieProbeCmd + "\n") // Run a pod which probes DNS and exposes the results by HTTP. - By("creating a pod to probe DNS") + ginkgo.By("creating a pod to probe DNS") pod1 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) pod1.ObjectMeta.Labels = testServiceSelector pod1.Spec.Hostname = podHostname @@ -298,72 +298,72 @@ var _ = SIGDescribe("DNS", func() { */ framework.ConformanceIt("should provide DNS for ExternalName services", func() { // Create a test ExternalName service. - By("Creating a test externalName service") + ginkgo.By("Creating a test externalName service") serviceName := "dns-test-service-3" externalNameService := framework.CreateServiceSpec(serviceName, "foo.example.com", false, nil) _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(externalNameService) - Expect(err).NotTo(HaveOccurred(), "failed to create ExternalName service: %s", serviceName) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create ExternalName service: %s", serviceName) defer func() { - By("deleting the test externalName service") - defer GinkgoRecover() + ginkgo.By("deleting the test externalName service") + defer ginkgo.GinkgoRecover() f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(externalNameService.Name, nil) }() hostFQDN := fmt.Sprintf("%s.%s.svc.%s", serviceName, f.Namespace.Name, framework.TestContext.ClusterDNSDomain) wheezyProbeCmd, wheezyFileName := createTargetedProbeCommand(hostFQDN, "CNAME", "wheezy") jessieProbeCmd, jessieFileName := createTargetedProbeCommand(hostFQDN, "CNAME", "jessie") - By("Running these commands on wheezy: " + wheezyProbeCmd + "\n") - By("Running these commands on jessie: " + jessieProbeCmd + "\n") + ginkgo.By("Running these commands on wheezy: " + wheezyProbeCmd + "\n") + ginkgo.By("Running these commands on jessie: " + jessieProbeCmd + "\n") // Run a pod which probes DNS and exposes the results by HTTP. 
- By("creating a pod to probe DNS") + ginkgo.By("creating a pod to probe DNS") pod1 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) validateTargetedProbeOutput(f, pod1, []string{wheezyFileName, jessieFileName}, "foo.example.com.") // Test changing the externalName field - By("changing the externalName to bar.example.com") + ginkgo.By("changing the externalName to bar.example.com") _, err = framework.UpdateService(f.ClientSet, f.Namespace.Name, serviceName, func(s *v1.Service) { s.Spec.ExternalName = "bar.example.com" }) - Expect(err).NotTo(HaveOccurred(), "failed to change externalName of service: %s", serviceName) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to change externalName of service: %s", serviceName) wheezyProbeCmd, wheezyFileName = createTargetedProbeCommand(hostFQDN, "CNAME", "wheezy") jessieProbeCmd, jessieFileName = createTargetedProbeCommand(hostFQDN, "CNAME", "jessie") - By("Running these commands on wheezy: " + wheezyProbeCmd + "\n") - By("Running these commands on jessie: " + jessieProbeCmd + "\n") + ginkgo.By("Running these commands on wheezy: " + wheezyProbeCmd + "\n") + ginkgo.By("Running these commands on jessie: " + jessieProbeCmd + "\n") // Run a pod which probes DNS and exposes the results by HTTP. - By("creating a second pod to probe DNS") + ginkgo.By("creating a second pod to probe DNS") pod2 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) validateTargetedProbeOutput(f, pod2, []string{wheezyFileName, jessieFileName}, "bar.example.com.") // Test changing type from ExternalName to ClusterIP - By("changing the service to type=ClusterIP") + ginkgo.By("changing the service to type=ClusterIP") _, err = framework.UpdateService(f.ClientSet, f.Namespace.Name, serviceName, func(s *v1.Service) { s.Spec.Type = v1.ServiceTypeClusterIP s.Spec.Ports = []v1.ServicePort{ {Port: 80, Name: "http", Protocol: v1.ProtocolTCP}, } }) - Expect(err).NotTo(HaveOccurred(), "failed to change service type to ClusterIP for service: %s", serviceName) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to change service type to ClusterIP for service: %s", serviceName) wheezyProbeCmd, wheezyFileName = createTargetedProbeCommand(hostFQDN, "A", "wheezy") jessieProbeCmd, jessieFileName = createTargetedProbeCommand(hostFQDN, "A", "jessie") - By("Running these commands on wheezy: " + wheezyProbeCmd + "\n") - By("Running these commands on jessie: " + jessieProbeCmd + "\n") + ginkgo.By("Running these commands on wheezy: " + wheezyProbeCmd + "\n") + ginkgo.By("Running these commands on jessie: " + jessieProbeCmd + "\n") // Run a pod which probes DNS and exposes the results by HTTP. 
- By("creating a third pod to probe DNS") + ginkgo.By("creating a third pod to probe DNS") pod3 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) svc, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Get(externalNameService.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred(), "failed to get service: %s", externalNameService.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get service: %s", externalNameService.Name) validateTargetedProbeOutput(f, pod3, []string{wheezyFileName, jessieFileName}, svc.Spec.ClusterIP) }) - It("should support configurable pod DNS nameservers", func() { - By("Creating a pod with dnsPolicy=None and customized dnsConfig...") + ginkgo.It("should support configurable pod DNS nameservers", func() { + ginkgo.By("Creating a pod with dnsPolicy=None and customized dnsConfig...") testServerIP := "1.1.1.1" testSearchPath := "resolv.conf.local" testAgnhostPod := f.NewAgnhostPod(f.Namespace.Name, "pause") @@ -373,15 +373,15 @@ var _ = SIGDescribe("DNS", func() { Searches: []string{testSearchPath}, } testAgnhostPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(testAgnhostPod) - Expect(err).NotTo(HaveOccurred(), "failed to create pod: %s", testAgnhostPod.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create pod: %s", testAgnhostPod.Name) framework.Logf("Created pod %v", testAgnhostPod) defer func() { framework.Logf("Deleting pod %s...", testAgnhostPod.Name) if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(testAgnhostPod.Name, metav1.NewDeleteOptions(0)); err != nil { - framework.Failf("Failed to delete pod %s: %v", testAgnhostPod.Name, err) + framework.Failf("ginkgo.Failed to delete pod %s: %v", testAgnhostPod.Name, err) } }() - Expect(f.WaitForPodRunning(testAgnhostPod.Name)).NotTo(HaveOccurred(), "failed to wait for pod %s to be running", testAgnhostPod.Name) + gomega.Expect(f.WaitForPodRunning(testAgnhostPod.Name)).NotTo(gomega.HaveOccurred(), "failed to wait for pod %s to be running", testAgnhostPod.Name) runCommand := func(arg string) string { cmd := []string{"/agnhost", arg} @@ -393,25 +393,25 @@ var _ = SIGDescribe("DNS", func() { CaptureStdout: true, CaptureStderr: true, }) - Expect(err).NotTo(HaveOccurred(), "failed to run command '/agnhost %s' on pod, stdout: %v, stderr: %v, err: %v", arg, stdout, stderr, err) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to run command '/agnhost %s' on pod, stdout: %v, stderr: %v, err: %v", arg, stdout, stderr, err) return stdout } - By("Verifying customized DNS suffix list is configured on pod...") + ginkgo.By("Verifying customized DNS suffix list is configured on pod...") stdout := runCommand("dns-suffix") if !strings.Contains(stdout, testSearchPath) { framework.Failf("customized DNS suffix list not found configured in pod, expected to contain: %s, got: %s", testSearchPath, stdout) } - By("Verifying customized DNS server is configured on pod...") + ginkgo.By("Verifying customized DNS server is configured on pod...") stdout = runCommand("dns-server-list") if !strings.Contains(stdout, testServerIP) { framework.Failf("customized DNS server not found in configured in pod, expected to contain: %s, got: %s", testServerIP, stdout) } }) - It("should support configurable pod resolv.conf", func() { - By("Preparing a test DNS service with injected DNS names...") + ginkgo.It("should support configurable pod resolv.conf", func() { + ginkgo.By("Preparing a test DNS service with injected DNS names...") 
testInjectedIP := "1.1.1.1" testDNSNameShort := "notexistname" testSearchPath := "resolv.conf.local" @@ -421,23 +421,23 @@ var _ = SIGDescribe("DNS", func() { testDNSNameFull: testInjectedIP, }) testServerPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(testServerPod) - Expect(err).NotTo(HaveOccurred(), "failed to create pod: %s", testServerPod.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create pod: %s", testServerPod.Name) e2elog.Logf("Created pod %v", testServerPod) defer func() { e2elog.Logf("Deleting pod %s...", testServerPod.Name) if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(testServerPod.Name, metav1.NewDeleteOptions(0)); err != nil { - framework.Failf("Failed to delete pod %s: %v", testServerPod.Name, err) + framework.Failf("ginkgo.Failed to delete pod %s: %v", testServerPod.Name, err) } }() - Expect(f.WaitForPodRunning(testServerPod.Name)).NotTo(HaveOccurred(), "failed to wait for pod %s to be running", testServerPod.Name) + gomega.Expect(f.WaitForPodRunning(testServerPod.Name)).NotTo(gomega.HaveOccurred(), "failed to wait for pod %s to be running", testServerPod.Name) // Retrieve server pod IP. testServerPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(testServerPod.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred(), "failed to get pod %v", testServerPod.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get pod %v", testServerPod.Name) testServerIP := testServerPod.Status.PodIP e2elog.Logf("testServerIP is %s", testServerIP) - By("Creating a pod with dnsPolicy=None and customized dnsConfig...") + ginkgo.By("Creating a pod with dnsPolicy=None and customized dnsConfig...") testUtilsPod := generateDNSUtilsPod() testUtilsPod.Spec.DNSPolicy = v1.DNSNone testNdotsValue := "2" @@ -452,17 +452,17 @@ var _ = SIGDescribe("DNS", func() { }, } testUtilsPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(testUtilsPod) - Expect(err).NotTo(HaveOccurred(), "failed to create pod: %s", testUtilsPod.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create pod: %s", testUtilsPod.Name) e2elog.Logf("Created pod %v", testUtilsPod) defer func() { e2elog.Logf("Deleting pod %s...", testUtilsPod.Name) if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(testUtilsPod.Name, metav1.NewDeleteOptions(0)); err != nil { - framework.Failf("Failed to delete pod %s: %v", testUtilsPod.Name, err) + framework.Failf("ginkgo.Failed to delete pod %s: %v", testUtilsPod.Name, err) } }() - Expect(f.WaitForPodRunning(testUtilsPod.Name)).NotTo(HaveOccurred(), "failed to wait for pod %s to be running", testUtilsPod.Name) + gomega.Expect(f.WaitForPodRunning(testUtilsPod.Name)).NotTo(gomega.HaveOccurred(), "failed to wait for pod %s to be running", testUtilsPod.Name) - By("Verifying customized DNS option is configured on pod...") + ginkgo.By("Verifying customized DNS option is configured on pod...") // TODO: Figure out a better way other than checking the actual resolv,conf file. 
cmd := []string{"cat", "/etc/resolv.conf"} stdout, stderr, err := f.ExecWithOptions(framework.ExecOptions{ @@ -473,12 +473,12 @@ var _ = SIGDescribe("DNS", func() { CaptureStdout: true, CaptureStderr: true, }) - Expect(err).NotTo(HaveOccurred(), "failed to examine resolv,conf file on pod, stdout: %v, stderr: %v, err: %v", stdout, stderr, err) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to examine resolv,conf file on pod, stdout: %v, stderr: %v, err: %v", stdout, stderr, err) if !strings.Contains(stdout, "ndots:2") { framework.Failf("customized DNS options not found in resolv.conf, got: %s", stdout) } - By("Verifying customized name server and search path are working...") + ginkgo.By("Verifying customized name server and search path are working...") // Do dig on not-exist-dns-name and see if the injected DNS record is returned. // This verifies both: // - Custom search path is appended. @@ -494,7 +494,7 @@ var _ = SIGDescribe("DNS", func() { CaptureStderr: true, }) if err != nil { - e2elog.Logf("Failed to execute dig command, stdout:%v, stderr: %v, err: %v", stdout, stderr, err) + e2elog.Logf("ginkgo.Failed to execute dig command, stdout:%v, stderr: %v, err: %v", stdout, stderr, err) return false, nil } res := strings.Split(stdout, "\n") @@ -505,7 +505,7 @@ var _ = SIGDescribe("DNS", func() { return true, nil } err = wait.PollImmediate(5*time.Second, 3*time.Minute, digFunc) - Expect(err).NotTo(HaveOccurred(), "failed to verify customized name server and search path") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to verify customized name server and search path") // TODO: Add more test cases for other DNSPolicies. }) diff --git a/test/e2e/network/dns_common.go b/test/e2e/network/dns_common.go index 929ec23e063..a53653b6389 100644 --- a/test/e2e/network/dns_common.go +++ b/test/e2e/network/dns_common.go @@ -35,8 +35,8 @@ import ( e2elog "k8s.io/kubernetes/test/e2e/framework/log" imageutils "k8s.io/kubernetes/test/utils/image" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" ) type dnsTestCommon struct { @@ -62,14 +62,14 @@ func newDNSTestCommon() dnsTestCommon { } func (t *dnsTestCommon) init() { - By("Finding a DNS pod") + ginkgo.By("Finding a DNS pod") label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": "kube-dns"})) options := metav1.ListOptions{LabelSelector: label.String()} namespace := "kube-system" pods, err := t.f.ClientSet.CoreV1().Pods(namespace).List(options) - Expect(err).NotTo(HaveOccurred(), "failed to list pods in namespace: %s", namespace) - Expect(len(pods.Items)).Should(BeNumerically(">=", 1)) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to list pods in namespace: %s", namespace) + gomega.Expect(len(pods.Items)).Should(gomega.BeNumerically(">=", 1)) t.dnsPod = &pods.Items[0] e2elog.Logf("Using DNS pod: %v", t.dnsPod.Name) @@ -157,23 +157,23 @@ func (t *dnsTestCommon) setConfigMap(cm *v1.ConfigMap) { }.AsSelector().String(), } cmList, err := t.c.CoreV1().ConfigMaps(t.ns).List(options) - Expect(err).NotTo(HaveOccurred(), "failed to list ConfigMaps in namespace: %s", t.ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to list ConfigMaps in namespace: %s", t.ns) if len(cmList.Items) == 0 { - By(fmt.Sprintf("Creating the ConfigMap (%s:%s) %+v", t.ns, t.name, *cm)) + ginkgo.By(fmt.Sprintf("Creating the ConfigMap (%s:%s) %+v", t.ns, t.name, *cm)) _, err := t.c.CoreV1().ConfigMaps(t.ns).Create(cm) - Expect(err).NotTo(HaveOccurred(), "failed to create ConfigMap (%s:%s) %+v", t.ns, t.name, *cm) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create ConfigMap (%s:%s) %+v", t.ns, t.name, *cm) } else { - By(fmt.Sprintf("Updating the ConfigMap (%s:%s) to %+v", t.ns, t.name, *cm)) + ginkgo.By(fmt.Sprintf("Updating the ConfigMap (%s:%s) to %+v", t.ns, t.name, *cm)) _, err := t.c.CoreV1().ConfigMaps(t.ns).Update(cm) - Expect(err).NotTo(HaveOccurred(), "failed to update ConfigMap (%s:%s) to %+v", t.ns, t.name, *cm) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to update ConfigMap (%s:%s) to %+v", t.ns, t.name, *cm) } } func (t *dnsTestCommon) fetchDNSConfigMapData() map[string]string { if t.name == "coredns" { pcm, err := t.c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(t.name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred(), "failed to get DNS ConfigMap: %s", t.name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get DNS ConfigMap: %s", t.name) return pcm.Data } return nil @@ -189,10 +189,10 @@ func (t *dnsTestCommon) restoreDNSConfigMap(configMapData map[string]string) { } func (t *dnsTestCommon) deleteConfigMap() { - By(fmt.Sprintf("Deleting the ConfigMap (%s:%s)", t.ns, t.name)) + ginkgo.By(fmt.Sprintf("Deleting the ConfigMap (%s:%s)", t.ns, t.name)) t.cm = nil err := t.c.CoreV1().ConfigMaps(t.ns).Delete(t.name, nil) - Expect(err).NotTo(HaveOccurred(), "failed to delete config map: %s", t.name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete config map: %s", t.name) } func (t *dnsTestCommon) createUtilPodLabel(baseName string) { @@ -224,9 +224,9 @@ func (t *dnsTestCommon) createUtilPodLabel(baseName string) { var err error t.utilPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Create(t.utilPod) - Expect(err).NotTo(HaveOccurred(), "failed to create pod: %v", t.utilPod) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create pod: %v", t.utilPod) e2elog.Logf("Created pod %v", t.utilPod) - Expect(t.f.WaitForPodRunning(t.utilPod.Name)).NotTo(HaveOccurred(), 
"pod failed to start running: %v", t.utilPod) + gomega.Expect(t.f.WaitForPodRunning(t.utilPod.Name)).NotTo(gomega.HaveOccurred(), "pod failed to start running: %v", t.utilPod) t.utilService = &v1.Service{ TypeMeta: metav1.TypeMeta{ @@ -249,7 +249,7 @@ func (t *dnsTestCommon) createUtilPodLabel(baseName string) { } t.utilService, err = t.c.CoreV1().Services(t.f.Namespace.Name).Create(t.utilService) - Expect(err).NotTo(HaveOccurred(), "failed to create service: %s/%s", t.f.Namespace.Name, t.utilService.ObjectMeta.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create service: %s/%s", t.f.Namespace.Name, t.utilService.ObjectMeta.Name) e2elog.Logf("Created service %v", t.utilService) } @@ -272,7 +272,7 @@ func (t *dnsTestCommon) deleteCoreDNSPods() { for _, pod := range pods.Items { err = podClient.Delete(pod.Name, metav1.NewDeleteOptions(0)) - Expect(err).NotTo(HaveOccurred(), "failed to delete pod: %s", pod.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete pod: %s", pod.Name) } } @@ -315,13 +315,13 @@ func (t *dnsTestCommon) createDNSPodFromObj(pod *v1.Pod) { var err error t.dnsServerPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Create(t.dnsServerPod) - Expect(err).NotTo(HaveOccurred(), "failed to create pod: %v", t.dnsServerPod) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create pod: %v", t.dnsServerPod) e2elog.Logf("Created pod %v", t.dnsServerPod) - Expect(t.f.WaitForPodRunning(t.dnsServerPod.Name)).NotTo(HaveOccurred(), "pod failed to start running: %v", t.dnsServerPod) + gomega.Expect(t.f.WaitForPodRunning(t.dnsServerPod.Name)).NotTo(gomega.HaveOccurred(), "pod failed to start running: %v", t.dnsServerPod) t.dnsServerPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Get( t.dnsServerPod.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred(), "failed to get pod: %s", t.dnsServerPod.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get pod: %s", t.dnsServerPod.Name) } func (t *dnsTestCommon) createDNSServer(aRecords map[string]string) { @@ -539,30 +539,30 @@ func assertFilesContain(fileNames []string, fileDir string, pod *v1.Pod, client e2elog.Logf("Lookups using %s/%s failed for: %v\n", pod.Namespace, pod.Name, failed) return false, nil })) - Expect(len(failed)).To(Equal(0)) + gomega.Expect(len(failed)).To(gomega.Equal(0)) } func validateDNSResults(f *framework.Framework, pod *v1.Pod, fileNames []string) { - By("submitting the pod to kubernetes") + ginkgo.By("submitting the pod to kubernetes") podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name) defer func() { - By("deleting the pod") - defer GinkgoRecover() + ginkgo.By("deleting the pod") + defer ginkgo.GinkgoRecover() podClient.Delete(pod.Name, metav1.NewDeleteOptions(0)) }() if _, err := podClient.Create(pod); err != nil { - framework.Failf("Failed to create pod %s/%s: %v", pod.Namespace, pod.Name, err) + framework.Failf("ginkgo.Failed to create pod %s/%s: %v", pod.Namespace, pod.Name, err) } framework.ExpectNoError(f.WaitForPodRunning(pod.Name)) - By("retrieving the pod") + ginkgo.By("retrieving the pod") pod, err := podClient.Get(pod.Name, metav1.GetOptions{}) if err != nil { - framework.Failf("Failed to get pod %s/%s: %v", pod.Namespace, pod.Name, err) + framework.Failf("ginkgo.Failed to get pod %s/%s: %v", pod.Namespace, pod.Name, err) } // Try to find results for each expected name. 
- By("looking for the results for each expected name from probers") + ginkgo.By("looking for the results for each expected name from probers") assertFilesExist(fileNames, "results", pod, f.ClientSet) // TODO: probe from the host, too. @@ -571,26 +571,26 @@ func validateDNSResults(f *framework.Framework, pod *v1.Pod, fileNames []string) } func validateTargetedProbeOutput(f *framework.Framework, pod *v1.Pod, fileNames []string, value string) { - By("submitting the pod to kubernetes") + ginkgo.By("submitting the pod to kubernetes") podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name) defer func() { - By("deleting the pod") - defer GinkgoRecover() + ginkgo.By("deleting the pod") + defer ginkgo.GinkgoRecover() podClient.Delete(pod.Name, metav1.NewDeleteOptions(0)) }() if _, err := podClient.Create(pod); err != nil { - framework.Failf("Failed to create pod %s/%s: %v", pod.Namespace, pod.Name, err) + framework.Failf("ginkgo.Failed to create pod %s/%s: %v", pod.Namespace, pod.Name, err) } framework.ExpectNoError(f.WaitForPodRunning(pod.Name)) - By("retrieving the pod") + ginkgo.By("retrieving the pod") pod, err := podClient.Get(pod.Name, metav1.GetOptions{}) if err != nil { - framework.Failf("Failed to get pod %s/%s: %v", pod.Namespace, pod.Name, err) + framework.Failf("ginkgo.Failed to get pod %s/%s: %v", pod.Namespace, pod.Name, err) } // Try to find the expected value for each expected name. - By("looking for the results for each expected name from probers") + ginkgo.By("looking for the results for each expected name from probers") assertFilesContain(fileNames, "results", pod, f.ClientSet, true, value) e2elog.Logf("DNS probes using %s succeeded\n", pod.Name) diff --git a/test/e2e/network/dns_configmap.go b/test/e2e/network/dns_configmap.go index 1cb384b504c..b10dd9ca816 100644 --- a/test/e2e/network/dns_configmap.go +++ b/test/e2e/network/dns_configmap.go @@ -24,7 +24,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubernetes/test/e2e/framework" - . 
"github.com/onsi/ginkgo" + "github.com/onsi/ginkgo" ) type dnsFederationsConfigMapTest struct { @@ -45,7 +45,7 @@ var _ = SIGDescribe("DNS configMap federations [Feature:Federation]", func() { t := &dnsFederationsConfigMapTest{dnsTestCommon: newDNSTestCommon()} - It("should be able to change federation configuration [Slow][Serial]", func() { + ginkgo.It("should be able to change federation configuration [Slow][Serial]", func() { t.c = t.f.ClientSet t.run() }) @@ -96,17 +96,17 @@ func (t *dnsFederationsConfigMapTest) run() { }`, framework.TestContext.ClusterDNSDomain, framework.TestContext.ClusterDNSDomain)} valid2m := map[string]string{t.labels[1]: "xyz.com"} - By("default -> valid1") + ginkgo.By("default -> valid1") t.setConfigMap(&v1.ConfigMap{Data: valid1}, valid1m, true) t.deleteCoreDNSPods() t.validate(framework.TestContext.ClusterDNSDomain) - By("valid1 -> valid2") + ginkgo.By("valid1 -> valid2") t.setConfigMap(&v1.ConfigMap{Data: valid2}, valid2m, true) t.deleteCoreDNSPods() t.validate(framework.TestContext.ClusterDNSDomain) - By("valid2 -> default") + ginkgo.By("valid2 -> default") t.setConfigMap(&v1.ConfigMap{Data: originalConfigMapData}, nil, false) t.deleteCoreDNSPods() t.validate(framework.TestContext.ClusterDNSDomain) @@ -121,27 +121,27 @@ func (t *dnsFederationsConfigMapTest) run() { valid2m := map[string]string{t.labels[1]: "xyz"} invalid := map[string]string{"federations": "invalid.map=xyz"} - By("empty -> valid1") + ginkgo.By("empty -> valid1") t.setConfigMap(&v1.ConfigMap{Data: valid1}, valid1m, true) t.validate(framework.TestContext.ClusterDNSDomain) - By("valid1 -> valid2") + ginkgo.By("valid1 -> valid2") t.setConfigMap(&v1.ConfigMap{Data: valid2}, valid2m, true) t.validate(framework.TestContext.ClusterDNSDomain) - By("valid2 -> invalid") + ginkgo.By("valid2 -> invalid") t.setConfigMap(&v1.ConfigMap{Data: invalid}, nil, false) t.validate(framework.TestContext.ClusterDNSDomain) - By("invalid -> valid1") + ginkgo.By("invalid -> valid1") t.setConfigMap(&v1.ConfigMap{Data: valid1}, valid1m, true) t.validate(framework.TestContext.ClusterDNSDomain) - By("valid1 -> deleted") + ginkgo.By("valid1 -> deleted") t.deleteConfigMap() t.validate(framework.TestContext.ClusterDNSDomain) - By("deleted -> invalid") + ginkgo.By("deleted -> invalid") t.setConfigMap(&v1.ConfigMap{Data: invalid}, nil, false) t.validate(framework.TestContext.ClusterDNSDomain) } @@ -151,7 +151,7 @@ func (t *dnsFederationsConfigMapTest) validate(dnsDomain string) { federations := t.fedMap if len(federations) == 0 { - By(fmt.Sprintf("Validating federation labels %v do not exist", t.labels)) + ginkgo.By(fmt.Sprintf("Validating federation labels %v do not exist", t.labels)) for _, label := range t.labels { var federationDNS = fmt.Sprintf("e2e-dns-configmap.%s.%s.svc.%s.", @@ -173,7 +173,7 @@ func (t *dnsFederationsConfigMapTest) validate(dnsDomain string) { // Check local mapping. Checking a remote mapping requires // creating an arbitrary DNS record which is not possible at the // moment. 
- By(fmt.Sprintf("Validating federation record %v", label)) + ginkgo.By(fmt.Sprintf("Validating federation record %v", label)) predicate := func(actual []string) bool { for _, v := range actual { if v == localDNS { @@ -407,16 +407,16 @@ func (t *dnsExternalNameTest) run(isIPv6 bool) { serviceName := "dns-externalname-upstream-test" externalNameService := framework.CreateServiceSpec(serviceName, googleDNSHostname, false, nil) if _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(externalNameService); err != nil { - Fail(fmt.Sprintf("Failed when creating service: %v", err)) + ginkgo.Fail(fmt.Sprintf("ginkgo.Failed when creating service: %v", err)) } serviceNameLocal := "dns-externalname-upstream-local" externalNameServiceLocal := framework.CreateServiceSpec(serviceNameLocal, fooHostname, false, nil) if _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(externalNameServiceLocal); err != nil { - Fail(fmt.Sprintf("Failed when creating service: %v", err)) + ginkgo.Fail(fmt.Sprintf("ginkgo.Failed when creating service: %v", err)) } defer func() { - By("deleting the test externalName service") - defer GinkgoRecover() + ginkgo.By("deleting the test externalName service") + defer ginkgo.GinkgoRecover() f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(externalNameService.Name, nil) f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(externalNameServiceLocal.Name, nil) }() @@ -482,28 +482,28 @@ func (t *dnsExternalNameTest) run(isIPv6 bool) { var _ = SIGDescribe("DNS configMap nameserver [IPv4]", func() { - Context("Change stubDomain", func() { + ginkgo.Context("Change stubDomain", func() { nsTest := &dnsNameserverTest{dnsTestCommon: newDNSTestCommon()} - It("should be able to change stubDomain configuration [Slow][Serial]", func() { + ginkgo.It("should be able to change stubDomain configuration [Slow][Serial]", func() { nsTest.c = nsTest.f.ClientSet nsTest.run(false) }) }) - Context("Forward PTR lookup", func() { + ginkgo.Context("Forward PTR lookup", func() { fwdTest := &dnsPtrFwdTest{dnsTestCommon: newDNSTestCommon()} - It("should forward PTR records lookup to upstream nameserver [Slow][Serial]", func() { + ginkgo.It("should forward PTR records lookup to upstream nameserver [Slow][Serial]", func() { fwdTest.c = fwdTest.f.ClientSet fwdTest.run(false) }) }) - Context("Forward external name lookup", func() { + ginkgo.Context("Forward external name lookup", func() { externalNameTest := &dnsExternalNameTest{dnsTestCommon: newDNSTestCommon()} - It("should forward externalname lookup to upstream nameserver [Slow][Serial]", func() { + ginkgo.It("should forward externalname lookup to upstream nameserver [Slow][Serial]", func() { externalNameTest.c = externalNameTest.f.ClientSet externalNameTest.run(false) }) @@ -512,28 +512,28 @@ var _ = SIGDescribe("DNS configMap nameserver [IPv4]", func() { var _ = SIGDescribe("DNS configMap nameserver [Feature:Networking-IPv6]", func() { - Context("Change stubDomain", func() { + ginkgo.Context("Change stubDomain", func() { nsTest := &dnsNameserverTest{dnsTestCommon: newDNSTestCommon()} - It("should be able to change stubDomain configuration [Slow][Serial]", func() { + ginkgo.It("should be able to change stubDomain configuration [Slow][Serial]", func() { nsTest.c = nsTest.f.ClientSet nsTest.run(true) }) }) - Context("Forward PTR lookup", func() { + ginkgo.Context("Forward PTR lookup", func() { fwdTest := &dnsPtrFwdTest{dnsTestCommon: newDNSTestCommon()} - It("should forward PTR records lookup to upstream nameserver [Slow][Serial]", 
func() { + ginkgo.It("should forward PTR records lookup to upstream nameserver [Slow][Serial]", func() { fwdTest.c = fwdTest.f.ClientSet fwdTest.run(true) }) }) - Context("Forward external name lookup", func() { + ginkgo.Context("Forward external name lookup", func() { externalNameTest := &dnsExternalNameTest{dnsTestCommon: newDNSTestCommon()} - It("should forward externalname lookup to upstream nameserver [Slow][Serial]", func() { + ginkgo.It("should forward externalname lookup to upstream nameserver [Slow][Serial]", func() { externalNameTest.c = externalNameTest.f.ClientSet externalNameTest.run(true) }) diff --git a/test/e2e/network/dns_scale_records.go b/test/e2e/network/dns_scale_records.go index 1b6ae5dcc55..37002bfd2c6 100644 --- a/test/e2e/network/dns_scale_records.go +++ b/test/e2e/network/dns_scale_records.go @@ -30,7 +30,7 @@ import ( e2elog "k8s.io/kubernetes/test/e2e/framework/log" testutils "k8s.io/kubernetes/test/utils" - . "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo" ) const ( @@ -43,7 +43,7 @@ const ( var _ = SIGDescribe("[Feature:PerformanceDNS][Serial]", func() { f := framework.NewDefaultFramework("performancedns") - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.ExpectNoError(framework.WaitForAllNodesSchedulable(f.ClientSet, framework.TestContext.NodeSchedulableTimeout)) framework.WaitForAllNodesHealthy(f.ClientSet, time.Minute) @@ -52,7 +52,7 @@ var _ = SIGDescribe("[Feature:PerformanceDNS][Serial]", func() { }) // answers dns for service - creates the maximum number of services, and then check dns record for one - It("Should answer DNS query for maximum number of services per cluster", func() { + ginkgo.It("Should answer DNS query for maximum number of services per cluster", func() { // get integer ceiling of maxServicesPerCluster / maxServicesPerNamespace numNs := (maxServicesPerCluster + maxServicesPerNamespace - 1) / maxServicesPerNamespace @@ -64,7 +64,7 @@ var _ = SIGDescribe("[Feature:PerformanceDNS][Serial]", func() { services := generateServicesInNamespaces(namespaces, maxServicesPerCluster) createService := func(i int) { - defer GinkgoRecover() + defer ginkgo.GinkgoRecover() framework.ExpectNoError(testutils.CreateServiceWithRetries(f.ClientSet, services[i].Namespace, services[i])) } e2elog.Logf("Creating %v test services", maxServicesPerCluster) diff --git a/test/e2e/network/example_cluster_dns.go b/test/e2e/network/example_cluster_dns.go index 17cf3758791..cf7e3dcd458 100644 --- a/test/e2e/network/example_cluster_dns.go +++ b/test/e2e/network/example_cluster_dns.go @@ -32,8 +32,8 @@ import ( "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" ) const ( @@ -52,11 +52,11 @@ var _ = SIGDescribe("ClusterDns [Feature:Example]", func() { f := framework.NewDefaultFramework("cluster-dns") var c clientset.Interface - BeforeEach(func() { + ginkgo.BeforeEach(func() { c = f.ClientSet }) - It("should create pod that uses dns", func() { + ginkgo.It("should create pod that uses dns", func() { mkpath := func(file string) string { return filepath.Join(os.Getenv("GOPATH"), "src/k8s.io/examples/staging/cluster-dns", file) } @@ -84,7 +84,7 @@ var _ = SIGDescribe("ClusterDns [Feature:Example]", func() { var err error namespaceName := fmt.Sprintf("dnsexample%d", i) namespaces[i], err = f.CreateNamespace(namespaceName, nil) - Expect(err).NotTo(HaveOccurred(), "failed to create namespace: %s", namespaceName) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create namespace: %s", namespaceName) } for _, ns := range namespaces { @@ -106,13 +106,13 @@ var _ = SIGDescribe("ClusterDns [Feature:Example]", func() { label := labels.SelectorFromSet(labels.Set(map[string]string{"name": backendRcName})) options := metav1.ListOptions{LabelSelector: label.String()} pods, err := c.CoreV1().Pods(ns.Name).List(options) - Expect(err).NotTo(HaveOccurred(), "failed to list pods in namespace: %s", ns.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to list pods in namespace: %s", ns.Name) err = framework.PodsResponding(c, ns.Name, backendPodName, false, pods) - Expect(err).NotTo(HaveOccurred(), "waiting for all pods to respond") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "waiting for all pods to respond") e2elog.Logf("found %d backend pods responding in namespace %s", len(pods.Items), ns.Name) err = framework.ServiceResponding(c, ns.Name, backendSvcName) - Expect(err).NotTo(HaveOccurred(), "waiting for the service to respond") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "waiting for the service to respond") } // Now another tricky part: @@ -134,7 +134,7 @@ var _ = SIGDescribe("ClusterDns [Feature:Example]", func() { queryDNS := fmt.Sprintf(queryDNSPythonTemplate, backendSvcName+"."+namespaces[0].Name) _, err = framework.LookForStringInPodExec(namespaces[0].Name, podName, []string{"python", "-c", queryDNS}, "ok", dnsReadyTimeout) - Expect(err).NotTo(HaveOccurred(), "waiting for output from pod exec") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "waiting for output from pod exec") updatedPodYaml := prepareResourceWithReplacedString(frontendPodYaml, fmt.Sprintf("dns-backend.development.svc.%s", framework.TestContext.ClusterDNSDomain), fmt.Sprintf("dns-backend.%s.svc.%s", namespaces[0].Name, framework.TestContext.ClusterDNSDomain)) @@ -153,7 +153,7 @@ var _ = SIGDescribe("ClusterDns [Feature:Example]", func() { // wait for pods to print their result for _, ns := range namespaces { _, err := framework.LookForStringInLog(ns.Name, frontendPodName, frontendPodContainerName, podOutput, framework.PodStartTimeout) - Expect(err).NotTo(HaveOccurred(), "pod %s failed to print result in logs", frontendPodName) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "pod %s failed to print result in logs", frontendPodName) } }) }) @@ -165,10 +165,10 @@ func getNsCmdFlag(ns *v1.Namespace) string { // pass enough context with the 'old' parameter so that it replaces what your really intended. 
func prepareResourceWithReplacedString(inputFile, old, new string) string { f, err := os.Open(inputFile) - Expect(err).NotTo(HaveOccurred(), "failed to open file: %s", inputFile) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to open file: %s", inputFile) defer f.Close() data, err := ioutil.ReadAll(f) - Expect(err).NotTo(HaveOccurred(), "failed to read from file: %s", inputFile) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to read from file: %s", inputFile) podYaml := strings.Replace(string(data), old, new, 1) return podYaml } diff --git a/test/e2e/network/firewall.go b/test/e2e/network/firewall.go index ecce3c4195d..07363bd5550 100644 --- a/test/e2e/network/firewall.go +++ b/test/e2e/network/firewall.go @@ -30,8 +30,8 @@ import ( "k8s.io/kubernetes/test/e2e/framework/providers/gce" gcecloud "k8s.io/legacy-cloud-providers/gce" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" ) const ( @@ -49,38 +49,38 @@ var _ = SIGDescribe("Firewall rule", func() { var cloudConfig framework.CloudConfig var gceCloud *gcecloud.Cloud - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("gce") var err error cs = f.ClientSet cloudConfig = framework.TestContext.CloudConfig gceCloud, err = gce.GetGCECloud() - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) // This test takes around 6 minutes to run - It("[Slow] [Serial] should create valid firewall rules for LoadBalancer type service", func() { + ginkgo.It("[Slow] [Serial] should create valid firewall rules for LoadBalancer type service", func() { ns := f.Namespace.Name // This source ranges is just used to examine we have exact same things on LB firewall rules firewallTestSourceRanges := []string{"0.0.0.0/1", "128.0.0.0/1"} serviceName := "firewall-test-loadbalancer" - By("Getting cluster ID") + ginkgo.By("Getting cluster ID") clusterID, err := gce.GetClusterID(cs) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) e2elog.Logf("Got cluster ID: %v", clusterID) jig := framework.NewServiceTestJig(cs, serviceName) nodeList := jig.GetNodes(framework.MaxNodesForEndpointsTests) - Expect(nodeList).NotTo(BeNil()) + gomega.Expect(nodeList).NotTo(gomega.BeNil()) nodesNames := jig.GetNodesNames(framework.MaxNodesForEndpointsTests) if len(nodesNames) <= 0 { framework.Failf("Expect at least 1 node, got: %v", nodesNames) } nodesSet := sets.NewString(nodesNames...) 
- By("Creating a LoadBalancer type service with ExternalTrafficPolicy=Global") + ginkgo.By("Creating a LoadBalancer type service with ExternalTrafficPolicy=Global") svc := jig.CreateLoadBalancerService(ns, serviceName, framework.LoadBalancerCreateTimeoutDefault, func(svc *v1.Service) { svc.Spec.Ports = []v1.ServicePort{{Protocol: v1.ProtocolTCP, Port: firewallTestHTTPPort}} svc.Spec.LoadBalancerSourceRanges = firewallTestSourceRanges @@ -90,61 +90,61 @@ var _ = SIGDescribe("Firewall rule", func() { svc.Spec.Type = v1.ServiceTypeNodePort svc.Spec.LoadBalancerSourceRanges = nil }) - Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred()) - By("Waiting for the local traffic health check firewall rule to be deleted") + gomega.Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(gomega.HaveOccurred()) + ginkgo.By("Waiting for the local traffic health check firewall rule to be deleted") localHCFwName := gce.MakeHealthCheckFirewallNameForLBService(clusterID, cloudprovider.DefaultLoadBalancerName(svc), false) _, err := gce.WaitForFirewallRule(gceCloud, localHCFwName, false, framework.LoadBalancerCleanupTimeout) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }() svcExternalIP := svc.Status.LoadBalancer.Ingress[0].IP - By("Checking if service's firewall rule is correct") + ginkgo.By("Checking if service's firewall rule is correct") lbFw := gce.ConstructFirewallForLBService(svc, cloudConfig.NodeTag) fw, err := gceCloud.GetFirewall(lbFw.Name) - Expect(err).NotTo(HaveOccurred()) - Expect(gce.VerifyFirewallRule(fw, lbFw, cloudConfig.Network, false)).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(gce.VerifyFirewallRule(fw, lbFw, cloudConfig.Network, false)).NotTo(gomega.HaveOccurred()) - By("Checking if service's nodes health check firewall rule is correct") + ginkgo.By("Checking if service's nodes health check firewall rule is correct") nodesHCFw := gce.ConstructHealthCheckFirewallForLBService(clusterID, svc, cloudConfig.NodeTag, true) fw, err = gceCloud.GetFirewall(nodesHCFw.Name) - Expect(err).NotTo(HaveOccurred()) - Expect(gce.VerifyFirewallRule(fw, nodesHCFw, cloudConfig.Network, false)).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(gce.VerifyFirewallRule(fw, nodesHCFw, cloudConfig.Network, false)).NotTo(gomega.HaveOccurred()) // OnlyLocal service is needed to examine which exact nodes the requests are being forwarded to by the Load Balancer on GCE - By("Updating LoadBalancer service to ExternalTrafficPolicy=Local") + ginkgo.By("Updating LoadBalancer service to ExternalTrafficPolicy=Local") svc = jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) { svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal }) - By("Waiting for the nodes health check firewall rule to be deleted") + ginkgo.By("Waiting for the nodes health check firewall rule to be deleted") _, err = gce.WaitForFirewallRule(gceCloud, nodesHCFw.Name, false, framework.LoadBalancerCleanupTimeout) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - By("Waiting for the correct local traffic health check firewall rule to be created") + ginkgo.By("Waiting for the correct local traffic health check firewall rule to be created") localHCFw := gce.ConstructHealthCheckFirewallForLBService(clusterID, svc, cloudConfig.NodeTag, false) fw, err = gce.WaitForFirewallRule(gceCloud, localHCFw.Name, true, 
framework.LoadBalancerCreateTimeoutDefault) - Expect(err).NotTo(HaveOccurred()) - Expect(gce.VerifyFirewallRule(fw, localHCFw, cloudConfig.Network, false)).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(gce.VerifyFirewallRule(fw, localHCFw, cloudConfig.Network, false)).NotTo(gomega.HaveOccurred()) - By(fmt.Sprintf("Creating netexec pods on at most %v nodes", framework.MaxNodesForEndpointsTests)) + ginkgo.By(fmt.Sprintf("Creating netexec pods on at most %v nodes", framework.MaxNodesForEndpointsTests)) for i, nodeName := range nodesNames { podName := fmt.Sprintf("netexec%v", i) jig.LaunchNetexecPodOnNode(f, nodeName, podName, firewallTestHTTPPort, firewallTestUDPPort, true) defer func() { e2elog.Logf("Cleaning up the netexec pod: %v", podName) - Expect(cs.CoreV1().Pods(ns).Delete(podName, nil)).NotTo(HaveOccurred()) + gomega.Expect(cs.CoreV1().Pods(ns).Delete(podName, nil)).NotTo(gomega.HaveOccurred()) }() } // Send requests from outside of the cluster because internal traffic is whitelisted - By("Accessing the external service ip from outside, all non-master nodes should be reached") - Expect(framework.TestHitNodesFromOutside(svcExternalIP, firewallTestHTTPPort, framework.LoadBalancerCreateTimeoutDefault, nodesSet)).NotTo(HaveOccurred()) + ginkgo.By("Accessing the external service ip from outside, all non-master nodes should be reached") + gomega.Expect(framework.TestHitNodesFromOutside(svcExternalIP, firewallTestHTTPPort, framework.LoadBalancerCreateTimeoutDefault, nodesSet)).NotTo(gomega.HaveOccurred()) // Check if there are overlapping tags on the firewall that extend beyond just the vms in our cluster // by removing the tag on one vm and make sure it doesn't get any traffic. This is an imperfect // simulation, we really want to check that traffic doesn't reach a vm outside the GKE cluster, but // that's much harder to do in the current e2e framework. - By(fmt.Sprintf("Removing tags from one of the nodes: %v", nodesNames[0])) + ginkgo.By(fmt.Sprintf("Removing tags from one of the nodes: %v", nodesNames[0])) nodesSet.Delete(nodesNames[0]) // Instance could run in a different zone in multi-zone test. Figure out which zone // it is in before proceeding. 
@@ -154,31 +154,31 @@ var _ = SIGDescribe("Firewall rule", func() { } removedTags := gce.SetInstanceTags(cloudConfig, nodesNames[0], zone, []string{}) defer func() { - By("Adding tags back to the node and wait till the traffic is recovered") + ginkgo.By("Adding tags back to the node and wait till the traffic is recovered") nodesSet.Insert(nodesNames[0]) gce.SetInstanceTags(cloudConfig, nodesNames[0], zone, removedTags) // Make sure traffic is recovered before exit - Expect(framework.TestHitNodesFromOutside(svcExternalIP, firewallTestHTTPPort, framework.LoadBalancerCreateTimeoutDefault, nodesSet)).NotTo(HaveOccurred()) + gomega.Expect(framework.TestHitNodesFromOutside(svcExternalIP, firewallTestHTTPPort, framework.LoadBalancerCreateTimeoutDefault, nodesSet)).NotTo(gomega.HaveOccurred()) }() - By("Accessing serivce through the external ip and examine got no response from the node without tags") - Expect(framework.TestHitNodesFromOutsideWithCount(svcExternalIP, firewallTestHTTPPort, framework.LoadBalancerCreateTimeoutDefault, nodesSet, 15)).NotTo(HaveOccurred()) + ginkgo.By("Accessing serivce through the external ip and examine got no response from the node without tags") + gomega.Expect(framework.TestHitNodesFromOutsideWithCount(svcExternalIP, firewallTestHTTPPort, framework.LoadBalancerCreateTimeoutDefault, nodesSet, 15)).NotTo(gomega.HaveOccurred()) }) - It("should have correct firewall rules for e2e cluster", func() { + ginkgo.It("should have correct firewall rules for e2e cluster", func() { nodes := framework.GetReadySchedulableNodesOrDie(cs) if len(nodes.Items) <= 0 { framework.Failf("Expect at least 1 node, got: %v", len(nodes.Items)) } - By("Checking if e2e firewall rules are correct") + ginkgo.By("Checking if e2e firewall rules are correct") for _, expFw := range gce.GetE2eFirewalls(cloudConfig.MasterName, cloudConfig.MasterTag, cloudConfig.NodeTag, cloudConfig.Network, cloudConfig.ClusterIPRange) { fw, err := gceCloud.GetFirewall(expFw.Name) - Expect(err).NotTo(HaveOccurred()) - Expect(gce.VerifyFirewallRule(fw, expFw, cloudConfig.Network, false)).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(gce.VerifyFirewallRule(fw, expFw, cloudConfig.Network, false)).NotTo(gomega.HaveOccurred()) } - By("Checking well known ports on master and nodes are not exposed externally") + ginkgo.By("Checking well known ports on master and nodes are not exposed externally") nodeAddrs := framework.NodeAddresses(nodes, v1.NodeExternalIP) if len(nodeAddrs) == 0 { framework.Failf("did not find any node addresses") diff --git a/test/e2e/network/framework.go b/test/e2e/network/framework.go index bbabb66fc79..4080c32308b 100644 --- a/test/e2e/network/framework.go +++ b/test/e2e/network/framework.go @@ -18,6 +18,7 @@ package network import "github.com/onsi/ginkgo" +// SIGDescribe annotates the test with the SIG label. func SIGDescribe(text string, body func()) bool { return ginkgo.Describe("[sig-network] "+text, body) } diff --git a/test/e2e/network/ingress.go b/test/e2e/network/ingress.go index 5ea0bd4fda5..45eca3516d0 100644 --- a/test/e2e/network/ingress.go +++ b/test/e2e/network/ingress.go @@ -40,8 +40,8 @@ import ( e2elog "k8s.io/kubernetes/test/e2e/framework/log" "k8s.io/kubernetes/test/e2e/framework/providers/gce" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" ) const ( @@ -50,7 +50,7 @@ const ( ) var _ = SIGDescribe("Loadbalancing: L7", func() { - defer GinkgoRecover() + defer ginkgo.GinkgoRecover() var ( ns string jig *ingress.TestJig @@ -58,7 +58,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { ) f := framework.NewDefaultFramework("ingress") - BeforeEach(func() { + ginkgo.BeforeEach(func() { jig = ingress.NewIngressTestJig(f.ClientSet) ns = f.Namespace.Name @@ -81,59 +81,59 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { // // Slow by design ~10m for each "It" block dominated by loadbalancer setup time // TODO: write similar tests for nginx, haproxy and AWS Ingress. - Describe("GCE [Slow] [Feature:Ingress]", func() { + ginkgo.Describe("GCE [Slow] [Feature:Ingress]", func() { var gceController *gce.IngressController // Platform specific setup - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("gce", "gke") - By("Initializing gce controller") + ginkgo.By("Initializing gce controller") gceController = &gce.IngressController{ Ns: ns, Client: jig.Client, Cloud: framework.TestContext.CloudConfig, } err := gceController.Init() - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) // Platform specific cleanup - AfterEach(func() { - if CurrentGinkgoTestDescription().Failed { + ginkgo.AfterEach(func() { + if ginkgo.CurrentGinkgoTestDescription().Failed { framework.DescribeIng(ns) } if jig.Ingress == nil { - By("No ingress created, no cleanup necessary") + ginkgo.By("No ingress created, no cleanup necessary") return } - By("Deleting ingress") + ginkgo.By("Deleting ingress") jig.TryDeleteIngress() - By("Cleaning up cloud resources") - Expect(gceController.CleanupIngressController()).NotTo(HaveOccurred()) + ginkgo.By("Cleaning up cloud resources") + gomega.Expect(gceController.CleanupIngressController()).NotTo(gomega.HaveOccurred()) }) - It("should conform to Ingress spec", func() { + ginkgo.It("should conform to Ingress spec", func() { conformanceTests = ingress.CreateIngressComformanceTests(jig, ns, map[string]string{}) for _, t := range conformanceTests { - By(t.EntryLog) + ginkgo.By(t.EntryLog) t.Execute() - By(t.ExitLog) + ginkgo.By(t.ExitLog) jig.WaitForIngress(true) } }) - It("should create ingress with pre-shared certificate", func() { + ginkgo.It("should create ingress with pre-shared certificate", func() { executePresharedCertTest(f, jig, "") }) - It("should support multiple TLS certs", func() { - By("Creating an ingress with no certs.") + ginkgo.It("should support multiple TLS certs", func() { + ginkgo.By("Creating an ingress with no certs.") jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "multiple-certs"), ns, map[string]string{ ingress.IngressStaticIPKey: ns, }, map[string]string{}) - By("Adding multiple certs to the ingress.") + ginkgo.By("Adding multiple certs to the ingress.") hosts := []string{"test1.ingress.com", "test2.ingress.com", "test3.ingress.com", "test4.ingress.com"} secrets := []string{"tls-secret-1", "tls-secret-2", "tls-secret-3", "tls-secret-4"} certs := [][]byte{} @@ -143,33 +143,33 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { } for i, host := range hosts { err := jig.WaitForIngressWithCert(true, []string{host}, certs[i]) - Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Unexpected error while waiting for ingress: %v", err)) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Unexpected error while waiting for ingress: %v", err)) } - 
By("Remove all but one of the certs on the ingress.") + ginkgo.By("Remove all but one of the certs on the ingress.") jig.RemoveHTTPS(secrets[1]) jig.RemoveHTTPS(secrets[2]) jig.RemoveHTTPS(secrets[3]) - By("Test that the remaining cert is properly served.") + ginkgo.By("Test that the remaining cert is properly served.") err := jig.WaitForIngressWithCert(true, []string{hosts[0]}, certs[0]) - Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Unexpected error while waiting for ingress: %v", err)) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Unexpected error while waiting for ingress: %v", err)) - By("Add back one of the certs that was removed and check that all certs are served.") + ginkgo.By("Add back one of the certs that was removed and check that all certs are served.") jig.AddHTTPS(secrets[1], hosts[1]) for i, host := range hosts[:2] { err := jig.WaitForIngressWithCert(true, []string{host}, certs[i]) - Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Unexpected error while waiting for ingress: %v", err)) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Unexpected error while waiting for ingress: %v", err)) } }) - It("multicluster ingress should get instance group annotation", func() { + ginkgo.It("multicluster ingress should get instance group annotation", func() { name := "echomap" jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "http"), ns, map[string]string{ ingress.IngressClassKey: ingress.MulticlusterIngressClassValue, }, map[string]string{}) - By(fmt.Sprintf("waiting for Ingress %s to get instance group annotation", name)) + ginkgo.By(fmt.Sprintf("waiting for Ingress %s to get instance group annotation", name)) pollErr := wait.Poll(2*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) { ing, err := f.ClientSet.ExtensionsV1beta1().Ingresses(ns).Get(name, metav1.GetOptions{}) framework.ExpectNoError(err) @@ -237,118 +237,118 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { // zone based on pod labels. 
}) - Describe("GCE [Slow] [Feature:NEG]", func() { + ginkgo.Describe("GCE [Slow] [Feature:NEG]", func() { var gceController *gce.IngressController // Platform specific setup - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("gce", "gke") - By("Initializing gce controller") + ginkgo.By("Initializing gce controller") gceController = &gce.IngressController{ Ns: ns, Client: jig.Client, Cloud: framework.TestContext.CloudConfig, } err := gceController.Init() - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) // Platform specific cleanup - AfterEach(func() { - if CurrentGinkgoTestDescription().Failed { + ginkgo.AfterEach(func() { + if ginkgo.CurrentGinkgoTestDescription().Failed { framework.DescribeIng(ns) } if jig.Ingress == nil { - By("No ingress created, no cleanup necessary") + ginkgo.By("No ingress created, no cleanup necessary") return } - By("Deleting ingress") + ginkgo.By("Deleting ingress") jig.TryDeleteIngress() - By("Cleaning up cloud resources") - Expect(gceController.CleanupIngressController()).NotTo(HaveOccurred()) + ginkgo.By("Cleaning up cloud resources") + gomega.Expect(gceController.CleanupIngressController()).NotTo(gomega.HaveOccurred()) }) - It("should conform to Ingress spec", func() { + ginkgo.It("should conform to Ingress spec", func() { jig.PollInterval = 5 * time.Second conformanceTests = ingress.CreateIngressComformanceTests(jig, ns, map[string]string{ ingress.NEGAnnotation: `{"ingress": true}`, }) for _, t := range conformanceTests { - By(t.EntryLog) + ginkgo.By(t.EntryLog) t.Execute() - By(t.ExitLog) + ginkgo.By(t.ExitLog) jig.WaitForIngress(true) - Expect(gceController.WaitForNegBackendService(jig.GetServicePorts(false))).NotTo(HaveOccurred()) + gomega.Expect(gceController.WaitForNegBackendService(jig.GetServicePorts(false))).NotTo(gomega.HaveOccurred()) } }) - It("should be able to switch between IG and NEG modes", func() { + ginkgo.It("should be able to switch between IG and NEG modes", func() { var err error - By("Create a basic HTTP ingress using NEG") + ginkgo.By("Create a basic HTTP ingress using NEG") jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{}) jig.WaitForIngress(true) - Expect(gceController.WaitForNegBackendService(jig.GetServicePorts(false))).NotTo(HaveOccurred()) + gomega.Expect(gceController.WaitForNegBackendService(jig.GetServicePorts(false))).NotTo(gomega.HaveOccurred()) - By("Switch backend service to use IG") + ginkgo.By("Switch backend service to use IG") svcList, err := f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) for _, svc := range svcList.Items { svc.Annotations[ingress.NEGAnnotation] = `{"ingress": false}` _, err = f.ClientSet.CoreV1().Services(ns).Update(&svc) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } err = wait.Poll(5*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) { if err := gceController.BackendServiceUsingIG(jig.GetServicePorts(false)); err != nil { - e2elog.Logf("Failed to verify IG backend service: %v", err) + e2elog.Logf("ginkgo.Failed to verify IG backend service: %v", err) return false, nil } return true, nil }) - Expect(err).NotTo(HaveOccurred(), "Expect backend service to target IG, but failed to observe") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Expect backend service to target IG, but failed to observe") 
jig.WaitForIngress(true) - By("Switch backend service to use NEG") + ginkgo.By("Switch backend service to use NEG") svcList, err = f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) for _, svc := range svcList.Items { svc.Annotations[ingress.NEGAnnotation] = `{"ingress": true}` _, err = f.ClientSet.CoreV1().Services(ns).Update(&svc) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } err = wait.Poll(5*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) { if err := gceController.BackendServiceUsingNEG(jig.GetServicePorts(false)); err != nil { - e2elog.Logf("Failed to verify NEG backend service: %v", err) + e2elog.Logf("ginkgo.Failed to verify NEG backend service: %v", err) return false, nil } return true, nil }) - Expect(err).NotTo(HaveOccurred(), "Expect backend service to target NEG, but failed to observe") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Expect backend service to target NEG, but failed to observe") jig.WaitForIngress(true) }) - It("should be able to create a ClusterIP service", func() { - By("Create a basic HTTP ingress using NEG") + ginkgo.It("should be able to create a ClusterIP service", func() { + ginkgo.By("Create a basic HTTP ingress using NEG") jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg-clusterip"), ns, map[string]string{}, map[string]string{}) jig.WaitForIngress(true) svcPorts := jig.GetServicePorts(false) - Expect(gceController.WaitForNegBackendService(svcPorts)).NotTo(HaveOccurred()) + gomega.Expect(gceController.WaitForNegBackendService(svcPorts)).NotTo(gomega.HaveOccurred()) // ClusterIP ServicePorts have no NodePort for _, sp := range svcPorts { - Expect(sp.NodePort).To(Equal(int32(0))) + gomega.Expect(sp.NodePort).To(gomega.Equal(int32(0))) } }) - It("should sync endpoints to NEG", func() { + ginkgo.It("should sync endpoints to NEG", func() { name := "hostname" scaleAndValidateNEG := func(num int) { scale, err := f.ClientSet.AppsV1().Deployments(ns).GetScale(name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) if scale.Spec.Replicas != int32(num) { scale.Spec.Replicas = int32(num) _, err = f.ClientSet.AppsV1().Deployments(ns).UpdateScale(name, scale) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } err = wait.Poll(10*time.Second, negUpdateTimeout, func() (bool, error) { res, err := jig.GetDistinctResponseFromIngress() @@ -358,45 +358,45 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { e2elog.Logf("Expecting %d backends, got %d", num, res.Len()) return res.Len() == num, nil }) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - By("Create a basic HTTP ingress using NEG") + ginkgo.By("Create a basic HTTP ingress using NEG") jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{}) jig.WaitForIngress(true) jig.WaitForIngressToStable() - Expect(gceController.WaitForNegBackendService(jig.GetServicePorts(false))).NotTo(HaveOccurred()) + gomega.Expect(gceController.WaitForNegBackendService(jig.GetServicePorts(false))).NotTo(gomega.HaveOccurred()) // initial replicas number is 1 scaleAndValidateNEG(1) - By("Scale up number of backends to 5") + ginkgo.By("Scale up number of backends to 5") scaleAndValidateNEG(5) - By("Scale down number of backends to 3") + ginkgo.By("Scale down number of backends to 
3") scaleAndValidateNEG(3) - By("Scale up number of backends to 6") + ginkgo.By("Scale up number of backends to 6") scaleAndValidateNEG(6) - By("Scale down number of backends to 2") + ginkgo.By("Scale down number of backends to 2") scaleAndValidateNEG(3) }) - It("rolling update backend pods should not cause service disruption", func() { + ginkgo.It("rolling update backend pods should not cause service disruption", func() { name := "hostname" replicas := 8 - By("Create a basic HTTP ingress using NEG") + ginkgo.By("Create a basic HTTP ingress using NEG") jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{}) jig.WaitForIngress(true) jig.WaitForIngressToStable() - Expect(gceController.WaitForNegBackendService(jig.GetServicePorts(false))).NotTo(HaveOccurred()) + gomega.Expect(gceController.WaitForNegBackendService(jig.GetServicePorts(false))).NotTo(gomega.HaveOccurred()) - By(fmt.Sprintf("Scale backend replicas to %d", replicas)) + ginkgo.By(fmt.Sprintf("Scale backend replicas to %d", replicas)) scale, err := f.ClientSet.AppsV1().Deployments(ns).GetScale(name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) scale.Spec.Replicas = int32(replicas) _, err = f.ClientSet.AppsV1().Deployments(ns).UpdateScale(name, scale) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = wait.Poll(10*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) { res, err := jig.GetDistinctResponseFromIngress() @@ -405,21 +405,21 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { } return res.Len() == replicas, nil }) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) - By("Trigger rolling update and observe service disruption") + ginkgo.By("Trigger rolling update and observe service disruption") deploy, err := f.ClientSet.AppsV1().Deployments(ns).Get(name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // trigger by changing graceful termination period to 60 seconds gracePeriod := int64(60) deploy.Spec.Template.Spec.TerminationGracePeriodSeconds = &gracePeriod _, err = f.ClientSet.AppsV1().Deployments(ns).Update(deploy) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) err = wait.Poll(10*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) { res, err := jig.GetDistinctResponseFromIngress() - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) deploy, err := f.ClientSet.AppsV1().Deployments(ns).Get(name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) if int(deploy.Status.UpdatedReplicas) == replicas { if res.Len() == replicas { return true, nil @@ -427,29 +427,28 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { e2elog.Logf("Expecting %d different responses, but got %d.", replicas, res.Len()) return false, nil - } else { - e2elog.Logf("Waiting for rolling update to finished. Keep sending traffic.") - return false, nil } + e2elog.Logf("Waiting for rolling update to finished. 
Keep sending traffic.") + return false, nil }) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) - It("should sync endpoints for both Ingress-referenced NEG and standalone NEG", func() { + ginkgo.It("should sync endpoints for both Ingress-referenced NEG and standalone NEG", func() { name := "hostname" expectedKeys := []int32{80, 443} scaleAndValidateExposedNEG := func(num int) { scale, err := f.ClientSet.AppsV1().Deployments(ns).GetScale(name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) if scale.Spec.Replicas != int32(num) { scale.Spec.Replicas = int32(num) _, err = f.ClientSet.AppsV1().Deployments(ns).UpdateScale(name, scale) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } err = wait.Poll(10*time.Second, negUpdateTimeout, func() (bool, error) { svc, err := f.ClientSet.CoreV1().Services(ns).Get(name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) var status ingress.NegStatus v, ok := svc.Annotations[ingress.NEGStatusAnnotation] @@ -482,10 +481,10 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { } gceCloud, err := gce.GetGCECloud() - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) for _, neg := range status.NetworkEndpointGroups { networkEndpoints, err := gceCloud.ListNetworkEndpoints(neg, gceController.Cloud.Zone, false) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) if len(networkEndpoints) != num { e2elog.Logf("Expect number of endpoints to be %d, but got %d", num, len(networkEndpoints)) return false, nil @@ -494,31 +493,31 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { return true, nil }) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } - By("Create a basic HTTP ingress using NEG") + ginkgo.By("Create a basic HTTP ingress using NEG") jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg-exposed"), ns, map[string]string{}, map[string]string{}) jig.WaitForIngress(true) - Expect(gceController.WaitForNegBackendService(jig.GetServicePorts(false))).NotTo(HaveOccurred()) + gomega.Expect(gceController.WaitForNegBackendService(jig.GetServicePorts(false))).NotTo(gomega.HaveOccurred()) // initial replicas number is 1 scaleAndValidateExposedNEG(1) - By("Scale up number of backends to 5") + ginkgo.By("Scale up number of backends to 5") scaleAndValidateExposedNEG(5) - By("Scale down number of backends to 3") + ginkgo.By("Scale down number of backends to 3") scaleAndValidateExposedNEG(3) - By("Scale up number of backends to 6") + ginkgo.By("Scale up number of backends to 6") scaleAndValidateExposedNEG(6) - By("Scale down number of backends to 2") + ginkgo.By("Scale down number of backends to 2") scaleAndValidateExposedNEG(3) }) - It("should create NEGs for all ports with the Ingress annotation, and NEGs for the standalone annotation otherwise", func() { - By("Create a basic HTTP ingress using standalone NEG") + ginkgo.It("should create NEGs for all ports with the Ingress annotation, and NEGs for the standalone annotation otherwise", func() { + ginkgo.By("Create a basic HTTP ingress using standalone NEG") jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg-exposed"), ns, map[string]string{}, map[string]string{}) jig.WaitForIngress(true) @@ -526,120 +525,120 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { detectNegAnnotation(f, jig, gceController, ns, 
name, 2) // Add Ingress annotation - NEGs should stay the same. - By("Adding NEG Ingress annotation") + ginkgo.By("Adding NEG Ingress annotation") svcList, err := f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) for _, svc := range svcList.Items { svc.Annotations[ingress.NEGAnnotation] = `{"ingress":true,"exposed_ports":{"80":{},"443":{}}}` _, err = f.ClientSet.CoreV1().Services(ns).Update(&svc) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } detectNegAnnotation(f, jig, gceController, ns, name, 2) // Modify exposed NEG annotation, but keep ingress annotation - By("Modifying exposed NEG annotation, but keep Ingress annotation") + ginkgo.By("Modifying exposed NEG annotation, but keep Ingress annotation") svcList, err = f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) for _, svc := range svcList.Items { svc.Annotations[ingress.NEGAnnotation] = `{"ingress":true,"exposed_ports":{"443":{}}}` _, err = f.ClientSet.CoreV1().Services(ns).Update(&svc) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } detectNegAnnotation(f, jig, gceController, ns, name, 2) // Remove Ingress annotation. Expect 1 NEG - By("Disabling Ingress annotation, but keeping one standalone NEG") + ginkgo.By("Disabling Ingress annotation, but keeping one standalone NEG") svcList, err = f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) for _, svc := range svcList.Items { svc.Annotations[ingress.NEGAnnotation] = `{"ingress":false,"exposed_ports":{"443":{}}}` _, err = f.ClientSet.CoreV1().Services(ns).Update(&svc) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } detectNegAnnotation(f, jig, gceController, ns, name, 1) // Remove NEG annotation entirely. Expect 0 NEGs. - By("Removing NEG annotation") + ginkgo.By("Removing NEG annotation") svcList, err = f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{}) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) for _, svc := range svcList.Items { delete(svc.Annotations, ingress.NEGAnnotation) // Service cannot be ClusterIP if it's using Instance Groups. svc.Spec.Type = v1.ServiceTypeNodePort _, err = f.ClientSet.CoreV1().Services(ns).Update(&svc) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } detectNegAnnotation(f, jig, gceController, ns, name, 0) }) }) - Describe("GCE [Slow] [Feature:kubemci]", func() { + ginkgo.Describe("GCE [Slow] [Feature:kubemci]", func() { var gceController *gce.IngressController var ipName, ipAddress string // Platform specific setup - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("gce", "gke") jig.Class = ingress.MulticlusterIngressClassValue jig.PollInterval = 5 * time.Second - By("Initializing gce controller") + ginkgo.By("Initializing gce controller") gceController = &gce.IngressController{ Ns: ns, Client: jig.Client, Cloud: framework.TestContext.CloudConfig, } err := gceController.Init() - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // TODO(https://github.com/GoogleCloudPlatform/k8s-multicluster-ingress/issues/19): // Kubemci should reserve a static ip if user has not specified one. 
ipName = "kubemci-" + string(uuid.NewUUID()) // ip released when the rest of lb resources are deleted in CleanupIngressController ipAddress = gceController.CreateStaticIP(ipName) - By(fmt.Sprintf("allocated static ip %v: %v through the GCE cloud provider", ipName, ipAddress)) + ginkgo.By(fmt.Sprintf("allocated static ip %v: %v through the GCE cloud provider", ipName, ipAddress)) }) // Platform specific cleanup - AfterEach(func() { - if CurrentGinkgoTestDescription().Failed { + ginkgo.AfterEach(func() { + if ginkgo.CurrentGinkgoTestDescription().Failed { framework.DescribeIng(ns) } if jig.Ingress == nil { - By("No ingress created, no cleanup necessary") + ginkgo.By("No ingress created, no cleanup necessary") } else { - By("Deleting ingress") + ginkgo.By("Deleting ingress") jig.TryDeleteIngress() } - By("Cleaning up cloud resources") - Expect(gceController.CleanupIngressController()).NotTo(HaveOccurred()) + ginkgo.By("Cleaning up cloud resources") + gomega.Expect(gceController.CleanupIngressController()).NotTo(gomega.HaveOccurred()) }) - It("should conform to Ingress spec", func() { + ginkgo.It("should conform to Ingress spec", func() { conformanceTests = ingress.CreateIngressComformanceTests(jig, ns, map[string]string{ ingress.IngressStaticIPKey: ipName, }) for _, t := range conformanceTests { - By(t.EntryLog) + ginkgo.By(t.EntryLog) t.Execute() - By(t.ExitLog) + ginkgo.By(t.ExitLog) jig.WaitForIngress(false /*waitForNodePort*/) } }) - It("should create ingress with pre-shared certificate", func() { + ginkgo.It("should create ingress with pre-shared certificate", func() { executePresharedCertTest(f, jig, ipName) }) - It("should create ingress with backend HTTPS", func() { + ginkgo.It("should create ingress with backend HTTPS", func() { executeBacksideBacksideHTTPSTest(f, jig, ipName) }) - It("should support https-only annotation", func() { + ginkgo.It("should support https-only annotation", func() { executeStaticIPHttpsOnlyTest(f, jig, ipName, ipAddress) }) - It("should remove clusters as expected", func() { + ginkgo.It("should remove clusters as expected", func() { ingAnnotations := map[string]string{ ingress.IngressStaticIPKey: ipName, } @@ -668,8 +667,8 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { verifyKubemciStatusHas(name, "is spread across 0 cluster") }) - It("single and multi-cluster ingresses should be able to exist together", func() { - By("Creating a single cluster ingress first") + ginkgo.It("single and multi-cluster ingresses should be able to exist together", func() { + ginkgo.By("Creating a single cluster ingress first") jig.Class = "" singleIngFilePath := filepath.Join(ingress.GCEIngressManifestPath, "static-ip-2") jig.CreateIngress(singleIngFilePath, ns, map[string]string{}, map[string]string{}) @@ -678,7 +677,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { singleIng := jig.Ingress // Create the multi-cluster ingress next. 
- By("Creating a multi-cluster ingress next") + ginkgo.By("Creating a multi-cluster ingress next") jig.Class = ingress.MulticlusterIngressClassValue ingAnnotations := map[string]string{ ingress.IngressStaticIPKey: ipName, @@ -688,7 +687,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { jig.WaitForIngress(false /*waitForNodePort*/) mciIngress := jig.Ingress - By("Deleting the single cluster ingress and verifying that multi-cluster ingress continues to work") + ginkgo.By("Deleting the single cluster ingress and verifying that multi-cluster ingress continues to work") jig.Ingress = singleIng jig.Class = "" jig.TryDeleteIngress() @@ -696,18 +695,18 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { jig.Class = ingress.MulticlusterIngressClassValue jig.WaitForIngress(false /*waitForNodePort*/) - By("Cleanup: Deleting the multi-cluster ingress") + ginkgo.By("Cleanup: Deleting the multi-cluster ingress") jig.TryDeleteIngress() }) }) // Time: borderline 5m, slow by design - Describe("[Slow] Nginx", func() { + ginkgo.Describe("[Slow] Nginx", func() { var nginxController *ingress.NginxIngressController - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("gce", "gke") - By("Initializing nginx controller") + ginkgo.By("Initializing nginx controller") jig.Class = "nginx" nginxController = &ingress.NginxIngressController{Ns: ns, Client: jig.Client} @@ -723,30 +722,30 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { nginxController.Init() }) - AfterEach(func() { + ginkgo.AfterEach(func() { if framework.ProviderIs("gce", "gke") { framework.ExpectNoError(gce.GcloudComputeResourceDelete("firewall-rules", fmt.Sprintf("ingress-80-443-%v", ns), framework.TestContext.CloudConfig.ProjectID)) } - if CurrentGinkgoTestDescription().Failed { + if ginkgo.CurrentGinkgoTestDescription().Failed { framework.DescribeIng(ns) } if jig.Ingress == nil { - By("No ingress created, no cleanup necessary") + ginkgo.By("No ingress created, no cleanup necessary") return } - By("Deleting ingress") + ginkgo.By("Deleting ingress") jig.TryDeleteIngress() }) - It("should conform to Ingress spec", func() { + ginkgo.It("should conform to Ingress spec", func() { // Poll more frequently to reduce e2e completion time. // This test runs in presubmit. jig.PollInterval = 5 * time.Second conformanceTests = ingress.CreateIngressComformanceTests(jig, ns, map[string]string{}) for _, t := range conformanceTests { - By(t.EntryLog) + ginkgo.By(t.EntryLog) t.Execute() - By(t.ExitLog) + ginkgo.By(t.ExitLog) jig.WaitForIngress(false) } }) @@ -766,28 +765,28 @@ func verifyKubemciStatusHas(name, expectedSubStr string) { func executePresharedCertTest(f *framework.Framework, jig *ingress.TestJig, staticIPName string) { preSharedCertName := "test-pre-shared-cert" - By(fmt.Sprintf("Creating ssl certificate %q on GCE", preSharedCertName)) + ginkgo.By(fmt.Sprintf("Creating ssl certificate %q on GCE", preSharedCertName)) testHostname := "test.ingress.com" cert, key, err := ingress.GenerateRSACerts(testHostname, true) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) gceCloud, err := gce.GetGCECloud() - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer func() { // We would not be able to delete the cert until ingress controller // cleans up the target proxy that references it. 
- By("Deleting ingress before deleting ssl certificate") + ginkgo.By("Deleting ingress before deleting ssl certificate") if jig.Ingress != nil { jig.TryDeleteIngress() } - By(fmt.Sprintf("Deleting ssl certificate %q on GCE", preSharedCertName)) + ginkgo.By(fmt.Sprintf("Deleting ssl certificate %q on GCE", preSharedCertName)) err := wait.Poll(framework.LoadBalancerPollInterval, framework.LoadBalancerCleanupTimeout, func() (bool, error) { if err := gceCloud.DeleteSslCertificate(preSharedCertName); err != nil && !errors.IsNotFound(err) { - e2elog.Logf("Failed to delete ssl certificate %q: %v. Retrying...", preSharedCertName, err) + e2elog.Logf("ginkgo.Failed to delete ssl certificate %q: %v. Retrying...", preSharedCertName, err) return false, nil } return true, nil }) - Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to delete ssl certificate %q: %v", preSharedCertName, err)) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("ginkgo.Failed to delete ssl certificate %q: %v", preSharedCertName, err)) }() _, err = gceCloud.CreateSslCertificate(&compute.SslCertificate{ Name: preSharedCertName, @@ -795,9 +794,9 @@ func executePresharedCertTest(f *framework.Framework, jig *ingress.TestJig, stat PrivateKey: string(key), Description: "pre-shared cert for ingress testing", }) - Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create ssl certificate %q: %v", preSharedCertName, err)) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("ginkgo.Failed to create ssl certificate %q: %v", preSharedCertName, err)) - By("Creating an ingress referencing the pre-shared certificate") + ginkgo.By("Creating an ingress referencing the pre-shared certificate") // Create an ingress referencing this cert using pre-shared-cert annotation. ingAnnotations := map[string]string{ ingress.IngressPreSharedCertKey: preSharedCertName, @@ -810,9 +809,9 @@ func executePresharedCertTest(f *framework.Framework, jig *ingress.TestJig, stat } jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "pre-shared-cert"), f.Namespace.Name, ingAnnotations, map[string]string{}) - By("Test that ingress works with the pre-shared certificate") + ginkgo.By("Test that ingress works with the pre-shared certificate") err = jig.WaitForIngressWithCert(true, []string{testHostname}, cert) - Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Unexpected error while waiting for ingress: %v", err)) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Unexpected error while waiting for ingress: %v", err)) } func executeStaticIPHttpsOnlyTest(f *framework.Framework, jig *ingress.TestJig, ipName, ip string) { @@ -821,30 +820,30 @@ func executeStaticIPHttpsOnlyTest(f *framework.Framework, jig *ingress.TestJig, ingress.IngressAllowHTTPKey: "false", }, map[string]string{}) - By("waiting for Ingress to come up with ip: " + ip) + ginkgo.By("waiting for Ingress to come up with ip: " + ip) httpClient := ingress.BuildInsecureClient(ingress.IngressReqTimeout) framework.ExpectNoError(framework.PollURL(fmt.Sprintf("https://%s/", ip), "", framework.LoadBalancerPollTimeout, jig.PollInterval, httpClient, false)) - By("should reject HTTP traffic") + ginkgo.By("should reject HTTP traffic") framework.ExpectNoError(framework.PollURL(fmt.Sprintf("http://%s/", ip), "", framework.LoadBalancerPollTimeout, jig.PollInterval, httpClient, true)) } func executeBacksideBacksideHTTPSTest(f *framework.Framework, jig *ingress.TestJig, staticIPName string) { - By("Creating a set of ingress, service and deployment that have backside re-encryption 
configured") + ginkgo.By("Creating a set of ingress, service and deployment that have backside re-encryption configured") deployCreated, svcCreated, ingCreated, err := jig.SetUpBacksideHTTPSIngress(f.ClientSet, f.Namespace.Name, staticIPName) defer func() { - By("Cleaning up re-encryption ingress, service and deployment") + ginkgo.By("Cleaning up re-encryption ingress, service and deployment") if errs := jig.DeleteTestResource(f.ClientSet, deployCreated, svcCreated, ingCreated); len(errs) > 0 { - framework.Failf("Failed to cleanup re-encryption ingress: %v", errs) + framework.Failf("ginkgo.Failed to cleanup re-encryption ingress: %v", errs) } }() - Expect(err).NotTo(HaveOccurred(), "Failed to create re-encryption ingress") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "ginkgo.Failed to create re-encryption ingress") - By(fmt.Sprintf("Waiting for ingress %s to come up", ingCreated.Name)) + ginkgo.By(fmt.Sprintf("Waiting for ingress %s to come up", ingCreated.Name)) ingIP, err := jig.WaitForIngressAddress(f.ClientSet, f.Namespace.Name, ingCreated.Name, framework.LoadBalancerPollTimeout) - Expect(err).NotTo(HaveOccurred(), "Failed to wait for ingress IP") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "ginkgo.Failed to wait for ingress IP") - By(fmt.Sprintf("Polling on address %s and verify the backend is serving HTTPS", ingIP)) + ginkgo.By(fmt.Sprintf("Polling on address %s and verify the backend is serving HTTPS", ingIP)) timeoutClient := &http.Client{Timeout: ingress.IngressReqTimeout} err = wait.PollImmediate(framework.LoadBalancerPollInterval, framework.LoadBalancerPollTimeout, func() (bool, error) { resp, err := framework.SimpleGET(timeoutClient, fmt.Sprintf("http://%s", ingIP), "") @@ -858,7 +857,7 @@ func executeBacksideBacksideHTTPSTest(f *framework.Framework, jig *ingress.TestJ e2elog.Logf("Poll succeeded, request was served by HTTPS") return true, nil }) - Expect(err).NotTo(HaveOccurred(), "Failed to verify backside re-encryption ingress") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "ginkgo.Failed to verify backside re-encryption ingress") } func detectNegAnnotation(f *framework.Framework, jig *ingress.TestJig, gceController *gce.IngressController, ns, name string, negs int) { @@ -872,7 +871,7 @@ func detectNegAnnotation(f *framework.Framework, jig *ingress.TestJig, gceContro if negs == 0 { err := gceController.BackendServiceUsingIG(jig.GetServicePorts(false)) if err != nil { - e2elog.Logf("Failed to validate IG backend service: %v", err) + e2elog.Logf("ginkgo.Failed to validate IG backend service: %v", err) return false, nil } return true, nil @@ -898,10 +897,10 @@ func detectNegAnnotation(f *framework.Framework, jig *ingress.TestJig, gceContro } gceCloud, err := gce.GetGCECloud() - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) for _, neg := range status.NetworkEndpointGroups { networkEndpoints, err := gceCloud.ListNetworkEndpoints(neg, gceController.Cloud.Zone, false) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) if len(networkEndpoints) != 1 { e2elog.Logf("Expect NEG %s to exist, but got %d", neg, len(networkEndpoints)) return false, nil @@ -910,11 +909,11 @@ func detectNegAnnotation(f *framework.Framework, jig *ingress.TestJig, gceContro err = gceController.BackendServiceUsingNEG(jig.GetServicePorts(false)) if err != nil { - e2elog.Logf("Failed to validate NEG backend service: %v", err) + e2elog.Logf("ginkgo.Failed to validate NEG backend service: %v", err) return false, nil } return 
true, nil }); err != nil { - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) } } diff --git a/test/e2e/network/ingress_scale.go b/test/e2e/network/ingress_scale.go index dc9d9df107e..bda4be54b62 100644 --- a/test/e2e/network/ingress_scale.go +++ b/test/e2e/network/ingress_scale.go @@ -20,26 +20,26 @@ import ( "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/network/scale" - . "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo" ) var _ = SIGDescribe("Loadbalancing: L7 Scalability", func() { - defer GinkgoRecover() + defer ginkgo.GinkgoRecover() var ( ns string ) f := framework.NewDefaultFramework("ingress-scale") - BeforeEach(func() { + ginkgo.BeforeEach(func() { ns = f.Namespace.Name }) - Describe("GCE [Slow] [Serial] [Feature:IngressScale]", func() { + ginkgo.Describe("GCE [Slow] [Serial] [Feature:IngressScale]", func() { var ( scaleFramework *scale.IngressScaleFramework ) - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("gce", "gke") scaleFramework = scale.NewIngressScaleFramework(f.ClientSet, ns, framework.TestContext.CloudConfig) @@ -48,13 +48,13 @@ var _ = SIGDescribe("Loadbalancing: L7 Scalability", func() { } }) - AfterEach(func() { + ginkgo.AfterEach(func() { if errs := scaleFramework.CleanupScaleTest(); len(errs) != 0 { framework.Failf("Unexpected error while cleaning up ingress scale test: %v", errs) } }) - It("Creating and updating ingresses should happen promptly with small/medium/large amount of ingresses", func() { + ginkgo.It("Creating and updating ingresses should happen promptly with small/medium/large amount of ingresses", func() { if errs := scaleFramework.RunScaleTest(); len(errs) != 0 { framework.Failf("Unexpected error while running ingress scale test: %v", errs) } diff --git a/test/e2e/network/kube_proxy.go b/test/e2e/network/kube_proxy.go index f08c736fe38..adea7307ad8 100644 --- a/test/e2e/network/kube_proxy.go +++ b/test/e2e/network/kube_proxy.go @@ -33,8 +33,8 @@ import ( "k8s.io/kubernetes/test/images/net/nat" imageutils "k8s.io/kubernetes/test/utils/image" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" ) var kubeProxyE2eImage = imageutils.GetE2EImage(imageutils.Net) @@ -49,7 +49,7 @@ var _ = SIGDescribe("Network", func() { fr := framework.NewDefaultFramework("network") - It("should set TCP CLOSE_WAIT timeout", func() { + ginkgo.It("should set TCP CLOSE_WAIT timeout", func() { nodes := framework.GetReadySchedulableNodesOrDie(fr.ClientSet) ips := framework.CollectAddresses(nodes, v1.NodeInternalIP) @@ -145,21 +145,21 @@ var _ = SIGDescribe("Network", func() { }, } - By(fmt.Sprintf( + ginkgo.By(fmt.Sprintf( "Launching a server daemon on node %v (node ip: %v, image: %v)", serverNodeInfo.name, serverNodeInfo.nodeIP, kubeProxyE2eImage)) fr.PodClient().CreateSync(serverPodSpec) - By(fmt.Sprintf( + ginkgo.By(fmt.Sprintf( "Launching a client daemon on node %v (node ip: %v, image: %v)", clientNodeInfo.name, clientNodeInfo.nodeIP, kubeProxyE2eImage)) fr.PodClient().CreateSync(clientPodSpec) - By("Make client connect") + ginkgo.By("Make client connect") options := nat.CloseWaitClientOptions{ RemoteAddr: fmt.Sprintf("%v:%v", @@ -179,7 +179,7 @@ var _ = SIGDescribe("Network", func() { <-time.After(time.Duration(1) * time.Second) - By("Checking /proc/net/nf_conntrack for the timeout") + ginkgo.By("Checking /proc/net/nf_conntrack for the timeout") // If test flakes occur here, then this check should be performed // in a loop as there may be a race with the client connecting. e2essh.IssueSSHCommandWithResult( @@ -214,8 +214,8 @@ var _ = SIGDescribe("Network", func() { e2elog.Logf("conntrack entry timeout was: %v, expected: %v", timeoutSeconds, expectedTimeoutSeconds) - Expect(math.Abs(float64(timeoutSeconds - expectedTimeoutSeconds))).Should( - BeNumerically("<", (epsilonSeconds))) + gomega.Expect(math.Abs(float64(timeoutSeconds - expectedTimeoutSeconds))).Should( + gomega.BeNumerically("<", (epsilonSeconds))) }) // Regression test for #74839, where: @@ -223,7 +223,7 @@ var _ = SIGDescribe("Network", func() { // a problem where spurious retransmits in a long-running TCP connection to a service // IP could result in the connection being closed with the error "Connection reset by // peer" - It("should resolve connrection reset issue #74839 [Slow]", func() { + ginkgo.It("should resolve connrection reset issue #74839 [Slow]", func() { serverLabel := map[string]string{ "app": "boom-server", } @@ -265,7 +265,7 @@ var _ = SIGDescribe("Network", func() { _, err := fr.ClientSet.CoreV1().Pods(fr.Namespace.Name).Create(serverPod) framework.ExpectNoError(err) - By("Server pod created") + ginkgo.By("Server pod created") svc := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ @@ -284,7 +284,7 @@ var _ = SIGDescribe("Network", func() { _, err = fr.ClientSet.CoreV1().Services(fr.Namespace.Name).Create(svc) framework.ExpectNoError(err) - By("Server service created") + ginkgo.By("Server service created") pod := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -319,13 +319,13 @@ var _ = SIGDescribe("Network", func() { _, err = fr.ClientSet.CoreV1().Pods(fr.Namespace.Name).Create(pod) framework.ExpectNoError(err) - By("Client pod created") + ginkgo.By("Client pod created") for i := 0; i < 20; i++ { time.Sleep(3 * time.Second) resultPod, err := fr.ClientSet.CoreV1().Pods(fr.Namespace.Name).Get(serverPod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) - Expect(resultPod.Status.ContainerStatuses[0].LastTerminationState.Terminated).Should(BeNil()) + 
gomega.Expect(resultPod.Status.ContainerStatuses[0].LastTerminationState.Terminated).Should(gomega.BeNil()) } }) }) diff --git a/test/e2e/network/network_policy.go b/test/e2e/network/network_policy.go index 2586c0c7011..6db4cd155a0 100644 --- a/test/e2e/network/network_policy.go +++ b/test/e2e/network/network_policy.go @@ -27,8 +27,8 @@ import ( "fmt" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" ) /* @@ -45,27 +45,27 @@ var _ = SIGDescribe("NetworkPolicy", func() { var podServer *v1.Pod f := framework.NewDefaultFramework("network-policy") - Context("NetworkPolicy between server and client", func() { - BeforeEach(func() { - By("Creating a simple server that serves on port 80 and 81.") + ginkgo.Context("NetworkPolicy between server and client", func() { + ginkgo.BeforeEach(func() { + ginkgo.By("Creating a simple server that serves on port 80 and 81.") podServer, service = createServerPodAndService(f, f.Namespace, "server", []int{80, 81}) - By("Waiting for pod ready", func() { + ginkgo.By("Waiting for pod ready", func() { err := f.WaitForPodReady(podServer.Name) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) // Create pods, which should be able to communicate with the server on port 80 and 81. - By("Testing pods can connect to both ports when no policy is present.") + ginkgo.By("Testing pods can connect to both ports when no policy is present.") testCanConnect(f, f.Namespace, "client-can-connect-80", service, 80) testCanConnect(f, f.Namespace, "client-can-connect-81", service, 81) }) - AfterEach(func() { + ginkgo.AfterEach(func() { cleanupServerPodAndService(f, podServer, service) }) - It("should support a 'default-deny' policy [Feature:NetworkPolicy]", func() { + ginkgo.It("should support a 'default-deny' policy [Feature:NetworkPolicy]", func() { policy := &networkingv1.NetworkPolicy{ ObjectMeta: metav1.ObjectMeta{ Name: "deny-all", @@ -77,7 +77,7 @@ var _ = SIGDescribe("NetworkPolicy", func() { } policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer cleanupNetworkPolicy(f, policy) // Create a pod with name 'client-cannot-connect', which will attempt to communicate with the server, @@ -85,8 +85,8 @@ var _ = SIGDescribe("NetworkPolicy", func() { testCannotConnect(f, f.Namespace, "client-cannot-connect", service, 80) }) - It("should enforce policy based on PodSelector [Feature:NetworkPolicy]", func() { - By("Creating a network policy for the server which allows traffic from the pod 'client-a'.") + ginkgo.It("should enforce policy based on PodSelector [Feature:NetworkPolicy]", func() { + ginkgo.By("Creating a network policy for the server which allows traffic from the pod 'client-a'.") policy := &networkingv1.NetworkPolicy{ ObjectMeta: metav1.ObjectMeta{ Name: "allow-client-a-via-pod-selector", @@ -112,18 +112,18 @@ var _ = SIGDescribe("NetworkPolicy", func() { } policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer cleanupNetworkPolicy(f, policy) - By("Creating client-a which should be able to contact the server.", func() { + ginkgo.By("Creating client-a which should be able to contact the server.", func() { testCanConnect(f, f.Namespace, "client-a", service, 80) }) - By("Creating client-b which should not be able to contact the 
server.", func() { + ginkgo.By("Creating client-b which should not be able to contact the server.", func() { testCannotConnect(f, f.Namespace, "client-b", service, 80) }) }) - It("should enforce policy based on NamespaceSelector [Feature:NetworkPolicy]", func() { + ginkgo.It("should enforce policy based on NamespaceSelector [Feature:NetworkPolicy]", func() { nsA := f.Namespace nsBName := f.BaseName + "-b" // The CreateNamespace helper uses the input name as a Name Generator, so the namespace itself @@ -132,15 +132,15 @@ var _ = SIGDescribe("NetworkPolicy", func() { nsB, err := f.CreateNamespace(nsBName, map[string]string{ "ns-name": nsBName, }) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Create Server with Service in NS-B e2elog.Logf("Waiting for server to come up.") err = framework.WaitForPodRunningInNamespace(f.ClientSet, podServer) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Create Policy for that service that allows traffic only via namespace B - By("Creating a network policy for the server which allows traffic from namespace-b.") + ginkgo.By("Creating a network policy for the server which allows traffic from namespace-b.") policy := &networkingv1.NetworkPolicy{ ObjectMeta: metav1.ObjectMeta{ Name: "allow-ns-b-via-namespace-selector", @@ -165,15 +165,15 @@ var _ = SIGDescribe("NetworkPolicy", func() { }, } policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(nsA.Name).Create(policy) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer cleanupNetworkPolicy(f, policy) testCannotConnect(f, nsA, "client-a", service, 80) testCanConnect(f, nsB, "client-b", service, 80) }) - It("should enforce policy based on Ports [Feature:NetworkPolicy]", func() { - By("Creating a network policy for the Service which allows traffic only to one port.") + ginkgo.It("should enforce policy based on Ports [Feature:NetworkPolicy]", func() { + ginkgo.By("Creating a network policy for the Service which allows traffic only to one port.") policy := &networkingv1.NetworkPolicy{ ObjectMeta: metav1.ObjectMeta{ Name: "allow-ingress-on-port-81", @@ -194,16 +194,16 @@ var _ = SIGDescribe("NetworkPolicy", func() { }, } policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer cleanupNetworkPolicy(f, policy) - By("Testing pods can connect only to the port allowed by the policy.") + ginkgo.By("Testing pods can connect only to the port allowed by the policy.") testCannotConnect(f, f.Namespace, "client-a", service, 80) testCanConnect(f, f.Namespace, "client-b", service, 81) }) - It("should enforce multiple, stacked policies with overlapping podSelectors [Feature:NetworkPolicy]", func() { - By("Creating a network policy for the Service which allows traffic only to one port.") + ginkgo.It("should enforce multiple, stacked policies with overlapping podSelectors [Feature:NetworkPolicy]", func() { + ginkgo.By("Creating a network policy for the Service which allows traffic only to one port.") policy := &networkingv1.NetworkPolicy{ ObjectMeta: metav1.ObjectMeta{ Name: "allow-ingress-on-port-80", @@ -224,10 +224,10 @@ var _ = SIGDescribe("NetworkPolicy", func() { }, } policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer cleanupNetworkPolicy(f, policy) - 
By("Creating a network policy for the Service which allows traffic only to another port.") + ginkgo.By("Creating a network policy for the Service which allows traffic only to another port.") policy2 := &networkingv1.NetworkPolicy{ ObjectMeta: metav1.ObjectMeta{ Name: "allow-ingress-on-port-81", @@ -248,16 +248,16 @@ var _ = SIGDescribe("NetworkPolicy", func() { }, } policy2, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy2) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer cleanupNetworkPolicy(f, policy2) - By("Testing pods can connect to both ports when both policies are present.") + ginkgo.By("Testing pods can connect to both ports when both policies are present.") testCanConnect(f, f.Namespace, "client-a", service, 80) testCanConnect(f, f.Namespace, "client-b", service, 81) }) - It("should support allow-all policy [Feature:NetworkPolicy]", func() { - By("Creating a network policy which allows all traffic.") + ginkgo.It("should support allow-all policy [Feature:NetworkPolicy]", func() { + ginkgo.By("Creating a network policy which allows all traffic.") policy := &networkingv1.NetworkPolicy{ ObjectMeta: metav1.ObjectMeta{ Name: "allow-all", @@ -271,15 +271,15 @@ var _ = SIGDescribe("NetworkPolicy", func() { }, } policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer cleanupNetworkPolicy(f, policy) - By("Testing pods can connect to both ports when an 'allow-all' policy is present.") + ginkgo.By("Testing pods can connect to both ports when an 'allow-all' policy is present.") testCanConnect(f, f.Namespace, "client-a", service, 80) testCanConnect(f, f.Namespace, "client-b", service, 81) }) - It("should allow ingress access on one named port [Feature:NetworkPolicy]", func() { + ginkgo.It("should allow ingress access on one named port [Feature:NetworkPolicy]", func() { policy := &networkingv1.NetworkPolicy{ ObjectMeta: metav1.ObjectMeta{ Name: "allow-client-a-via-named-port-ingress-rule", @@ -301,18 +301,18 @@ var _ = SIGDescribe("NetworkPolicy", func() { } policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer cleanupNetworkPolicy(f, policy) - By("Creating client-a which should be able to contact the server.", func() { + ginkgo.By("Creating client-a which should be able to contact the server.", func() { testCanConnect(f, f.Namespace, "client-a", service, 80) }) - By("Creating client-b which should not be able to contact the server on port 81.", func() { + ginkgo.By("Creating client-b which should not be able to contact the server on port 81.", func() { testCannotConnect(f, f.Namespace, "client-b", service, 81) }) }) - It("should allow egress access on one named port [Feature:NetworkPolicy]", func() { + ginkgo.It("should allow egress access on one named port [Feature:NetworkPolicy]", func() { clientPodName := "client-a" protocolUDP := v1.ProtocolUDP policy := &networkingv1.NetworkPolicy{ @@ -343,13 +343,13 @@ var _ = SIGDescribe("NetworkPolicy", func() { } policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) defer cleanupNetworkPolicy(f, policy) - By("Creating client-a which should be able to contact the server.", func() { + ginkgo.By("Creating 
client-a which should be able to contact the server.", func() { testCanConnect(f, f.Namespace, clientPodName, service, 80) }) - By("Creating client-a which should not be able to contact the server on port 81.", func() { + ginkgo.By("Creating client-a which should not be able to contact the server on port 81.", func() { testCannotConnect(f, f.Namespace, clientPodName, service, 81) }) }) @@ -357,10 +357,10 @@ var _ = SIGDescribe("NetworkPolicy", func() { }) func testCanConnect(f *framework.Framework, ns *v1.Namespace, podName string, service *v1.Service, targetPort int) { - By(fmt.Sprintf("Creating client pod %s that should successfully connect to %s.", podName, service.Name)) + ginkgo.By(fmt.Sprintf("Creating client pod %s that should successfully connect to %s.", podName, service.Name)) podClient := createNetworkClientPod(f, ns, podName, service, targetPort) defer func() { - By(fmt.Sprintf("Cleaning up the pod %s", podName)) + ginkgo.By(fmt.Sprintf("Cleaning up the pod %s", podName)) if err := f.ClientSet.CoreV1().Pods(ns.Name).Delete(podClient.Name, nil); err != nil { framework.Failf("unable to cleanup pod %v: %v", podClient.Name, err) } @@ -368,7 +368,7 @@ func testCanConnect(f *framework.Framework, ns *v1.Namespace, podName string, se e2elog.Logf("Waiting for %s to complete.", podClient.Name) err := framework.WaitForPodNoLongerRunningInNamespace(f.ClientSet, podClient.Name, ns.Name) - Expect(err).NotTo(HaveOccurred(), "Pod did not finish as expected.") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Pod did not finish as expected.") e2elog.Logf("Waiting for %s to complete.", podClient.Name) err = framework.WaitForPodSuccessInNamespace(f.ClientSet, podClient.Name, ns.Name) @@ -404,10 +404,10 @@ func testCanConnect(f *framework.Framework, ns *v1.Namespace, podName string, se } func testCannotConnect(f *framework.Framework, ns *v1.Namespace, podName string, service *v1.Service, targetPort int) { - By(fmt.Sprintf("Creating client pod %s that should not be able to connect to %s.", podName, service.Name)) + ginkgo.By(fmt.Sprintf("Creating client pod %s that should not be able to connect to %s.", podName, service.Name)) podClient := createNetworkClientPod(f, ns, podName, service, targetPort) defer func() { - By(fmt.Sprintf("Cleaning up the pod %s", podName)) + ginkgo.By(fmt.Sprintf("Cleaning up the pod %s", podName)) if err := f.ClientSet.CoreV1().Pods(ns.Name).Delete(podClient.Name, nil); err != nil { framework.Failf("unable to cleanup pod %v: %v", podClient.Name, err) } @@ -495,7 +495,7 @@ func createServerPodAndService(f *framework.Framework, namespace *v1.Namespace, }) } - By(fmt.Sprintf("Creating a server pod %s in namespace %s", podName, namespace.Name)) + ginkgo.By(fmt.Sprintf("Creating a server pod %s in namespace %s", podName, namespace.Name)) pod, err := f.ClientSet.CoreV1().Pods(namespace.Name).Create(&v1.Pod{ ObjectMeta: metav1.ObjectMeta{ Name: podName, @@ -508,11 +508,11 @@ func createServerPodAndService(f *framework.Framework, namespace *v1.Namespace, RestartPolicy: v1.RestartPolicyNever, }, }) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) e2elog.Logf("Created pod %v", pod.ObjectMeta.Name) svcName := fmt.Sprintf("svc-%s", podName) - By(fmt.Sprintf("Creating a service %s for pod %s in namespace %s", svcName, podName, namespace.Name)) + ginkgo.By(fmt.Sprintf("Creating a service %s for pod %s in namespace %s", svcName, podName, namespace.Name)) svc, err := f.ClientSet.CoreV1().Services(namespace.Name).Create(&v1.Service{ ObjectMeta: 
metav1.ObjectMeta{ Name: svcName, @@ -524,18 +524,18 @@ func createServerPodAndService(f *framework.Framework, namespace *v1.Namespace, }, }, }) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) e2elog.Logf("Created service %s", svc.Name) return pod, svc } func cleanupServerPodAndService(f *framework.Framework, pod *v1.Pod, service *v1.Service) { - By("Cleaning up the server.") + ginkgo.By("Cleaning up the server.") if err := f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(pod.Name, nil); err != nil { framework.Failf("unable to cleanup pod %v: %v", pod.Name, err) } - By("Cleaning up the server's service.") + ginkgo.By("Cleaning up the server's service.") if err := f.ClientSet.CoreV1().Services(service.Namespace).Delete(service.Name, nil); err != nil { framework.Failf("unable to cleanup svc %v: %v", service.Name, err) } @@ -569,13 +569,13 @@ func createNetworkClientPod(f *framework.Framework, namespace *v1.Namespace, pod }, }) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) return pod } func cleanupNetworkPolicy(f *framework.Framework, policy *networkingv1.NetworkPolicy) { - By("Cleaning up the policy.") + ginkgo.By("Cleaning up the policy.") if err := f.ClientSet.NetworkingV1().NetworkPolicies(policy.Namespace).Delete(policy.Name, nil); err != nil { framework.Failf("unable to cleanup policy %v: %v", policy.Name, err) } diff --git a/test/e2e/network/network_tiers.go b/test/e2e/network/network_tiers.go index c5c7b09bde3..ef8130267f9 100644 --- a/test/e2e/network/network_tiers.go +++ b/test/e2e/network/network_tiers.go @@ -33,8 +33,8 @@ import ( "k8s.io/kubernetes/test/e2e/framework/providers/gce" gcecloud "k8s.io/legacy-cloud-providers/gce" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" ) var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() { @@ -43,14 +43,14 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() { var cs clientset.Interface serviceLBNames := []string{} - BeforeEach(func() { + ginkgo.BeforeEach(func() { // This test suite requires the GCE environment. framework.SkipUnlessProviderIs("gce") cs = f.ClientSet }) - AfterEach(func() { - if CurrentGinkgoTestDescription().Failed { + ginkgo.AfterEach(func() { + if ginkgo.CurrentGinkgoTestDescription().Failed { framework.DescribeSvc(f.Namespace.Name) } for _, lb := range serviceLBNames { @@ -60,7 +60,7 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() { //reset serviceLBNames serviceLBNames = []string{} }) - It("should be able to create and tear down a standard-tier load balancer [Slow]", func() { + ginkgo.It("should be able to create and tear down a standard-tier load balancer [Slow]", func() { lagTimeout := framework.LoadBalancerLagTimeoutDefault createTimeout := framework.GetServiceLoadBalancerCreationTimeout(cs) @@ -68,19 +68,19 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() { ns := f.Namespace.Name jig := framework.NewServiceTestJig(cs, svcName) - By("creating a pod to be part of the service " + svcName) + ginkgo.By("creating a pod to be part of the service " + svcName) jig.RunOrFail(ns, nil) // Test 1: create a standard tiered LB for the Service. 
- By("creating a Service of type LoadBalancer using the standard network tier") + ginkgo.By("creating a Service of type LoadBalancer using the standard network tier") svc := jig.CreateTCPServiceOrFail(ns, func(svc *v1.Service) { svc.Spec.Type = v1.ServiceTypeLoadBalancer setNetworkTier(svc, string(gcecloud.NetworkTierAnnotationStandard)) }) // Verify that service has been updated properly. svcTier, err := gcecloud.GetServiceNetworkTier(svc) - Expect(err).NotTo(HaveOccurred()) - Expect(svcTier).To(Equal(cloud.NetworkTierStandard)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(svcTier).To(gomega.Equal(cloud.NetworkTierStandard)) // Record the LB name for test cleanup. serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc)) @@ -88,26 +88,26 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() { ingressIP := waitAndVerifyLBWithTier(jig, ns, svcName, "", createTimeout, lagTimeout) // Test 2: re-create a LB of a different tier for the updated Service. - By("updating the Service to use the premium (default) tier") + ginkgo.By("updating the Service to use the premium (default) tier") svc = jig.UpdateServiceOrFail(ns, svcName, func(svc *v1.Service) { clearNetworkTier(svc) }) // Verify that service has been updated properly. svcTier, err = gcecloud.GetServiceNetworkTier(svc) - Expect(err).NotTo(HaveOccurred()) - Expect(svcTier).To(Equal(cloud.NetworkTierDefault)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(svcTier).To(gomega.Equal(cloud.NetworkTierDefault)) // Wait until the ingress IP changes. Each tier has its own pool of // IPs, so changing tiers implies changing IPs. ingressIP = waitAndVerifyLBWithTier(jig, ns, svcName, ingressIP, createTimeout, lagTimeout) // Test 3: create a standard-tierd LB with a user-requested IP. - By("reserving a static IP for the load balancer") + ginkgo.By("reserving a static IP for the load balancer") requestedAddrName := fmt.Sprintf("e2e-ext-lb-net-tier-%s", framework.RunID) gceCloud, err := gce.GetGCECloud() - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) requestedIP, err := reserveAlphaRegionalAddress(gceCloud, requestedAddrName, cloud.NetworkTierStandard) - Expect(err).NotTo(HaveOccurred(), "failed to reserve a STANDARD tiered address") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to reserve a STANDARD tiered address") defer func() { if requestedAddrName != "" { // Release GCE static address - this is not kube-managed and will not be automatically released. @@ -116,19 +116,19 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() { } } }() - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) e2elog.Logf("Allocated static IP to be used by the load balancer: %q", requestedIP) - By("updating the Service to use the standard tier with a requested IP") + ginkgo.By("updating the Service to use the standard tier with a requested IP") svc = jig.UpdateServiceOrFail(ns, svc.Name, func(svc *v1.Service) { svc.Spec.LoadBalancerIP = requestedIP setNetworkTier(svc, string(gcecloud.NetworkTierAnnotationStandard)) }) // Verify that service has been updated properly. 
- Expect(svc.Spec.LoadBalancerIP).To(Equal(requestedIP)) + gomega.Expect(svc.Spec.LoadBalancerIP).To(gomega.Equal(requestedIP)) svcTier, err = gcecloud.GetServiceNetworkTier(svc) - Expect(err).NotTo(HaveOccurred()) - Expect(svcTier).To(Equal(cloud.NetworkTierStandard)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(svcTier).To(gomega.Equal(cloud.NetworkTierStandard)) // Wait until the ingress IP changes and verifies the LB. ingressIP = waitAndVerifyLBWithTier(jig, ns, svcName, ingressIP, createTimeout, lagTimeout) @@ -150,10 +150,10 @@ func waitAndVerifyLBWithTier(jig *framework.ServiceTestJig, ns, svcName, existin lbIngress := &svc.Status.LoadBalancer.Ingress[0] ingressIP := framework.GetIngressPoint(lbIngress) - By("running sanity and reachability checks") + ginkgo.By("running sanity and reachability checks") if svc.Spec.LoadBalancerIP != "" { // Verify that the new ingress IP is the requested IP if it's set. - Expect(ingressIP).To(Equal(svc.Spec.LoadBalancerIP)) + gomega.Expect(ingressIP).To(gomega.Equal(svc.Spec.LoadBalancerIP)) } jig.SanityCheckService(svc, v1.ServiceTypeLoadBalancer) // If the IP has been used by previous test, sometimes we get the lingering @@ -163,10 +163,10 @@ func waitAndVerifyLBWithTier(jig *framework.ServiceTestJig, ns, svcName, existin // Verify the network tier matches the desired. svcNetTier, err := gcecloud.GetServiceNetworkTier(svc) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) netTier, err := getLBNetworkTierByIP(ingressIP) - Expect(err).NotTo(HaveOccurred(), "failed to get the network tier of the load balancer") - Expect(netTier).To(Equal(svcNetTier)) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get the network tier of the load balancer") + gomega.Expect(netTier).To(gomega.Equal(svcNetTier)) return ingressIP } diff --git a/test/e2e/network/networking.go b/test/e2e/network/networking.go index 281211a4f5e..0821e0bd1b5 100644 --- a/test/e2e/network/networking.go +++ b/test/e2e/network/networking.go @@ -24,18 +24,18 @@ import ( "k8s.io/kubernetes/pkg/master/ports" "k8s.io/kubernetes/test/e2e/framework" - . "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo" ) var _ = SIGDescribe("Networking", func() { var svcname = "nettest" f := framework.NewDefaultFramework(svcname) - BeforeEach(func() { + ginkgo.BeforeEach(func() { // Assert basic external connectivity. // Since this is not really a test of kubernetes in any way, we // leave it as a pre-test assertion, rather than a Ginko test. 
- By("Executing a successful http request from the external internet") + ginkgo.By("Executing a successful http request from the external internet") resp, err := http.Get("http://google.com") if err != nil { framework.Failf("Unable to connect/talk to the internet: %v", err) @@ -45,20 +45,20 @@ var _ = SIGDescribe("Networking", func() { } }) - It("should provide Internet connection for containers [Feature:Networking-IPv4]", func() { - By("Running container which tries to ping 8.8.8.8") + ginkgo.It("should provide Internet connection for containers [Feature:Networking-IPv4]", func() { + ginkgo.By("Running container which tries to ping 8.8.8.8") framework.ExpectNoError( framework.CheckConnectivityToHost(f, "", "ping-test", "8.8.8.8", framework.IPv4PingCommand, 30)) }) - It("should provide Internet connection for containers [Feature:Networking-IPv6][Experimental]", func() { - By("Running container which tries to ping 2001:4860:4860::8888") + ginkgo.It("should provide Internet connection for containers [Feature:Networking-IPv6][Experimental]", func() { + ginkgo.By("Running container which tries to ping 2001:4860:4860::8888") framework.ExpectNoError( framework.CheckConnectivityToHost(f, "", "ping-test", "2001:4860:4860::8888", framework.IPv6PingCommand, 30)) }) // First test because it has no dependencies on variables created later on. - It("should provide unchanging, static URL paths for kubernetes api services", func() { + ginkgo.It("should provide unchanging, static URL paths for kubernetes api services", func() { tests := []struct { path string }{ @@ -74,22 +74,22 @@ var _ = SIGDescribe("Networking", func() { tests = append(tests, struct{ path string }{path: "/logs"}) } for _, test := range tests { - By(fmt.Sprintf("testing: %s", test.path)) + ginkgo.By(fmt.Sprintf("testing: %s", test.path)) data, err := f.ClientSet.CoreV1().RESTClient().Get(). AbsPath(test.path). DoRaw() if err != nil { - framework.Failf("Failed: %v\nBody: %s", err, string(data)) + framework.Failf("ginkgo.Failed: %v\nBody: %s", err, string(data)) } } }) - It("should check kube-proxy urls", func() { + ginkgo.It("should check kube-proxy urls", func() { // TODO: this is overkill we just need the host networking pod // to hit kube-proxy urls. config := framework.NewNetworkingTestConfig(f) - By("checking kube-proxy URLs") + ginkgo.By("checking kube-proxy URLs") config.GetSelfURL(ports.ProxyHealthzPort, "/healthz", "200 OK") // Verify /healthz returns the proper content. config.GetSelfURL(ports.ProxyHealthzPort, "/healthz", "lastUpdated") @@ -98,116 +98,116 @@ var _ = SIGDescribe("Networking", func() { }) // TODO: Remove [Slow] when this has had enough bake time to prove presubmit worthiness. 
- Describe("Granular Checks: Services [Slow]", func() { + ginkgo.Describe("Granular Checks: Services [Slow]", func() { - It("should function for pod-Service: http", func() { + ginkgo.It("should function for pod-Service: http", func() { config := framework.NewNetworkingTestConfig(f) - By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterHTTPPort)) + ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterHTTPPort)) config.DialFromTestContainer("http", config.ClusterIP, framework.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) - By(fmt.Sprintf("dialing(http) %v --> %v:%v (nodeIP)", config.TestContainerPod.Name, config.NodeIP, config.NodeHTTPPort)) + ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (nodeIP)", config.TestContainerPod.Name, config.NodeIP, config.NodeHTTPPort)) config.DialFromTestContainer("http", config.NodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) }) - It("should function for pod-Service: udp", func() { + ginkgo.It("should function for pod-Service: udp", func() { config := framework.NewNetworkingTestConfig(f) - By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterUDPPort)) + ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterUDPPort)) config.DialFromTestContainer("udp", config.ClusterIP, framework.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames()) - By(fmt.Sprintf("dialing(udp) %v --> %v:%v (nodeIP)", config.TestContainerPod.Name, config.NodeIP, config.NodeUDPPort)) + ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (nodeIP)", config.TestContainerPod.Name, config.NodeIP, config.NodeUDPPort)) config.DialFromTestContainer("udp", config.NodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames()) }) - It("should function for node-Service: http", func() { + ginkgo.It("should function for node-Service: http", func() { config := framework.NewNetworkingTestConfig(f) - By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (config.clusterIP)", config.NodeIP, config.ClusterIP, framework.ClusterHTTPPort)) + ginkgo.By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (config.clusterIP)", config.NodeIP, config.ClusterIP, framework.ClusterHTTPPort)) config.DialFromNode("http", config.ClusterIP, framework.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) - By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeHTTPPort)) + ginkgo.By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeHTTPPort)) config.DialFromNode("http", config.NodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) }) - It("should function for node-Service: udp", func() { + ginkgo.It("should function for node-Service: udp", func() { config := framework.NewNetworkingTestConfig(f) - By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (config.clusterIP)", config.NodeIP, config.ClusterIP, framework.ClusterUDPPort)) + ginkgo.By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (config.clusterIP)", config.NodeIP, config.ClusterIP, framework.ClusterUDPPort)) config.DialFromNode("udp", config.ClusterIP, framework.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames()) - By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v 
(nodeIP)", config.NodeIP, config.NodeIP, config.NodeUDPPort)) + ginkgo.By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeUDPPort)) config.DialFromNode("udp", config.NodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames()) }) - It("should function for endpoint-Service: http", func() { + ginkgo.It("should function for endpoint-Service: http", func() { config := framework.NewNetworkingTestConfig(f) - By(fmt.Sprintf("dialing(http) %v (endpoint) --> %v:%v (config.clusterIP)", config.EndpointPods[0].Name, config.ClusterIP, framework.ClusterHTTPPort)) + ginkgo.By(fmt.Sprintf("dialing(http) %v (endpoint) --> %v:%v (config.clusterIP)", config.EndpointPods[0].Name, config.ClusterIP, framework.ClusterHTTPPort)) config.DialFromEndpointContainer("http", config.ClusterIP, framework.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) - By(fmt.Sprintf("dialing(http) %v (endpoint) --> %v:%v (nodeIP)", config.EndpointPods[0].Name, config.NodeIP, config.NodeHTTPPort)) + ginkgo.By(fmt.Sprintf("dialing(http) %v (endpoint) --> %v:%v (nodeIP)", config.EndpointPods[0].Name, config.NodeIP, config.NodeHTTPPort)) config.DialFromEndpointContainer("http", config.NodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) }) - It("should function for endpoint-Service: udp", func() { + ginkgo.It("should function for endpoint-Service: udp", func() { config := framework.NewNetworkingTestConfig(f) - By(fmt.Sprintf("dialing(udp) %v (endpoint) --> %v:%v (config.clusterIP)", config.EndpointPods[0].Name, config.ClusterIP, framework.ClusterUDPPort)) + ginkgo.By(fmt.Sprintf("dialing(udp) %v (endpoint) --> %v:%v (config.clusterIP)", config.EndpointPods[0].Name, config.ClusterIP, framework.ClusterUDPPort)) config.DialFromEndpointContainer("udp", config.ClusterIP, framework.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames()) - By(fmt.Sprintf("dialing(udp) %v (endpoint) --> %v:%v (nodeIP)", config.EndpointPods[0].Name, config.NodeIP, config.NodeUDPPort)) + ginkgo.By(fmt.Sprintf("dialing(udp) %v (endpoint) --> %v:%v (nodeIP)", config.EndpointPods[0].Name, config.NodeIP, config.NodeUDPPort)) config.DialFromEndpointContainer("udp", config.NodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames()) }) - It("should update endpoints: http", func() { + ginkgo.It("should update endpoints: http", func() { config := framework.NewNetworkingTestConfig(f) - By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterHTTPPort)) + ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterHTTPPort)) config.DialFromTestContainer("http", config.ClusterIP, framework.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) config.DeleteNetProxyPod() - By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterHTTPPort)) + ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterHTTPPort)) config.DialFromTestContainer("http", config.ClusterIP, framework.ClusterHTTPPort, config.MaxTries, config.MaxTries, config.EndpointHostnames()) }) - It("should update endpoints: udp", func() { + ginkgo.It("should update endpoints: udp", func() { config := framework.NewNetworkingTestConfig(f) - By(fmt.Sprintf("dialing(udp) %v --> %v:%v 
(config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterUDPPort)) + ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterUDPPort)) config.DialFromTestContainer("udp", config.ClusterIP, framework.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames()) config.DeleteNetProxyPod() - By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterUDPPort)) + ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterUDPPort)) config.DialFromTestContainer("udp", config.ClusterIP, framework.ClusterUDPPort, config.MaxTries, config.MaxTries, config.EndpointHostnames()) }) // Slow because we confirm that the nodePort doesn't serve traffic, which requires a period of polling. - It("should update nodePort: http [Slow]", func() { + ginkgo.It("should update nodePort: http [Slow]", func() { config := framework.NewNetworkingTestConfig(f) - By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeHTTPPort)) + ginkgo.By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeHTTPPort)) config.DialFromNode("http", config.NodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) config.DeleteNodePortService() - By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeHTTPPort)) + ginkgo.By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeHTTPPort)) config.DialFromNode("http", config.NodeIP, config.NodeHTTPPort, config.MaxTries, config.MaxTries, sets.NewString()) }) // Slow because we confirm that the nodePort doesn't serve traffic, which requires a period of polling. - It("should update nodePort: udp [Slow]", func() { + ginkgo.It("should update nodePort: udp [Slow]", func() { config := framework.NewNetworkingTestConfig(f) - By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeUDPPort)) + ginkgo.By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeUDPPort)) config.DialFromNode("udp", config.NodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames()) config.DeleteNodePortService() - By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeUDPPort)) + ginkgo.By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeUDPPort)) config.DialFromNode("udp", config.NodeIP, config.NodeUDPPort, config.MaxTries, config.MaxTries, sets.NewString()) }) - It("should function for client IP based session affinity: http", func() { + ginkgo.It("should function for client IP based session affinity: http", func() { config := framework.NewNetworkingTestConfig(f) - By(fmt.Sprintf("dialing(http) %v --> %v:%v", config.TestContainerPod.Name, config.SessionAffinityService.Spec.ClusterIP, framework.ClusterHTTPPort)) + ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v", config.TestContainerPod.Name, config.SessionAffinityService.Spec.ClusterIP, framework.ClusterHTTPPort)) // Check if number of endpoints returned are exactly one. 
eps, err := config.GetEndpointsFromTestContainer("http", config.SessionAffinityService.Spec.ClusterIP, framework.ClusterHTTPPort, framework.SessionAffinityChecks) if err != nil { framework.Failf("Failed to get endpoints from test container, error: %v", err) } if len(eps) == 0 { framework.Failf("Unexpected no endpoints return") @@ -217,14 +217,14 @@ var _ = SIGDescribe("Networking", func() { } }) - It("should function for client IP based session affinity: udp", func() { + ginkgo.It("should function for client IP based session affinity: udp", func() { config := framework.NewNetworkingTestConfig(f) - By(fmt.Sprintf("dialing(udp) %v --> %v:%v", config.TestContainerPod.Name, config.SessionAffinityService.Spec.ClusterIP, framework.ClusterUDPPort)) + ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v", config.TestContainerPod.Name, config.SessionAffinityService.Spec.ClusterIP, framework.ClusterUDPPort)) // Check if number of endpoints returned are exactly one. eps, err := config.GetEndpointsFromTestContainer("udp", config.SessionAffinityService.Spec.ClusterIP, framework.ClusterUDPPort, framework.SessionAffinityChecks) if err != nil { framework.Failf("Failed to get endpoints from test container, error: %v", err) } if len(eps) == 0 { framework.Failf("Unexpected no endpoints return") diff --git a/test/e2e/network/networking_perf.go b/test/e2e/network/networking_perf.go index 9892feeab6d..ecf05e83ac1 100644 --- a/test/e2e/network/networking_perf.go +++ b/test/e2e/network/networking_perf.go @@ -22,8 +22,8 @@ import ( "math" "time" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" v1 "k8s.io/api/core/v1" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" @@ -54,12 +54,12 @@ func networkingIPerfTest(isIPv6 bool) { familyStr = "-V " } - It(fmt.Sprintf("should transfer ~ 1GB onto the service endpoint %v servers (maximum of %v clients)", numServer, numClient), func() { + ginkgo.It(fmt.Sprintf("should transfer ~ 1GB onto the service endpoint %v servers (maximum of %v clients)", numServer, numClient), func() { nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet) totalPods := len(nodes.Items) // for a single service, we expect to divide bandwidth between the network. Very crude estimate. expectedBandwidth := int(float64(maxBandwidthBits) / float64(totalPods)) - Expect(totalPods).NotTo(Equal(0)) + gomega.Expect(totalPods).NotTo(gomega.Equal(0)) appName := "iperf-e2e" _, err := f.CreateServiceForSimpleAppWithPods( 8001, diff --git a/test/e2e/network/no_snat.go b/test/e2e/network/no_snat.go index e62026156a5..52b0afe7c6f 100644 --- a/test/e2e/network/no_snat.go +++ b/test/e2e/network/no_snat.go @@ -29,8 +29,8 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubernetes/test/e2e/framework" - . "github.com/onsi/ginkgo" - // .
"github.com/onsi/gomega" + "github.com/onsi/ginkgo" + imageutils "k8s.io/kubernetes/test/utils/image" ) @@ -94,11 +94,11 @@ var ( // Produces a pod spec that passes nip as NODE_IP env var using downward API func newTestPod(nodename string, nip string) *v1.Pod { pod := testPod - node_ip := v1.EnvVar{ + nodeIP := v1.EnvVar{ Name: "NODE_IP", Value: nip, } - pod.Spec.Containers[0].Env = append(pod.Spec.Containers[0].Env, node_ip) + pod.Spec.Containers[0].Env = append(pod.Spec.Containers[0].Env, nodeIP) pod.Spec.NodeName = nodename return &pod } @@ -135,12 +135,12 @@ func checknosnatURL(proxy, pip string, ips []string) string { // We use the [Feature:NoSNAT] tag so that most jobs will skip this test by default. var _ = SIGDescribe("NoSNAT [Feature:NoSNAT] [Slow]", func() { f := framework.NewDefaultFramework("no-snat-test") - It("Should be able to send traffic between Pods without SNAT", func() { + ginkgo.It("Should be able to send traffic between Pods without SNAT", func() { cs := f.ClientSet pc := cs.CoreV1().Pods(f.Namespace.Name) nc := cs.CoreV1().Nodes() - By("creating a test pod on each Node") + ginkgo.By("creating a test pod on each Node") nodes, err := nc.List(metav1.ListOptions{}) framework.ExpectNoError(err) if len(nodes.Items) == 0 { @@ -167,7 +167,7 @@ var _ = SIGDescribe("NoSNAT [Feature:NoSNAT] [Slow]", func() { // on the master, but do allow this on the nodes. node, err := getSchedulable(nodes.Items) framework.ExpectNoError(err) - By("creating a no-snat-test-proxy Pod on Node " + node.Name + " port " + strconv.Itoa(testProxyPort) + + ginkgo.By("creating a no-snat-test-proxy Pod on Node " + node.Name + " port " + strconv.Itoa(testProxyPort) + " so we can target our test Pods through this Node's ExternalIP") extIP, err := getIP(v1.NodeExternalIP, node) @@ -177,7 +177,7 @@ var _ = SIGDescribe("NoSNAT [Feature:NoSNAT] [Slow]", func() { _, err = pc.Create(newTestProxyPod(node.Name)) framework.ExpectNoError(err) - By("waiting for all of the no-snat-test pods to be scheduled and running") + ginkgo.By("waiting for all of the no-snat-test pods to be scheduled and running") err = wait.PollImmediate(10*time.Second, 1*time.Minute, func() (bool, error) { pods, err := pc.List(metav1.ListOptions{LabelSelector: "no-snat-test"}) if err != nil { @@ -197,7 +197,7 @@ var _ = SIGDescribe("NoSNAT [Feature:NoSNAT] [Slow]", func() { }) framework.ExpectNoError(err) - By("waiting for the no-snat-test-proxy Pod to be scheduled and running") + ginkgo.By("waiting for the no-snat-test-proxy Pod to be scheduled and running") err = wait.PollImmediate(10*time.Second, 1*time.Minute, func() (bool, error) { pod, err := pc.Get("no-snat-test-proxy", metav1.GetOptions{}) if err != nil { @@ -213,7 +213,7 @@ var _ = SIGDescribe("NoSNAT [Feature:NoSNAT] [Slow]", func() { }) framework.ExpectNoError(err) - By("sending traffic from each pod to the others and checking that SNAT does not occur") + ginkgo.By("sending traffic from each pod to the others and checking that SNAT does not occur") pods, err := pc.List(metav1.ListOptions{LabelSelector: "no-snat-test"}) framework.ExpectNoError(err) diff --git a/test/e2e/network/proxy.go b/test/e2e/network/proxy.go index 5887aa77658..511f94608ac 100644 --- a/test/e2e/network/proxy.go +++ b/test/e2e/network/proxy.go @@ -38,8 +38,8 @@ import ( testutils "k8s.io/kubernetes/test/utils" imageutils "k8s.io/kubernetes/test/utils/image" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" ) const ( @@ -55,7 +55,7 @@ const ( var _ = SIGDescribe("Proxy", func() { version := "v1" - Context("version "+version, func() { + ginkgo.Context("version "+version, func() { options := framework.Options{ ClientQPS: -1.0, } @@ -116,12 +116,12 @@ var _ = SIGDescribe("Proxy", func() { }, }, }) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // Make an RC with a single pod. The 'porter' image is // a simple server which serves the values of the // environmental variables below. - By("starting an echo server on multiple ports") + ginkgo.By("starting an echo server on multiple ports") pods := []*v1.Pod{} cfg := testutils.RCConfig{ Client: f.ClientSet, @@ -160,10 +160,10 @@ var _ = SIGDescribe("Proxy", func() { Labels: labels, CreatedPods: &pods, } - Expect(framework.RunRC(cfg)).NotTo(HaveOccurred()) + gomega.Expect(framework.RunRC(cfg)).NotTo(gomega.HaveOccurred()) defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, cfg.Name) - Expect(endpoints.WaitForEndpoint(f.ClientSet, f.Namespace.Name, service.Name)).NotTo(HaveOccurred()) + gomega.Expect(endpoints.WaitForEndpoint(f.ClientSet, f.Namespace.Name, service.Name)).NotTo(gomega.HaveOccurred()) // table constructors // Try proxying through the service and directly to through the pod. @@ -212,7 +212,7 @@ var _ = SIGDescribe("Proxy", func() { e2elog.Logf("setup took %v, starting test cases", d) numberTestCases := len(expectations) totalAttempts := numberTestCases * proxyAttempts - By(fmt.Sprintf("running %v cases, %v attempts per case, %v total attempts", numberTestCases, proxyAttempts, totalAttempts)) + ginkgo.By(fmt.Sprintf("running %v cases, %v attempts per case, %v total attempts", numberTestCases, proxyAttempts, totalAttempts)) for i := 0; i < proxyAttempts; i++ { wg.Add(numberTestCases) @@ -297,25 +297,25 @@ func pickNode(cs clientset.Interface) (string, error) { func nodeProxyTest(f *framework.Framework, prefix, nodeDest string) { node, err := pickNode(f.ClientSet) - Expect(err).NotTo(HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) // TODO: Change it to test whether all requests succeeded when requests // not reaching Kubelet issue is debugged. 
serviceUnavailableErrors := 0 for i := 0; i < proxyAttempts; i++ { _, status, d, err := doProxy(f, prefix+node+nodeDest, i) if status == http.StatusServiceUnavailable { e2elog.Logf("Failed proxying node logs due to service unavailable: %v", err) time.Sleep(time.Second) serviceUnavailableErrors++ } else { - Expect(err).NotTo(HaveOccurred()) - Expect(status).To(Equal(http.StatusOK)) - Expect(d).To(BeNumerically("<", proxyHTTPCallTimeout)) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(status).To(gomega.Equal(http.StatusOK)) + gomega.Expect(d).To(gomega.BeNumerically("<", proxyHTTPCallTimeout)) } } if serviceUnavailableErrors > 0 { e2elog.Logf("error: %d requests to proxy node logs failed", serviceUnavailableErrors) } maxFailures := int(math.Floor(0.1 * float64(proxyAttempts))) - Expect(serviceUnavailableErrors).To(BeNumerically("<", maxFailures)) + gomega.Expect(serviceUnavailableErrors).To(gomega.BeNumerically("<", maxFailures)) } diff --git a/test/e2e/network/service.go b/test/e2e/network/service.go index a5bb5a3428d..2741d05449d 100644 --- a/test/e2e/network/service.go +++ b/test/e2e/network/service.go @@ -43,8 +43,8 @@ import ( imageutils "k8s.io/kubernetes/test/utils/image" gcecloud "k8s.io/legacy-cloud-providers/gce" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" ) const ( @@ -83,12 +83,12 @@ var _ = SIGDescribe("Services", func() { var cs clientset.Interface serviceLBNames := []string{} - BeforeEach(func() { + ginkgo.BeforeEach(func() { cs = f.ClientSet }) - AfterEach(func() { - if CurrentGinkgoTestDescription().Failed { + ginkgo.AfterEach(func() { + if ginkgo.CurrentGinkgoTestDescription().Failed { framework.DescribeSvc(f.Namespace.Name) } for _, lb := range serviceLBNames { @@ -108,7 +108,7 @@ var _ = SIGDescribe("Services", func() { */ framework.ConformanceIt("should provide secure master service ", func() { _, err := cs.CoreV1().Services(metav1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred(), "failed to fetch the service object for the service named kubernetes") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to fetch the service object for the service named kubernetes") }) /* @@ -125,10 +125,10 @@ var _ = SIGDescribe("Services", func() { "baz": "blah", } - By("creating service " + serviceName + " in namespace " + ns) + ginkgo.By("creating service " + serviceName + " in namespace " + ns) defer func() { err := cs.CoreV1().Services(ns).Delete(serviceName, nil) - Expect(err).NotTo(HaveOccurred(), "failed to delete service: %s in namespace: %s", serviceName, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete service: %s in namespace: %s", serviceName, ns) }() ports := []v1.ServicePort{{ Port: 80, @@ -136,7 +136,7 @@ var _ = SIGDescribe("Services", func() { }} _, err := jig.CreateServiceWithServicePort(labels, ns, ports) - Expect(err).NotTo(HaveOccurred(), "failed to create service with ServicePorts in namespace: %s", ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create service with ServicePorts in namespace: %s", ns) framework.ValidateEndpointsOrFail(cs, ns, serviceName, framework.PortsByPodName{}) @@ -144,7 +144,7 @@ var _ = SIGDescribe("Services", func() { defer func() { for name := range names { err := cs.CoreV1().Pods(ns).Delete(name, nil) - Expect(err).NotTo(HaveOccurred(), "failed to delete pod: %s in namespace: %s",
name, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete pod: %s in namespace: %s", name, ns) } }() @@ -181,7 +181,7 @@ var _ = SIGDescribe("Services", func() { defer func() { err := cs.CoreV1().Services(ns).Delete(serviceName, nil) - Expect(err).NotTo(HaveOccurred(), "failed to delete service: %s in namespace: %s", serviceName, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete service: %s in namespace: %s", serviceName, ns) }() labels := map[string]string{"foo": "bar"} @@ -189,7 +189,7 @@ var _ = SIGDescribe("Services", func() { svc1port := "svc1" svc2port := "svc2" - By("creating service " + serviceName + " in namespace " + ns) + ginkgo.By("creating service " + serviceName + " in namespace " + ns) ports := []v1.ServicePort{ { Name: "portname1", @@ -203,7 +203,7 @@ var _ = SIGDescribe("Services", func() { }, } _, err := jig.CreateServiceWithServicePort(labels, ns, ports) - Expect(err).NotTo(HaveOccurred(), "failed to create service with ServicePorts in namespace: %s", ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create service with ServicePorts in namespace: %s", ns) port1 := 100 port2 := 101 framework.ValidateEndpointsOrFail(cs, ns, serviceName, framework.PortsByPodName{}) @@ -212,7 +212,7 @@ var _ = SIGDescribe("Services", func() { defer func() { for name := range names { err := cs.CoreV1().Pods(ns).Delete(name, nil) - Expect(err).NotTo(HaveOccurred(), "failed to delete pod: %s in namespace: %s", name, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete pod: %s in namespace: %s", name, ns) } }() @@ -249,7 +249,7 @@ var _ = SIGDescribe("Services", func() { framework.ValidateEndpointsOrFail(cs, ns, serviceName, framework.PortsByPodName{}) }) - It("should preserve source pod IP for traffic thru service cluster IP", func() { + ginkgo.It("should preserve source pod IP for traffic thru service cluster IP", func() { // This behavior is not supported if Kube-proxy is in "userspace" mode. // So we check the kube-proxy mode and skip this test if that's the case. 
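// A sketch of the guard described in the comment above: source-IP
// preservation is not supported by kube-proxy's userspace mode, so the spec
// can bail out early with ginkgo.Skip. How the mode is detected is left as a
// plain string argument here; only ginkgo.Skip is the real API being shown.
package example

import "github.com/onsi/ginkgo"

// skipIfUserspaceProxy skips the current spec when kube-proxy runs in
// userspace mode.
func skipIfUserspaceProxy(proxyMode string) {
	if proxyMode == "userspace" {
		ginkgo.Skip("source IP preservation is not supported by the userspace proxier")
	}
}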
@@ -264,7 +264,7 @@ var _ = SIGDescribe("Services", func() { serviceName := "sourceip-test" ns := f.Namespace.Name - By("creating a TCP service " + serviceName + " with type=ClusterIP in namespace " + ns) + ginkgo.By("creating a TCP service " + serviceName + " with type=ClusterIP in namespace " + ns) jig := framework.NewServiceTestJig(cs, serviceName) servicePort := 8080 tcpService := jig.CreateTCPServiceWithPort(ns, nil, int32(servicePort)) @@ -272,12 +272,12 @@ var _ = SIGDescribe("Services", func() { defer func() { e2elog.Logf("Cleaning up the sourceip test service") err := cs.CoreV1().Services(ns).Delete(serviceName, nil) - Expect(err).NotTo(HaveOccurred(), "failed to delete service: %s in namespace: %s", serviceName, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete service: %s in namespace: %s", serviceName, ns) }() - serviceIp := tcpService.Spec.ClusterIP - e2elog.Logf("sourceip-test cluster ip: %s", serviceIp) + serviceIP := tcpService.Spec.ClusterIP + e2elog.Logf("sourceip-test cluster ip: %s", serviceIP) - By("Picking multiple nodes") + ginkgo.By("Picking multiple nodes") nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet) if len(nodes.Items) == 1 { @@ -287,30 +287,30 @@ var _ = SIGDescribe("Services", func() { node1 := nodes.Items[0] node2 := nodes.Items[1] - By("Creating a webserver pod be part of the TCP service which echoes back source ip") + ginkgo.By("Creating a webserver pod be part of the TCP service which echoes back source ip") serverPodName := "echoserver-sourceip" jig.LaunchEchoserverPodOnNode(f, node1.Name, serverPodName) defer func() { e2elog.Logf("Cleaning up the echo server pod") err := cs.CoreV1().Pods(ns).Delete(serverPodName, nil) - Expect(err).NotTo(HaveOccurred(), "failed to delete pod: %s on node: %s", serverPodName, node1.Name) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete pod: %s on node: %s", serverPodName, node1.Name) }() // Waiting for service to expose endpoint. framework.ValidateEndpointsOrFail(cs, ns, serviceName, framework.PortsByPodName{serverPodName: {servicePort}}) - By("Retrieve sourceip from a pod on the same node") - sourceIp1, execPodIp1 := execSourceipTest(f, cs, ns, node1.Name, serviceIp, servicePort) - By("Verifying the preserved source ip") - Expect(sourceIp1).To(Equal(execPodIp1)) + ginkgo.By("Retrieve sourceip from a pod on the same node") + sourceIP1, execPodIP1 := execSourceipTest(f, cs, ns, node1.Name, serviceIP, servicePort) + ginkgo.By("Verifying the preserved source ip") + gomega.Expect(sourceIP1).To(gomega.Equal(execPodIP1)) - By("Retrieve sourceip from a pod on a different node") - sourceIp2, execPodIp2 := execSourceipTest(f, cs, ns, node2.Name, serviceIp, servicePort) - By("Verifying the preserved source ip") - Expect(sourceIp2).To(Equal(execPodIp2)) + ginkgo.By("Retrieve sourceip from a pod on a different node") + sourceIP2, execPodIP2 := execSourceipTest(f, cs, ns, node2.Name, serviceIP, servicePort) + ginkgo.By("Verifying the preserved source ip") + gomega.Expect(sourceIP2).To(gomega.Equal(execPodIP2)) }) - It("should be able to up and down services", func() { + ginkgo.It("should be able to up and down services", func() { // TODO: use the ServiceTestJig here // this test uses e2essh.NodeSSHHosts that does not work if a Node only reports LegacyHostIP framework.SkipUnlessProviderIs(framework.ProvidersWithSSH...) 
@@ -320,52 +320,52 @@ var _ = SIGDescribe("Services", func() { ns := f.Namespace.Name numPods, servicePort := 3, defaultServeHostnameServicePort - By("creating service1 in namespace " + ns) + ginkgo.By("creating service1 in namespace " + ns) podNames1, svc1IP, err := framework.StartServeHostnameService(cs, getServeHostnameService("service1"), ns, numPods) - Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc1IP, ns) - By("creating service2 in namespace " + ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc1IP, ns) + ginkgo.By("creating service2 in namespace " + ns) podNames2, svc2IP, err := framework.StartServeHostnameService(cs, getServeHostnameService("service2"), ns, numPods) - Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc2IP, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc2IP, ns) hosts, err := e2essh.NodeSSHHosts(cs) - Expect(err).NotTo(HaveOccurred(), "failed to find external/internal IPs for every node") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to find external/internal IPs for every node") if len(hosts) == 0 { framework.Failf("No ssh-able nodes") } host := hosts[0] - By("verifying service1 is up") + ginkgo.By("verifying service1 is up") framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort)) - By("verifying service2 is up") + ginkgo.By("verifying service2 is up") framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort)) // Stop service 1 and make sure it is gone. - By("stopping service1") + ginkgo.By("stopping service1") framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, ns, "service1")) - By("verifying service1 is not up") + ginkgo.By("verifying service1 is not up") framework.ExpectNoError(framework.VerifyServeHostnameServiceDown(cs, host, svc1IP, servicePort)) - By("verifying service2 is still up") + ginkgo.By("verifying service2 is still up") framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort)) // Start another service and verify both are up. 
- By("creating service3 in namespace " + ns) + ginkgo.By("creating service3 in namespace " + ns) podNames3, svc3IP, err := framework.StartServeHostnameService(cs, getServeHostnameService("service3"), ns, numPods) - Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc3IP, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc3IP, ns) if svc2IP == svc3IP { framework.Failf("service IPs conflict: %v", svc2IP) } - By("verifying service2 is still up") + ginkgo.By("verifying service2 is still up") framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort)) - By("verifying service3 is up") + ginkgo.By("verifying service3 is up") framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames3, svc3IP, servicePort)) }) - It("should work after restarting kube-proxy [Disruptive]", func() { + ginkgo.It("should work after restarting kube-proxy [Disruptive]", func() { // TODO: use the ServiceTestJig here framework.SkipUnlessProviderIs("gce", "gke") @@ -379,20 +379,20 @@ var _ = SIGDescribe("Services", func() { framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, ns, svc1)) }() podNames1, svc1IP, err := framework.StartServeHostnameService(cs, getServeHostnameService(svc1), ns, numPods) - Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc1IP, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc1IP, ns) defer func() { framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, ns, svc2)) }() podNames2, svc2IP, err := framework.StartServeHostnameService(cs, getServeHostnameService(svc2), ns, numPods) - Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc2IP, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc2IP, ns) if svc1IP == svc2IP { framework.Failf("VIPs conflict: %v", svc1IP) } hosts, err := e2essh.NodeSSHHosts(cs) - Expect(err).NotTo(HaveOccurred(), "failed to find external/internal IPs for every node") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to find external/internal IPs for every node") if len(hosts) == 0 { framework.Failf("No ssh-able nodes") } @@ -401,14 +401,14 @@ var _ = SIGDescribe("Services", func() { framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort)) framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort)) - By(fmt.Sprintf("Restarting kube-proxy on %v", host)) + ginkgo.By(fmt.Sprintf("Restarting kube-proxy on %v", host)) if err := framework.RestartKubeProxy(host); err != nil { framework.Failf("error restarting kube-proxy: %v", err) } framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort)) framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort)) - By("Removing iptable rules") + ginkgo.By("Removing iptable rules") result, err := e2essh.SSH(` sudo iptables -t nat -F KUBE-SERVICES || true; sudo iptables -t nat -F KUBE-PORTALS-HOST || true; @@ -421,7 +421,7 @@ var _ = SIGDescribe("Services", func() { 
framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort)) }) - It("should work after restarting apiserver [Disruptive]", func() { + ginkgo.It("should work after restarting apiserver [Disruptive]", func() { // TODO: use the ServiceTestJig here framework.SkipUnlessProviderIs("gce", "gke") @@ -432,10 +432,10 @@ var _ = SIGDescribe("Services", func() { framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, ns, "service1")) }() podNames1, svc1IP, err := framework.StartServeHostnameService(cs, getServeHostnameService("service1"), ns, numPods) - Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc1IP, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc1IP, ns) hosts, err := e2essh.NodeSSHHosts(cs) - Expect(err).NotTo(HaveOccurred(), "failed to find external/internal IPs for every node") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to find external/internal IPs for every node") if len(hosts) == 0 { framework.Failf("No ssh-able nodes") } @@ -444,11 +444,11 @@ var _ = SIGDescribe("Services", func() { framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort)) // Restart apiserver - By("Restarting apiserver") + ginkgo.By("Restarting apiserver") if err := framework.RestartApiserver(cs); err != nil { framework.Failf("error restarting apiserver: %v", err) } - By("Waiting for apiserver to come up by polling /healthz") + ginkgo.By("Waiting for apiserver to come up by polling /healthz") if err := framework.WaitForApiserverUp(cs); err != nil { framework.Failf("error while waiting for apiserver up: %v", err) } @@ -459,7 +459,7 @@ var _ = SIGDescribe("Services", func() { framework.ExpectNoError(framework.StopServeHostnameService(f.ClientSet, ns, "service2")) }() podNames2, svc2IP, err := framework.StartServeHostnameService(cs, getServeHostnameService("service2"), ns, numPods) - Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc2IP, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svc2IP, ns) if svc1IP == svc2IP { framework.Failf("VIPs conflict: %v", svc1IP) @@ -471,27 +471,27 @@ var _ = SIGDescribe("Services", func() { // TODO: Run this test against the userspace proxy and nodes // configured with a default deny firewall to validate that the // proxy whitelists NodePort traffic. 
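// A sketch of the Service object the NodePort test below drives through the
// test jig: type NodePort with a single TCP port; the apiserver then assigns
// Spec.Ports[0].NodePort, which the test dials on a node IP. The selector and
// target port are hypothetical placeholders.
package example

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// nodePortServiceSpec builds a NodePort service comparable to the one the
// jig creates for the "functioning NodePort service" test.
func nodePortServiceSpec(namespace string) *v1.Service {
	return &v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "nodeport-test",
			Namespace: namespace,
		},
		Spec: v1.ServiceSpec{
			Type:     v1.ServiceTypeNodePort,
			Selector: map[string]string{"app": "nodeport-test"},
			Ports: []v1.ServicePort{{
				Port:       80,
				TargetPort: intstr.FromInt(8080),
				Protocol:   v1.ProtocolTCP,
			}},
		},
	}
}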
- It("should be able to create a functioning NodePort service", func() { + ginkgo.It("should be able to create a functioning NodePort service", func() { serviceName := "nodeport-test" ns := f.Namespace.Name jig := framework.NewServiceTestJig(cs, serviceName) nodeIP := framework.PickNodeIP(jig.Client) // for later - By("creating service " + serviceName + " with type=NodePort in namespace " + ns) + ginkgo.By("creating service " + serviceName + " with type=NodePort in namespace " + ns) service := jig.CreateTCPServiceOrFail(ns, func(svc *v1.Service) { svc.Spec.Type = v1.ServiceTypeNodePort }) jig.SanityCheckService(service, v1.ServiceTypeNodePort) nodePort := int(service.Spec.Ports[0].NodePort) - By("creating pod to be part of service " + serviceName) + ginkgo.By("creating pod to be part of service " + serviceName) jig.RunOrFail(ns, nil) - By("hitting the pod through the service's NodePort") + ginkgo.By("hitting the pod through the service's NodePort") jig.TestReachableHTTP(nodeIP, nodePort, framework.KubeProxyLagTimeout) - By("verifying the node port is locked") + ginkgo.By("verifying the node port is locked") hostExec := framework.LaunchHostExecPod(f.ClientSet, f.Namespace.Name, "hostexec") // Even if the node-ip:node-port check above passed, this hostexec pod // might fall on a node with a laggy kube-proxy. @@ -503,7 +503,7 @@ var _ = SIGDescribe("Services", func() { }) // TODO: Get rid of [DisabledForLargeClusters] tag when issue #56138 is fixed. - It("should be able to change the type and ports of a service [Slow] [DisabledForLargeClusters]", func() { + ginkgo.It("should be able to change the type and ports of a service [Slow] [DisabledForLargeClusters]", func() { // requires cloud load-balancer support framework.SkipUnlessProviderIs("gce", "gke", "aws") @@ -525,9 +525,9 @@ var _ = SIGDescribe("Services", func() { ns1 := f.Namespace.Name // LB1 in ns1 on TCP e2elog.Logf("namespace for TCP test: %s", ns1) - By("creating a second namespace") + ginkgo.By("creating a second namespace") namespacePtr, err := f.CreateNamespace("services", nil) - Expect(err).NotTo(HaveOccurred(), "failed to create namespace") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create namespace") ns2 := namespacePtr.Name // LB2 in ns2 on UDP e2elog.Logf("namespace for UDP test: %s", ns2) @@ -537,30 +537,30 @@ var _ = SIGDescribe("Services", func() { // Test TCP and UDP Services. Services with the same name in different // namespaces should get different node ports and load balancers. 
- By("creating a TCP service " + serviceName + " with type=ClusterIP in namespace " + ns1) + ginkgo.By("creating a TCP service " + serviceName + " with type=ClusterIP in namespace " + ns1) tcpService := jig.CreateTCPServiceOrFail(ns1, nil) jig.SanityCheckService(tcpService, v1.ServiceTypeClusterIP) - By("creating a UDP service " + serviceName + " with type=ClusterIP in namespace " + ns2) + ginkgo.By("creating a UDP service " + serviceName + " with type=ClusterIP in namespace " + ns2) udpService := jig.CreateUDPServiceOrFail(ns2, nil) jig.SanityCheckService(udpService, v1.ServiceTypeClusterIP) - By("verifying that TCP and UDP use the same port") + ginkgo.By("verifying that TCP and UDP use the same port") if tcpService.Spec.Ports[0].Port != udpService.Spec.Ports[0].Port { framework.Failf("expected to use the same port for TCP and UDP") } svcPort := int(tcpService.Spec.Ports[0].Port) e2elog.Logf("service port (TCP and UDP): %d", svcPort) - By("creating a pod to be part of the TCP service " + serviceName) + ginkgo.By("creating a pod to be part of the TCP service " + serviceName) jig.RunOrFail(ns1, nil) - By("creating a pod to be part of the UDP service " + serviceName) + ginkgo.By("creating a pod to be part of the UDP service " + serviceName) jig.RunOrFail(ns2, nil) // Change the services to NodePort. - By("changing the TCP service to type=NodePort") + ginkgo.By("changing the TCP service to type=NodePort") tcpService = jig.UpdateServiceOrFail(ns1, tcpService.Name, func(s *v1.Service) { s.Spec.Type = v1.ServiceTypeNodePort }) @@ -568,7 +568,7 @@ var _ = SIGDescribe("Services", func() { tcpNodePort := int(tcpService.Spec.Ports[0].NodePort) e2elog.Logf("TCP node port: %d", tcpNodePort) - By("changing the UDP service to type=NodePort") + ginkgo.By("changing the UDP service to type=NodePort") udpService = jig.UpdateServiceOrFail(ns2, udpService.Name, func(s *v1.Service) { s.Spec.Type = v1.ServiceTypeNodePort }) @@ -576,10 +576,10 @@ var _ = SIGDescribe("Services", func() { udpNodePort := int(udpService.Spec.Ports[0].NodePort) e2elog.Logf("UDP node port: %d", udpNodePort) - By("hitting the TCP service's NodePort") + ginkgo.By("hitting the TCP service's NodePort") jig.TestReachableHTTP(nodeIP, tcpNodePort, framework.KubeProxyLagTimeout) - By("hitting the UDP service's NodePort") + ginkgo.By("hitting the UDP service's NodePort") jig.TestReachableUDP(nodeIP, udpNodePort, framework.KubeProxyLagTimeout) // Change the services to LoadBalancer. 
@@ -589,10 +589,10 @@ var _ = SIGDescribe("Services", func() { requestedIP := "" staticIPName := "" if framework.ProviderIs("gce", "gke") { - By("creating a static load balancer IP") + ginkgo.By("creating a static load balancer IP") staticIPName = fmt.Sprintf("e2e-external-lb-test-%s", framework.RunID) gceCloud, err := gce.GetGCECloud() - Expect(err).NotTo(HaveOccurred(), "failed to get GCE cloud provider") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get GCE cloud provider") err = gceCloud.ReserveRegionAddress(&compute.Address{Name: staticIPName}, gceCloud.Region()) defer func() { @@ -603,22 +603,22 @@ var _ = SIGDescribe("Services", func() { } } }() - Expect(err).NotTo(HaveOccurred(), "failed to create region address: %s", staticIPName) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create region address: %s", staticIPName) reservedAddr, err := gceCloud.GetRegionAddress(staticIPName, gceCloud.Region()) - Expect(err).NotTo(HaveOccurred(), "failed to get region address: %s", staticIPName) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get region address: %s", staticIPName) requestedIP = reservedAddr.Address e2elog.Logf("Allocated static load balancer IP: %s", requestedIP) } - By("changing the TCP service to type=LoadBalancer") + ginkgo.By("changing the TCP service to type=LoadBalancer") tcpService = jig.UpdateServiceOrFail(ns1, tcpService.Name, func(s *v1.Service) { s.Spec.LoadBalancerIP = requestedIP // will be "" if not applicable s.Spec.Type = v1.ServiceTypeLoadBalancer }) if loadBalancerSupportsUDP { - By("changing the UDP service to type=LoadBalancer") + ginkgo.By("changing the UDP service to type=LoadBalancer") udpService = jig.UpdateServiceOrFail(ns2, udpService.Name, func(s *v1.Service) { s.Spec.Type = v1.ServiceTypeLoadBalancer }) @@ -628,7 +628,7 @@ var _ = SIGDescribe("Services", func() { serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(udpService)) } - By("waiting for the TCP service to have a load balancer") + ginkgo.By("waiting for the TCP service to have a load balancer") // Wait for the load balancer to be created asynchronously tcpService = jig.WaitForLoadBalancerOrFail(ns1, tcpService.Name, loadBalancerCreateTimeout) jig.SanityCheckService(tcpService, v1.ServiceTypeLoadBalancer) @@ -646,10 +646,10 @@ var _ = SIGDescribe("Services", func() { // This is mostly out of fear of leaking the IP in a timeout case // (as of this writing we're not 100% sure where the leaks are // coming from, so this is first-aid rather than surgery). - By("demoting the static IP to ephemeral") + ginkgo.By("demoting the static IP to ephemeral") if staticIPName != "" { gceCloud, err := gce.GetGCECloud() - Expect(err).NotTo(HaveOccurred(), "failed to get GCE cloud provider") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get GCE cloud provider") // Deleting it after it is attached "demotes" it to an // ephemeral IP, which can be auto-released. if err := gceCloud.DeleteRegionAddress(staticIPName, gceCloud.Region()); err != nil { @@ -661,7 +661,7 @@ var _ = SIGDescribe("Services", func() { var udpIngressIP string if loadBalancerSupportsUDP { - By("waiting for the UDP service to have a load balancer") + ginkgo.By("waiting for the UDP service to have a load balancer") // 2nd one should be faster since they ran in parallel. 
udpService = jig.WaitForLoadBalancerOrFail(ns2, udpService.Name, loadBalancerCreateTimeout) jig.SanityCheckService(udpService, v1.ServiceTypeLoadBalancer) @@ -671,29 +671,29 @@ var _ = SIGDescribe("Services", func() { udpIngressIP = framework.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) e2elog.Logf("UDP load balancer: %s", udpIngressIP) - By("verifying that TCP and UDP use different load balancers") + ginkgo.By("verifying that TCP and UDP use different load balancers") if tcpIngressIP == udpIngressIP { framework.Failf("Load balancers are not different: %s", framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) } } - By("hitting the TCP service's NodePort") + ginkgo.By("hitting the TCP service's NodePort") jig.TestReachableHTTP(nodeIP, tcpNodePort, framework.KubeProxyLagTimeout) - By("hitting the UDP service's NodePort") + ginkgo.By("hitting the UDP service's NodePort") jig.TestReachableUDP(nodeIP, udpNodePort, framework.KubeProxyLagTimeout) - By("hitting the TCP service's LoadBalancer") + ginkgo.By("hitting the TCP service's LoadBalancer") jig.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout) if loadBalancerSupportsUDP { - By("hitting the UDP service's LoadBalancer") + ginkgo.By("hitting the UDP service's LoadBalancer") jig.TestReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout) } // Change the services' node ports. - By("changing the TCP service's NodePort") + ginkgo.By("changing the TCP service's NodePort") tcpService = jig.ChangeServiceNodePortOrFail(ns1, tcpService.Name, tcpNodePort) jig.SanityCheckService(tcpService, v1.ServiceTypeLoadBalancer) tcpNodePortOld := tcpNodePort @@ -706,7 +706,7 @@ var _ = SIGDescribe("Services", func() { } e2elog.Logf("TCP node port: %d", tcpNodePort) - By("changing the UDP service's NodePort") + ginkgo.By("changing the UDP service's NodePort") udpService = jig.ChangeServiceNodePortOrFail(ns2, udpService.Name, udpNodePort) if loadBalancerSupportsUDP { jig.SanityCheckService(udpService, v1.ServiceTypeLoadBalancer) @@ -723,29 +723,29 @@ var _ = SIGDescribe("Services", func() { } e2elog.Logf("UDP node port: %d", udpNodePort) - By("hitting the TCP service's new NodePort") + ginkgo.By("hitting the TCP service's new NodePort") jig.TestReachableHTTP(nodeIP, tcpNodePort, framework.KubeProxyLagTimeout) - By("hitting the UDP service's new NodePort") + ginkgo.By("hitting the UDP service's new NodePort") jig.TestReachableUDP(nodeIP, udpNodePort, framework.KubeProxyLagTimeout) - By("checking the old TCP NodePort is closed") + ginkgo.By("checking the old TCP NodePort is closed") jig.TestNotReachableHTTP(nodeIP, tcpNodePortOld, framework.KubeProxyLagTimeout) - By("checking the old UDP NodePort is closed") + ginkgo.By("checking the old UDP NodePort is closed") jig.TestNotReachableUDP(nodeIP, udpNodePortOld, framework.KubeProxyLagTimeout) - By("hitting the TCP service's LoadBalancer") + ginkgo.By("hitting the TCP service's LoadBalancer") jig.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout) if loadBalancerSupportsUDP { - By("hitting the UDP service's LoadBalancer") + ginkgo.By("hitting the UDP service's LoadBalancer") jig.TestReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout) } // Change the services' main ports. 
- By("changing the TCP service's port") + ginkgo.By("changing the TCP service's port") tcpService = jig.UpdateServiceOrFail(ns1, tcpService.Name, func(s *v1.Service) { s.Spec.Ports[0].Port++ }) @@ -762,7 +762,7 @@ var _ = SIGDescribe("Services", func() { framework.Failf("TCP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", tcpIngressIP, framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) } - By("changing the UDP service's port") + ginkgo.By("changing the UDP service's port") udpService = jig.UpdateServiceOrFail(ns2, udpService.Name, func(s *v1.Service) { s.Spec.Ports[0].Port++ }) @@ -783,59 +783,59 @@ var _ = SIGDescribe("Services", func() { e2elog.Logf("service port (TCP and UDP): %d", svcPort) - By("hitting the TCP service's NodePort") + ginkgo.By("hitting the TCP service's NodePort") jig.TestReachableHTTP(nodeIP, tcpNodePort, framework.KubeProxyLagTimeout) - By("hitting the UDP service's NodePort") + ginkgo.By("hitting the UDP service's NodePort") jig.TestReachableUDP(nodeIP, udpNodePort, framework.KubeProxyLagTimeout) - By("hitting the TCP service's LoadBalancer") + ginkgo.By("hitting the TCP service's LoadBalancer") jig.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerCreateTimeout) if loadBalancerSupportsUDP { - By("hitting the UDP service's LoadBalancer") + ginkgo.By("hitting the UDP service's LoadBalancer") jig.TestReachableUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout) } - By("Scaling the pods to 0") + ginkgo.By("Scaling the pods to 0") jig.Scale(ns1, 0) jig.Scale(ns2, 0) - By("looking for ICMP REJECT on the TCP service's NodePort") + ginkgo.By("looking for ICMP REJECT on the TCP service's NodePort") jig.TestRejectedHTTP(nodeIP, tcpNodePort, framework.KubeProxyLagTimeout) - By("looking for ICMP REJECT on the UDP service's NodePort") + ginkgo.By("looking for ICMP REJECT on the UDP service's NodePort") jig.TestRejectedUDP(nodeIP, udpNodePort, framework.KubeProxyLagTimeout) - By("looking for ICMP REJECT on the TCP service's LoadBalancer") + ginkgo.By("looking for ICMP REJECT on the TCP service's LoadBalancer") jig.TestRejectedHTTP(tcpIngressIP, svcPort, loadBalancerCreateTimeout) if loadBalancerSupportsUDP { - By("looking for ICMP REJECT on the UDP service's LoadBalancer") + ginkgo.By("looking for ICMP REJECT on the UDP service's LoadBalancer") jig.TestRejectedUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout) } - By("Scaling the pods to 1") + ginkgo.By("Scaling the pods to 1") jig.Scale(ns1, 1) jig.Scale(ns2, 1) - By("hitting the TCP service's NodePort") + ginkgo.By("hitting the TCP service's NodePort") jig.TestReachableHTTP(nodeIP, tcpNodePort, framework.KubeProxyLagTimeout) - By("hitting the UDP service's NodePort") + ginkgo.By("hitting the UDP service's NodePort") jig.TestReachableUDP(nodeIP, udpNodePort, framework.KubeProxyLagTimeout) - By("hitting the TCP service's LoadBalancer") + ginkgo.By("hitting the TCP service's LoadBalancer") jig.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerCreateTimeout) if loadBalancerSupportsUDP { - By("hitting the UDP service's LoadBalancer") + ginkgo.By("hitting the UDP service's LoadBalancer") jig.TestReachableUDP(udpIngressIP, svcPort, loadBalancerCreateTimeout) } // Change the services back to ClusterIP. 
- By("changing TCP service back to type=ClusterIP") + ginkgo.By("changing TCP service back to type=ClusterIP") tcpService = jig.UpdateServiceOrFail(ns1, tcpService.Name, func(s *v1.Service) { s.Spec.Type = v1.ServiceTypeClusterIP s.Spec.Ports[0].NodePort = 0 @@ -844,7 +844,7 @@ var _ = SIGDescribe("Services", func() { tcpService = jig.WaitForLoadBalancerDestroyOrFail(ns1, tcpService.Name, tcpIngressIP, svcPort, loadBalancerCreateTimeout) jig.SanityCheckService(tcpService, v1.ServiceTypeClusterIP) - By("changing UDP service back to type=ClusterIP") + ginkgo.By("changing UDP service back to type=ClusterIP") udpService = jig.UpdateServiceOrFail(ns2, udpService.Name, func(s *v1.Service) { s.Spec.Type = v1.ServiceTypeClusterIP s.Spec.Ports[0].NodePort = 0 @@ -855,32 +855,32 @@ var _ = SIGDescribe("Services", func() { jig.SanityCheckService(udpService, v1.ServiceTypeClusterIP) } - By("checking the TCP NodePort is closed") + ginkgo.By("checking the TCP NodePort is closed") jig.TestNotReachableHTTP(nodeIP, tcpNodePort, framework.KubeProxyLagTimeout) - By("checking the UDP NodePort is closed") + ginkgo.By("checking the UDP NodePort is closed") jig.TestNotReachableUDP(nodeIP, udpNodePort, framework.KubeProxyLagTimeout) - By("checking the TCP LoadBalancer is closed") + ginkgo.By("checking the TCP LoadBalancer is closed") jig.TestNotReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeout) if loadBalancerSupportsUDP { - By("checking the UDP LoadBalancer is closed") + ginkgo.By("checking the UDP LoadBalancer is closed") jig.TestNotReachableUDP(udpIngressIP, svcPort, loadBalancerLagTimeout) } }) - It("should be able to update NodePorts with two same port numbers but different protocols", func() { + ginkgo.It("should be able to update NodePorts with two same port numbers but different protocols", func() { serviceName := "nodeport-update-service" ns := f.Namespace.Name jig := framework.NewServiceTestJig(cs, serviceName) - By("creating a TCP service " + serviceName + " with type=ClusterIP in namespace " + ns) + ginkgo.By("creating a TCP service " + serviceName + " with type=ClusterIP in namespace " + ns) tcpService := jig.CreateTCPServiceOrFail(ns, nil) defer func() { e2elog.Logf("Cleaning up the updating NodePorts test service") err := cs.CoreV1().Services(ns).Delete(serviceName, nil) - Expect(err).NotTo(HaveOccurred(), "failed to delete service: %s in namespace: %s", serviceName, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete service: %s in namespace: %s", serviceName, ns) }() jig.SanityCheckService(tcpService, v1.ServiceTypeClusterIP) svcPort := int(tcpService.Spec.Ports[0].Port) @@ -888,7 +888,7 @@ var _ = SIGDescribe("Services", func() { // Change the services to NodePort and add a UDP port. 
- By("changing the TCP service to type=NodePort and add a UDP port") + ginkgo.By("changing the TCP service to type=NodePort and add a UDP port") newService := jig.UpdateServiceOrFail(ns, tcpService.Name, func(s *v1.Service) { s.Spec.Type = v1.ServiceTypeNodePort s.Spec.Ports = []v1.ServicePort{ @@ -917,20 +917,20 @@ var _ = SIGDescribe("Services", func() { } }) - It("should be able to change the type from ExternalName to ClusterIP", func() { + ginkgo.It("should be able to change the type from ExternalName to ClusterIP", func() { serviceName := "externalname-service" ns := f.Namespace.Name jig := framework.NewServiceTestJig(cs, serviceName) - By("creating a service " + serviceName + " with the type=ExternalName in namespace " + ns) + ginkgo.By("creating a service " + serviceName + " with the type=ExternalName in namespace " + ns) externalNameService := jig.CreateExternalNameServiceOrFail(ns, nil) defer func() { e2elog.Logf("Cleaning up the ExternalName to ClusterIP test service") err := cs.CoreV1().Services(ns).Delete(serviceName, nil) - Expect(err).NotTo(HaveOccurred(), "failed to delete service %s in namespace %s", serviceName, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete service %s in namespace %s", serviceName, ns) }() jig.SanityCheckService(externalNameService, v1.ServiceTypeExternalName) - By("changing the ExternalName service to type=ClusterIP") + ginkgo.By("changing the ExternalName service to type=ClusterIP") clusterIPService := jig.UpdateServiceOrFail(ns, externalNameService.Name, func(s *v1.Service) { s.Spec.Type = v1.ServiceTypeClusterIP s.Spec.ExternalName = "" @@ -941,20 +941,20 @@ var _ = SIGDescribe("Services", func() { jig.SanityCheckService(clusterIPService, v1.ServiceTypeClusterIP) }) - It("should be able to change the type from ExternalName to NodePort", func() { + ginkgo.It("should be able to change the type from ExternalName to NodePort", func() { serviceName := "externalname-service" ns := f.Namespace.Name jig := framework.NewServiceTestJig(cs, serviceName) - By("creating a service " + serviceName + " with the type=ExternalName in namespace " + ns) + ginkgo.By("creating a service " + serviceName + " with the type=ExternalName in namespace " + ns) externalNameService := jig.CreateExternalNameServiceOrFail(ns, nil) defer func() { e2elog.Logf("Cleaning up the ExternalName to NodePort test service") err := cs.CoreV1().Services(ns).Delete(serviceName, nil) - Expect(err).NotTo(HaveOccurred(), "failed to delete service %s in namespace %s", serviceName, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete service %s in namespace %s", serviceName, ns) }() jig.SanityCheckService(externalNameService, v1.ServiceTypeExternalName) - By("changing the ExternalName service to type=NodePort") + ginkgo.By("changing the ExternalName service to type=NodePort") nodePortService := jig.UpdateServiceOrFail(ns, externalNameService.Name, func(s *v1.Service) { s.Spec.Type = v1.ServiceTypeNodePort s.Spec.ExternalName = "" @@ -965,20 +965,20 @@ var _ = SIGDescribe("Services", func() { jig.SanityCheckService(nodePortService, v1.ServiceTypeNodePort) }) - It("should be able to change the type from ClusterIP to ExternalName", func() { + ginkgo.It("should be able to change the type from ClusterIP to ExternalName", func() { serviceName := "clusterip-service" ns := f.Namespace.Name jig := framework.NewServiceTestJig(cs, serviceName) - By("creating a service " + serviceName + " with the type=ClusterIP in namespace " + ns) + ginkgo.By("creating a service " + 
serviceName + " with the type=ClusterIP in namespace " + ns) clusterIPService := jig.CreateTCPServiceOrFail(ns, nil) defer func() { e2elog.Logf("Cleaning up the ClusterIP to ExternalName test service") err := cs.CoreV1().Services(ns).Delete(serviceName, nil) - Expect(err).NotTo(HaveOccurred(), "failed to delete service %s in namespace %s", serviceName, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete service %s in namespace %s", serviceName, ns) }() jig.SanityCheckService(clusterIPService, v1.ServiceTypeClusterIP) - By("changing the ClusterIP service to type=ExternalName") + ginkgo.By("changing the ClusterIP service to type=ExternalName") externalNameService := jig.UpdateServiceOrFail(ns, clusterIPService.Name, func(s *v1.Service) { s.Spec.Type = v1.ServiceTypeExternalName s.Spec.ExternalName = "foo.example.com" @@ -987,22 +987,22 @@ var _ = SIGDescribe("Services", func() { jig.SanityCheckService(externalNameService, v1.ServiceTypeExternalName) }) - It("should be able to change the type from NodePort to ExternalName", func() { + ginkgo.It("should be able to change the type from NodePort to ExternalName", func() { serviceName := "nodeport-service" ns := f.Namespace.Name jig := framework.NewServiceTestJig(cs, serviceName) - By("creating a service " + serviceName + " with the type=NodePort in namespace " + ns) + ginkgo.By("creating a service " + serviceName + " with the type=NodePort in namespace " + ns) nodePortService := jig.CreateTCPServiceOrFail(ns, func(svc *v1.Service) { svc.Spec.Type = v1.ServiceTypeNodePort }) defer func() { e2elog.Logf("Cleaning up the NodePort to ExternalName test service") err := cs.CoreV1().Services(ns).Delete(serviceName, nil) - Expect(err).NotTo(HaveOccurred(), "failed to delete service %s in namespace %s", serviceName, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete service %s in namespace %s", serviceName, ns) }() jig.SanityCheckService(nodePortService, v1.ServiceTypeNodePort) - By("changing the NodePort service to type=ExternalName") + ginkgo.By("changing the NodePort service to type=ExternalName") externalNameService := jig.UpdateServiceOrFail(ns, nodePortService.Name, func(s *v1.Service) { s.Spec.Type = v1.ServiceTypeExternalName s.Spec.ExternalName = "foo.example.com" @@ -1012,20 +1012,20 @@ var _ = SIGDescribe("Services", func() { jig.SanityCheckService(externalNameService, v1.ServiceTypeExternalName) }) - It("should use same NodePort with same port but different protocols", func() { + ginkgo.It("should use same NodePort with same port but different protocols", func() { serviceName := "nodeports" ns := f.Namespace.Name t := framework.NewServerTest(cs, ns, serviceName) defer func() { - defer GinkgoRecover() + defer ginkgo.GinkgoRecover() errs := t.Cleanup() if len(errs) != 0 { framework.Failf("errors in cleanup: %v", errs) } }() - By("creating service " + serviceName + " with same NodePort but different protocols in namespace " + ns) + ginkgo.By("creating service " + serviceName + " with same NodePort but different protocols in namespace " + ns) service := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: t.ServiceName, @@ -1049,7 +1049,7 @@ var _ = SIGDescribe("Services", func() { }, } result, err := t.CreateService(service) - Expect(err).NotTo(HaveOccurred(), "failed to create service: %s in namespace: %s", serviceName, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create service: %s in namespace: %s", serviceName, ns) if len(result.Spec.Ports) != 2 { framework.Failf("got unexpected 
len(Spec.Ports) for new service: %v", result) @@ -1059,7 +1059,7 @@ var _ = SIGDescribe("Services", func() { } }) - It("should prevent NodePort collisions", func() { + ginkgo.It("should prevent NodePort collisions", func() { // TODO: use the ServiceTestJig here baseName := "nodeport-collision-" serviceName1 := baseName + "1" @@ -1068,18 +1068,18 @@ var _ = SIGDescribe("Services", func() { t := framework.NewServerTest(cs, ns, serviceName1) defer func() { - defer GinkgoRecover() + defer ginkgo.GinkgoRecover() errs := t.Cleanup() if len(errs) != 0 { framework.Failf("errors in cleanup: %v", errs) } }() - By("creating service " + serviceName1 + " with type NodePort in namespace " + ns) + ginkgo.By("creating service " + serviceName1 + " with type NodePort in namespace " + ns) service := t.BuildServiceSpec() service.Spec.Type = v1.ServiceTypeNodePort result, err := t.CreateService(service) - Expect(err).NotTo(HaveOccurred(), "failed to create service: %s in namespace: %s", serviceName1, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create service: %s in namespace: %s", serviceName1, ns) if result.Spec.Type != v1.ServiceTypeNodePort { framework.Failf("got unexpected Spec.Type for new service: %v", result) @@ -1092,7 +1092,7 @@ var _ = SIGDescribe("Services", func() { framework.Failf("got unexpected Spec.Ports[0].NodePort for new service: %v", result) } - By("creating service " + serviceName2 + " with conflicting NodePort") + ginkgo.By("creating service " + serviceName2 + " with conflicting NodePort") service2 := t.BuildServiceSpec() service2.Name = serviceName2 service2.Spec.Type = v1.ServiceTypeNodePort @@ -1102,25 +1102,25 @@ var _ = SIGDescribe("Services", func() { framework.Failf("Created service with conflicting NodePort: %v", result2) } expectedErr := fmt.Sprintf("%d.*port is already allocated", port.NodePort) - Expect(fmt.Sprintf("%v", err)).To(MatchRegexp(expectedErr)) + gomega.Expect(fmt.Sprintf("%v", err)).To(gomega.MatchRegexp(expectedErr)) - By("deleting service " + serviceName1 + " to release NodePort") + ginkgo.By("deleting service " + serviceName1 + " to release NodePort") err = t.DeleteService(serviceName1) - Expect(err).NotTo(HaveOccurred(), "failed to delete service: %s in namespace: %s", serviceName1, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete service: %s in namespace: %s", serviceName1, ns) - By("creating service " + serviceName2 + " with no-longer-conflicting NodePort") + ginkgo.By("creating service " + serviceName2 + " with no-longer-conflicting NodePort") _, err = t.CreateService(service2) - Expect(err).NotTo(HaveOccurred(), "failed to create service: %s in namespace: %s", serviceName1, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create service: %s in namespace: %s", serviceName1, ns) }) - It("should check NodePort out-of-range", func() { + ginkgo.It("should check NodePort out-of-range", func() { // TODO: use the ServiceTestJig here serviceName := "nodeport-range-test" ns := f.Namespace.Name t := framework.NewServerTest(cs, ns, serviceName) defer func() { - defer GinkgoRecover() + defer ginkgo.GinkgoRecover() errs := t.Cleanup() if len(errs) != 0 { framework.Failf("errors in cleanup: %v", errs) @@ -1130,9 +1130,9 @@ var _ = SIGDescribe("Services", func() { service := t.BuildServiceSpec() service.Spec.Type = v1.ServiceTypeNodePort - By("creating service " + serviceName + " with type NodePort in namespace " + ns) + ginkgo.By("creating service " + serviceName + " with type NodePort in namespace " + ns) 
service, err := t.CreateService(service) - Expect(err).NotTo(HaveOccurred(), "failed to create service: %s in namespace: %s", serviceName, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create service: %s in namespace: %s", serviceName, ns) if service.Spec.Type != v1.ServiceTypeNodePort { framework.Failf("got unexpected Spec.Type for new service: %v", service) @@ -1156,7 +1156,7 @@ var _ = SIGDescribe("Services", func() { break } } - By(fmt.Sprintf("changing service "+serviceName+" to out-of-range NodePort %d", outOfRangeNodePort)) + ginkgo.By(fmt.Sprintf("changing service "+serviceName+" to out-of-range NodePort %d", outOfRangeNodePort)) result, err := framework.UpdateService(cs, ns, serviceName, func(s *v1.Service) { s.Spec.Ports[0].NodePort = int32(outOfRangeNodePort) }) @@ -1164,13 +1164,13 @@ var _ = SIGDescribe("Services", func() { framework.Failf("failed to prevent update of service with out-of-range NodePort: %v", result) } expectedErr := fmt.Sprintf("%d.*port is not in the valid range", outOfRangeNodePort) - Expect(fmt.Sprintf("%v", err)).To(MatchRegexp(expectedErr)) + gomega.Expect(fmt.Sprintf("%v", err)).To(gomega.MatchRegexp(expectedErr)) - By("deleting original service " + serviceName) + ginkgo.By("deleting original service " + serviceName) err = t.DeleteService(serviceName) - Expect(err).NotTo(HaveOccurred(), "failed to delete service: %s in namespace: %s", serviceName, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete service: %s in namespace: %s", serviceName, ns) - By(fmt.Sprintf("creating service "+serviceName+" with out-of-range NodePort %d", outOfRangeNodePort)) + ginkgo.By(fmt.Sprintf("creating service "+serviceName+" with out-of-range NodePort %d", outOfRangeNodePort)) service = t.BuildServiceSpec() service.Spec.Type = v1.ServiceTypeNodePort service.Spec.Ports[0].NodePort = int32(outOfRangeNodePort) @@ -1178,17 +1178,17 @@ var _ = SIGDescribe("Services", func() { if err == nil { framework.Failf("failed to prevent create of service with out-of-range NodePort (%d): %v", outOfRangeNodePort, service) } - Expect(fmt.Sprintf("%v", err)).To(MatchRegexp(expectedErr)) + gomega.Expect(fmt.Sprintf("%v", err)).To(gomega.MatchRegexp(expectedErr)) }) - It("should release NodePorts on delete", func() { + ginkgo.It("should release NodePorts on delete", func() { // TODO: use the ServiceTestJig here serviceName := "nodeport-reuse" ns := f.Namespace.Name t := framework.NewServerTest(cs, ns, serviceName) defer func() { - defer GinkgoRecover() + defer ginkgo.GinkgoRecover() errs := t.Cleanup() if len(errs) != 0 { framework.Failf("errors in cleanup: %v", errs) @@ -1198,9 +1198,9 @@ var _ = SIGDescribe("Services", func() { service := t.BuildServiceSpec() service.Spec.Type = v1.ServiceTypeNodePort - By("creating service " + serviceName + " with type NodePort in namespace " + ns) + ginkgo.By("creating service " + serviceName + " with type NodePort in namespace " + ns) service, err := t.CreateService(service) - Expect(err).NotTo(HaveOccurred(), "failed to create service: %s in namespace: %s", serviceName, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create service: %s in namespace: %s", serviceName, ns) if service.Spec.Type != v1.ServiceTypeNodePort { framework.Failf("got unexpected Spec.Type for new service: %v", service) @@ -1217,9 +1217,9 @@ var _ = SIGDescribe("Services", func() { } nodePort := port.NodePort - By("deleting original service " + serviceName) + ginkgo.By("deleting original service " + serviceName) err = 
t.DeleteService(serviceName) - Expect(err).NotTo(HaveOccurred(), "failed to delete service: %s in namespace: %s", serviceName, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete service: %s in namespace: %s", serviceName, ns) hostExec := framework.LaunchHostExecPod(f.ClientSet, f.Namespace.Name, "hostexec") cmd := fmt.Sprintf(`! ss -ant46 'sport = :%d' | tail -n +2 | grep LISTEN`, nodePort) @@ -1236,21 +1236,21 @@ var _ = SIGDescribe("Services", func() { framework.Failf("expected node port (%d) to not be in use in %v, stdout: %v", nodePort, framework.KubeProxyLagTimeout, stdout) } - By(fmt.Sprintf("creating service "+serviceName+" with same NodePort %d", nodePort)) + ginkgo.By(fmt.Sprintf("creating service "+serviceName+" with same NodePort %d", nodePort)) service = t.BuildServiceSpec() service.Spec.Type = v1.ServiceTypeNodePort service.Spec.Ports[0].NodePort = nodePort service, err = t.CreateService(service) - Expect(err).NotTo(HaveOccurred(), "failed to create service: %s in namespace: %s", serviceName, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create service: %s in namespace: %s", serviceName, ns) }) - It("should create endpoints for unready pods", func() { + ginkgo.It("should create endpoints for unready pods", func() { serviceName := "tolerate-unready" ns := f.Namespace.Name t := framework.NewServerTest(cs, ns, serviceName) defer func() { - defer GinkgoRecover() + defer ginkgo.GinkgoRecover() errs := t.Cleanup() if len(errs) != 0 { framework.Failf("errors in cleanup: %v", errs) @@ -1299,19 +1299,19 @@ var _ = SIGDescribe("Services", func() { }, nil) rcSpec.Spec.Template.Spec.TerminationGracePeriodSeconds = &terminateSeconds - By(fmt.Sprintf("creating RC %v with selectors %v", rcSpec.Name, rcSpec.Spec.Selector)) + ginkgo.By(fmt.Sprintf("creating RC %v with selectors %v", rcSpec.Name, rcSpec.Spec.Selector)) _, err := t.CreateRC(rcSpec) framework.ExpectNoError(err) - By(fmt.Sprintf("creating Service %v with selectors %v", service.Name, service.Spec.Selector)) + ginkgo.By(fmt.Sprintf("creating Service %v with selectors %v", service.Name, service.Spec.Selector)) _, err = t.CreateService(service) framework.ExpectNoError(err) - By("Verifying pods for RC " + t.Name) + ginkgo.By("Verifying pods for RC " + t.Name) framework.ExpectNoError(framework.VerifyPods(t.Client, t.Namespace, t.Name, false, 1)) svcName := fmt.Sprintf("%v.%v.svc.%v", serviceName, f.Namespace.Name, framework.TestContext.ClusterDNSDomain) - By("Waiting for endpoints of Service with DNS name " + svcName) + ginkgo.By("Waiting for endpoints of Service with DNS name " + svcName) execPodName := framework.CreateExecPodOrFail(f.ClientSet, f.Namespace.Name, "execpod-", nil) cmd := fmt.Sprintf("wget -qO- http://%s:%d/", svcName, port) @@ -1328,16 +1328,16 @@ var _ = SIGDescribe("Services", func() { framework.Failf("expected un-ready endpoint for Service %v within %v, stdout: %v", t.Name, framework.KubeProxyLagTimeout, stdout) } - By("Scaling down replication controller to zero") + ginkgo.By("Scaling down replication controller to zero") framework.ScaleRC(f.ClientSet, f.ScalesGetter, t.Namespace, rcSpec.Name, 0, false) - By("Update service to not tolerate unready services") + ginkgo.By("Update service to not tolerate unready services") _, err = framework.UpdateService(f.ClientSet, t.Namespace, t.ServiceName, func(s *v1.Service) { s.ObjectMeta.Annotations[endpoint.TolerateUnreadyEndpointsAnnotation] = "false" }) framework.ExpectNoError(err) - By("Check if pod is unreachable") + 
ginkgo.By("Check if pod is unreachable") cmd = fmt.Sprintf("wget -qO- -T 2 http://%s:%d/; test \"$?\" -eq \"1\"", svcName, port) if pollErr := wait.PollImmediate(framework.Poll, framework.KubeProxyLagTimeout, func() (bool, error) { var err error @@ -1351,13 +1351,13 @@ var _ = SIGDescribe("Services", func() { framework.Failf("expected un-ready endpoint for Service %v within %v, stdout: %v", t.Name, framework.KubeProxyLagTimeout, stdout) } - By("Update service to tolerate unready services again") + ginkgo.By("Update service to tolerate unready services again") _, err = framework.UpdateService(f.ClientSet, t.Namespace, t.ServiceName, func(s *v1.Service) { s.ObjectMeta.Annotations[endpoint.TolerateUnreadyEndpointsAnnotation] = "true" }) framework.ExpectNoError(err) - By("Check if terminating pod is available through service") + ginkgo.By("Check if terminating pod is available through service") cmd = fmt.Sprintf("wget -qO- http://%s:%d/", svcName, port) if pollErr := wait.PollImmediate(framework.Poll, framework.KubeProxyLagTimeout, func() (bool, error) { var err error @@ -1371,7 +1371,7 @@ var _ = SIGDescribe("Services", func() { framework.Failf("expected un-ready endpoint for Service %v within %v, stdout: %v", t.Name, framework.KubeProxyLagTimeout, stdout) } - By("Remove pods immediately") + ginkgo.By("Remove pods immediately") label := labels.SelectorFromSet(labels.Set(t.Labels)) options := metav1.ListOptions{LabelSelector: label.String()} podClient := t.Client.CoreV1().Pods(f.Namespace.Name) @@ -1389,7 +1389,7 @@ var _ = SIGDescribe("Services", func() { } }) - It("should only allow access from service loadbalancer source ranges [Slow]", func() { + ginkgo.It("should only allow access from service loadbalancer source ranges [Slow]", func() { // this feature currently supported only on GCE/GKE/AWS framework.SkipUnlessProviderIs("gce", "gke", "aws") @@ -1406,18 +1406,18 @@ var _ = SIGDescribe("Services", func() { serviceName := "lb-sourcerange" jig := framework.NewServiceTestJig(cs, serviceName) - By("Prepare allow source ips") + ginkgo.By("Prepare allow source ips") // prepare the exec pods // acceptPod are allowed to access the loadbalancer acceptPodName := framework.CreateExecPodOrFail(cs, namespace, "execpod-accept", nil) dropPodName := framework.CreateExecPodOrFail(cs, namespace, "execpod-drop", nil) acceptPod, err := cs.CoreV1().Pods(namespace).Get(acceptPodName, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred(), "failed to fetch pod: %s in namespace: %s", acceptPodName, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to fetch pod: %s in namespace: %s", acceptPodName, namespace) dropPod, err := cs.CoreV1().Pods(namespace).Get(dropPodName, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred(), "failed to fetch pod: %s in namespace: %s", dropPodName, namespace) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to fetch pod: %s in namespace: %s", dropPodName, namespace) - By("creating a pod to be part of the service " + serviceName) + ginkgo.By("creating a pod to be part of the service " + serviceName) // This container is an nginx container listening on port 80 // See kubernetes/contrib/ingress/echoheaders/nginx.conf for content of response jig.RunOrFail(namespace, nil) @@ -1433,7 +1433,7 @@ var _ = SIGDescribe("Services", func() { svc.Spec.Type = v1.ServiceTypeNodePort svc.Spec.LoadBalancerSourceRanges = nil }) - Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred()) + 
gomega.Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(gomega.HaveOccurred()) }() svc = jig.WaitForLoadBalancerOrFail(namespace, serviceName, loadBalancerCreateTimeout) @@ -1442,14 +1442,14 @@ var _ = SIGDescribe("Services", func() { // timeout when we haven't just created the load balancer normalReachabilityTimeout := 2 * time.Minute - By("check reachability from different sources") + ginkgo.By("check reachability from different sources") svcIP := framework.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0]) // Wait longer as this is our first request after creation. We can't check using a separate method, // because the LB should only be reachable from the "accept" pod framework.CheckReachabilityFromPod(true, loadBalancerLagTimeout, namespace, acceptPodName, svcIP) framework.CheckReachabilityFromPod(false, normalReachabilityTimeout, namespace, dropPodName, svcIP) - By("Update service LoadBalancerSourceRange and check reachability") + ginkgo.By("Update service LoadBalancerSourceRange and check reachability") jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) { // only allow access from dropPod svc.Spec.LoadBalancerSourceRanges = []string{dropPod.Status.PodIP + "/32"} @@ -1457,7 +1457,7 @@ var _ = SIGDescribe("Services", func() { framework.CheckReachabilityFromPod(false, normalReachabilityTimeout, namespace, acceptPodName, svcIP) framework.CheckReachabilityFromPod(true, normalReachabilityTimeout, namespace, dropPodName, svcIP) - By("Delete LoadBalancerSourceRange field and check reachability") + ginkgo.By("Delete LoadBalancerSourceRange field and check reachability") jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) { svc.Spec.LoadBalancerSourceRanges = nil }) @@ -1466,7 +1466,7 @@ var _ = SIGDescribe("Services", func() { }) // TODO: Get rid of [DisabledForLargeClusters] tag when issue #56138 is fixed. - It("should be able to create an internal type load balancer [Slow] [DisabledForLargeClusters]", func() { + ginkgo.It("should be able to create an internal type load balancer [Slow] [DisabledForLargeClusters]", func() { framework.SkipUnlessProviderIs("azure", "gke", "gce") createTimeout := framework.LoadBalancerCreateTimeoutDefault @@ -1480,7 +1480,7 @@ var _ = SIGDescribe("Services", func() { serviceName := "lb-internal" jig := framework.NewServiceTestJig(cs, serviceName) - By("creating pod to be part of service " + serviceName) + ginkgo.By("creating pod to be part of service " + serviceName) jig.RunOrFail(namespace, nil) enableILB, disableILB := framework.EnableAndDisableInternalLB() @@ -1491,7 +1491,7 @@ var _ = SIGDescribe("Services", func() { return strings.HasPrefix(ingressEndpoint, "10.") } - By("creating a service with type LoadBalancer and cloud specific Internal-LB annotation enabled") + ginkgo.By("creating a service with type LoadBalancer and cloud specific Internal-LB annotation enabled") svc := jig.CreateTCPServiceOrFail(namespace, func(svc *v1.Service) { svc.Spec.Type = v1.ServiceTypeLoadBalancer enableILB(svc) @@ -1501,11 +1501,11 @@ var _ = SIGDescribe("Services", func() { lbIngress := &svc.Status.LoadBalancer.Ingress[0] svcPort := int(svc.Spec.Ports[0].Port) // should have an internal IP. - Expect(isInternalEndpoint(lbIngress)).To(BeTrue()) + gomega.Expect(isInternalEndpoint(lbIngress)).To(gomega.BeTrue()) // ILBs are not accessible from the test orchestrator, so it's necessary to use // a pod to test the service. 
- By("hitting the internal load balancer from pod") + ginkgo.By("hitting the internal load balancer from pod") e2elog.Logf("creating pod with host network") hostExec := framework.LaunchHostExecPod(f.ClientSet, f.Namespace.Name, "ilb-host-exec") @@ -1527,10 +1527,10 @@ var _ = SIGDescribe("Services", func() { e2elog.Logf("Successful curl; stdout: %v", stdout) return true, nil }); pollErr != nil { - framework.Failf("Failed to hit ILB IP, err: %v", pollErr) + framework.Failf("ginkgo.Failed to hit ILB IP, err: %v", pollErr) } - By("switching to external type LoadBalancer") + ginkgo.By("switching to external type LoadBalancer") svc = jig.UpdateServiceOrFail(namespace, serviceName, func(svc *v1.Service) { disableILB(svc) }) @@ -1547,9 +1547,9 @@ var _ = SIGDescribe("Services", func() { } // should have an external IP. jig.SanityCheckService(svc, v1.ServiceTypeLoadBalancer) - Expect(isInternalEndpoint(lbIngress)).To(BeFalse()) + gomega.Expect(isInternalEndpoint(lbIngress)).To(gomega.BeFalse()) - By("hitting the external load balancer") + ginkgo.By("hitting the external load balancer") e2elog.Logf("Waiting up to %v for service %q's external LB to respond to requests", createTimeout, serviceName) tcpIngressIP = framework.GetIngressPoint(lbIngress) jig.TestReachableHTTP(tcpIngressIP, svcPort, framework.LoadBalancerLagTimeoutDefault) @@ -1557,7 +1557,7 @@ var _ = SIGDescribe("Services", func() { // GCE cannot test a specific IP because the test may not own it. This cloud specific condition // will be removed when GCP supports similar functionality. if framework.ProviderIs("azure") { - By("switching back to interal type LoadBalancer, with static IP specified.") + ginkgo.By("switching back to interal type LoadBalancer, with static IP specified.") internalStaticIP := "10.240.11.11" svc = jig.UpdateServiceOrFail(namespace, serviceName, func(svc *v1.Service) { svc.Spec.LoadBalancerIP = internalStaticIP @@ -1576,17 +1576,17 @@ var _ = SIGDescribe("Services", func() { } // should have the given static internal IP. jig.SanityCheckService(svc, v1.ServiceTypeLoadBalancer) - Expect(framework.GetIngressPoint(lbIngress)).To(Equal(internalStaticIP)) + gomega.Expect(framework.GetIngressPoint(lbIngress)).To(gomega.Equal(internalStaticIP)) } - By("switching to ClusterIP type to destroy loadbalancer") + ginkgo.By("switching to ClusterIP type to destroy loadbalancer") jig.ChangeServiceType(svc.Namespace, svc.Name, v1.ServiceTypeClusterIP, createTimeout) }) // This test creates a load balancer, make sure its health check interval // equals to gceHcCheckIntervalSeconds. Then the interval is manipulated // to be something else, see if the interval will be reconciled. - It("should reconcile LB health check interval [Slow][Serial]", func() { + ginkgo.It("should reconcile LB health check interval [Slow][Serial]", func() { const gceHcCheckIntervalSeconds = int64(8) // This test is for clusters on GCE. 
// (It restarts kube-controller-manager, which we don't support on GKE) @@ -1604,7 +1604,7 @@ var _ = SIGDescribe("Services", func() { serviceName := "lb-hc-int" jig := framework.NewServiceTestJig(cs, serviceName) - By("create load balancer service") + ginkgo.By("create load balancer service") // Create loadbalancer service with source range from node[0] and podAccept svc := jig.CreateTCPServiceOrFail(namespace, func(svc *v1.Service) { svc.Spec.Type = v1.ServiceTypeLoadBalancer @@ -1615,7 +1615,7 @@ var _ = SIGDescribe("Services", func() { jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) { svc.Spec.Type = v1.ServiceTypeNodePort }) - Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred()) + gomega.Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(gomega.HaveOccurred()) }() svc = jig.WaitForLoadBalancerOrFail(namespace, serviceName, framework.LoadBalancerCreateTimeoutDefault) @@ -1625,15 +1625,15 @@ var _ = SIGDescribe("Services", func() { if err != nil { framework.Failf("gceCloud.GetHttpHealthCheck(%q) = _, %v; want nil", hcName, err) } - Expect(hc.CheckIntervalSec).To(Equal(gceHcCheckIntervalSeconds)) + gomega.Expect(hc.CheckIntervalSec).To(gomega.Equal(gceHcCheckIntervalSeconds)) - By("modify the health check interval") + ginkgo.By("modify the health check interval") hc.CheckIntervalSec = gceHcCheckIntervalSeconds - 1 if err = gceCloud.UpdateHTTPHealthCheck(hc); err != nil { framework.Failf("gcecloud.UpdateHttpHealthCheck(%#v) = %v; want nil", hc, err) } - By("restart kube-controller-manager") + ginkgo.By("restart kube-controller-manager") if err := framework.RestartControllerManager(); err != nil { framework.Failf("framework.RestartControllerManager() = %v; want nil", err) } @@ -1641,12 +1641,12 @@ var _ = SIGDescribe("Services", func() { framework.Failf("framework.WaitForControllerManagerUp() = %v; want nil", err) } - By("health check should be reconciled") + ginkgo.By("health check should be reconciled") pollInterval := framework.Poll * 10 if pollErr := wait.PollImmediate(pollInterval, framework.LoadBalancerCreateTimeoutDefault, func() (bool, error) { hc, err := gceCloud.GetHTTPHealthCheck(hcName) if err != nil { e2elog.Logf("Failed to get HttpHealthCheck(%q): %v", hcName, err) return false, err } e2elog.Logf("hc.CheckIntervalSec = %v", hc.CheckIntervalSec) @@ -1656,32 +1656,32 @@ var _ = SIGDescribe("Services", func() { } }) - It("should have session affinity work for service with type clusterIP", func() { + ginkgo.It("should have session affinity work for service with type clusterIP", func() { svc := getServeHostnameService("service") svc.Spec.Type = v1.ServiceTypeClusterIP execAffinityTestForNonLBService(f, cs, svc) }) - It("should be able to switch session affinity for service with type clusterIP", func() { + ginkgo.It("should be able to switch session affinity for service with type clusterIP", func() { svc := getServeHostnameService("service") svc.Spec.Type = v1.ServiceTypeClusterIP execAffinityTestForNonLBServiceWithTransition(f, cs, svc) }) - It("should have session affinity work for NodePort service", func() { + ginkgo.It("should have session affinity work for NodePort service", func() { svc := getServeHostnameService("service") svc.Spec.Type = v1.ServiceTypeNodePort execAffinityTestForNonLBService(f, cs, svc) }) - It("should be able to switch session affinity for NodePort service", func() { + ginkgo.It("should be able to
switch session affinity for NodePort service", func() { svc := getServeHostnameService("service") svc.Spec.Type = v1.ServiceTypeNodePort execAffinityTestForNonLBServiceWithTransition(f, cs, svc) }) // TODO: Get rid of [DisabledForLargeClusters] tag when issue #56138 is fixed. - It("should have session affinity work for LoadBalancer service with ESIPP on [Slow] [DisabledForLargeClusters]", func() { + ginkgo.It("should have session affinity work for LoadBalancer service with ESIPP on [Slow] [DisabledForLargeClusters]", func() { // L4 load balancer affinity `ClientIP` is not supported on AWS ELB. framework.SkipIfProviderIs("aws") @@ -1692,7 +1692,7 @@ var _ = SIGDescribe("Services", func() { }) // TODO: Get rid of [DisabledForLargeClusters] tag when issue #56138 is fixed. - It("should be able to switch session affinity for LoadBalancer service with ESIPP on [Slow] [DisabledForLargeClusters]", func() { + ginkgo.It("should be able to switch session affinity for LoadBalancer service with ESIPP on [Slow] [DisabledForLargeClusters]", func() { // L4 load balancer affinity `ClientIP` is not supported on AWS ELB. framework.SkipIfProviderIs("aws") @@ -1703,7 +1703,7 @@ var _ = SIGDescribe("Services", func() { }) // TODO: Get rid of [DisabledForLargeClusters] tag when issue #56138 is fixed. - It("should have session affinity work for LoadBalancer service with ESIPP off [Slow] [DisabledForLargeClusters]", func() { + ginkgo.It("should have session affinity work for LoadBalancer service with ESIPP off [Slow] [DisabledForLargeClusters]", func() { // L4 load balancer affinity `ClientIP` is not supported on AWS ELB. framework.SkipIfProviderIs("aws") @@ -1714,7 +1714,7 @@ var _ = SIGDescribe("Services", func() { }) // TODO: Get rid of [DisabledForLargeClusters] tag when issue #56138 is fixed. - It("should be able to switch session affinity for LoadBalancer service with ESIPP off [Slow] [DisabledForLargeClusters]", func() { + ginkgo.It("should be able to switch session affinity for LoadBalancer service with ESIPP off [Slow] [DisabledForLargeClusters]", func() { // L4 load balancer affinity `ClientIP` is not supported on AWS ELB. framework.SkipIfProviderIs("aws") @@ -1724,7 +1724,7 @@ var _ = SIGDescribe("Services", func() { execAffinityTestForLBServiceWithTransition(f, cs, svc) }) - It("should implement service.kubernetes.io/service-proxy-name", func() { + ginkgo.It("should implement service.kubernetes.io/service-proxy-name", func() { // this test uses e2essh.NodeSSHHosts that does not work if a Node only reports LegacyHostIP framework.SkipUnlessProviderIs(framework.ProvidersWithSSH...) // this test does not work if the Node does not support SSH Key @@ -1739,53 +1739,53 @@ var _ = SIGDescribe("Services", func() { // test again late to make sure it never becomes available. // svcToggled: Created without the label then the label is toggled verifying reachability at each step. 
- By("creating service-disabled in namespace " + ns) + ginkgo.By("creating service-disabled in namespace " + ns) svcDisabled := getServeHostnameService("service-disabled") svcDisabled.ObjectMeta.Labels = serviceProxyNameLabels _, svcDisabledIP, err := framework.StartServeHostnameService(cs, svcDisabled, ns, numPods) - Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svcDisabledIP, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svcDisabledIP, ns) - By("creating service in namespace " + ns) + ginkgo.By("creating service in namespace " + ns) svcToggled := getServeHostnameService("service") podToggledNames, svcToggledIP, err := framework.StartServeHostnameService(cs, svcToggled, ns, numPods) - Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svcToggledIP, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create replication controller with service: %s in the namespace: %s", svcToggledIP, ns) jig := framework.NewServiceTestJig(cs, svcToggled.ObjectMeta.Name) hosts, err := e2essh.NodeSSHHosts(cs) - Expect(err).NotTo(HaveOccurred(), "failed to find external/internal IPs for every node") + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to find external/internal IPs for every node") if len(hosts) == 0 { framework.Failf("No ssh-able nodes") } host := hosts[0] - By("verifying service is up") + ginkgo.By("verifying service is up") framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podToggledNames, svcToggledIP, servicePort)) - By("verifying service-disabled is not up") + ginkgo.By("verifying service-disabled is not up") framework.ExpectNoError(framework.VerifyServeHostnameServiceDown(cs, host, svcDisabledIP, servicePort)) - By("adding service-proxy-name label") + ginkgo.By("adding service-proxy-name label") jig.UpdateServiceOrFail(ns, svcToggled.ObjectMeta.Name, func(svc *v1.Service) { svc.ObjectMeta.Labels = serviceProxyNameLabels }) - By("verifying service is not up") + ginkgo.By("verifying service is not up") framework.ExpectNoError(framework.VerifyServeHostnameServiceDown(cs, host, svcToggledIP, servicePort)) - By("removing service-proxy-name annotation") + ginkgo.By("removing service-proxy-name annotation") jig.UpdateServiceOrFail(ns, svcToggled.ObjectMeta.Name, func(svc *v1.Service) { svc.ObjectMeta.Labels = nil }) - By("verifying service is up") + ginkgo.By("verifying service is up") framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podToggledNames, svcToggledIP, servicePort)) - By("verifying service-disabled is still not up") + ginkgo.By("verifying service-disabled is still not up") framework.ExpectNoError(framework.VerifyServeHostnameServiceDown(cs, host, svcDisabledIP, servicePort)) }) - It("should be rejected when no endpoints exist", func() { + ginkgo.It("should be rejected when no endpoints exist", func() { namespace := f.Namespace.Name serviceName := "no-pods" jig := framework.NewServiceTestJig(cs, serviceName) @@ -1799,16 +1799,16 @@ var _ = SIGDescribe("Services", func() { TargetPort: intstr.FromInt(80), }} - By("creating a service with no endpoints") + ginkgo.By("creating a service with no endpoints") _, err := jig.CreateServiceWithServicePort(labels, namespace, ports) if err != nil { - framework.Failf("Failed to create service: %v", err) + framework.Failf("ginkgo.Failed to create service: %v", 
err) } nodeName := nodes.Items[0].Name podName := "execpod-noendpoints" - By(fmt.Sprintf("creating %v on node %v", podName, nodeName)) + ginkgo.By(fmt.Sprintf("creating %v on node %v", podName, nodeName)) execPodName := framework.CreateExecPodOrFail(f.ClientSet, namespace, podName, func(pod *v1.Pod) { pod.Spec.NodeName = nodeName }) @@ -1819,7 +1819,7 @@ var _ = SIGDescribe("Services", func() { e2elog.Logf("waiting up to %v wget %v", framework.KubeProxyEndpointLagTimeout, serviceAddress) cmd := fmt.Sprintf(`wget -T 3 -qO- %v`, serviceAddress) - By(fmt.Sprintf("hitting service %v from pod %v on node %v", serviceAddress, podName, nodeName)) + ginkgo.By(fmt.Sprintf("hitting service %v from pod %v on node %v", serviceAddress, podName, nodeName)) expectedErr := "connection refused" if pollErr := wait.PollImmediate(framework.Poll, framework.KubeProxyEndpointLagTimeout, func() (bool, error) { _, err := framework.RunHostCmd(execPod.Namespace, execPod.Name, cmd) @@ -1828,15 +1828,13 @@ var _ = SIGDescribe("Services", func() { if strings.Contains(strings.ToLower(err.Error()), expectedErr) { e2elog.Logf("error contained '%s', as expected: %s", expectedErr, err.Error()) return true, nil - } else { - e2elog.Logf("error didn't contain '%s', keep trying: %s", expectedErr, err.Error()) - return false, nil } - } else { - return true, errors.New("expected wget call to fail") + e2elog.Logf("error didn't contain '%s', keep trying: %s", expectedErr, err.Error()) + return false, nil } + return true, errors.New("expected wget call to fail") }); pollErr != nil { - Expect(pollErr).NotTo(HaveOccurred()) + gomega.Expect(pollErr).NotTo(gomega.HaveOccurred()) } }) @@ -1850,7 +1848,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { var cs clientset.Interface serviceLBNames := []string{} - BeforeEach(func() { + ginkgo.BeforeEach(func() { // requires cloud load-balancer support - this feature currently supported only on GCE/GKE framework.SkipUnlessProviderIs("gce", "gke") @@ -1860,8 +1858,8 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { } }) - AfterEach(func() { - if CurrentGinkgoTestDescription().Failed { + ginkgo.AfterEach(func() { + if ginkgo.CurrentGinkgoTestDescription().Failed { framework.DescribeSvc(f.Namespace.Name) } for _, lb := range serviceLBNames { @@ -1872,7 +1870,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { serviceLBNames = []string{} }) - It("should work for type=LoadBalancer", func() { + ginkgo.It("should work for type=LoadBalancer", func() { namespace := f.Namespace.Name serviceName := "external-local" jig := framework.NewServiceTestJig(cs, serviceName) @@ -1889,33 +1887,33 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { // Make sure we didn't leak the health check node port. 
threshold := 2 for _, ips := range jig.GetEndpointNodes(svc) { - Expect(jig.TestHTTPHealthCheckNodePort(ips[0], healthCheckNodePort, "/healthz", framework.KubeProxyEndpointLagTimeout, false, threshold)).NotTo(HaveOccurred()) + gomega.Expect(jig.TestHTTPHealthCheckNodePort(ips[0], healthCheckNodePort, "/healthz", framework.KubeProxyEndpointLagTimeout, false, threshold)).NotTo(gomega.HaveOccurred()) } - Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred()) + gomega.Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(gomega.HaveOccurred()) }() svcTCPPort := int(svc.Spec.Ports[0].Port) ingressIP := framework.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0]) - By("reading clientIP using the TCP service's service port via its external VIP") + ginkgo.By("reading clientIP using the TCP service's service port via its external VIP") content := jig.GetHTTPContent(ingressIP, svcTCPPort, framework.KubeProxyLagTimeout, "/clientip") clientIP := content.String() e2elog.Logf("ClientIP detected by target pod using VIP:SvcPort is %s", clientIP) - By("checking if Source IP is preserved") + ginkgo.By("checking if Source IP is preserved") if strings.HasPrefix(clientIP, "10.") { framework.Failf("Source IP was NOT preserved") } }) - It("should work for type=NodePort", func() { + ginkgo.It("should work for type=NodePort", func() { namespace := f.Namespace.Name serviceName := "external-local" jig := framework.NewServiceTestJig(cs, serviceName) svc := jig.CreateOnlyLocalNodePortService(namespace, serviceName, true) defer func() { - Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred()) + gomega.Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(gomega.HaveOccurred()) }() tcpNodePort := int(svc.Spec.Ports[0].NodePort) @@ -1924,7 +1922,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { for nodeName, nodeIPs := range endpointsNodeMap { nodeIP := nodeIPs[0] - By(fmt.Sprintf("reading clientIP using the TCP service's NodePort, on node %v: %v%v%v", nodeName, nodeIP, tcpNodePort, path)) + ginkgo.By(fmt.Sprintf("reading clientIP using the TCP service's NodePort, on node %v: %v%v%v", nodeName, nodeIP, tcpNodePort, path)) content := jig.GetHTTPContent(nodeIP, tcpNodePort, framework.KubeProxyLagTimeout, path) clientIP := content.String() e2elog.Logf("ClientIP detected by target pod using NodePort is %s", clientIP) @@ -1934,7 +1932,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { } }) - It("should only target nodes with endpoints", func() { + ginkgo.It("should only target nodes with endpoints", func() { namespace := f.Namespace.Name serviceName := "external-local" jig := framework.NewServiceTestJig(cs, serviceName) @@ -1953,7 +1951,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc)) defer func() { jig.ChangeServiceType(svc.Namespace, svc.Name, v1.ServiceTypeClusterIP, loadBalancerCreateTimeout) - Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred()) + gomega.Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(gomega.HaveOccurred()) }() healthCheckNodePort := int(svc.Spec.HealthCheckNodePort) @@ -1971,7 +1969,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { for i := 0; i < len(nodes.Items); i++ { endpointNodeName := nodes.Items[i].Name - By("creating a pod to be part of the 
service " + serviceName + " on node " + endpointNodeName) + ginkgo.By("creating a pod to be part of the service " + serviceName + " on node " + endpointNodeName) jig.RunOrFail(namespace, func(rc *v1.ReplicationController) { rc.Name = serviceName if endpointNodeName != "" { @@ -1979,7 +1977,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { } }) - By(fmt.Sprintf("waiting for service endpoint on node %v", endpointNodeName)) + ginkgo.By(fmt.Sprintf("waiting for service endpoint on node %v", endpointNodeName)) jig.WaitForEndpointOnNode(namespace, serviceName, endpointNodeName) // HealthCheck should pass only on the node where num(endpoints) > 0 @@ -1992,13 +1990,13 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { port := strconv.Itoa(healthCheckNodePort) ipPort := net.JoinHostPort(publicIP, port) e2elog.Logf("Health checking %s, http://%s%s, expectedSuccess %v", nodes.Items[n].Name, ipPort, path, expectedSuccess) - Expect(jig.TestHTTPHealthCheckNodePort(publicIP, healthCheckNodePort, path, framework.KubeProxyEndpointLagTimeout, expectedSuccess, threshold)).NotTo(HaveOccurred()) + gomega.Expect(jig.TestHTTPHealthCheckNodePort(publicIP, healthCheckNodePort, path, framework.KubeProxyEndpointLagTimeout, expectedSuccess, threshold)).NotTo(gomega.HaveOccurred()) } framework.ExpectNoError(framework.DeleteRCAndWaitForGC(f.ClientSet, namespace, serviceName)) } }) - It("should work from pods", func() { + ginkgo.It("should work from pods", func() { namespace := f.Namespace.Name serviceName := "external-local" jig := framework.NewServiceTestJig(cs, serviceName) @@ -2008,7 +2006,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc)) defer func() { jig.ChangeServiceType(svc.Namespace, svc.Name, v1.ServiceTypeClusterIP, loadBalancerCreateTimeout) - Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred()) + gomega.Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(gomega.HaveOccurred()) }() ingressIP := framework.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0]) @@ -2018,13 +2016,13 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { nodeName := nodes.Items[0].Name podName := "execpod-sourceip" - By(fmt.Sprintf("Creating %v on node %v", podName, nodeName)) + ginkgo.By(fmt.Sprintf("Creating %v on node %v", podName, nodeName)) execPodName := framework.CreateExecPodOrFail(f.ClientSet, namespace, podName, func(pod *v1.Pod) { pod.Spec.NodeName = nodeName }) defer func() { err := cs.CoreV1().Pods(namespace).Delete(execPodName, nil) - Expect(err).NotTo(HaveOccurred(), "failed to delete pod: %s", execPodName) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete pod: %s", execPodName) }() execPod, err := f.ClientSet.CoreV1().Pods(namespace).Get(execPodName, metav1.GetOptions{}) framework.ExpectNoError(err) @@ -2033,7 +2031,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { cmd := fmt.Sprintf(`wget -T 30 -qO- %v`, path) var srcIP string - By(fmt.Sprintf("Hitting external lb %v from pod %v on node %v", ingressIP, podName, nodeName)) + ginkgo.By(fmt.Sprintf("Hitting external lb %v from pod %v on node %v", ingressIP, podName, nodeName)) if pollErr := wait.PollImmediate(framework.Poll, framework.LoadBalancerCreateTimeoutDefault, func() (bool, error) { stdout, err := framework.RunHostCmd(execPod.Namespace, execPod.Name, cmd) if err != nil { @@ -2047,7 
+2045,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { } }) - It("should handle updates to ExternalTrafficPolicy field", func() { + ginkgo.It("should handle updates to ExternalTrafficPolicy field", func() { namespace := f.Namespace.Name serviceName := "external-local" jig := framework.NewServiceTestJig(cs, serviceName) @@ -2061,13 +2059,13 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc)) defer func() { jig.ChangeServiceType(svc.Namespace, svc.Name, v1.ServiceTypeClusterIP, loadBalancerCreateTimeout) - Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred()) + gomega.Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(gomega.HaveOccurred()) }() // save the health check node port because it disappears when ESIPP is turned off. healthCheckNodePort := int(svc.Spec.HealthCheckNodePort) - By("turning ESIPP off") + ginkgo.By("turning ESIPP off") svc = jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) { svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeCluster }) @@ -2089,14 +2087,14 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { ingressIP := framework.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0]) path := "/clientip" - By(fmt.Sprintf("endpoints present on nodes %v, absent on nodes %v", endpointNodeMap, noEndpointNodeMap)) + ginkgo.By(fmt.Sprintf("endpoints present on nodes %v, absent on nodes %v", endpointNodeMap, noEndpointNodeMap)) for nodeName, nodeIPs := range noEndpointNodeMap { - By(fmt.Sprintf("Checking %v (%v:%v%v) proxies to endpoints on another node", nodeName, nodeIPs[0], svcNodePort, path)) + ginkgo.By(fmt.Sprintf("Checking %v (%v:%v%v) proxies to endpoints on another node", nodeName, nodeIPs[0], svcNodePort, path)) jig.GetHTTPContent(nodeIPs[0], svcNodePort, framework.KubeProxyLagTimeout, path) } for nodeName, nodeIPs := range endpointNodeMap { - By(fmt.Sprintf("checking kube-proxy health check fails on node with endpoint (%s), public IP %s", nodeName, nodeIPs[0])) + ginkgo.By(fmt.Sprintf("checking kube-proxy health check fails on node with endpoint (%s), public IP %s", nodeName, nodeIPs[0])) var body bytes.Buffer pollfn := func() (bool, error) { result := framework.PokeHTTP(nodeIPs[0], healthCheckNodePort, "/healthz", nil) @@ -2114,7 +2112,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { } // Poll till kube-proxy re-adds the MASQUERADE rule on the node. - By(fmt.Sprintf("checking source ip is NOT preserved through loadbalancer %v", ingressIP)) + ginkgo.By(fmt.Sprintf("checking source ip is NOT preserved through loadbalancer %v", ingressIP)) var clientIP string pollErr := wait.PollImmediate(framework.Poll, framework.KubeProxyLagTimeout, func() (bool, error) { content := jig.GetHTTPContent(ingressIP, svcTCPPort, framework.KubeProxyLagTimeout, "/clientip") @@ -2134,7 +2132,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { // If the health check nodePort has NOT been freed, the new service // creation will fail. 
- By("setting ExternalTraffic field back to OnlyLocal") + ginkgo.By("setting ExternalTraffic field back to OnlyLocal") svc = jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) { svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal // Request the same healthCheckNodePort as before, to test the user-requested allocation path @@ -2143,7 +2141,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { pollErr = wait.PollImmediate(framework.Poll, framework.KubeProxyLagTimeout, func() (bool, error) { content := jig.GetHTTPContent(ingressIP, svcTCPPort, framework.KubeProxyLagTimeout, path) clientIP = content.String() - By(fmt.Sprintf("Endpoint %v:%v%v returned client ip %v", ingressIP, svcTCPPort, path, clientIP)) + ginkgo.By(fmt.Sprintf("Endpoint %v:%v%v returned client ip %v", ingressIP, svcTCPPort, path, clientIP)) if !strings.HasPrefix(clientIP, "10.") { return true, nil } @@ -2163,7 +2161,7 @@ func execSourceipTest(f *framework.Framework, c clientset.Interface, ns, nodeNam defer func() { e2elog.Logf("Cleaning up the exec pod") err := c.CoreV1().Pods(ns).Delete(execPodName, nil) - Expect(err).NotTo(HaveOccurred(), "failed to delete pod: %s", execPodName) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete pod: %s", execPodName) }() execPod, err := f.ClientSet.CoreV1().Pods(ns).Get(execPodName, metav1.GetOptions{}) framework.ExpectNoError(err) @@ -2193,7 +2191,7 @@ func execSourceipTest(f *framework.Framework, c clientset.Interface, ns, nodeNam // Desired stdout in this format: client_address=x.x.x.x outputs := strings.Split(strings.TrimSpace(stdout), "=") if len(outputs) != 2 { - // Fail the test if output format is unexpected. + // ginkgo.Fail the test if output format is unexpected. 
framework.Failf("exec pod returned unexpected stdout format: [%v]\n", stdout) } return execPod.Status.PodIP, outputs[1] @@ -2215,49 +2213,49 @@ func execAffinityTestForNonLBService(f *framework.Framework, cs clientset.Interf func execAffinityTestForNonLBServiceWithOptionalTransition(f *framework.Framework, cs clientset.Interface, svc *v1.Service, isTransitionTest bool) { ns := f.Namespace.Name numPods, servicePort, serviceName := 3, defaultServeHostnameServicePort, svc.ObjectMeta.Name - By("creating service in namespace " + ns) + ginkgo.By("creating service in namespace " + ns) serviceType := svc.Spec.Type svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP _, _, err := framework.StartServeHostnameService(cs, svc, ns, numPods) - Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service in the namespace: %s", ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create replication controller with service in the namespace: %s", ns) defer func() { framework.StopServeHostnameService(cs, ns, serviceName) }() jig := framework.NewServiceTestJig(cs, serviceName) svc, err = jig.Client.CoreV1().Services(ns).Get(serviceName, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred(), "failed to fetch service: %s in namespace: %s", serviceName, ns) - var svcIp string + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to fetch service: %s in namespace: %s", serviceName, ns) + var svcIP string if serviceType == v1.ServiceTypeNodePort { nodes := framework.GetReadySchedulableNodesOrDie(cs) addrs := framework.CollectAddresses(nodes, v1.NodeInternalIP) - Expect(len(addrs)).To(BeNumerically(">", 0), "Failed to get Node internal IP") - svcIp = addrs[0] + gomega.Expect(len(addrs)).To(gomega.BeNumerically(">", 0), "ginkgo.Failed to get Node internal IP") + svcIP = addrs[0] servicePort = int(svc.Spec.Ports[0].NodePort) } else { - svcIp = svc.Spec.ClusterIP + svcIP = svc.Spec.ClusterIP } execPodName := framework.CreateExecPodOrFail(cs, ns, "execpod-affinity", nil) defer func() { e2elog.Logf("Cleaning up the exec pod") err := cs.CoreV1().Pods(ns).Delete(execPodName, nil) - Expect(err).NotTo(HaveOccurred(), "failed to delete pod: %s in namespace: %s", execPodName, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete pod: %s in namespace: %s", execPodName, ns) }() execPod, err := cs.CoreV1().Pods(ns).Get(execPodName, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred(), "failed to fetch pod: %s in namespace: %s", execPodName, ns) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to fetch pod: %s in namespace: %s", execPodName, ns) if !isTransitionTest { - Expect(framework.CheckAffinity(jig, execPod, svcIp, servicePort, true)).To(BeTrue()) + gomega.Expect(framework.CheckAffinity(jig, execPod, svcIP, servicePort, true)).To(gomega.BeTrue()) } if isTransitionTest { svc = jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) { svc.Spec.SessionAffinity = v1.ServiceAffinityNone }) - Expect(framework.CheckAffinity(jig, execPod, svcIp, servicePort, false)).To(BeTrue()) + gomega.Expect(framework.CheckAffinity(jig, execPod, svcIP, servicePort, false)).To(gomega.BeTrue()) svc = jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) { svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP }) - Expect(framework.CheckAffinity(jig, execPod, svcIp, servicePort, true)).To(BeTrue()) + gomega.Expect(framework.CheckAffinity(jig, execPod, svcIP, servicePort, true)).To(gomega.BeTrue()) } } @@ -2275,12 +2273,12 @@ func 
@@ -2275,12 +2273,12 @@ func execAffinityTestForLBService(f *framework.Framework, cs clientset.Interface
 func execAffinityTestForLBServiceWithOptionalTransition(f *framework.Framework, cs clientset.Interface, svc *v1.Service, isTransitionTest bool) {
 	numPods, ns, serviceName := 3, f.Namespace.Name, svc.ObjectMeta.Name
-	By("creating service in namespace " + ns)
+	ginkgo.By("creating service in namespace " + ns)
 	svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
 	_, _, err := framework.StartServeHostnameService(cs, svc, ns, numPods)
-	Expect(err).NotTo(HaveOccurred(), "failed to create replication controller with service in the namespace: %s", ns)
+	gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create replication controller with service in the namespace: %s", ns)
 	jig := framework.NewServiceTestJig(cs, serviceName)
-	By("waiting for loadbalancer for service " + ns + "/" + serviceName)
+	ginkgo.By("waiting for loadbalancer for service " + ns + "/" + serviceName)
 	svc = jig.WaitForLoadBalancerOrFail(ns, serviceName, framework.LoadBalancerCreateTimeoutDefault)
 	jig.SanityCheckService(svc, v1.ServiceTypeLoadBalancer)
 	defer func() {
@@ -2295,16 +2293,16 @@ func execAffinityTestForLBServiceWithOptionalTransition(f *framework.Framework,
 	port := int(svc.Spec.Ports[0].Port)

 	if !isTransitionTest {
-		Expect(framework.CheckAffinity(jig, nil, ingressIP, port, true)).To(BeTrue())
+		gomega.Expect(framework.CheckAffinity(jig, nil, ingressIP, port, true)).To(gomega.BeTrue())
 	}
 	if isTransitionTest {
 		svc = jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) {
 			svc.Spec.SessionAffinity = v1.ServiceAffinityNone
 		})
-		Expect(framework.CheckAffinity(jig, nil, ingressIP, port, false)).To(BeTrue())
+		gomega.Expect(framework.CheckAffinity(jig, nil, ingressIP, port, false)).To(gomega.BeTrue())
 		svc = jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) {
 			svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
 		})
-		Expect(framework.CheckAffinity(jig, nil, ingressIP, port, true)).To(BeTrue())
+		gomega.Expect(framework.CheckAffinity(jig, nil, ingressIP, port, true)).To(gomega.BeTrue())
 	}
 }
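Reviewer note: the next two files receive the same mechanical treatment as service.go — the dot-imports of ginkgo/gomega are dropped and every call site is package-qualified. A minimal, self-contained illustration of the target style is sketched below; the package, suite, and spec names are made up for the example and do not appear in this change.

package network_test

import (
	"testing"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

// TestExample bootstraps a throwaway suite so this sketch compiles and runs on its own.
func TestExample(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)
	ginkgo.RunSpecs(t, "qualified-import example")
}

// The spec body uses the package-qualified identifiers this change converges on:
// ginkgo.Describe/It/By and gomega.Expect instead of the dot-imported forms.
var _ = ginkgo.Describe("qualified imports", func() {
	ginkgo.It("calls ginkgo and gomega through their package names", func() {
		ginkgo.By("asserting with an explicit gomega qualifier")
		gomega.Expect(1 + 1).To(gomega.Equal(2))
	})
})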
"github.com/onsi/ginkgo" + "github.com/onsi/ginkgo" ) type durations []time.Duration @@ -161,7 +161,7 @@ func runServiceLatencies(f *framework.Framework, inParallel, total int, acceptab blocker := make(chan struct{}, inParallel) for i := 0; i < total; i++ { go func() { - defer GinkgoRecover() + defer ginkgo.GinkgoRecover() blocker <- struct{}{} defer func() { <-blocker }() if d, err := singleServiceLatency(f, cfg.Name, endpointQueries); err != nil { diff --git a/test/e2e/network/util_iperf.go b/test/e2e/network/util_iperf.go index 05b02dcfe0b..8f47199de0d 100644 --- a/test/e2e/network/util_iperf.go +++ b/test/e2e/network/util_iperf.go @@ -28,6 +28,7 @@ import ( e2elog "k8s.io/kubernetes/test/e2e/framework/log" ) +// IPerfResults is a struct that stores some IPerfResult type IPerfResults struct { BandwidthMap map[string]int64 } @@ -62,8 +63,8 @@ func (i *IPerfResults) ToTSV() string { var buffer bytes.Buffer for node, bandwidth := range i.BandwidthMap { - asJson, _ := json.Marshal(node) - buffer.WriteString("\t " + string(asJson) + "\t " + fmt.Sprintf("%E", float64(bandwidth))) + asJSON, _ := json.Marshal(node) + buffer.WriteString("\t " + string(asJSON) + "\t " + fmt.Sprintf("%E", float64(bandwidth))) } return buffer.String() } @@ -88,6 +89,7 @@ func NewIPerf(csvLine string) *IPerfResult { return &i } +// StrSlice represents a string slice type StrSlice []string func (s StrSlice) get(i int) string {