fix golint failures of test/e2e/network

SataQiu 2019-05-09 23:17:57 +08:00
parent e9af72c6e9
commit 515f8342d3
20 changed files with 858 additions and 859 deletions
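
Almost every hunk below makes the same two changes, which is what golint's "should not use dot imports" check asks for: the dot-imports of Ginkgo and Gomega are replaced with ordinary named imports, and every call site is qualified with its package (By becomes ginkgo.By, It becomes ginkgo.It, Expect/HaveOccurred become gomega.Expect/gomega.HaveOccurred, GinkgoRecover becomes ginkgo.GinkgoRecover). A minimal before/after sketch of that pattern follows; the package name and spec text are placeholders, not code taken from this diff:

package network

import (
	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

// Before this commit the files used dot imports and unqualified calls,
// which golint reports as "should not use dot imports":
//
//	. "github.com/onsi/ginkgo"
//	. "github.com/onsi/gomega"
//	...
//	By("creating a pod to probe DNS")
//	Expect(err).NotTo(HaveOccurred())

var _ = ginkgo.Describe("DNS", func() {
	ginkgo.It("should resolve a name", func() {
		ginkgo.By("creating a pod to probe DNS")
		var err error // stands in for the probe result in the real tests
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
	})
})

With the call sites qualified, the package can be dropped from the golint exception list, which is what the first hunk below does.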


@@ -608,7 +608,6 @@ test/e2e/chaosmonkey
 test/e2e/common
 test/e2e/framework
 test/e2e/lifecycle/bootstrap
-test/e2e/network
 test/e2e/node
 test/e2e/scalability
 test/e2e/scheduling


@@ -27,8 +27,8 @@ import (
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2elog "k8s.io/kubernetes/test/e2e/framework/log"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
) )
const dnsTestPodHostName = "dns-querier-1" const dnsTestPodHostName = "dns-querier-1"
@@ -60,16 +60,16 @@ var _ = SIGDescribe("DNS", func() {
} }
wheezyProbeCmd, wheezyFileNames := createProbeCommand(namesToResolve, nil, "", "wheezy", f.Namespace.Name, framework.TestContext.ClusterDNSDomain) wheezyProbeCmd, wheezyFileNames := createProbeCommand(namesToResolve, nil, "", "wheezy", f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
jessieProbeCmd, jessieFileNames := createProbeCommand(namesToResolve, nil, "", "jessie", f.Namespace.Name, framework.TestContext.ClusterDNSDomain) jessieProbeCmd, jessieFileNames := createProbeCommand(namesToResolve, nil, "", "jessie", f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
By("Running these commands on wheezy: " + wheezyProbeCmd + "\n") ginkgo.By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
By("Running these commands on jessie: " + jessieProbeCmd + "\n") ginkgo.By("Running these commands on jessie: " + jessieProbeCmd + "\n")
// Run a pod which probes DNS and exposes the results by HTTP. // Run a pod which probes DNS and exposes the results by HTTP.
By("creating a pod to probe DNS") ginkgo.By("creating a pod to probe DNS")
pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName)
validateDNSResults(f, pod, append(wheezyFileNames, jessieFileNames...)) validateDNSResults(f, pod, append(wheezyFileNames, jessieFileNames...))
}) })
It("should resolve DNS of partial qualified names for the cluster ", func() { ginkgo.It("should resolve DNS of partial qualified names for the cluster ", func() {
// All the names we need to be able to resolve. // All the names we need to be able to resolve.
// TODO: Spin up a separate test service and test that dns works for that service. // TODO: Spin up a separate test service and test that dns works for that service.
namesToResolve := []string{ namesToResolve := []string{
@@ -89,11 +89,11 @@ var _ = SIGDescribe("DNS", func() {
hostEntries := []string{hostFQDN, dnsTestPodHostName} hostEntries := []string{hostFQDN, dnsTestPodHostName}
wheezyProbeCmd, wheezyFileNames := createProbeCommand(namesToResolve, hostEntries, "", "wheezy", f.Namespace.Name, framework.TestContext.ClusterDNSDomain) wheezyProbeCmd, wheezyFileNames := createProbeCommand(namesToResolve, hostEntries, "", "wheezy", f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
jessieProbeCmd, jessieFileNames := createProbeCommand(namesToResolve, hostEntries, "", "jessie", f.Namespace.Name, framework.TestContext.ClusterDNSDomain) jessieProbeCmd, jessieFileNames := createProbeCommand(namesToResolve, hostEntries, "", "jessie", f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
By("Running these commands on wheezy: " + wheezyProbeCmd + "\n") ginkgo.By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
By("Running these commands on jessie: " + jessieProbeCmd + "\n") ginkgo.By("Running these commands on jessie: " + jessieProbeCmd + "\n")
// Run a pod which probes DNS and exposes the results by HTTP. // Run a pod which probes DNS and exposes the results by HTTP.
By("creating a pod to probe DNS") ginkgo.By("creating a pod to probe DNS")
pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName)
validateDNSResults(f, pod, append(wheezyFileNames, jessieFileNames...)) validateDNSResults(f, pod, append(wheezyFileNames, jessieFileNames...))
}) })
@@ -108,11 +108,11 @@ var _ = SIGDescribe("DNS", func() {
hostEntries := []string{hostFQDN, dnsTestPodHostName} hostEntries := []string{hostFQDN, dnsTestPodHostName}
wheezyProbeCmd, wheezyFileNames := createProbeCommand(nil, hostEntries, "", "wheezy", f.Namespace.Name, framework.TestContext.ClusterDNSDomain) wheezyProbeCmd, wheezyFileNames := createProbeCommand(nil, hostEntries, "", "wheezy", f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
jessieProbeCmd, jessieFileNames := createProbeCommand(nil, hostEntries, "", "jessie", f.Namespace.Name, framework.TestContext.ClusterDNSDomain) jessieProbeCmd, jessieFileNames := createProbeCommand(nil, hostEntries, "", "jessie", f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
By("Running these commands on wheezy: " + wheezyProbeCmd + "\n") ginkgo.By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
By("Running these commands on jessie: " + jessieProbeCmd + "\n") ginkgo.By("Running these commands on jessie: " + jessieProbeCmd + "\n")
// Run a pod which probes /etc/hosts and exposes the results by HTTP. // Run a pod which probes /etc/hosts and exposes the results by HTTP.
By("creating a pod to probe /etc/hosts") ginkgo.By("creating a pod to probe /etc/hosts")
pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName)
validateDNSResults(f, pod, append(wheezyFileNames, jessieFileNames...)) validateDNSResults(f, pod, append(wheezyFileNames, jessieFileNames...))
}) })
@@ -125,27 +125,27 @@ var _ = SIGDescribe("DNS", func() {
framework.ConformanceIt("should provide DNS for services ", func() { framework.ConformanceIt("should provide DNS for services ", func() {
// NOTE: This only contains the FQDN and the Host name, for testing partial name, see the test below // NOTE: This only contains the FQDN and the Host name, for testing partial name, see the test below
// Create a test headless service. // Create a test headless service.
By("Creating a test headless service") ginkgo.By("Creating a test headless service")
testServiceSelector := map[string]string{ testServiceSelector := map[string]string{
"dns-test": "true", "dns-test": "true",
} }
headlessService := framework.CreateServiceSpec(dnsTestServiceName, "", true, testServiceSelector) headlessService := framework.CreateServiceSpec(dnsTestServiceName, "", true, testServiceSelector)
_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(headlessService) _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(headlessService)
Expect(err).NotTo(HaveOccurred(), "failed to create headless service: %s", dnsTestServiceName) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create headless service: %s", dnsTestServiceName)
defer func() { defer func() {
By("deleting the test headless service") ginkgo.By("deleting the test headless service")
defer GinkgoRecover() defer ginkgo.GinkgoRecover()
f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(headlessService.Name, nil) f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(headlessService.Name, nil)
}() }()
regularServiceName := "test-service-2" regularServiceName := "test-service-2"
regularService := framework.CreateServiceSpec(regularServiceName, "", false, testServiceSelector) regularService := framework.CreateServiceSpec(regularServiceName, "", false, testServiceSelector)
regularService, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(regularService) regularService, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(regularService)
Expect(err).NotTo(HaveOccurred(), "failed to create regular service: %s", regularServiceName) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create regular service: %s", regularServiceName)
defer func() { defer func() {
By("deleting the test service") ginkgo.By("deleting the test service")
defer GinkgoRecover() defer ginkgo.GinkgoRecover()
f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(regularService.Name, nil) f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(regularService.Name, nil)
}() }()
@@ -160,39 +160,39 @@ var _ = SIGDescribe("DNS", func() {
wheezyProbeCmd, wheezyFileNames := createProbeCommand(namesToResolve, nil, regularService.Spec.ClusterIP, "wheezy", f.Namespace.Name, framework.TestContext.ClusterDNSDomain) wheezyProbeCmd, wheezyFileNames := createProbeCommand(namesToResolve, nil, regularService.Spec.ClusterIP, "wheezy", f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
jessieProbeCmd, jessieFileNames := createProbeCommand(namesToResolve, nil, regularService.Spec.ClusterIP, "jessie", f.Namespace.Name, framework.TestContext.ClusterDNSDomain) jessieProbeCmd, jessieFileNames := createProbeCommand(namesToResolve, nil, regularService.Spec.ClusterIP, "jessie", f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
By("Running these commands on wheezy: " + wheezyProbeCmd + "\n") ginkgo.By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
By("Running these commands on jessie: " + jessieProbeCmd + "\n") ginkgo.By("Running these commands on jessie: " + jessieProbeCmd + "\n")
// Run a pod which probes DNS and exposes the results by HTTP. // Run a pod which probes DNS and exposes the results by HTTP.
By("creating a pod to probe DNS") ginkgo.By("creating a pod to probe DNS")
pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName)
pod.ObjectMeta.Labels = testServiceSelector pod.ObjectMeta.Labels = testServiceSelector
validateDNSResults(f, pod, append(wheezyFileNames, jessieFileNames...)) validateDNSResults(f, pod, append(wheezyFileNames, jessieFileNames...))
}) })
It("should resolve DNS of partial qualified names for services ", func() { ginkgo.It("should resolve DNS of partial qualified names for services ", func() {
// Create a test headless service. // Create a test headless service.
By("Creating a test headless service") ginkgo.By("Creating a test headless service")
testServiceSelector := map[string]string{ testServiceSelector := map[string]string{
"dns-test": "true", "dns-test": "true",
} }
headlessService := framework.CreateServiceSpec(dnsTestServiceName, "", true, testServiceSelector) headlessService := framework.CreateServiceSpec(dnsTestServiceName, "", true, testServiceSelector)
_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(headlessService) _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(headlessService)
Expect(err).NotTo(HaveOccurred(), "failed to create headless service: %s", dnsTestServiceName) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create headless service: %s", dnsTestServiceName)
defer func() { defer func() {
By("deleting the test headless service") ginkgo.By("deleting the test headless service")
defer GinkgoRecover() defer ginkgo.GinkgoRecover()
f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(headlessService.Name, nil) f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(headlessService.Name, nil)
}() }()
regularServiceName := "test-service-2" regularServiceName := "test-service-2"
regularService := framework.CreateServiceSpec(regularServiceName, "", false, testServiceSelector) regularService := framework.CreateServiceSpec(regularServiceName, "", false, testServiceSelector)
regularService, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(regularService) regularService, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(regularService)
Expect(err).NotTo(HaveOccurred(), "failed to create regular service: %s", regularServiceName) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create regular service: %s", regularServiceName)
defer func() { defer func() {
By("deleting the test service") ginkgo.By("deleting the test service")
defer GinkgoRecover() defer ginkgo.GinkgoRecover()
f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(regularService.Name, nil) f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(regularService.Name, nil)
}() }()
@@ -209,20 +209,20 @@ var _ = SIGDescribe("DNS", func() {
wheezyProbeCmd, wheezyFileNames := createProbeCommand(namesToResolve, nil, regularService.Spec.ClusterIP, "wheezy", f.Namespace.Name, framework.TestContext.ClusterDNSDomain) wheezyProbeCmd, wheezyFileNames := createProbeCommand(namesToResolve, nil, regularService.Spec.ClusterIP, "wheezy", f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
jessieProbeCmd, jessieFileNames := createProbeCommand(namesToResolve, nil, regularService.Spec.ClusterIP, "jessie", f.Namespace.Name, framework.TestContext.ClusterDNSDomain) jessieProbeCmd, jessieFileNames := createProbeCommand(namesToResolve, nil, regularService.Spec.ClusterIP, "jessie", f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
By("Running these commands on wheezy: " + wheezyProbeCmd + "\n") ginkgo.By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
By("Running these commands on jessie: " + jessieProbeCmd + "\n") ginkgo.By("Running these commands on jessie: " + jessieProbeCmd + "\n")
// Run a pod which probes DNS and exposes the results by HTTP. // Run a pod which probes DNS and exposes the results by HTTP.
By("creating a pod to probe DNS") ginkgo.By("creating a pod to probe DNS")
pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName)
pod.ObjectMeta.Labels = testServiceSelector pod.ObjectMeta.Labels = testServiceSelector
validateDNSResults(f, pod, append(wheezyFileNames, jessieFileNames...)) validateDNSResults(f, pod, append(wheezyFileNames, jessieFileNames...))
}) })
It("should provide DNS for pods for Hostname [LinuxOnly]", func() { ginkgo.It("should provide DNS for pods for Hostname [LinuxOnly]", func() {
// Create a test headless service. // Create a test headless service.
By("Creating a test headless service") ginkgo.By("Creating a test headless service")
testServiceSelector := map[string]string{ testServiceSelector := map[string]string{
"dns-test-hostname-attribute": "true", "dns-test-hostname-attribute": "true",
} }
@@ -230,11 +230,11 @@ var _ = SIGDescribe("DNS", func() {
podHostname := "dns-querier-2" podHostname := "dns-querier-2"
headlessService := framework.CreateServiceSpec(serviceName, "", true, testServiceSelector) headlessService := framework.CreateServiceSpec(serviceName, "", true, testServiceSelector)
_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(headlessService) _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(headlessService)
Expect(err).NotTo(HaveOccurred(), "failed to create headless service: %s", serviceName) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create headless service: %s", serviceName)
defer func() { defer func() {
By("deleting the test headless service") ginkgo.By("deleting the test headless service")
defer GinkgoRecover() defer ginkgo.GinkgoRecover()
f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(headlessService.Name, nil) f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(headlessService.Name, nil)
}() }()
@@ -242,11 +242,11 @@ var _ = SIGDescribe("DNS", func() {
hostNames := []string{hostFQDN, podHostname} hostNames := []string{hostFQDN, podHostname}
wheezyProbeCmd, wheezyFileNames := createProbeCommand(nil, hostNames, "", "wheezy", f.Namespace.Name, framework.TestContext.ClusterDNSDomain) wheezyProbeCmd, wheezyFileNames := createProbeCommand(nil, hostNames, "", "wheezy", f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
jessieProbeCmd, jessieFileNames := createProbeCommand(nil, hostNames, "", "jessie", f.Namespace.Name, framework.TestContext.ClusterDNSDomain) jessieProbeCmd, jessieFileNames := createProbeCommand(nil, hostNames, "", "jessie", f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
By("Running these commands on wheezy: " + wheezyProbeCmd + "\n") ginkgo.By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
By("Running these commands on jessie: " + jessieProbeCmd + "\n") ginkgo.By("Running these commands on jessie: " + jessieProbeCmd + "\n")
// Run a pod which probes DNS and exposes the results by HTTP. // Run a pod which probes DNS and exposes the results by HTTP.
By("creating a pod to probe DNS") ginkgo.By("creating a pod to probe DNS")
pod1 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) pod1 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName)
pod1.ObjectMeta.Labels = testServiceSelector pod1.ObjectMeta.Labels = testServiceSelector
pod1.Spec.Hostname = podHostname pod1.Spec.Hostname = podHostname
@@ -255,9 +255,9 @@ var _ = SIGDescribe("DNS", func() {
validateDNSResults(f, pod1, append(wheezyFileNames, jessieFileNames...)) validateDNSResults(f, pod1, append(wheezyFileNames, jessieFileNames...))
}) })
It("should provide DNS for pods for Subdomain", func() { ginkgo.It("should provide DNS for pods for Subdomain", func() {
// Create a test headless service. // Create a test headless service.
By("Creating a test headless service") ginkgo.By("Creating a test headless service")
testServiceSelector := map[string]string{ testServiceSelector := map[string]string{
"dns-test-hostname-attribute": "true", "dns-test-hostname-attribute": "true",
} }
@@ -265,11 +265,11 @@ var _ = SIGDescribe("DNS", func() {
podHostname := "dns-querier-2" podHostname := "dns-querier-2"
headlessService := framework.CreateServiceSpec(serviceName, "", true, testServiceSelector) headlessService := framework.CreateServiceSpec(serviceName, "", true, testServiceSelector)
_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(headlessService) _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(headlessService)
Expect(err).NotTo(HaveOccurred(), "failed to create headless service: %s", serviceName) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create headless service: %s", serviceName)
defer func() { defer func() {
By("deleting the test headless service") ginkgo.By("deleting the test headless service")
defer GinkgoRecover() defer ginkgo.GinkgoRecover()
f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(headlessService.Name, nil) f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(headlessService.Name, nil)
}() }()
@@ -277,11 +277,11 @@ var _ = SIGDescribe("DNS", func() {
namesToResolve := []string{hostFQDN} namesToResolve := []string{hostFQDN}
wheezyProbeCmd, wheezyFileNames := createProbeCommand(namesToResolve, nil, "", "wheezy", f.Namespace.Name, framework.TestContext.ClusterDNSDomain) wheezyProbeCmd, wheezyFileNames := createProbeCommand(namesToResolve, nil, "", "wheezy", f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
jessieProbeCmd, jessieFileNames := createProbeCommand(namesToResolve, nil, "", "jessie", f.Namespace.Name, framework.TestContext.ClusterDNSDomain) jessieProbeCmd, jessieFileNames := createProbeCommand(namesToResolve, nil, "", "jessie", f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
By("Running these commands on wheezy: " + wheezyProbeCmd + "\n") ginkgo.By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
By("Running these commands on jessie: " + jessieProbeCmd + "\n") ginkgo.By("Running these commands on jessie: " + jessieProbeCmd + "\n")
// Run a pod which probes DNS and exposes the results by HTTP. // Run a pod which probes DNS and exposes the results by HTTP.
By("creating a pod to probe DNS") ginkgo.By("creating a pod to probe DNS")
pod1 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) pod1 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName)
pod1.ObjectMeta.Labels = testServiceSelector pod1.ObjectMeta.Labels = testServiceSelector
pod1.Spec.Hostname = podHostname pod1.Spec.Hostname = podHostname
@@ -298,72 +298,72 @@ var _ = SIGDescribe("DNS", func() {
*/ */
framework.ConformanceIt("should provide DNS for ExternalName services", func() { framework.ConformanceIt("should provide DNS for ExternalName services", func() {
// Create a test ExternalName service. // Create a test ExternalName service.
By("Creating a test externalName service") ginkgo.By("Creating a test externalName service")
serviceName := "dns-test-service-3" serviceName := "dns-test-service-3"
externalNameService := framework.CreateServiceSpec(serviceName, "foo.example.com", false, nil) externalNameService := framework.CreateServiceSpec(serviceName, "foo.example.com", false, nil)
_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(externalNameService) _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(externalNameService)
Expect(err).NotTo(HaveOccurred(), "failed to create ExternalName service: %s", serviceName) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create ExternalName service: %s", serviceName)
defer func() { defer func() {
By("deleting the test externalName service") ginkgo.By("deleting the test externalName service")
defer GinkgoRecover() defer ginkgo.GinkgoRecover()
f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(externalNameService.Name, nil) f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(externalNameService.Name, nil)
}() }()
hostFQDN := fmt.Sprintf("%s.%s.svc.%s", serviceName, f.Namespace.Name, framework.TestContext.ClusterDNSDomain) hostFQDN := fmt.Sprintf("%s.%s.svc.%s", serviceName, f.Namespace.Name, framework.TestContext.ClusterDNSDomain)
wheezyProbeCmd, wheezyFileName := createTargetedProbeCommand(hostFQDN, "CNAME", "wheezy") wheezyProbeCmd, wheezyFileName := createTargetedProbeCommand(hostFQDN, "CNAME", "wheezy")
jessieProbeCmd, jessieFileName := createTargetedProbeCommand(hostFQDN, "CNAME", "jessie") jessieProbeCmd, jessieFileName := createTargetedProbeCommand(hostFQDN, "CNAME", "jessie")
By("Running these commands on wheezy: " + wheezyProbeCmd + "\n") ginkgo.By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
By("Running these commands on jessie: " + jessieProbeCmd + "\n") ginkgo.By("Running these commands on jessie: " + jessieProbeCmd + "\n")
// Run a pod which probes DNS and exposes the results by HTTP. // Run a pod which probes DNS and exposes the results by HTTP.
By("creating a pod to probe DNS") ginkgo.By("creating a pod to probe DNS")
pod1 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) pod1 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName)
validateTargetedProbeOutput(f, pod1, []string{wheezyFileName, jessieFileName}, "foo.example.com.") validateTargetedProbeOutput(f, pod1, []string{wheezyFileName, jessieFileName}, "foo.example.com.")
// Test changing the externalName field // Test changing the externalName field
By("changing the externalName to bar.example.com") ginkgo.By("changing the externalName to bar.example.com")
_, err = framework.UpdateService(f.ClientSet, f.Namespace.Name, serviceName, func(s *v1.Service) { _, err = framework.UpdateService(f.ClientSet, f.Namespace.Name, serviceName, func(s *v1.Service) {
s.Spec.ExternalName = "bar.example.com" s.Spec.ExternalName = "bar.example.com"
}) })
Expect(err).NotTo(HaveOccurred(), "failed to change externalName of service: %s", serviceName) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to change externalName of service: %s", serviceName)
wheezyProbeCmd, wheezyFileName = createTargetedProbeCommand(hostFQDN, "CNAME", "wheezy") wheezyProbeCmd, wheezyFileName = createTargetedProbeCommand(hostFQDN, "CNAME", "wheezy")
jessieProbeCmd, jessieFileName = createTargetedProbeCommand(hostFQDN, "CNAME", "jessie") jessieProbeCmd, jessieFileName = createTargetedProbeCommand(hostFQDN, "CNAME", "jessie")
By("Running these commands on wheezy: " + wheezyProbeCmd + "\n") ginkgo.By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
By("Running these commands on jessie: " + jessieProbeCmd + "\n") ginkgo.By("Running these commands on jessie: " + jessieProbeCmd + "\n")
// Run a pod which probes DNS and exposes the results by HTTP. // Run a pod which probes DNS and exposes the results by HTTP.
By("creating a second pod to probe DNS") ginkgo.By("creating a second pod to probe DNS")
pod2 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) pod2 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName)
validateTargetedProbeOutput(f, pod2, []string{wheezyFileName, jessieFileName}, "bar.example.com.") validateTargetedProbeOutput(f, pod2, []string{wheezyFileName, jessieFileName}, "bar.example.com.")
// Test changing type from ExternalName to ClusterIP // Test changing type from ExternalName to ClusterIP
By("changing the service to type=ClusterIP") ginkgo.By("changing the service to type=ClusterIP")
_, err = framework.UpdateService(f.ClientSet, f.Namespace.Name, serviceName, func(s *v1.Service) { _, err = framework.UpdateService(f.ClientSet, f.Namespace.Name, serviceName, func(s *v1.Service) {
s.Spec.Type = v1.ServiceTypeClusterIP s.Spec.Type = v1.ServiceTypeClusterIP
s.Spec.Ports = []v1.ServicePort{ s.Spec.Ports = []v1.ServicePort{
{Port: 80, Name: "http", Protocol: v1.ProtocolTCP}, {Port: 80, Name: "http", Protocol: v1.ProtocolTCP},
} }
}) })
Expect(err).NotTo(HaveOccurred(), "failed to change service type to ClusterIP for service: %s", serviceName) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to change service type to ClusterIP for service: %s", serviceName)
wheezyProbeCmd, wheezyFileName = createTargetedProbeCommand(hostFQDN, "A", "wheezy") wheezyProbeCmd, wheezyFileName = createTargetedProbeCommand(hostFQDN, "A", "wheezy")
jessieProbeCmd, jessieFileName = createTargetedProbeCommand(hostFQDN, "A", "jessie") jessieProbeCmd, jessieFileName = createTargetedProbeCommand(hostFQDN, "A", "jessie")
By("Running these commands on wheezy: " + wheezyProbeCmd + "\n") ginkgo.By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
By("Running these commands on jessie: " + jessieProbeCmd + "\n") ginkgo.By("Running these commands on jessie: " + jessieProbeCmd + "\n")
// Run a pod which probes DNS and exposes the results by HTTP. // Run a pod which probes DNS and exposes the results by HTTP.
By("creating a third pod to probe DNS") ginkgo.By("creating a third pod to probe DNS")
pod3 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName) pod3 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName)
svc, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Get(externalNameService.Name, metav1.GetOptions{}) svc, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Get(externalNameService.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred(), "failed to get service: %s", externalNameService.Name) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get service: %s", externalNameService.Name)
validateTargetedProbeOutput(f, pod3, []string{wheezyFileName, jessieFileName}, svc.Spec.ClusterIP) validateTargetedProbeOutput(f, pod3, []string{wheezyFileName, jessieFileName}, svc.Spec.ClusterIP)
}) })
It("should support configurable pod DNS nameservers", func() { ginkgo.It("should support configurable pod DNS nameservers", func() {
By("Creating a pod with dnsPolicy=None and customized dnsConfig...") ginkgo.By("Creating a pod with dnsPolicy=None and customized dnsConfig...")
testServerIP := "1.1.1.1" testServerIP := "1.1.1.1"
testSearchPath := "resolv.conf.local" testSearchPath := "resolv.conf.local"
testAgnhostPod := f.NewAgnhostPod(f.Namespace.Name, "pause") testAgnhostPod := f.NewAgnhostPod(f.Namespace.Name, "pause")
@@ -373,15 +373,15 @@ var _ = SIGDescribe("DNS", func() {
Searches: []string{testSearchPath}, Searches: []string{testSearchPath},
} }
testAgnhostPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(testAgnhostPod) testAgnhostPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(testAgnhostPod)
Expect(err).NotTo(HaveOccurred(), "failed to create pod: %s", testAgnhostPod.Name) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create pod: %s", testAgnhostPod.Name)
framework.Logf("Created pod %v", testAgnhostPod) framework.Logf("Created pod %v", testAgnhostPod)
defer func() { defer func() {
framework.Logf("Deleting pod %s...", testAgnhostPod.Name) framework.Logf("Deleting pod %s...", testAgnhostPod.Name)
if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(testAgnhostPod.Name, metav1.NewDeleteOptions(0)); err != nil { if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(testAgnhostPod.Name, metav1.NewDeleteOptions(0)); err != nil {
framework.Failf("Failed to delete pod %s: %v", testAgnhostPod.Name, err) framework.Failf("ginkgo.Failed to delete pod %s: %v", testAgnhostPod.Name, err)
} }
}() }()
Expect(f.WaitForPodRunning(testAgnhostPod.Name)).NotTo(HaveOccurred(), "failed to wait for pod %s to be running", testAgnhostPod.Name) gomega.Expect(f.WaitForPodRunning(testAgnhostPod.Name)).NotTo(gomega.HaveOccurred(), "failed to wait for pod %s to be running", testAgnhostPod.Name)
runCommand := func(arg string) string { runCommand := func(arg string) string {
cmd := []string{"/agnhost", arg} cmd := []string{"/agnhost", arg}
@@ -393,25 +393,25 @@ var _ = SIGDescribe("DNS", func() {
CaptureStdout: true, CaptureStdout: true,
CaptureStderr: true, CaptureStderr: true,
}) })
Expect(err).NotTo(HaveOccurred(), "failed to run command '/agnhost %s' on pod, stdout: %v, stderr: %v, err: %v", arg, stdout, stderr, err) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to run command '/agnhost %s' on pod, stdout: %v, stderr: %v, err: %v", arg, stdout, stderr, err)
return stdout return stdout
} }
By("Verifying customized DNS suffix list is configured on pod...") ginkgo.By("Verifying customized DNS suffix list is configured on pod...")
stdout := runCommand("dns-suffix") stdout := runCommand("dns-suffix")
if !strings.Contains(stdout, testSearchPath) { if !strings.Contains(stdout, testSearchPath) {
framework.Failf("customized DNS suffix list not found configured in pod, expected to contain: %s, got: %s", testSearchPath, stdout) framework.Failf("customized DNS suffix list not found configured in pod, expected to contain: %s, got: %s", testSearchPath, stdout)
} }
By("Verifying customized DNS server is configured on pod...") ginkgo.By("Verifying customized DNS server is configured on pod...")
stdout = runCommand("dns-server-list") stdout = runCommand("dns-server-list")
if !strings.Contains(stdout, testServerIP) { if !strings.Contains(stdout, testServerIP) {
framework.Failf("customized DNS server not found in configured in pod, expected to contain: %s, got: %s", testServerIP, stdout) framework.Failf("customized DNS server not found in configured in pod, expected to contain: %s, got: %s", testServerIP, stdout)
} }
}) })
It("should support configurable pod resolv.conf", func() { ginkgo.It("should support configurable pod resolv.conf", func() {
By("Preparing a test DNS service with injected DNS names...") ginkgo.By("Preparing a test DNS service with injected DNS names...")
testInjectedIP := "1.1.1.1" testInjectedIP := "1.1.1.1"
testDNSNameShort := "notexistname" testDNSNameShort := "notexistname"
testSearchPath := "resolv.conf.local" testSearchPath := "resolv.conf.local"
@@ -421,23 +421,23 @@ var _ = SIGDescribe("DNS", func() {
testDNSNameFull: testInjectedIP, testDNSNameFull: testInjectedIP,
}) })
testServerPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(testServerPod) testServerPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(testServerPod)
Expect(err).NotTo(HaveOccurred(), "failed to create pod: %s", testServerPod.Name) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create pod: %s", testServerPod.Name)
e2elog.Logf("Created pod %v", testServerPod) e2elog.Logf("Created pod %v", testServerPod)
defer func() { defer func() {
e2elog.Logf("Deleting pod %s...", testServerPod.Name) e2elog.Logf("Deleting pod %s...", testServerPod.Name)
if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(testServerPod.Name, metav1.NewDeleteOptions(0)); err != nil { if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(testServerPod.Name, metav1.NewDeleteOptions(0)); err != nil {
framework.Failf("Failed to delete pod %s: %v", testServerPod.Name, err) framework.Failf("ginkgo.Failed to delete pod %s: %v", testServerPod.Name, err)
} }
}() }()
Expect(f.WaitForPodRunning(testServerPod.Name)).NotTo(HaveOccurred(), "failed to wait for pod %s to be running", testServerPod.Name) gomega.Expect(f.WaitForPodRunning(testServerPod.Name)).NotTo(gomega.HaveOccurred(), "failed to wait for pod %s to be running", testServerPod.Name)
// Retrieve server pod IP. // Retrieve server pod IP.
testServerPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(testServerPod.Name, metav1.GetOptions{}) testServerPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(testServerPod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred(), "failed to get pod %v", testServerPod.Name) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get pod %v", testServerPod.Name)
testServerIP := testServerPod.Status.PodIP testServerIP := testServerPod.Status.PodIP
e2elog.Logf("testServerIP is %s", testServerIP) e2elog.Logf("testServerIP is %s", testServerIP)
By("Creating a pod with dnsPolicy=None and customized dnsConfig...") ginkgo.By("Creating a pod with dnsPolicy=None and customized dnsConfig...")
testUtilsPod := generateDNSUtilsPod() testUtilsPod := generateDNSUtilsPod()
testUtilsPod.Spec.DNSPolicy = v1.DNSNone testUtilsPod.Spec.DNSPolicy = v1.DNSNone
testNdotsValue := "2" testNdotsValue := "2"
@@ -452,17 +452,17 @@ var _ = SIGDescribe("DNS", func() {
}, },
} }
testUtilsPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(testUtilsPod) testUtilsPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(testUtilsPod)
Expect(err).NotTo(HaveOccurred(), "failed to create pod: %s", testUtilsPod.Name) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create pod: %s", testUtilsPod.Name)
e2elog.Logf("Created pod %v", testUtilsPod) e2elog.Logf("Created pod %v", testUtilsPod)
defer func() { defer func() {
e2elog.Logf("Deleting pod %s...", testUtilsPod.Name) e2elog.Logf("Deleting pod %s...", testUtilsPod.Name)
if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(testUtilsPod.Name, metav1.NewDeleteOptions(0)); err != nil { if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(testUtilsPod.Name, metav1.NewDeleteOptions(0)); err != nil {
framework.Failf("Failed to delete pod %s: %v", testUtilsPod.Name, err) framework.Failf("ginkgo.Failed to delete pod %s: %v", testUtilsPod.Name, err)
} }
}() }()
Expect(f.WaitForPodRunning(testUtilsPod.Name)).NotTo(HaveOccurred(), "failed to wait for pod %s to be running", testUtilsPod.Name) gomega.Expect(f.WaitForPodRunning(testUtilsPod.Name)).NotTo(gomega.HaveOccurred(), "failed to wait for pod %s to be running", testUtilsPod.Name)
By("Verifying customized DNS option is configured on pod...") ginkgo.By("Verifying customized DNS option is configured on pod...")
// TODO: Figure out a better way other than checking the actual resolv,conf file. // TODO: Figure out a better way other than checking the actual resolv,conf file.
cmd := []string{"cat", "/etc/resolv.conf"} cmd := []string{"cat", "/etc/resolv.conf"}
stdout, stderr, err := f.ExecWithOptions(framework.ExecOptions{ stdout, stderr, err := f.ExecWithOptions(framework.ExecOptions{
@@ -473,12 +473,12 @@ var _ = SIGDescribe("DNS", func() {
CaptureStdout: true, CaptureStdout: true,
CaptureStderr: true, CaptureStderr: true,
}) })
Expect(err).NotTo(HaveOccurred(), "failed to examine resolv,conf file on pod, stdout: %v, stderr: %v, err: %v", stdout, stderr, err) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to examine resolv,conf file on pod, stdout: %v, stderr: %v, err: %v", stdout, stderr, err)
if !strings.Contains(stdout, "ndots:2") { if !strings.Contains(stdout, "ndots:2") {
framework.Failf("customized DNS options not found in resolv.conf, got: %s", stdout) framework.Failf("customized DNS options not found in resolv.conf, got: %s", stdout)
} }
By("Verifying customized name server and search path are working...") ginkgo.By("Verifying customized name server and search path are working...")
// Do dig on not-exist-dns-name and see if the injected DNS record is returned. // Do dig on not-exist-dns-name and see if the injected DNS record is returned.
// This verifies both: // This verifies both:
// - Custom search path is appended. // - Custom search path is appended.
@@ -494,7 +494,7 @@ var _ = SIGDescribe("DNS", func() {
CaptureStderr: true, CaptureStderr: true,
}) })
if err != nil { if err != nil {
e2elog.Logf("Failed to execute dig command, stdout:%v, stderr: %v, err: %v", stdout, stderr, err) e2elog.Logf("ginkgo.Failed to execute dig command, stdout:%v, stderr: %v, err: %v", stdout, stderr, err)
return false, nil return false, nil
} }
res := strings.Split(stdout, "\n") res := strings.Split(stdout, "\n")
@@ -505,7 +505,7 @@ var _ = SIGDescribe("DNS", func() {
return true, nil return true, nil
} }
err = wait.PollImmediate(5*time.Second, 3*time.Minute, digFunc) err = wait.PollImmediate(5*time.Second, 3*time.Minute, digFunc)
Expect(err).NotTo(HaveOccurred(), "failed to verify customized name server and search path") gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to verify customized name server and search path")
// TODO: Add more test cases for other DNSPolicies. // TODO: Add more test cases for other DNSPolicies.
}) })


@@ -35,8 +35,8 @@ import (
e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2elog "k8s.io/kubernetes/test/e2e/framework/log"
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
) )
type dnsTestCommon struct { type dnsTestCommon struct {
@@ -62,14 +62,14 @@ func newDNSTestCommon() dnsTestCommon {
} }
func (t *dnsTestCommon) init() { func (t *dnsTestCommon) init() {
By("Finding a DNS pod") ginkgo.By("Finding a DNS pod")
label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": "kube-dns"})) label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": "kube-dns"}))
options := metav1.ListOptions{LabelSelector: label.String()} options := metav1.ListOptions{LabelSelector: label.String()}
namespace := "kube-system" namespace := "kube-system"
pods, err := t.f.ClientSet.CoreV1().Pods(namespace).List(options) pods, err := t.f.ClientSet.CoreV1().Pods(namespace).List(options)
Expect(err).NotTo(HaveOccurred(), "failed to list pods in namespace: %s", namespace) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to list pods in namespace: %s", namespace)
Expect(len(pods.Items)).Should(BeNumerically(">=", 1)) gomega.Expect(len(pods.Items)).Should(gomega.BeNumerically(">=", 1))
t.dnsPod = &pods.Items[0] t.dnsPod = &pods.Items[0]
e2elog.Logf("Using DNS pod: %v", t.dnsPod.Name) e2elog.Logf("Using DNS pod: %v", t.dnsPod.Name)
@@ -157,23 +157,23 @@ func (t *dnsTestCommon) setConfigMap(cm *v1.ConfigMap) {
}.AsSelector().String(), }.AsSelector().String(),
} }
cmList, err := t.c.CoreV1().ConfigMaps(t.ns).List(options) cmList, err := t.c.CoreV1().ConfigMaps(t.ns).List(options)
Expect(err).NotTo(HaveOccurred(), "failed to list ConfigMaps in namespace: %s", t.ns) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to list ConfigMaps in namespace: %s", t.ns)
if len(cmList.Items) == 0 { if len(cmList.Items) == 0 {
By(fmt.Sprintf("Creating the ConfigMap (%s:%s) %+v", t.ns, t.name, *cm)) ginkgo.By(fmt.Sprintf("Creating the ConfigMap (%s:%s) %+v", t.ns, t.name, *cm))
_, err := t.c.CoreV1().ConfigMaps(t.ns).Create(cm) _, err := t.c.CoreV1().ConfigMaps(t.ns).Create(cm)
Expect(err).NotTo(HaveOccurred(), "failed to create ConfigMap (%s:%s) %+v", t.ns, t.name, *cm) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create ConfigMap (%s:%s) %+v", t.ns, t.name, *cm)
} else { } else {
By(fmt.Sprintf("Updating the ConfigMap (%s:%s) to %+v", t.ns, t.name, *cm)) ginkgo.By(fmt.Sprintf("Updating the ConfigMap (%s:%s) to %+v", t.ns, t.name, *cm))
_, err := t.c.CoreV1().ConfigMaps(t.ns).Update(cm) _, err := t.c.CoreV1().ConfigMaps(t.ns).Update(cm)
Expect(err).NotTo(HaveOccurred(), "failed to update ConfigMap (%s:%s) to %+v", t.ns, t.name, *cm) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to update ConfigMap (%s:%s) to %+v", t.ns, t.name, *cm)
} }
} }
func (t *dnsTestCommon) fetchDNSConfigMapData() map[string]string { func (t *dnsTestCommon) fetchDNSConfigMapData() map[string]string {
if t.name == "coredns" { if t.name == "coredns" {
pcm, err := t.c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(t.name, metav1.GetOptions{}) pcm, err := t.c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(t.name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred(), "failed to get DNS ConfigMap: %s", t.name) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get DNS ConfigMap: %s", t.name)
return pcm.Data return pcm.Data
} }
return nil return nil
@@ -189,10 +189,10 @@ func (t *dnsTestCommon) restoreDNSConfigMap(configMapData map[string]string) {
} }
func (t *dnsTestCommon) deleteConfigMap() { func (t *dnsTestCommon) deleteConfigMap() {
By(fmt.Sprintf("Deleting the ConfigMap (%s:%s)", t.ns, t.name)) ginkgo.By(fmt.Sprintf("Deleting the ConfigMap (%s:%s)", t.ns, t.name))
t.cm = nil t.cm = nil
err := t.c.CoreV1().ConfigMaps(t.ns).Delete(t.name, nil) err := t.c.CoreV1().ConfigMaps(t.ns).Delete(t.name, nil)
Expect(err).NotTo(HaveOccurred(), "failed to delete config map: %s", t.name) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete config map: %s", t.name)
} }
func (t *dnsTestCommon) createUtilPodLabel(baseName string) { func (t *dnsTestCommon) createUtilPodLabel(baseName string) {
@@ -224,9 +224,9 @@ func (t *dnsTestCommon) createUtilPodLabel(baseName string) {
var err error var err error
t.utilPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Create(t.utilPod) t.utilPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Create(t.utilPod)
Expect(err).NotTo(HaveOccurred(), "failed to create pod: %v", t.utilPod) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create pod: %v", t.utilPod)
e2elog.Logf("Created pod %v", t.utilPod) e2elog.Logf("Created pod %v", t.utilPod)
Expect(t.f.WaitForPodRunning(t.utilPod.Name)).NotTo(HaveOccurred(), "pod failed to start running: %v", t.utilPod) gomega.Expect(t.f.WaitForPodRunning(t.utilPod.Name)).NotTo(gomega.HaveOccurred(), "pod failed to start running: %v", t.utilPod)
t.utilService = &v1.Service{ t.utilService = &v1.Service{
TypeMeta: metav1.TypeMeta{ TypeMeta: metav1.TypeMeta{
@@ -249,7 +249,7 @@ func (t *dnsTestCommon) createUtilPodLabel(baseName string) {
} }
t.utilService, err = t.c.CoreV1().Services(t.f.Namespace.Name).Create(t.utilService) t.utilService, err = t.c.CoreV1().Services(t.f.Namespace.Name).Create(t.utilService)
Expect(err).NotTo(HaveOccurred(), "failed to create service: %s/%s", t.f.Namespace.Name, t.utilService.ObjectMeta.Name) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create service: %s/%s", t.f.Namespace.Name, t.utilService.ObjectMeta.Name)
e2elog.Logf("Created service %v", t.utilService) e2elog.Logf("Created service %v", t.utilService)
} }
@@ -272,7 +272,7 @@ func (t *dnsTestCommon) deleteCoreDNSPods() {
for _, pod := range pods.Items { for _, pod := range pods.Items {
err = podClient.Delete(pod.Name, metav1.NewDeleteOptions(0)) err = podClient.Delete(pod.Name, metav1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred(), "failed to delete pod: %s", pod.Name) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete pod: %s", pod.Name)
} }
} }
@@ -315,13 +315,13 @@ func (t *dnsTestCommon) createDNSPodFromObj(pod *v1.Pod) {
var err error var err error
t.dnsServerPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Create(t.dnsServerPod) t.dnsServerPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Create(t.dnsServerPod)
Expect(err).NotTo(HaveOccurred(), "failed to create pod: %v", t.dnsServerPod) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create pod: %v", t.dnsServerPod)
e2elog.Logf("Created pod %v", t.dnsServerPod) e2elog.Logf("Created pod %v", t.dnsServerPod)
Expect(t.f.WaitForPodRunning(t.dnsServerPod.Name)).NotTo(HaveOccurred(), "pod failed to start running: %v", t.dnsServerPod) gomega.Expect(t.f.WaitForPodRunning(t.dnsServerPod.Name)).NotTo(gomega.HaveOccurred(), "pod failed to start running: %v", t.dnsServerPod)
t.dnsServerPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Get( t.dnsServerPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Get(
t.dnsServerPod.Name, metav1.GetOptions{}) t.dnsServerPod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred(), "failed to get pod: %s", t.dnsServerPod.Name) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get pod: %s", t.dnsServerPod.Name)
} }
func (t *dnsTestCommon) createDNSServer(aRecords map[string]string) { func (t *dnsTestCommon) createDNSServer(aRecords map[string]string) {
@@ -539,30 +539,30 @@ func assertFilesContain(fileNames []string, fileDir string, pod *v1.Pod, client
e2elog.Logf("Lookups using %s/%s failed for: %v\n", pod.Namespace, pod.Name, failed) e2elog.Logf("Lookups using %s/%s failed for: %v\n", pod.Namespace, pod.Name, failed)
return false, nil return false, nil
})) }))
Expect(len(failed)).To(Equal(0)) gomega.Expect(len(failed)).To(gomega.Equal(0))
} }
func validateDNSResults(f *framework.Framework, pod *v1.Pod, fileNames []string) { func validateDNSResults(f *framework.Framework, pod *v1.Pod, fileNames []string) {
By("submitting the pod to kubernetes") ginkgo.By("submitting the pod to kubernetes")
podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name) podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name)
defer func() { defer func() {
By("deleting the pod") ginkgo.By("deleting the pod")
defer GinkgoRecover() defer ginkgo.GinkgoRecover()
podClient.Delete(pod.Name, metav1.NewDeleteOptions(0)) podClient.Delete(pod.Name, metav1.NewDeleteOptions(0))
}() }()
if _, err := podClient.Create(pod); err != nil { if _, err := podClient.Create(pod); err != nil {
framework.Failf("Failed to create pod %s/%s: %v", pod.Namespace, pod.Name, err) framework.Failf("ginkgo.Failed to create pod %s/%s: %v", pod.Namespace, pod.Name, err)
} }
framework.ExpectNoError(f.WaitForPodRunning(pod.Name)) framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
By("retrieving the pod") ginkgo.By("retrieving the pod")
pod, err := podClient.Get(pod.Name, metav1.GetOptions{}) pod, err := podClient.Get(pod.Name, metav1.GetOptions{})
if err != nil { if err != nil {
framework.Failf("Failed to get pod %s/%s: %v", pod.Namespace, pod.Name, err) framework.Failf("ginkgo.Failed to get pod %s/%s: %v", pod.Namespace, pod.Name, err)
} }
// Try to find results for each expected name. // Try to find results for each expected name.
By("looking for the results for each expected name from probers") ginkgo.By("looking for the results for each expected name from probers")
assertFilesExist(fileNames, "results", pod, f.ClientSet) assertFilesExist(fileNames, "results", pod, f.ClientSet)
// TODO: probe from the host, too. // TODO: probe from the host, too.
@@ -571,26 +571,26 @@ func validateDNSResults(f *framework.Framework, pod *v1.Pod, fileNames []string)
} }
func validateTargetedProbeOutput(f *framework.Framework, pod *v1.Pod, fileNames []string, value string) { func validateTargetedProbeOutput(f *framework.Framework, pod *v1.Pod, fileNames []string, value string) {
By("submitting the pod to kubernetes") ginkgo.By("submitting the pod to kubernetes")
podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name) podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name)
defer func() { defer func() {
By("deleting the pod") ginkgo.By("deleting the pod")
defer GinkgoRecover() defer ginkgo.GinkgoRecover()
podClient.Delete(pod.Name, metav1.NewDeleteOptions(0)) podClient.Delete(pod.Name, metav1.NewDeleteOptions(0))
}() }()
if _, err := podClient.Create(pod); err != nil { if _, err := podClient.Create(pod); err != nil {
framework.Failf("Failed to create pod %s/%s: %v", pod.Namespace, pod.Name, err) framework.Failf("ginkgo.Failed to create pod %s/%s: %v", pod.Namespace, pod.Name, err)
} }
framework.ExpectNoError(f.WaitForPodRunning(pod.Name)) framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
By("retrieving the pod") ginkgo.By("retrieving the pod")
pod, err := podClient.Get(pod.Name, metav1.GetOptions{}) pod, err := podClient.Get(pod.Name, metav1.GetOptions{})
if err != nil { if err != nil {
framework.Failf("Failed to get pod %s/%s: %v", pod.Namespace, pod.Name, err) framework.Failf("ginkgo.Failed to get pod %s/%s: %v", pod.Namespace, pod.Name, err)
} }
// Try to find the expected value for each expected name. // Try to find the expected value for each expected name.
By("looking for the results for each expected name from probers") ginkgo.By("looking for the results for each expected name from probers")
assertFilesContain(fileNames, "results", pod, f.ClientSet, true, value) assertFilesContain(fileNames, "results", pod, f.ClientSet, true, value)
e2elog.Logf("DNS probes using %s succeeded\n", pod.Name) e2elog.Logf("DNS probes using %s succeeded\n", pod.Name)


@@ -24,7 +24,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
) )
type dnsFederationsConfigMapTest struct { type dnsFederationsConfigMapTest struct {
@@ -45,7 +45,7 @@ var _ = SIGDescribe("DNS configMap federations [Feature:Federation]", func() {
t := &dnsFederationsConfigMapTest{dnsTestCommon: newDNSTestCommon()} t := &dnsFederationsConfigMapTest{dnsTestCommon: newDNSTestCommon()}
It("should be able to change federation configuration [Slow][Serial]", func() { ginkgo.It("should be able to change federation configuration [Slow][Serial]", func() {
t.c = t.f.ClientSet t.c = t.f.ClientSet
t.run() t.run()
}) })
@@ -96,17 +96,17 @@ func (t *dnsFederationsConfigMapTest) run() {
}`, framework.TestContext.ClusterDNSDomain, framework.TestContext.ClusterDNSDomain)} }`, framework.TestContext.ClusterDNSDomain, framework.TestContext.ClusterDNSDomain)}
valid2m := map[string]string{t.labels[1]: "xyz.com"} valid2m := map[string]string{t.labels[1]: "xyz.com"}
By("default -> valid1") ginkgo.By("default -> valid1")
t.setConfigMap(&v1.ConfigMap{Data: valid1}, valid1m, true) t.setConfigMap(&v1.ConfigMap{Data: valid1}, valid1m, true)
t.deleteCoreDNSPods() t.deleteCoreDNSPods()
t.validate(framework.TestContext.ClusterDNSDomain) t.validate(framework.TestContext.ClusterDNSDomain)
By("valid1 -> valid2") ginkgo.By("valid1 -> valid2")
t.setConfigMap(&v1.ConfigMap{Data: valid2}, valid2m, true) t.setConfigMap(&v1.ConfigMap{Data: valid2}, valid2m, true)
t.deleteCoreDNSPods() t.deleteCoreDNSPods()
t.validate(framework.TestContext.ClusterDNSDomain) t.validate(framework.TestContext.ClusterDNSDomain)
By("valid2 -> default") ginkgo.By("valid2 -> default")
t.setConfigMap(&v1.ConfigMap{Data: originalConfigMapData}, nil, false) t.setConfigMap(&v1.ConfigMap{Data: originalConfigMapData}, nil, false)
t.deleteCoreDNSPods() t.deleteCoreDNSPods()
t.validate(framework.TestContext.ClusterDNSDomain) t.validate(framework.TestContext.ClusterDNSDomain)
@@ -121,27 +121,27 @@ func (t *dnsFederationsConfigMapTest) run() {
valid2m := map[string]string{t.labels[1]: "xyz"} valid2m := map[string]string{t.labels[1]: "xyz"}
invalid := map[string]string{"federations": "invalid.map=xyz"} invalid := map[string]string{"federations": "invalid.map=xyz"}
By("empty -> valid1") ginkgo.By("empty -> valid1")
t.setConfigMap(&v1.ConfigMap{Data: valid1}, valid1m, true) t.setConfigMap(&v1.ConfigMap{Data: valid1}, valid1m, true)
t.validate(framework.TestContext.ClusterDNSDomain) t.validate(framework.TestContext.ClusterDNSDomain)
By("valid1 -> valid2") ginkgo.By("valid1 -> valid2")
t.setConfigMap(&v1.ConfigMap{Data: valid2}, valid2m, true) t.setConfigMap(&v1.ConfigMap{Data: valid2}, valid2m, true)
t.validate(framework.TestContext.ClusterDNSDomain) t.validate(framework.TestContext.ClusterDNSDomain)
By("valid2 -> invalid") ginkgo.By("valid2 -> invalid")
t.setConfigMap(&v1.ConfigMap{Data: invalid}, nil, false) t.setConfigMap(&v1.ConfigMap{Data: invalid}, nil, false)
t.validate(framework.TestContext.ClusterDNSDomain) t.validate(framework.TestContext.ClusterDNSDomain)
By("invalid -> valid1") ginkgo.By("invalid -> valid1")
t.setConfigMap(&v1.ConfigMap{Data: valid1}, valid1m, true) t.setConfigMap(&v1.ConfigMap{Data: valid1}, valid1m, true)
t.validate(framework.TestContext.ClusterDNSDomain) t.validate(framework.TestContext.ClusterDNSDomain)
By("valid1 -> deleted") ginkgo.By("valid1 -> deleted")
t.deleteConfigMap() t.deleteConfigMap()
t.validate(framework.TestContext.ClusterDNSDomain) t.validate(framework.TestContext.ClusterDNSDomain)
By("deleted -> invalid") ginkgo.By("deleted -> invalid")
t.setConfigMap(&v1.ConfigMap{Data: invalid}, nil, false) t.setConfigMap(&v1.ConfigMap{Data: invalid}, nil, false)
t.validate(framework.TestContext.ClusterDNSDomain) t.validate(framework.TestContext.ClusterDNSDomain)
} }
@@ -151,7 +151,7 @@ func (t *dnsFederationsConfigMapTest) validate(dnsDomain string) {
federations := t.fedMap federations := t.fedMap
if len(federations) == 0 { if len(federations) == 0 {
By(fmt.Sprintf("Validating federation labels %v do not exist", t.labels)) ginkgo.By(fmt.Sprintf("Validating federation labels %v do not exist", t.labels))
for _, label := range t.labels { for _, label := range t.labels {
var federationDNS = fmt.Sprintf("e2e-dns-configmap.%s.%s.svc.%s.", var federationDNS = fmt.Sprintf("e2e-dns-configmap.%s.%s.svc.%s.",
@@ -173,7 +173,7 @@ func (t *dnsFederationsConfigMapTest) validate(dnsDomain string) {
// Check local mapping. Checking a remote mapping requires // Check local mapping. Checking a remote mapping requires
// creating an arbitrary DNS record which is not possible at the // creating an arbitrary DNS record which is not possible at the
// moment. // moment.
By(fmt.Sprintf("Validating federation record %v", label)) ginkgo.By(fmt.Sprintf("Validating federation record %v", label))
predicate := func(actual []string) bool { predicate := func(actual []string) bool {
for _, v := range actual { for _, v := range actual {
if v == localDNS { if v == localDNS {
@@ -407,16 +407,16 @@ func (t *dnsExternalNameTest) run(isIPv6 bool) {
serviceName := "dns-externalname-upstream-test" serviceName := "dns-externalname-upstream-test"
externalNameService := framework.CreateServiceSpec(serviceName, googleDNSHostname, false, nil) externalNameService := framework.CreateServiceSpec(serviceName, googleDNSHostname, false, nil)
if _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(externalNameService); err != nil { if _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(externalNameService); err != nil {
Fail(fmt.Sprintf("Failed when creating service: %v", err)) ginkgo.Fail(fmt.Sprintf("ginkgo.Failed when creating service: %v", err))
} }
serviceNameLocal := "dns-externalname-upstream-local" serviceNameLocal := "dns-externalname-upstream-local"
externalNameServiceLocal := framework.CreateServiceSpec(serviceNameLocal, fooHostname, false, nil) externalNameServiceLocal := framework.CreateServiceSpec(serviceNameLocal, fooHostname, false, nil)
if _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(externalNameServiceLocal); err != nil { if _, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(externalNameServiceLocal); err != nil {
Fail(fmt.Sprintf("Failed when creating service: %v", err)) ginkgo.Fail(fmt.Sprintf("ginkgo.Failed when creating service: %v", err))
} }
defer func() { defer func() {
By("deleting the test externalName service") ginkgo.By("deleting the test externalName service")
defer GinkgoRecover() defer ginkgo.GinkgoRecover()
f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(externalNameService.Name, nil) f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(externalNameService.Name, nil)
f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(externalNameServiceLocal.Name, nil) f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(externalNameServiceLocal.Name, nil)
}() }()
@ -482,28 +482,28 @@ func (t *dnsExternalNameTest) run(isIPv6 bool) {
var _ = SIGDescribe("DNS configMap nameserver [IPv4]", func() { var _ = SIGDescribe("DNS configMap nameserver [IPv4]", func() {
Context("Change stubDomain", func() { ginkgo.Context("Change stubDomain", func() {
nsTest := &dnsNameserverTest{dnsTestCommon: newDNSTestCommon()} nsTest := &dnsNameserverTest{dnsTestCommon: newDNSTestCommon()}
It("should be able to change stubDomain configuration [Slow][Serial]", func() { ginkgo.It("should be able to change stubDomain configuration [Slow][Serial]", func() {
nsTest.c = nsTest.f.ClientSet nsTest.c = nsTest.f.ClientSet
nsTest.run(false) nsTest.run(false)
}) })
}) })
Context("Forward PTR lookup", func() { ginkgo.Context("Forward PTR lookup", func() {
fwdTest := &dnsPtrFwdTest{dnsTestCommon: newDNSTestCommon()} fwdTest := &dnsPtrFwdTest{dnsTestCommon: newDNSTestCommon()}
It("should forward PTR records lookup to upstream nameserver [Slow][Serial]", func() { ginkgo.It("should forward PTR records lookup to upstream nameserver [Slow][Serial]", func() {
fwdTest.c = fwdTest.f.ClientSet fwdTest.c = fwdTest.f.ClientSet
fwdTest.run(false) fwdTest.run(false)
}) })
}) })
Context("Forward external name lookup", func() { ginkgo.Context("Forward external name lookup", func() {
externalNameTest := &dnsExternalNameTest{dnsTestCommon: newDNSTestCommon()} externalNameTest := &dnsExternalNameTest{dnsTestCommon: newDNSTestCommon()}
It("should forward externalname lookup to upstream nameserver [Slow][Serial]", func() { ginkgo.It("should forward externalname lookup to upstream nameserver [Slow][Serial]", func() {
externalNameTest.c = externalNameTest.f.ClientSet externalNameTest.c = externalNameTest.f.ClientSet
externalNameTest.run(false) externalNameTest.run(false)
}) })
@ -512,28 +512,28 @@ var _ = SIGDescribe("DNS configMap nameserver [IPv4]", func() {
var _ = SIGDescribe("DNS configMap nameserver [Feature:Networking-IPv6]", func() { var _ = SIGDescribe("DNS configMap nameserver [Feature:Networking-IPv6]", func() {
Context("Change stubDomain", func() { ginkgo.Context("Change stubDomain", func() {
nsTest := &dnsNameserverTest{dnsTestCommon: newDNSTestCommon()} nsTest := &dnsNameserverTest{dnsTestCommon: newDNSTestCommon()}
It("should be able to change stubDomain configuration [Slow][Serial]", func() { ginkgo.It("should be able to change stubDomain configuration [Slow][Serial]", func() {
nsTest.c = nsTest.f.ClientSet nsTest.c = nsTest.f.ClientSet
nsTest.run(true) nsTest.run(true)
}) })
}) })
Context("Forward PTR lookup", func() { ginkgo.Context("Forward PTR lookup", func() {
fwdTest := &dnsPtrFwdTest{dnsTestCommon: newDNSTestCommon()} fwdTest := &dnsPtrFwdTest{dnsTestCommon: newDNSTestCommon()}
It("should forward PTR records lookup to upstream nameserver [Slow][Serial]", func() { ginkgo.It("should forward PTR records lookup to upstream nameserver [Slow][Serial]", func() {
fwdTest.c = fwdTest.f.ClientSet fwdTest.c = fwdTest.f.ClientSet
fwdTest.run(true) fwdTest.run(true)
}) })
}) })
Context("Forward external name lookup", func() { ginkgo.Context("Forward external name lookup", func() {
externalNameTest := &dnsExternalNameTest{dnsTestCommon: newDNSTestCommon()} externalNameTest := &dnsExternalNameTest{dnsTestCommon: newDNSTestCommon()}
It("should forward externalname lookup to upstream nameserver [Slow][Serial]", func() { ginkgo.It("should forward externalname lookup to upstream nameserver [Slow][Serial]", func() {
externalNameTest.c = externalNameTest.f.ClientSet externalNameTest.c = externalNameTest.f.ClientSet
externalNameTest.run(true) externalNameTest.run(true)
}) })

View File

@ -30,7 +30,7 @@ import (
e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2elog "k8s.io/kubernetes/test/e2e/framework/log"
testutils "k8s.io/kubernetes/test/utils" testutils "k8s.io/kubernetes/test/utils"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
) )
const ( const (
@ -43,7 +43,7 @@ const (
var _ = SIGDescribe("[Feature:PerformanceDNS][Serial]", func() { var _ = SIGDescribe("[Feature:PerformanceDNS][Serial]", func() {
f := framework.NewDefaultFramework("performancedns") f := framework.NewDefaultFramework("performancedns")
BeforeEach(func() { ginkgo.BeforeEach(func() {
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(f.ClientSet, framework.TestContext.NodeSchedulableTimeout)) framework.ExpectNoError(framework.WaitForAllNodesSchedulable(f.ClientSet, framework.TestContext.NodeSchedulableTimeout))
framework.WaitForAllNodesHealthy(f.ClientSet, time.Minute) framework.WaitForAllNodesHealthy(f.ClientSet, time.Minute)
@ -52,7 +52,7 @@ var _ = SIGDescribe("[Feature:PerformanceDNS][Serial]", func() {
}) })
// answers dns for service - creates the maximum number of services, and then checks the dns record for one // answers dns for service - creates the maximum number of services, and then checks the dns record for one
It("Should answer DNS query for maximum number of services per cluster", func() { ginkgo.It("Should answer DNS query for maximum number of services per cluster", func() {
// get integer ceiling of maxServicesPerCluster / maxServicesPerNamespace // get integer ceiling of maxServicesPerCluster / maxServicesPerNamespace
numNs := (maxServicesPerCluster + maxServicesPerNamespace - 1) / maxServicesPerNamespace numNs := (maxServicesPerCluster + maxServicesPerNamespace - 1) / maxServicesPerNamespace
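As a quick sanity check on the integer-ceiling idiom used above, a minimal standalone sketch (the values below are hypothetical, not the real constants defined in this file):

package main

import "fmt"

func main() {
	// (a + b - 1) / b computes ceil(a/b) for positive integers.
	maxServicesPerCluster := 10000  // hypothetical value
	maxServicesPerNamespace := 5000 // hypothetical value
	numNs := (maxServicesPerCluster + maxServicesPerNamespace - 1) / maxServicesPerNamespace
	fmt.Println(numNs) // prints 2; with 10001 services it would print 3
}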
@ -64,7 +64,7 @@ var _ = SIGDescribe("[Feature:PerformanceDNS][Serial]", func() {
services := generateServicesInNamespaces(namespaces, maxServicesPerCluster) services := generateServicesInNamespaces(namespaces, maxServicesPerCluster)
createService := func(i int) { createService := func(i int) {
defer GinkgoRecover() defer ginkgo.GinkgoRecover()
framework.ExpectNoError(testutils.CreateServiceWithRetries(f.ClientSet, services[i].Namespace, services[i])) framework.ExpectNoError(testutils.CreateServiceWithRetries(f.ClientSet, services[i].Namespace, services[i]))
} }
e2elog.Logf("Creating %v test services", maxServicesPerCluster) e2elog.Logf("Creating %v test services", maxServicesPerCluster)

View File

@ -32,8 +32,8 @@ import (
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2elog "k8s.io/kubernetes/test/e2e/framework/log"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
) )
const ( const (
@ -52,11 +52,11 @@ var _ = SIGDescribe("ClusterDns [Feature:Example]", func() {
f := framework.NewDefaultFramework("cluster-dns") f := framework.NewDefaultFramework("cluster-dns")
var c clientset.Interface var c clientset.Interface
BeforeEach(func() { ginkgo.BeforeEach(func() {
c = f.ClientSet c = f.ClientSet
}) })
It("should create pod that uses dns", func() { ginkgo.It("should create pod that uses dns", func() {
mkpath := func(file string) string { mkpath := func(file string) string {
return filepath.Join(os.Getenv("GOPATH"), "src/k8s.io/examples/staging/cluster-dns", file) return filepath.Join(os.Getenv("GOPATH"), "src/k8s.io/examples/staging/cluster-dns", file)
} }
@ -84,7 +84,7 @@ var _ = SIGDescribe("ClusterDns [Feature:Example]", func() {
var err error var err error
namespaceName := fmt.Sprintf("dnsexample%d", i) namespaceName := fmt.Sprintf("dnsexample%d", i)
namespaces[i], err = f.CreateNamespace(namespaceName, nil) namespaces[i], err = f.CreateNamespace(namespaceName, nil)
Expect(err).NotTo(HaveOccurred(), "failed to create namespace: %s", namespaceName) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create namespace: %s", namespaceName)
} }
for _, ns := range namespaces { for _, ns := range namespaces {
@ -106,13 +106,13 @@ var _ = SIGDescribe("ClusterDns [Feature:Example]", func() {
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": backendRcName})) label := labels.SelectorFromSet(labels.Set(map[string]string{"name": backendRcName}))
options := metav1.ListOptions{LabelSelector: label.String()} options := metav1.ListOptions{LabelSelector: label.String()}
pods, err := c.CoreV1().Pods(ns.Name).List(options) pods, err := c.CoreV1().Pods(ns.Name).List(options)
Expect(err).NotTo(HaveOccurred(), "failed to list pods in namespace: %s", ns.Name) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to list pods in namespace: %s", ns.Name)
err = framework.PodsResponding(c, ns.Name, backendPodName, false, pods) err = framework.PodsResponding(c, ns.Name, backendPodName, false, pods)
Expect(err).NotTo(HaveOccurred(), "waiting for all pods to respond") gomega.Expect(err).NotTo(gomega.HaveOccurred(), "waiting for all pods to respond")
e2elog.Logf("found %d backend pods responding in namespace %s", len(pods.Items), ns.Name) e2elog.Logf("found %d backend pods responding in namespace %s", len(pods.Items), ns.Name)
err = framework.ServiceResponding(c, ns.Name, backendSvcName) err = framework.ServiceResponding(c, ns.Name, backendSvcName)
Expect(err).NotTo(HaveOccurred(), "waiting for the service to respond") gomega.Expect(err).NotTo(gomega.HaveOccurred(), "waiting for the service to respond")
} }
// Now another tricky part: // Now another tricky part:
@ -134,7 +134,7 @@ var _ = SIGDescribe("ClusterDns [Feature:Example]", func() {
queryDNS := fmt.Sprintf(queryDNSPythonTemplate, backendSvcName+"."+namespaces[0].Name) queryDNS := fmt.Sprintf(queryDNSPythonTemplate, backendSvcName+"."+namespaces[0].Name)
_, err = framework.LookForStringInPodExec(namespaces[0].Name, podName, []string{"python", "-c", queryDNS}, "ok", dnsReadyTimeout) _, err = framework.LookForStringInPodExec(namespaces[0].Name, podName, []string{"python", "-c", queryDNS}, "ok", dnsReadyTimeout)
Expect(err).NotTo(HaveOccurred(), "waiting for output from pod exec") gomega.Expect(err).NotTo(gomega.HaveOccurred(), "waiting for output from pod exec")
updatedPodYaml := prepareResourceWithReplacedString(frontendPodYaml, fmt.Sprintf("dns-backend.development.svc.%s", framework.TestContext.ClusterDNSDomain), fmt.Sprintf("dns-backend.%s.svc.%s", namespaces[0].Name, framework.TestContext.ClusterDNSDomain)) updatedPodYaml := prepareResourceWithReplacedString(frontendPodYaml, fmt.Sprintf("dns-backend.development.svc.%s", framework.TestContext.ClusterDNSDomain), fmt.Sprintf("dns-backend.%s.svc.%s", namespaces[0].Name, framework.TestContext.ClusterDNSDomain))
@ -153,7 +153,7 @@ var _ = SIGDescribe("ClusterDns [Feature:Example]", func() {
// wait for pods to print their result // wait for pods to print their result
for _, ns := range namespaces { for _, ns := range namespaces {
_, err := framework.LookForStringInLog(ns.Name, frontendPodName, frontendPodContainerName, podOutput, framework.PodStartTimeout) _, err := framework.LookForStringInLog(ns.Name, frontendPodName, frontendPodContainerName, podOutput, framework.PodStartTimeout)
Expect(err).NotTo(HaveOccurred(), "pod %s failed to print result in logs", frontendPodName) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "pod %s failed to print result in logs", frontendPodName)
} }
}) })
}) })
@ -165,10 +165,10 @@ func getNsCmdFlag(ns *v1.Namespace) string {
// pass enough context with the 'old' parameter so that it replaces what you really intended. // pass enough context with the 'old' parameter so that it replaces what you really intended.
func prepareResourceWithReplacedString(inputFile, old, new string) string { func prepareResourceWithReplacedString(inputFile, old, new string) string {
f, err := os.Open(inputFile) f, err := os.Open(inputFile)
Expect(err).NotTo(HaveOccurred(), "failed to open file: %s", inputFile) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to open file: %s", inputFile)
defer f.Close() defer f.Close()
data, err := ioutil.ReadAll(f) data, err := ioutil.ReadAll(f)
Expect(err).NotTo(HaveOccurred(), "failed to read from file: %s", inputFile) gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to read from file: %s", inputFile)
podYaml := strings.Replace(string(data), old, new, 1) podYaml := strings.Replace(string(data), old, new, 1)
return podYaml return podYaml
} }

View File

@ -30,8 +30,8 @@ import (
"k8s.io/kubernetes/test/e2e/framework/providers/gce" "k8s.io/kubernetes/test/e2e/framework/providers/gce"
gcecloud "k8s.io/legacy-cloud-providers/gce" gcecloud "k8s.io/legacy-cloud-providers/gce"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
) )
const ( const (
@ -49,38 +49,38 @@ var _ = SIGDescribe("Firewall rule", func() {
var cloudConfig framework.CloudConfig var cloudConfig framework.CloudConfig
var gceCloud *gcecloud.Cloud var gceCloud *gcecloud.Cloud
BeforeEach(func() { ginkgo.BeforeEach(func() {
framework.SkipUnlessProviderIs("gce") framework.SkipUnlessProviderIs("gce")
var err error var err error
cs = f.ClientSet cs = f.ClientSet
cloudConfig = framework.TestContext.CloudConfig cloudConfig = framework.TestContext.CloudConfig
gceCloud, err = gce.GetGCECloud() gceCloud, err = gce.GetGCECloud()
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
}) })
// This test takes around 6 minutes to run // This test takes around 6 minutes to run
It("[Slow] [Serial] should create valid firewall rules for LoadBalancer type service", func() { ginkgo.It("[Slow] [Serial] should create valid firewall rules for LoadBalancer type service", func() {
ns := f.Namespace.Name ns := f.Namespace.Name
// These source ranges are only used to check that the LB firewall rule has exactly the same entries // These source ranges are only used to check that the LB firewall rule has exactly the same entries
firewallTestSourceRanges := []string{"0.0.0.0/1", "128.0.0.0/1"} firewallTestSourceRanges := []string{"0.0.0.0/1", "128.0.0.0/1"}
serviceName := "firewall-test-loadbalancer" serviceName := "firewall-test-loadbalancer"
By("Getting cluster ID") ginkgo.By("Getting cluster ID")
clusterID, err := gce.GetClusterID(cs) clusterID, err := gce.GetClusterID(cs)
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
e2elog.Logf("Got cluster ID: %v", clusterID) e2elog.Logf("Got cluster ID: %v", clusterID)
jig := framework.NewServiceTestJig(cs, serviceName) jig := framework.NewServiceTestJig(cs, serviceName)
nodeList := jig.GetNodes(framework.MaxNodesForEndpointsTests) nodeList := jig.GetNodes(framework.MaxNodesForEndpointsTests)
Expect(nodeList).NotTo(BeNil()) gomega.Expect(nodeList).NotTo(gomega.BeNil())
nodesNames := jig.GetNodesNames(framework.MaxNodesForEndpointsTests) nodesNames := jig.GetNodesNames(framework.MaxNodesForEndpointsTests)
if len(nodesNames) <= 0 { if len(nodesNames) <= 0 {
framework.Failf("Expect at least 1 node, got: %v", nodesNames) framework.Failf("Expect at least 1 node, got: %v", nodesNames)
} }
nodesSet := sets.NewString(nodesNames...) nodesSet := sets.NewString(nodesNames...)
By("Creating a LoadBalancer type service with ExternalTrafficPolicy=Global") ginkgo.By("Creating a LoadBalancer type service with ExternalTrafficPolicy=Global")
svc := jig.CreateLoadBalancerService(ns, serviceName, framework.LoadBalancerCreateTimeoutDefault, func(svc *v1.Service) { svc := jig.CreateLoadBalancerService(ns, serviceName, framework.LoadBalancerCreateTimeoutDefault, func(svc *v1.Service) {
svc.Spec.Ports = []v1.ServicePort{{Protocol: v1.ProtocolTCP, Port: firewallTestHTTPPort}} svc.Spec.Ports = []v1.ServicePort{{Protocol: v1.ProtocolTCP, Port: firewallTestHTTPPort}}
svc.Spec.LoadBalancerSourceRanges = firewallTestSourceRanges svc.Spec.LoadBalancerSourceRanges = firewallTestSourceRanges
@ -90,61 +90,61 @@ var _ = SIGDescribe("Firewall rule", func() {
svc.Spec.Type = v1.ServiceTypeNodePort svc.Spec.Type = v1.ServiceTypeNodePort
svc.Spec.LoadBalancerSourceRanges = nil svc.Spec.LoadBalancerSourceRanges = nil
}) })
Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred()) gomega.Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(gomega.HaveOccurred())
By("Waiting for the local traffic health check firewall rule to be deleted") ginkgo.By("Waiting for the local traffic health check firewall rule to be deleted")
localHCFwName := gce.MakeHealthCheckFirewallNameForLBService(clusterID, cloudprovider.DefaultLoadBalancerName(svc), false) localHCFwName := gce.MakeHealthCheckFirewallNameForLBService(clusterID, cloudprovider.DefaultLoadBalancerName(svc), false)
_, err := gce.WaitForFirewallRule(gceCloud, localHCFwName, false, framework.LoadBalancerCleanupTimeout) _, err := gce.WaitForFirewallRule(gceCloud, localHCFwName, false, framework.LoadBalancerCleanupTimeout)
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
}() }()
svcExternalIP := svc.Status.LoadBalancer.Ingress[0].IP svcExternalIP := svc.Status.LoadBalancer.Ingress[0].IP
By("Checking if service's firewall rule is correct") ginkgo.By("Checking if service's firewall rule is correct")
lbFw := gce.ConstructFirewallForLBService(svc, cloudConfig.NodeTag) lbFw := gce.ConstructFirewallForLBService(svc, cloudConfig.NodeTag)
fw, err := gceCloud.GetFirewall(lbFw.Name) fw, err := gceCloud.GetFirewall(lbFw.Name)
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
Expect(gce.VerifyFirewallRule(fw, lbFw, cloudConfig.Network, false)).NotTo(HaveOccurred()) gomega.Expect(gce.VerifyFirewallRule(fw, lbFw, cloudConfig.Network, false)).NotTo(gomega.HaveOccurred())
By("Checking if service's nodes health check firewall rule is correct") ginkgo.By("Checking if service's nodes health check firewall rule is correct")
nodesHCFw := gce.ConstructHealthCheckFirewallForLBService(clusterID, svc, cloudConfig.NodeTag, true) nodesHCFw := gce.ConstructHealthCheckFirewallForLBService(clusterID, svc, cloudConfig.NodeTag, true)
fw, err = gceCloud.GetFirewall(nodesHCFw.Name) fw, err = gceCloud.GetFirewall(nodesHCFw.Name)
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
Expect(gce.VerifyFirewallRule(fw, nodesHCFw, cloudConfig.Network, false)).NotTo(HaveOccurred()) gomega.Expect(gce.VerifyFirewallRule(fw, nodesHCFw, cloudConfig.Network, false)).NotTo(gomega.HaveOccurred())
// OnlyLocal service is needed to examine which exact nodes the requests are being forwarded to by the Load Balancer on GCE // OnlyLocal service is needed to examine which exact nodes the requests are being forwarded to by the Load Balancer on GCE
By("Updating LoadBalancer service to ExternalTrafficPolicy=Local") ginkgo.By("Updating LoadBalancer service to ExternalTrafficPolicy=Local")
svc = jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) { svc = jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) {
svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
}) })
By("Waiting for the nodes health check firewall rule to be deleted") ginkgo.By("Waiting for the nodes health check firewall rule to be deleted")
_, err = gce.WaitForFirewallRule(gceCloud, nodesHCFw.Name, false, framework.LoadBalancerCleanupTimeout) _, err = gce.WaitForFirewallRule(gceCloud, nodesHCFw.Name, false, framework.LoadBalancerCleanupTimeout)
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
By("Waiting for the correct local traffic health check firewall rule to be created") ginkgo.By("Waiting for the correct local traffic health check firewall rule to be created")
localHCFw := gce.ConstructHealthCheckFirewallForLBService(clusterID, svc, cloudConfig.NodeTag, false) localHCFw := gce.ConstructHealthCheckFirewallForLBService(clusterID, svc, cloudConfig.NodeTag, false)
fw, err = gce.WaitForFirewallRule(gceCloud, localHCFw.Name, true, framework.LoadBalancerCreateTimeoutDefault) fw, err = gce.WaitForFirewallRule(gceCloud, localHCFw.Name, true, framework.LoadBalancerCreateTimeoutDefault)
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
Expect(gce.VerifyFirewallRule(fw, localHCFw, cloudConfig.Network, false)).NotTo(HaveOccurred()) gomega.Expect(gce.VerifyFirewallRule(fw, localHCFw, cloudConfig.Network, false)).NotTo(gomega.HaveOccurred())
By(fmt.Sprintf("Creating netexec pods on at most %v nodes", framework.MaxNodesForEndpointsTests)) ginkgo.By(fmt.Sprintf("Creating netexec pods on at most %v nodes", framework.MaxNodesForEndpointsTests))
for i, nodeName := range nodesNames { for i, nodeName := range nodesNames {
podName := fmt.Sprintf("netexec%v", i) podName := fmt.Sprintf("netexec%v", i)
jig.LaunchNetexecPodOnNode(f, nodeName, podName, firewallTestHTTPPort, firewallTestUDPPort, true) jig.LaunchNetexecPodOnNode(f, nodeName, podName, firewallTestHTTPPort, firewallTestUDPPort, true)
defer func() { defer func() {
e2elog.Logf("Cleaning up the netexec pod: %v", podName) e2elog.Logf("Cleaning up the netexec pod: %v", podName)
Expect(cs.CoreV1().Pods(ns).Delete(podName, nil)).NotTo(HaveOccurred()) gomega.Expect(cs.CoreV1().Pods(ns).Delete(podName, nil)).NotTo(gomega.HaveOccurred())
}() }()
} }
// Send requests from outside of the cluster because internal traffic is whitelisted // Send requests from outside of the cluster because internal traffic is whitelisted
By("Accessing the external service ip from outside, all non-master nodes should be reached") ginkgo.By("Accessing the external service ip from outside, all non-master nodes should be reached")
Expect(framework.TestHitNodesFromOutside(svcExternalIP, firewallTestHTTPPort, framework.LoadBalancerCreateTimeoutDefault, nodesSet)).NotTo(HaveOccurred()) gomega.Expect(framework.TestHitNodesFromOutside(svcExternalIP, firewallTestHTTPPort, framework.LoadBalancerCreateTimeoutDefault, nodesSet)).NotTo(gomega.HaveOccurred())
// Check if there are overlapping tags on the firewall that extend beyond just the vms in our cluster // Check if there are overlapping tags on the firewall that extend beyond just the vms in our cluster
// by removing the tag on one vm and making sure it doesn't get any traffic. This is an imperfect // by removing the tag on one vm and making sure it doesn't get any traffic. This is an imperfect
// simulation, we really want to check that traffic doesn't reach a vm outside the GKE cluster, but // simulation, we really want to check that traffic doesn't reach a vm outside the GKE cluster, but
// that's much harder to do in the current e2e framework. // that's much harder to do in the current e2e framework.
By(fmt.Sprintf("Removing tags from one of the nodes: %v", nodesNames[0])) ginkgo.By(fmt.Sprintf("Removing tags from one of the nodes: %v", nodesNames[0]))
nodesSet.Delete(nodesNames[0]) nodesSet.Delete(nodesNames[0])
// Instance could run in a different zone in multi-zone test. Figure out which zone // Instance could run in a different zone in multi-zone test. Figure out which zone
// it is in before proceeding. // it is in before proceeding.
@ -154,31 +154,31 @@ var _ = SIGDescribe("Firewall rule", func() {
} }
removedTags := gce.SetInstanceTags(cloudConfig, nodesNames[0], zone, []string{}) removedTags := gce.SetInstanceTags(cloudConfig, nodesNames[0], zone, []string{})
defer func() { defer func() {
By("Adding tags back to the node and wait till the traffic is recovered") ginkgo.By("Adding tags back to the node and wait till the traffic is recovered")
nodesSet.Insert(nodesNames[0]) nodesSet.Insert(nodesNames[0])
gce.SetInstanceTags(cloudConfig, nodesNames[0], zone, removedTags) gce.SetInstanceTags(cloudConfig, nodesNames[0], zone, removedTags)
// Make sure traffic is recovered before exit // Make sure traffic is recovered before exit
Expect(framework.TestHitNodesFromOutside(svcExternalIP, firewallTestHTTPPort, framework.LoadBalancerCreateTimeoutDefault, nodesSet)).NotTo(HaveOccurred()) gomega.Expect(framework.TestHitNodesFromOutside(svcExternalIP, firewallTestHTTPPort, framework.LoadBalancerCreateTimeoutDefault, nodesSet)).NotTo(gomega.HaveOccurred())
}() }()
By("Accessing serivce through the external ip and examine got no response from the node without tags") ginkgo.By("Accessing serivce through the external ip and examine got no response from the node without tags")
Expect(framework.TestHitNodesFromOutsideWithCount(svcExternalIP, firewallTestHTTPPort, framework.LoadBalancerCreateTimeoutDefault, nodesSet, 15)).NotTo(HaveOccurred()) gomega.Expect(framework.TestHitNodesFromOutsideWithCount(svcExternalIP, firewallTestHTTPPort, framework.LoadBalancerCreateTimeoutDefault, nodesSet, 15)).NotTo(gomega.HaveOccurred())
}) })
It("should have correct firewall rules for e2e cluster", func() { ginkgo.It("should have correct firewall rules for e2e cluster", func() {
nodes := framework.GetReadySchedulableNodesOrDie(cs) nodes := framework.GetReadySchedulableNodesOrDie(cs)
if len(nodes.Items) <= 0 { if len(nodes.Items) <= 0 {
framework.Failf("Expect at least 1 node, got: %v", len(nodes.Items)) framework.Failf("Expect at least 1 node, got: %v", len(nodes.Items))
} }
By("Checking if e2e firewall rules are correct") ginkgo.By("Checking if e2e firewall rules are correct")
for _, expFw := range gce.GetE2eFirewalls(cloudConfig.MasterName, cloudConfig.MasterTag, cloudConfig.NodeTag, cloudConfig.Network, cloudConfig.ClusterIPRange) { for _, expFw := range gce.GetE2eFirewalls(cloudConfig.MasterName, cloudConfig.MasterTag, cloudConfig.NodeTag, cloudConfig.Network, cloudConfig.ClusterIPRange) {
fw, err := gceCloud.GetFirewall(expFw.Name) fw, err := gceCloud.GetFirewall(expFw.Name)
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
Expect(gce.VerifyFirewallRule(fw, expFw, cloudConfig.Network, false)).NotTo(HaveOccurred()) gomega.Expect(gce.VerifyFirewallRule(fw, expFw, cloudConfig.Network, false)).NotTo(gomega.HaveOccurred())
} }
By("Checking well known ports on master and nodes are not exposed externally") ginkgo.By("Checking well known ports on master and nodes are not exposed externally")
nodeAddrs := framework.NodeAddresses(nodes, v1.NodeExternalIP) nodeAddrs := framework.NodeAddresses(nodes, v1.NodeExternalIP)
if len(nodeAddrs) == 0 { if len(nodeAddrs) == 0 {
framework.Failf("did not find any node addresses") framework.Failf("did not find any node addresses")

View File

@ -18,6 +18,7 @@ package network
import "github.com/onsi/ginkgo" import "github.com/onsi/ginkgo"
// SIGDescribe annotates the test with the SIG label.
func SIGDescribe(text string, body func()) bool { func SIGDescribe(text string, body func()) bool {
return ginkgo.Describe("[sig-network] "+text, body) return ginkgo.Describe("[sig-network] "+text, body)
} }
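For context, the pattern applied throughout this change is the one golint's dot-import check asks for. A minimal sketch of the qualified-import style the tests are switched to (package and test names here are illustrative only, not from this repository):

package network_test

import (
	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

// With plain imports instead of dot imports, every ginkgo and gomega
// identifier is referenced through its package name.
var _ = ginkgo.Describe("[sig-network] example", func() {
	ginkgo.It("qualifies ginkgo and gomega calls", func() {
		gomega.Expect(1 + 1).To(gomega.Equal(2))
	})
})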

View File

@ -40,8 +40,8 @@ import (
e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/framework/providers/gce" "k8s.io/kubernetes/test/e2e/framework/providers/gce"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
) )
const ( const (
@ -50,7 +50,7 @@ const (
) )
var _ = SIGDescribe("Loadbalancing: L7", func() { var _ = SIGDescribe("Loadbalancing: L7", func() {
defer GinkgoRecover() defer ginkgo.GinkgoRecover()
var ( var (
ns string ns string
jig *ingress.TestJig jig *ingress.TestJig
@ -58,7 +58,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
) )
f := framework.NewDefaultFramework("ingress") f := framework.NewDefaultFramework("ingress")
BeforeEach(func() { ginkgo.BeforeEach(func() {
jig = ingress.NewIngressTestJig(f.ClientSet) jig = ingress.NewIngressTestJig(f.ClientSet)
ns = f.Namespace.Name ns = f.Namespace.Name
@ -81,59 +81,59 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
// //
// Slow by design ~10m for each "It" block dominated by loadbalancer setup time // Slow by design ~10m for each "It" block dominated by loadbalancer setup time
// TODO: write similar tests for nginx, haproxy and AWS Ingress. // TODO: write similar tests for nginx, haproxy and AWS Ingress.
Describe("GCE [Slow] [Feature:Ingress]", func() { ginkgo.Describe("GCE [Slow] [Feature:Ingress]", func() {
var gceController *gce.IngressController var gceController *gce.IngressController
// Platform specific setup // Platform specific setup
BeforeEach(func() { ginkgo.BeforeEach(func() {
framework.SkipUnlessProviderIs("gce", "gke") framework.SkipUnlessProviderIs("gce", "gke")
By("Initializing gce controller") ginkgo.By("Initializing gce controller")
gceController = &gce.IngressController{ gceController = &gce.IngressController{
Ns: ns, Ns: ns,
Client: jig.Client, Client: jig.Client,
Cloud: framework.TestContext.CloudConfig, Cloud: framework.TestContext.CloudConfig,
} }
err := gceController.Init() err := gceController.Init()
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
}) })
// Platform specific cleanup // Platform specific cleanup
AfterEach(func() { ginkgo.AfterEach(func() {
if CurrentGinkgoTestDescription().Failed { if ginkgo.CurrentGinkgoTestDescription().Failed {
framework.DescribeIng(ns) framework.DescribeIng(ns)
} }
if jig.Ingress == nil { if jig.Ingress == nil {
By("No ingress created, no cleanup necessary") ginkgo.By("No ingress created, no cleanup necessary")
return return
} }
By("Deleting ingress") ginkgo.By("Deleting ingress")
jig.TryDeleteIngress() jig.TryDeleteIngress()
By("Cleaning up cloud resources") ginkgo.By("Cleaning up cloud resources")
Expect(gceController.CleanupIngressController()).NotTo(HaveOccurred()) gomega.Expect(gceController.CleanupIngressController()).NotTo(gomega.HaveOccurred())
}) })
It("should conform to Ingress spec", func() { ginkgo.It("should conform to Ingress spec", func() {
conformanceTests = ingress.CreateIngressComformanceTests(jig, ns, map[string]string{}) conformanceTests = ingress.CreateIngressComformanceTests(jig, ns, map[string]string{})
for _, t := range conformanceTests { for _, t := range conformanceTests {
By(t.EntryLog) ginkgo.By(t.EntryLog)
t.Execute() t.Execute()
By(t.ExitLog) ginkgo.By(t.ExitLog)
jig.WaitForIngress(true) jig.WaitForIngress(true)
} }
}) })
It("should create ingress with pre-shared certificate", func() { ginkgo.It("should create ingress with pre-shared certificate", func() {
executePresharedCertTest(f, jig, "") executePresharedCertTest(f, jig, "")
}) })
It("should support multiple TLS certs", func() { ginkgo.It("should support multiple TLS certs", func() {
By("Creating an ingress with no certs.") ginkgo.By("Creating an ingress with no certs.")
jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "multiple-certs"), ns, map[string]string{ jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "multiple-certs"), ns, map[string]string{
ingress.IngressStaticIPKey: ns, ingress.IngressStaticIPKey: ns,
}, map[string]string{}) }, map[string]string{})
By("Adding multiple certs to the ingress.") ginkgo.By("Adding multiple certs to the ingress.")
hosts := []string{"test1.ingress.com", "test2.ingress.com", "test3.ingress.com", "test4.ingress.com"} hosts := []string{"test1.ingress.com", "test2.ingress.com", "test3.ingress.com", "test4.ingress.com"}
secrets := []string{"tls-secret-1", "tls-secret-2", "tls-secret-3", "tls-secret-4"} secrets := []string{"tls-secret-1", "tls-secret-2", "tls-secret-3", "tls-secret-4"}
certs := [][]byte{} certs := [][]byte{}
@ -143,33 +143,33 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
} }
for i, host := range hosts { for i, host := range hosts {
err := jig.WaitForIngressWithCert(true, []string{host}, certs[i]) err := jig.WaitForIngressWithCert(true, []string{host}, certs[i])
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Unexpected error while waiting for ingress: %v", err)) gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Unexpected error while waiting for ingress: %v", err))
} }
By("Remove all but one of the certs on the ingress.") ginkgo.By("Remove all but one of the certs on the ingress.")
jig.RemoveHTTPS(secrets[1]) jig.RemoveHTTPS(secrets[1])
jig.RemoveHTTPS(secrets[2]) jig.RemoveHTTPS(secrets[2])
jig.RemoveHTTPS(secrets[3]) jig.RemoveHTTPS(secrets[3])
By("Test that the remaining cert is properly served.") ginkgo.By("Test that the remaining cert is properly served.")
err := jig.WaitForIngressWithCert(true, []string{hosts[0]}, certs[0]) err := jig.WaitForIngressWithCert(true, []string{hosts[0]}, certs[0])
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Unexpected error while waiting for ingress: %v", err)) gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Unexpected error while waiting for ingress: %v", err))
By("Add back one of the certs that was removed and check that all certs are served.") ginkgo.By("Add back one of the certs that was removed and check that all certs are served.")
jig.AddHTTPS(secrets[1], hosts[1]) jig.AddHTTPS(secrets[1], hosts[1])
for i, host := range hosts[:2] { for i, host := range hosts[:2] {
err := jig.WaitForIngressWithCert(true, []string{host}, certs[i]) err := jig.WaitForIngressWithCert(true, []string{host}, certs[i])
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Unexpected error while waiting for ingress: %v", err)) gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Unexpected error while waiting for ingress: %v", err))
} }
}) })
It("multicluster ingress should get instance group annotation", func() { ginkgo.It("multicluster ingress should get instance group annotation", func() {
name := "echomap" name := "echomap"
jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "http"), ns, map[string]string{ jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "http"), ns, map[string]string{
ingress.IngressClassKey: ingress.MulticlusterIngressClassValue, ingress.IngressClassKey: ingress.MulticlusterIngressClassValue,
}, map[string]string{}) }, map[string]string{})
By(fmt.Sprintf("waiting for Ingress %s to get instance group annotation", name)) ginkgo.By(fmt.Sprintf("waiting for Ingress %s to get instance group annotation", name))
pollErr := wait.Poll(2*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) { pollErr := wait.Poll(2*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) {
ing, err := f.ClientSet.ExtensionsV1beta1().Ingresses(ns).Get(name, metav1.GetOptions{}) ing, err := f.ClientSet.ExtensionsV1beta1().Ingresses(ns).Get(name, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
@ -237,118 +237,118 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
// zone based on pod labels. // zone based on pod labels.
}) })
Describe("GCE [Slow] [Feature:NEG]", func() { ginkgo.Describe("GCE [Slow] [Feature:NEG]", func() {
var gceController *gce.IngressController var gceController *gce.IngressController
// Platform specific setup // Platform specific setup
BeforeEach(func() { ginkgo.BeforeEach(func() {
framework.SkipUnlessProviderIs("gce", "gke") framework.SkipUnlessProviderIs("gce", "gke")
By("Initializing gce controller") ginkgo.By("Initializing gce controller")
gceController = &gce.IngressController{ gceController = &gce.IngressController{
Ns: ns, Ns: ns,
Client: jig.Client, Client: jig.Client,
Cloud: framework.TestContext.CloudConfig, Cloud: framework.TestContext.CloudConfig,
} }
err := gceController.Init() err := gceController.Init()
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
}) })
// Platform specific cleanup // Platform specific cleanup
AfterEach(func() { ginkgo.AfterEach(func() {
if CurrentGinkgoTestDescription().Failed { if ginkgo.CurrentGinkgoTestDescription().Failed {
framework.DescribeIng(ns) framework.DescribeIng(ns)
} }
if jig.Ingress == nil { if jig.Ingress == nil {
By("No ingress created, no cleanup necessary") ginkgo.By("No ingress created, no cleanup necessary")
return return
} }
By("Deleting ingress") ginkgo.By("Deleting ingress")
jig.TryDeleteIngress() jig.TryDeleteIngress()
By("Cleaning up cloud resources") ginkgo.By("Cleaning up cloud resources")
Expect(gceController.CleanupIngressController()).NotTo(HaveOccurred()) gomega.Expect(gceController.CleanupIngressController()).NotTo(gomega.HaveOccurred())
}) })
It("should conform to Ingress spec", func() { ginkgo.It("should conform to Ingress spec", func() {
jig.PollInterval = 5 * time.Second jig.PollInterval = 5 * time.Second
conformanceTests = ingress.CreateIngressComformanceTests(jig, ns, map[string]string{ conformanceTests = ingress.CreateIngressComformanceTests(jig, ns, map[string]string{
ingress.NEGAnnotation: `{"ingress": true}`, ingress.NEGAnnotation: `{"ingress": true}`,
}) })
for _, t := range conformanceTests { for _, t := range conformanceTests {
By(t.EntryLog) ginkgo.By(t.EntryLog)
t.Execute() t.Execute()
By(t.ExitLog) ginkgo.By(t.ExitLog)
jig.WaitForIngress(true) jig.WaitForIngress(true)
Expect(gceController.WaitForNegBackendService(jig.GetServicePorts(false))).NotTo(HaveOccurred()) gomega.Expect(gceController.WaitForNegBackendService(jig.GetServicePorts(false))).NotTo(gomega.HaveOccurred())
} }
}) })
It("should be able to switch between IG and NEG modes", func() { ginkgo.It("should be able to switch between IG and NEG modes", func() {
var err error var err error
By("Create a basic HTTP ingress using NEG") ginkgo.By("Create a basic HTTP ingress using NEG")
jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{}) jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{})
jig.WaitForIngress(true) jig.WaitForIngress(true)
Expect(gceController.WaitForNegBackendService(jig.GetServicePorts(false))).NotTo(HaveOccurred()) gomega.Expect(gceController.WaitForNegBackendService(jig.GetServicePorts(false))).NotTo(gomega.HaveOccurred())
By("Switch backend service to use IG") ginkgo.By("Switch backend service to use IG")
svcList, err := f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{}) svcList, err := f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
for _, svc := range svcList.Items { for _, svc := range svcList.Items {
svc.Annotations[ingress.NEGAnnotation] = `{"ingress": false}` svc.Annotations[ingress.NEGAnnotation] = `{"ingress": false}`
_, err = f.ClientSet.CoreV1().Services(ns).Update(&svc) _, err = f.ClientSet.CoreV1().Services(ns).Update(&svc)
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
} }
err = wait.Poll(5*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) { err = wait.Poll(5*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) {
if err := gceController.BackendServiceUsingIG(jig.GetServicePorts(false)); err != nil { if err := gceController.BackendServiceUsingIG(jig.GetServicePorts(false)); err != nil {
e2elog.Logf("Failed to verify IG backend service: %v", err) e2elog.Logf("ginkgo.Failed to verify IG backend service: %v", err)
return false, nil return false, nil
} }
return true, nil return true, nil
}) })
Expect(err).NotTo(HaveOccurred(), "Expect backend service to target IG, but failed to observe") gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Expect backend service to target IG, but failed to observe")
jig.WaitForIngress(true) jig.WaitForIngress(true)
By("Switch backend service to use NEG") ginkgo.By("Switch backend service to use NEG")
svcList, err = f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{}) svcList, err = f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
for _, svc := range svcList.Items { for _, svc := range svcList.Items {
svc.Annotations[ingress.NEGAnnotation] = `{"ingress": true}` svc.Annotations[ingress.NEGAnnotation] = `{"ingress": true}`
_, err = f.ClientSet.CoreV1().Services(ns).Update(&svc) _, err = f.ClientSet.CoreV1().Services(ns).Update(&svc)
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
} }
err = wait.Poll(5*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) { err = wait.Poll(5*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) {
if err := gceController.BackendServiceUsingNEG(jig.GetServicePorts(false)); err != nil { if err := gceController.BackendServiceUsingNEG(jig.GetServicePorts(false)); err != nil {
e2elog.Logf("Failed to verify NEG backend service: %v", err) e2elog.Logf("ginkgo.Failed to verify NEG backend service: %v", err)
return false, nil return false, nil
} }
return true, nil return true, nil
}) })
Expect(err).NotTo(HaveOccurred(), "Expect backend service to target NEG, but failed to observe") gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Expect backend service to target NEG, but failed to observe")
jig.WaitForIngress(true) jig.WaitForIngress(true)
}) })
It("should be able to create a ClusterIP service", func() { ginkgo.It("should be able to create a ClusterIP service", func() {
By("Create a basic HTTP ingress using NEG") ginkgo.By("Create a basic HTTP ingress using NEG")
jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg-clusterip"), ns, map[string]string{}, map[string]string{}) jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg-clusterip"), ns, map[string]string{}, map[string]string{})
jig.WaitForIngress(true) jig.WaitForIngress(true)
svcPorts := jig.GetServicePorts(false) svcPorts := jig.GetServicePorts(false)
Expect(gceController.WaitForNegBackendService(svcPorts)).NotTo(HaveOccurred()) gomega.Expect(gceController.WaitForNegBackendService(svcPorts)).NotTo(gomega.HaveOccurred())
// ClusterIP ServicePorts have no NodePort // ClusterIP ServicePorts have no NodePort
for _, sp := range svcPorts { for _, sp := range svcPorts {
Expect(sp.NodePort).To(Equal(int32(0))) gomega.Expect(sp.NodePort).To(gomega.Equal(int32(0)))
} }
}) })
It("should sync endpoints to NEG", func() { ginkgo.It("should sync endpoints to NEG", func() {
name := "hostname" name := "hostname"
scaleAndValidateNEG := func(num int) { scaleAndValidateNEG := func(num int) {
scale, err := f.ClientSet.AppsV1().Deployments(ns).GetScale(name, metav1.GetOptions{}) scale, err := f.ClientSet.AppsV1().Deployments(ns).GetScale(name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
if scale.Spec.Replicas != int32(num) { if scale.Spec.Replicas != int32(num) {
scale.Spec.Replicas = int32(num) scale.Spec.Replicas = int32(num)
_, err = f.ClientSet.AppsV1().Deployments(ns).UpdateScale(name, scale) _, err = f.ClientSet.AppsV1().Deployments(ns).UpdateScale(name, scale)
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
} }
err = wait.Poll(10*time.Second, negUpdateTimeout, func() (bool, error) { err = wait.Poll(10*time.Second, negUpdateTimeout, func() (bool, error) {
res, err := jig.GetDistinctResponseFromIngress() res, err := jig.GetDistinctResponseFromIngress()
@ -358,45 +358,45 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
e2elog.Logf("Expecting %d backends, got %d", num, res.Len()) e2elog.Logf("Expecting %d backends, got %d", num, res.Len())
return res.Len() == num, nil return res.Len() == num, nil
}) })
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
} }
By("Create a basic HTTP ingress using NEG") ginkgo.By("Create a basic HTTP ingress using NEG")
jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{}) jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{})
jig.WaitForIngress(true) jig.WaitForIngress(true)
jig.WaitForIngressToStable() jig.WaitForIngressToStable()
Expect(gceController.WaitForNegBackendService(jig.GetServicePorts(false))).NotTo(HaveOccurred()) gomega.Expect(gceController.WaitForNegBackendService(jig.GetServicePorts(false))).NotTo(gomega.HaveOccurred())
// initial replicas number is 1 // initial replicas number is 1
scaleAndValidateNEG(1) scaleAndValidateNEG(1)
By("Scale up number of backends to 5") ginkgo.By("Scale up number of backends to 5")
scaleAndValidateNEG(5) scaleAndValidateNEG(5)
By("Scale down number of backends to 3") ginkgo.By("Scale down number of backends to 3")
scaleAndValidateNEG(3) scaleAndValidateNEG(3)
By("Scale up number of backends to 6") ginkgo.By("Scale up number of backends to 6")
scaleAndValidateNEG(6) scaleAndValidateNEG(6)
By("Scale down number of backends to 2") ginkgo.By("Scale down number of backends to 2")
scaleAndValidateNEG(3) scaleAndValidateNEG(3)
}) })
It("rolling update backend pods should not cause service disruption", func() { ginkgo.It("rolling update backend pods should not cause service disruption", func() {
name := "hostname" name := "hostname"
replicas := 8 replicas := 8
By("Create a basic HTTP ingress using NEG") ginkgo.By("Create a basic HTTP ingress using NEG")
jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{}) jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{})
jig.WaitForIngress(true) jig.WaitForIngress(true)
jig.WaitForIngressToStable() jig.WaitForIngressToStable()
Expect(gceController.WaitForNegBackendService(jig.GetServicePorts(false))).NotTo(HaveOccurred()) gomega.Expect(gceController.WaitForNegBackendService(jig.GetServicePorts(false))).NotTo(gomega.HaveOccurred())
By(fmt.Sprintf("Scale backend replicas to %d", replicas)) ginkgo.By(fmt.Sprintf("Scale backend replicas to %d", replicas))
scale, err := f.ClientSet.AppsV1().Deployments(ns).GetScale(name, metav1.GetOptions{}) scale, err := f.ClientSet.AppsV1().Deployments(ns).GetScale(name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
scale.Spec.Replicas = int32(replicas) scale.Spec.Replicas = int32(replicas)
_, err = f.ClientSet.AppsV1().Deployments(ns).UpdateScale(name, scale) _, err = f.ClientSet.AppsV1().Deployments(ns).UpdateScale(name, scale)
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
err = wait.Poll(10*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) { err = wait.Poll(10*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) {
res, err := jig.GetDistinctResponseFromIngress() res, err := jig.GetDistinctResponseFromIngress()
@ -405,21 +405,21 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
} }
return res.Len() == replicas, nil return res.Len() == replicas, nil
}) })
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
By("Trigger rolling update and observe service disruption") ginkgo.By("Trigger rolling update and observe service disruption")
deploy, err := f.ClientSet.AppsV1().Deployments(ns).Get(name, metav1.GetOptions{}) deploy, err := f.ClientSet.AppsV1().Deployments(ns).Get(name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
// trigger by changing graceful termination period to 60 seconds // trigger by changing graceful termination period to 60 seconds
gracePeriod := int64(60) gracePeriod := int64(60)
deploy.Spec.Template.Spec.TerminationGracePeriodSeconds = &gracePeriod deploy.Spec.Template.Spec.TerminationGracePeriodSeconds = &gracePeriod
_, err = f.ClientSet.AppsV1().Deployments(ns).Update(deploy) _, err = f.ClientSet.AppsV1().Deployments(ns).Update(deploy)
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
err = wait.Poll(10*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) { err = wait.Poll(10*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) {
res, err := jig.GetDistinctResponseFromIngress() res, err := jig.GetDistinctResponseFromIngress()
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
deploy, err := f.ClientSet.AppsV1().Deployments(ns).Get(name, metav1.GetOptions{}) deploy, err := f.ClientSet.AppsV1().Deployments(ns).Get(name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
if int(deploy.Status.UpdatedReplicas) == replicas { if int(deploy.Status.UpdatedReplicas) == replicas {
if res.Len() == replicas { if res.Len() == replicas {
return true, nil return true, nil
@ -427,29 +427,28 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
e2elog.Logf("Expecting %d different responses, but got %d.", replicas, res.Len()) e2elog.Logf("Expecting %d different responses, but got %d.", replicas, res.Len())
return false, nil return false, nil
} else { }
e2elog.Logf("Waiting for rolling update to finished. Keep sending traffic.") e2elog.Logf("Waiting for rolling update to finished. Keep sending traffic.")
return false, nil return false, nil
}
}) })
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
}) })
It("should sync endpoints for both Ingress-referenced NEG and standalone NEG", func() { ginkgo.It("should sync endpoints for both Ingress-referenced NEG and standalone NEG", func() {
name := "hostname" name := "hostname"
expectedKeys := []int32{80, 443} expectedKeys := []int32{80, 443}
scaleAndValidateExposedNEG := func(num int) { scaleAndValidateExposedNEG := func(num int) {
scale, err := f.ClientSet.AppsV1().Deployments(ns).GetScale(name, metav1.GetOptions{}) scale, err := f.ClientSet.AppsV1().Deployments(ns).GetScale(name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
if scale.Spec.Replicas != int32(num) { if scale.Spec.Replicas != int32(num) {
scale.Spec.Replicas = int32(num) scale.Spec.Replicas = int32(num)
_, err = f.ClientSet.AppsV1().Deployments(ns).UpdateScale(name, scale) _, err = f.ClientSet.AppsV1().Deployments(ns).UpdateScale(name, scale)
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
} }
err = wait.Poll(10*time.Second, negUpdateTimeout, func() (bool, error) { err = wait.Poll(10*time.Second, negUpdateTimeout, func() (bool, error) {
svc, err := f.ClientSet.CoreV1().Services(ns).Get(name, metav1.GetOptions{}) svc, err := f.ClientSet.CoreV1().Services(ns).Get(name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
var status ingress.NegStatus var status ingress.NegStatus
v, ok := svc.Annotations[ingress.NEGStatusAnnotation] v, ok := svc.Annotations[ingress.NEGStatusAnnotation]
@ -482,10 +481,10 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
} }
gceCloud, err := gce.GetGCECloud() gceCloud, err := gce.GetGCECloud()
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
for _, neg := range status.NetworkEndpointGroups { for _, neg := range status.NetworkEndpointGroups {
networkEndpoints, err := gceCloud.ListNetworkEndpoints(neg, gceController.Cloud.Zone, false) networkEndpoints, err := gceCloud.ListNetworkEndpoints(neg, gceController.Cloud.Zone, false)
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
if len(networkEndpoints) != num { if len(networkEndpoints) != num {
e2elog.Logf("Expect number of endpoints to be %d, but got %d", num, len(networkEndpoints)) e2elog.Logf("Expect number of endpoints to be %d, but got %d", num, len(networkEndpoints))
return false, nil return false, nil
@ -494,31 +493,31 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
return true, nil return true, nil
}) })
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
} }
By("Create a basic HTTP ingress using NEG") ginkgo.By("Create a basic HTTP ingress using NEG")
jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg-exposed"), ns, map[string]string{}, map[string]string{}) jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg-exposed"), ns, map[string]string{}, map[string]string{})
jig.WaitForIngress(true) jig.WaitForIngress(true)
Expect(gceController.WaitForNegBackendService(jig.GetServicePorts(false))).NotTo(HaveOccurred()) gomega.Expect(gceController.WaitForNegBackendService(jig.GetServicePorts(false))).NotTo(gomega.HaveOccurred())
// initial replicas number is 1 // initial replicas number is 1
scaleAndValidateExposedNEG(1) scaleAndValidateExposedNEG(1)
By("Scale up number of backends to 5") ginkgo.By("Scale up number of backends to 5")
scaleAndValidateExposedNEG(5) scaleAndValidateExposedNEG(5)
By("Scale down number of backends to 3") ginkgo.By("Scale down number of backends to 3")
scaleAndValidateExposedNEG(3) scaleAndValidateExposedNEG(3)
By("Scale up number of backends to 6") ginkgo.By("Scale up number of backends to 6")
scaleAndValidateExposedNEG(6) scaleAndValidateExposedNEG(6)
By("Scale down number of backends to 2") ginkgo.By("Scale down number of backends to 2")
scaleAndValidateExposedNEG(3) scaleAndValidateExposedNEG(3)
}) })
It("should create NEGs for all ports with the Ingress annotation, and NEGs for the standalone annotation otherwise", func() { ginkgo.It("should create NEGs for all ports with the Ingress annotation, and NEGs for the standalone annotation otherwise", func() {
By("Create a basic HTTP ingress using standalone NEG") ginkgo.By("Create a basic HTTP ingress using standalone NEG")
jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg-exposed"), ns, map[string]string{}, map[string]string{}) jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "neg-exposed"), ns, map[string]string{}, map[string]string{})
jig.WaitForIngress(true) jig.WaitForIngress(true)
@ -526,120 +525,120 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
detectNegAnnotation(f, jig, gceController, ns, name, 2) detectNegAnnotation(f, jig, gceController, ns, name, 2)
// Add Ingress annotation - NEGs should stay the same. // Add Ingress annotation - NEGs should stay the same.
By("Adding NEG Ingress annotation") ginkgo.By("Adding NEG Ingress annotation")
svcList, err := f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{}) svcList, err := f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
for _, svc := range svcList.Items { for _, svc := range svcList.Items {
svc.Annotations[ingress.NEGAnnotation] = `{"ingress":true,"exposed_ports":{"80":{},"443":{}}}` svc.Annotations[ingress.NEGAnnotation] = `{"ingress":true,"exposed_ports":{"80":{},"443":{}}}`
_, err = f.ClientSet.CoreV1().Services(ns).Update(&svc) _, err = f.ClientSet.CoreV1().Services(ns).Update(&svc)
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
} }
detectNegAnnotation(f, jig, gceController, ns, name, 2) detectNegAnnotation(f, jig, gceController, ns, name, 2)
// Modify exposed NEG annotation, but keep ingress annotation // Modify exposed NEG annotation, but keep ingress annotation
By("Modifying exposed NEG annotation, but keep Ingress annotation") ginkgo.By("Modifying exposed NEG annotation, but keep Ingress annotation")
svcList, err = f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{}) svcList, err = f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
for _, svc := range svcList.Items { for _, svc := range svcList.Items {
svc.Annotations[ingress.NEGAnnotation] = `{"ingress":true,"exposed_ports":{"443":{}}}` svc.Annotations[ingress.NEGAnnotation] = `{"ingress":true,"exposed_ports":{"443":{}}}`
_, err = f.ClientSet.CoreV1().Services(ns).Update(&svc) _, err = f.ClientSet.CoreV1().Services(ns).Update(&svc)
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
} }
detectNegAnnotation(f, jig, gceController, ns, name, 2) detectNegAnnotation(f, jig, gceController, ns, name, 2)
// Remove Ingress annotation. Expect 1 NEG // Remove Ingress annotation. Expect 1 NEG
By("Disabling Ingress annotation, but keeping one standalone NEG") ginkgo.By("Disabling Ingress annotation, but keeping one standalone NEG")
svcList, err = f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{}) svcList, err = f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
for _, svc := range svcList.Items { for _, svc := range svcList.Items {
svc.Annotations[ingress.NEGAnnotation] = `{"ingress":false,"exposed_ports":{"443":{}}}` svc.Annotations[ingress.NEGAnnotation] = `{"ingress":false,"exposed_ports":{"443":{}}}`
_, err = f.ClientSet.CoreV1().Services(ns).Update(&svc) _, err = f.ClientSet.CoreV1().Services(ns).Update(&svc)
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
} }
detectNegAnnotation(f, jig, gceController, ns, name, 1) detectNegAnnotation(f, jig, gceController, ns, name, 1)
// Remove NEG annotation entirely. Expect 0 NEGs. // Remove NEG annotation entirely. Expect 0 NEGs.
By("Removing NEG annotation") ginkgo.By("Removing NEG annotation")
svcList, err = f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{}) svcList, err = f.ClientSet.CoreV1().Services(ns).List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
for _, svc := range svcList.Items { for _, svc := range svcList.Items {
delete(svc.Annotations, ingress.NEGAnnotation) delete(svc.Annotations, ingress.NEGAnnotation)
// Service cannot be ClusterIP if it's using Instance Groups. // Service cannot be ClusterIP if it's using Instance Groups.
svc.Spec.Type = v1.ServiceTypeNodePort svc.Spec.Type = v1.ServiceTypeNodePort
_, err = f.ClientSet.CoreV1().Services(ns).Update(&svc) _, err = f.ClientSet.CoreV1().Services(ns).Update(&svc)
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
} }
detectNegAnnotation(f, jig, gceController, ns, name, 0) detectNegAnnotation(f, jig, gceController, ns, name, 0)
}) })
}) })
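The exposed-NEG annotation values used above are raw JSON strings whose shape is a top-level "ingress" flag plus an "exposed_ports" object keyed by port number. A minimal sketch of building that value programmatically; the struct name below is hypothetical and only mirrors the JSON seen in the test, not a type exported by the framework:

package main

import (
	"encoding/json"
	"fmt"
)

// negConfig mirrors the annotation payload used in the test:
// {"ingress":true,"exposed_ports":{"80":{},"443":{}}}
type negConfig struct {
	Ingress      bool                `json:"ingress"`
	ExposedPorts map[string]struct{} `json:"exposed_ports,omitempty"`
}

func main() {
	cfg := negConfig{
		Ingress:      true,
		ExposedPorts: map[string]struct{}{"80": {}, "443": {}},
	}
	raw, err := json.Marshal(cfg)
	if err != nil {
		panic(err)
	}
	// The resulting string is what gets stored under the NEG annotation key on the Service.
	fmt.Println(string(raw))
}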
Describe("GCE [Slow] [Feature:kubemci]", func() { ginkgo.Describe("GCE [Slow] [Feature:kubemci]", func() {
var gceController *gce.IngressController var gceController *gce.IngressController
var ipName, ipAddress string var ipName, ipAddress string
// Platform specific setup // Platform specific setup
BeforeEach(func() { ginkgo.BeforeEach(func() {
framework.SkipUnlessProviderIs("gce", "gke") framework.SkipUnlessProviderIs("gce", "gke")
jig.Class = ingress.MulticlusterIngressClassValue jig.Class = ingress.MulticlusterIngressClassValue
jig.PollInterval = 5 * time.Second jig.PollInterval = 5 * time.Second
By("Initializing gce controller") ginkgo.By("Initializing gce controller")
gceController = &gce.IngressController{ gceController = &gce.IngressController{
Ns: ns, Ns: ns,
Client: jig.Client, Client: jig.Client,
Cloud: framework.TestContext.CloudConfig, Cloud: framework.TestContext.CloudConfig,
} }
err := gceController.Init() err := gceController.Init()
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
// TODO(https://github.com/GoogleCloudPlatform/k8s-multicluster-ingress/issues/19): // TODO(https://github.com/GoogleCloudPlatform/k8s-multicluster-ingress/issues/19):
// Kubemci should reserve a static ip if user has not specified one. // Kubemci should reserve a static ip if user has not specified one.
ipName = "kubemci-" + string(uuid.NewUUID()) ipName = "kubemci-" + string(uuid.NewUUID())
// The IP is released when the rest of the LB resources are deleted in CleanupIngressController. // The IP is released when the rest of the LB resources are deleted in CleanupIngressController.
ipAddress = gceController.CreateStaticIP(ipName) ipAddress = gceController.CreateStaticIP(ipName)
By(fmt.Sprintf("allocated static ip %v: %v through the GCE cloud provider", ipName, ipAddress)) ginkgo.By(fmt.Sprintf("allocated static ip %v: %v through the GCE cloud provider", ipName, ipAddress))
}) })
// Platform specific cleanup // Platform specific cleanup
AfterEach(func() { ginkgo.AfterEach(func() {
if CurrentGinkgoTestDescription().Failed { if ginkgo.CurrentGinkgoTestDescription().Failed {
framework.DescribeIng(ns) framework.DescribeIng(ns)
} }
if jig.Ingress == nil { if jig.Ingress == nil {
By("No ingress created, no cleanup necessary") ginkgo.By("No ingress created, no cleanup necessary")
} else { } else {
By("Deleting ingress") ginkgo.By("Deleting ingress")
jig.TryDeleteIngress() jig.TryDeleteIngress()
} }
By("Cleaning up cloud resources") ginkgo.By("Cleaning up cloud resources")
Expect(gceController.CleanupIngressController()).NotTo(HaveOccurred()) gomega.Expect(gceController.CleanupIngressController()).NotTo(gomega.HaveOccurred())
}) })
It("should conform to Ingress spec", func() { ginkgo.It("should conform to Ingress spec", func() {
conformanceTests = ingress.CreateIngressComformanceTests(jig, ns, map[string]string{ conformanceTests = ingress.CreateIngressComformanceTests(jig, ns, map[string]string{
ingress.IngressStaticIPKey: ipName, ingress.IngressStaticIPKey: ipName,
}) })
for _, t := range conformanceTests { for _, t := range conformanceTests {
By(t.EntryLog) ginkgo.By(t.EntryLog)
t.Execute() t.Execute()
By(t.ExitLog) ginkgo.By(t.ExitLog)
jig.WaitForIngress(false /*waitForNodePort*/) jig.WaitForIngress(false /*waitForNodePort*/)
} }
}) })
It("should create ingress with pre-shared certificate", func() { ginkgo.It("should create ingress with pre-shared certificate", func() {
executePresharedCertTest(f, jig, ipName) executePresharedCertTest(f, jig, ipName)
}) })
It("should create ingress with backend HTTPS", func() { ginkgo.It("should create ingress with backend HTTPS", func() {
executeBacksideBacksideHTTPSTest(f, jig, ipName) executeBacksideBacksideHTTPSTest(f, jig, ipName)
}) })
It("should support https-only annotation", func() { ginkgo.It("should support https-only annotation", func() {
executeStaticIPHttpsOnlyTest(f, jig, ipName, ipAddress) executeStaticIPHttpsOnlyTest(f, jig, ipName, ipAddress)
}) })
It("should remove clusters as expected", func() { ginkgo.It("should remove clusters as expected", func() {
ingAnnotations := map[string]string{ ingAnnotations := map[string]string{
ingress.IngressStaticIPKey: ipName, ingress.IngressStaticIPKey: ipName,
} }
@ -668,8 +667,8 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
verifyKubemciStatusHas(name, "is spread across 0 cluster") verifyKubemciStatusHas(name, "is spread across 0 cluster")
}) })
It("single and multi-cluster ingresses should be able to exist together", func() { ginkgo.It("single and multi-cluster ingresses should be able to exist together", func() {
By("Creating a single cluster ingress first") ginkgo.By("Creating a single cluster ingress first")
jig.Class = "" jig.Class = ""
singleIngFilePath := filepath.Join(ingress.GCEIngressManifestPath, "static-ip-2") singleIngFilePath := filepath.Join(ingress.GCEIngressManifestPath, "static-ip-2")
jig.CreateIngress(singleIngFilePath, ns, map[string]string{}, map[string]string{}) jig.CreateIngress(singleIngFilePath, ns, map[string]string{}, map[string]string{})
@ -678,7 +677,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
singleIng := jig.Ingress singleIng := jig.Ingress
// Create the multi-cluster ingress next. // Create the multi-cluster ingress next.
By("Creating a multi-cluster ingress next") ginkgo.By("Creating a multi-cluster ingress next")
jig.Class = ingress.MulticlusterIngressClassValue jig.Class = ingress.MulticlusterIngressClassValue
ingAnnotations := map[string]string{ ingAnnotations := map[string]string{
ingress.IngressStaticIPKey: ipName, ingress.IngressStaticIPKey: ipName,
@ -688,7 +687,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
jig.WaitForIngress(false /*waitForNodePort*/) jig.WaitForIngress(false /*waitForNodePort*/)
mciIngress := jig.Ingress mciIngress := jig.Ingress
By("Deleting the single cluster ingress and verifying that multi-cluster ingress continues to work") ginkgo.By("Deleting the single cluster ingress and verifying that multi-cluster ingress continues to work")
jig.Ingress = singleIng jig.Ingress = singleIng
jig.Class = "" jig.Class = ""
jig.TryDeleteIngress() jig.TryDeleteIngress()
@ -696,18 +695,18 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
jig.Class = ingress.MulticlusterIngressClassValue jig.Class = ingress.MulticlusterIngressClassValue
jig.WaitForIngress(false /*waitForNodePort*/) jig.WaitForIngress(false /*waitForNodePort*/)
By("Cleanup: Deleting the multi-cluster ingress") ginkgo.By("Cleanup: Deleting the multi-cluster ingress")
jig.TryDeleteIngress() jig.TryDeleteIngress()
}) })
}) })
// Time: borderline 5m, slow by design // Time: borderline 5m, slow by design
Describe("[Slow] Nginx", func() { ginkgo.Describe("[Slow] Nginx", func() {
var nginxController *ingress.NginxIngressController var nginxController *ingress.NginxIngressController
BeforeEach(func() { ginkgo.BeforeEach(func() {
framework.SkipUnlessProviderIs("gce", "gke") framework.SkipUnlessProviderIs("gce", "gke")
By("Initializing nginx controller") ginkgo.By("Initializing nginx controller")
jig.Class = "nginx" jig.Class = "nginx"
nginxController = &ingress.NginxIngressController{Ns: ns, Client: jig.Client} nginxController = &ingress.NginxIngressController{Ns: ns, Client: jig.Client}
@ -723,30 +722,30 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
nginxController.Init() nginxController.Init()
}) })
AfterEach(func() { ginkgo.AfterEach(func() {
if framework.ProviderIs("gce", "gke") { if framework.ProviderIs("gce", "gke") {
framework.ExpectNoError(gce.GcloudComputeResourceDelete("firewall-rules", fmt.Sprintf("ingress-80-443-%v", ns), framework.TestContext.CloudConfig.ProjectID)) framework.ExpectNoError(gce.GcloudComputeResourceDelete("firewall-rules", fmt.Sprintf("ingress-80-443-%v", ns), framework.TestContext.CloudConfig.ProjectID))
} }
if CurrentGinkgoTestDescription().Failed { if ginkgo.CurrentGinkgoTestDescription().Failed {
framework.DescribeIng(ns) framework.DescribeIng(ns)
} }
if jig.Ingress == nil { if jig.Ingress == nil {
By("No ingress created, no cleanup necessary") ginkgo.By("No ingress created, no cleanup necessary")
return return
} }
By("Deleting ingress") ginkgo.By("Deleting ingress")
jig.TryDeleteIngress() jig.TryDeleteIngress()
}) })
It("should conform to Ingress spec", func() { ginkgo.It("should conform to Ingress spec", func() {
// Poll more frequently to reduce e2e completion time. // Poll more frequently to reduce e2e completion time.
// This test runs in presubmit. // This test runs in presubmit.
jig.PollInterval = 5 * time.Second jig.PollInterval = 5 * time.Second
conformanceTests = ingress.CreateIngressComformanceTests(jig, ns, map[string]string{}) conformanceTests = ingress.CreateIngressComformanceTests(jig, ns, map[string]string{})
for _, t := range conformanceTests { for _, t := range conformanceTests {
By(t.EntryLog) ginkgo.By(t.EntryLog)
t.Execute() t.Execute()
By(t.ExitLog) ginkgo.By(t.ExitLog)
jig.WaitForIngress(false) jig.WaitForIngress(false)
} }
}) })
@ -766,28 +765,28 @@ func verifyKubemciStatusHas(name, expectedSubStr string) {
func executePresharedCertTest(f *framework.Framework, jig *ingress.TestJig, staticIPName string) { func executePresharedCertTest(f *framework.Framework, jig *ingress.TestJig, staticIPName string) {
preSharedCertName := "test-pre-shared-cert" preSharedCertName := "test-pre-shared-cert"
By(fmt.Sprintf("Creating ssl certificate %q on GCE", preSharedCertName)) ginkgo.By(fmt.Sprintf("Creating ssl certificate %q on GCE", preSharedCertName))
testHostname := "test.ingress.com" testHostname := "test.ingress.com"
cert, key, err := ingress.GenerateRSACerts(testHostname, true) cert, key, err := ingress.GenerateRSACerts(testHostname, true)
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
gceCloud, err := gce.GetGCECloud() gceCloud, err := gce.GetGCECloud()
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
defer func() { defer func() {
// We would not be able to delete the cert until ingress controller // We would not be able to delete the cert until ingress controller
// cleans up the target proxy that references it. // cleans up the target proxy that references it.
By("Deleting ingress before deleting ssl certificate") ginkgo.By("Deleting ingress before deleting ssl certificate")
if jig.Ingress != nil { if jig.Ingress != nil {
jig.TryDeleteIngress() jig.TryDeleteIngress()
} }
By(fmt.Sprintf("Deleting ssl certificate %q on GCE", preSharedCertName)) ginkgo.By(fmt.Sprintf("Deleting ssl certificate %q on GCE", preSharedCertName))
err := wait.Poll(framework.LoadBalancerPollInterval, framework.LoadBalancerCleanupTimeout, func() (bool, error) { err := wait.Poll(framework.LoadBalancerPollInterval, framework.LoadBalancerCleanupTimeout, func() (bool, error) {
if err := gceCloud.DeleteSslCertificate(preSharedCertName); err != nil && !errors.IsNotFound(err) { if err := gceCloud.DeleteSslCertificate(preSharedCertName); err != nil && !errors.IsNotFound(err) {
e2elog.Logf("Failed to delete ssl certificate %q: %v. Retrying...", preSharedCertName, err) e2elog.Logf("ginkgo.Failed to delete ssl certificate %q: %v. Retrying...", preSharedCertName, err)
return false, nil return false, nil
} }
return true, nil return true, nil
}) })
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to delete ssl certificate %q: %v", preSharedCertName, err)) gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("ginkgo.Failed to delete ssl certificate %q: %v", preSharedCertName, err))
}() }()
_, err = gceCloud.CreateSslCertificate(&compute.SslCertificate{ _, err = gceCloud.CreateSslCertificate(&compute.SslCertificate{
Name: preSharedCertName, Name: preSharedCertName,
@ -795,9 +794,9 @@ func executePresharedCertTest(f *framework.Framework, jig *ingress.TestJig, stat
PrivateKey: string(key), PrivateKey: string(key),
Description: "pre-shared cert for ingress testing", Description: "pre-shared cert for ingress testing",
}) })
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create ssl certificate %q: %v", preSharedCertName, err)) gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("ginkgo.Failed to create ssl certificate %q: %v", preSharedCertName, err))
By("Creating an ingress referencing the pre-shared certificate") ginkgo.By("Creating an ingress referencing the pre-shared certificate")
// Create an ingress referencing this cert using pre-shared-cert annotation. // Create an ingress referencing this cert using pre-shared-cert annotation.
ingAnnotations := map[string]string{ ingAnnotations := map[string]string{
ingress.IngressPreSharedCertKey: preSharedCertName, ingress.IngressPreSharedCertKey: preSharedCertName,
@ -810,9 +809,9 @@ func executePresharedCertTest(f *framework.Framework, jig *ingress.TestJig, stat
} }
jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "pre-shared-cert"), f.Namespace.Name, ingAnnotations, map[string]string{}) jig.CreateIngress(filepath.Join(ingress.IngressManifestPath, "pre-shared-cert"), f.Namespace.Name, ingAnnotations, map[string]string{})
By("Test that ingress works with the pre-shared certificate") ginkgo.By("Test that ingress works with the pre-shared certificate")
err = jig.WaitForIngressWithCert(true, []string{testHostname}, cert) err = jig.WaitForIngressWithCert(true, []string{testHostname}, cert)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Unexpected error while waiting for ingress: %v", err)) gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Unexpected error while waiting for ingress: %v", err))
} }
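The deferred cleanup above has to retry certificate deletion because the cert cannot be removed while the target proxy still references it. The retry-until-gone pattern, reduced to its essentials with a stand-in deleteCert function in place of gceCloud.DeleteSslCertificate, looks roughly like this:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/util/wait"
)

// waitForCertDeletion keeps retrying deletion until the resource is gone.
// A NotFound error counts as success: the certificate no longer exists.
func waitForCertDeletion(deleteCert func() error, interval, timeout time.Duration) error {
	return wait.Poll(interval, timeout, func() (bool, error) {
		if err := deleteCert(); err != nil && !errors.IsNotFound(err) {
			// Still referenced (or transiently failing): retry on the next tick.
			return false, nil
		}
		return true, nil
	})
}

func main() {
	calls := 0
	fake := func() error { // stand-in for the real cloud call
		calls++
		if calls < 3 {
			return fmt.Errorf("certificate still in use by a target proxy")
		}
		return nil
	}
	if err := waitForCertDeletion(fake, 10*time.Millisecond, time.Second); err != nil {
		panic(err)
	}
	fmt.Println("deleted after", calls, "attempts")
}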
func executeStaticIPHttpsOnlyTest(f *framework.Framework, jig *ingress.TestJig, ipName, ip string) { func executeStaticIPHttpsOnlyTest(f *framework.Framework, jig *ingress.TestJig, ipName, ip string) {
@ -821,30 +820,30 @@ func executeStaticIPHttpsOnlyTest(f *framework.Framework, jig *ingress.TestJig,
ingress.IngressAllowHTTPKey: "false", ingress.IngressAllowHTTPKey: "false",
}, map[string]string{}) }, map[string]string{})
By("waiting for Ingress to come up with ip: " + ip) ginkgo.By("waiting for Ingress to come up with ip: " + ip)
httpClient := ingress.BuildInsecureClient(ingress.IngressReqTimeout) httpClient := ingress.BuildInsecureClient(ingress.IngressReqTimeout)
framework.ExpectNoError(framework.PollURL(fmt.Sprintf("https://%s/", ip), "", framework.LoadBalancerPollTimeout, jig.PollInterval, httpClient, false)) framework.ExpectNoError(framework.PollURL(fmt.Sprintf("https://%s/", ip), "", framework.LoadBalancerPollTimeout, jig.PollInterval, httpClient, false))
By("should reject HTTP traffic") ginkgo.By("should reject HTTP traffic")
framework.ExpectNoError(framework.PollURL(fmt.Sprintf("http://%s/", ip), "", framework.LoadBalancerPollTimeout, jig.PollInterval, httpClient, true)) framework.ExpectNoError(framework.PollURL(fmt.Sprintf("http://%s/", ip), "", framework.LoadBalancerPollTimeout, jig.PollInterval, httpClient, true))
} }
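executeStaticIPHttpsOnlyTest depends on an HTTP client that tolerates the self-signed certificate served by the load balancer, so the HTTPS probe can succeed while the plain-HTTP probe on the same IP is expected to fail. A minimal sketch of such a client, assuming nothing about ingress.BuildInsecureClient beyond the behaviour implied here; the address is a placeholder:

package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"time"
)

// newInsecureClient returns an HTTP client that skips TLS verification,
// which is what probing an ingress backed by a test certificate requires.
func newInsecureClient(timeout time.Duration) *http.Client {
	return &http.Client{
		Timeout: timeout,
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		},
	}
}

func main() {
	client := newInsecureClient(30 * time.Second)
	resp, err := client.Get("https://203.0.113.10/") // placeholder ingress IP
	if err != nil {
		fmt.Println("HTTPS probe failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("HTTPS probe status:", resp.Status)
}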
func executeBacksideBacksideHTTPSTest(f *framework.Framework, jig *ingress.TestJig, staticIPName string) { func executeBacksideBacksideHTTPSTest(f *framework.Framework, jig *ingress.TestJig, staticIPName string) {
By("Creating a set of ingress, service and deployment that have backside re-encryption configured") ginkgo.By("Creating a set of ingress, service and deployment that have backside re-encryption configured")
deployCreated, svcCreated, ingCreated, err := jig.SetUpBacksideHTTPSIngress(f.ClientSet, f.Namespace.Name, staticIPName) deployCreated, svcCreated, ingCreated, err := jig.SetUpBacksideHTTPSIngress(f.ClientSet, f.Namespace.Name, staticIPName)
defer func() { defer func() {
By("Cleaning up re-encryption ingress, service and deployment") ginkgo.By("Cleaning up re-encryption ingress, service and deployment")
if errs := jig.DeleteTestResource(f.ClientSet, deployCreated, svcCreated, ingCreated); len(errs) > 0 { if errs := jig.DeleteTestResource(f.ClientSet, deployCreated, svcCreated, ingCreated); len(errs) > 0 {
framework.Failf("Failed to cleanup re-encryption ingress: %v", errs) framework.Failf("ginkgo.Failed to cleanup re-encryption ingress: %v", errs)
} }
}() }()
Expect(err).NotTo(HaveOccurred(), "Failed to create re-encryption ingress") gomega.Expect(err).NotTo(gomega.HaveOccurred(), "ginkgo.Failed to create re-encryption ingress")
By(fmt.Sprintf("Waiting for ingress %s to come up", ingCreated.Name)) ginkgo.By(fmt.Sprintf("Waiting for ingress %s to come up", ingCreated.Name))
ingIP, err := jig.WaitForIngressAddress(f.ClientSet, f.Namespace.Name, ingCreated.Name, framework.LoadBalancerPollTimeout) ingIP, err := jig.WaitForIngressAddress(f.ClientSet, f.Namespace.Name, ingCreated.Name, framework.LoadBalancerPollTimeout)
Expect(err).NotTo(HaveOccurred(), "Failed to wait for ingress IP") gomega.Expect(err).NotTo(gomega.HaveOccurred(), "ginkgo.Failed to wait for ingress IP")
By(fmt.Sprintf("Polling on address %s and verify the backend is serving HTTPS", ingIP)) ginkgo.By(fmt.Sprintf("Polling on address %s and verify the backend is serving HTTPS", ingIP))
timeoutClient := &http.Client{Timeout: ingress.IngressReqTimeout} timeoutClient := &http.Client{Timeout: ingress.IngressReqTimeout}
err = wait.PollImmediate(framework.LoadBalancerPollInterval, framework.LoadBalancerPollTimeout, func() (bool, error) { err = wait.PollImmediate(framework.LoadBalancerPollInterval, framework.LoadBalancerPollTimeout, func() (bool, error) {
resp, err := framework.SimpleGET(timeoutClient, fmt.Sprintf("http://%s", ingIP), "") resp, err := framework.SimpleGET(timeoutClient, fmt.Sprintf("http://%s", ingIP), "")
@ -858,7 +857,7 @@ func executeBacksideBacksideHTTPSTest(f *framework.Framework, jig *ingress.TestJ
e2elog.Logf("Poll succeeded, request was served by HTTPS") e2elog.Logf("Poll succeeded, request was served by HTTPS")
return true, nil return true, nil
}) })
Expect(err).NotTo(HaveOccurred(), "Failed to verify backside re-encryption ingress") gomega.Expect(err).NotTo(gomega.HaveOccurred(), "ginkgo.Failed to verify backside re-encryption ingress")
} }
func detectNegAnnotation(f *framework.Framework, jig *ingress.TestJig, gceController *gce.IngressController, ns, name string, negs int) { func detectNegAnnotation(f *framework.Framework, jig *ingress.TestJig, gceController *gce.IngressController, ns, name string, negs int) {
@ -872,7 +871,7 @@ func detectNegAnnotation(f *framework.Framework, jig *ingress.TestJig, gceContro
if negs == 0 { if negs == 0 {
err := gceController.BackendServiceUsingIG(jig.GetServicePorts(false)) err := gceController.BackendServiceUsingIG(jig.GetServicePorts(false))
if err != nil { if err != nil {
e2elog.Logf("Failed to validate IG backend service: %v", err) e2elog.Logf("ginkgo.Failed to validate IG backend service: %v", err)
return false, nil return false, nil
} }
return true, nil return true, nil
@ -898,10 +897,10 @@ func detectNegAnnotation(f *framework.Framework, jig *ingress.TestJig, gceContro
} }
gceCloud, err := gce.GetGCECloud() gceCloud, err := gce.GetGCECloud()
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
for _, neg := range status.NetworkEndpointGroups { for _, neg := range status.NetworkEndpointGroups {
networkEndpoints, err := gceCloud.ListNetworkEndpoints(neg, gceController.Cloud.Zone, false) networkEndpoints, err := gceCloud.ListNetworkEndpoints(neg, gceController.Cloud.Zone, false)
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
if len(networkEndpoints) != 1 { if len(networkEndpoints) != 1 {
e2elog.Logf("Expect NEG %s to exist, but got %d", neg, len(networkEndpoints)) e2elog.Logf("Expect NEG %s to exist, but got %d", neg, len(networkEndpoints))
return false, nil return false, nil
@ -910,11 +909,11 @@ func detectNegAnnotation(f *framework.Framework, jig *ingress.TestJig, gceContro
err = gceController.BackendServiceUsingNEG(jig.GetServicePorts(false)) err = gceController.BackendServiceUsingNEG(jig.GetServicePorts(false))
if err != nil { if err != nil {
e2elog.Logf("Failed to validate NEG backend service: %v", err) e2elog.Logf("ginkgo.Failed to validate NEG backend service: %v", err)
return false, nil return false, nil
} }
return true, nil return true, nil
}); err != nil { }); err != nil {
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
} }
} }
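detectNegAnnotation reads the NEG status annotation that the controller writes back onto the Service and walks status.NetworkEndpointGroups. The exact JSON layout of that annotation is not visible in this hunk, so the sketch below assumes a simplified shape (NEG name per port plus a zone list) purely for illustration:

package main

import (
	"encoding/json"
	"fmt"
)

// negStatus is an assumed, simplified view of the controller-written status
// annotation; the field names are illustrative, not the framework's contract.
type negStatus struct {
	NetworkEndpointGroups map[string]string `json:"network_endpoint_groups"`
	Zones                 []string          `json:"zones"`
}

func main() {
	raw := `{"network_endpoint_groups":{"80":"k8s1-neg-example-80"},"zones":["us-central1-a"]}`
	var status negStatus
	if err := json.Unmarshal([]byte(raw), &status); err != nil {
		panic(err)
	}
	for port, neg := range status.NetworkEndpointGroups {
		fmt.Printf("port %s is backed by NEG %s\n", port, neg)
	}
}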

View File

@ -20,26 +20,26 @@ import (
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/network/scale" "k8s.io/kubernetes/test/e2e/network/scale"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
) )
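The import change above is the heart of this commit: golint flags dot imports such as . "github.com/onsi/ginkgo" because they hide where identifiers like Describe, It and By come from, so every call site is rewritten to use the package qualifier instead. A small sketch of the resulting style:

package example

import (
	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

// With qualified imports the origin of every identifier is explicit at the
// call site, and golint no longer reports the dot-import failure.
var _ = ginkgo.Describe("qualified import example", func() {
	ginkgo.It("uses ginkgo and gomega through their package names", func() {
		ginkgo.By("asserting something trivial")
		gomega.Expect(1 + 1).To(gomega.Equal(2))
	})
})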
var _ = SIGDescribe("Loadbalancing: L7 Scalability", func() { var _ = SIGDescribe("Loadbalancing: L7 Scalability", func() {
defer GinkgoRecover() defer ginkgo.GinkgoRecover()
var ( var (
ns string ns string
) )
f := framework.NewDefaultFramework("ingress-scale") f := framework.NewDefaultFramework("ingress-scale")
BeforeEach(func() { ginkgo.BeforeEach(func() {
ns = f.Namespace.Name ns = f.Namespace.Name
}) })
Describe("GCE [Slow] [Serial] [Feature:IngressScale]", func() { ginkgo.Describe("GCE [Slow] [Serial] [Feature:IngressScale]", func() {
var ( var (
scaleFramework *scale.IngressScaleFramework scaleFramework *scale.IngressScaleFramework
) )
BeforeEach(func() { ginkgo.BeforeEach(func() {
framework.SkipUnlessProviderIs("gce", "gke") framework.SkipUnlessProviderIs("gce", "gke")
scaleFramework = scale.NewIngressScaleFramework(f.ClientSet, ns, framework.TestContext.CloudConfig) scaleFramework = scale.NewIngressScaleFramework(f.ClientSet, ns, framework.TestContext.CloudConfig)
@ -48,13 +48,13 @@ var _ = SIGDescribe("Loadbalancing: L7 Scalability", func() {
} }
}) })
AfterEach(func() { ginkgo.AfterEach(func() {
if errs := scaleFramework.CleanupScaleTest(); len(errs) != 0 { if errs := scaleFramework.CleanupScaleTest(); len(errs) != 0 {
framework.Failf("Unexpected error while cleaning up ingress scale test: %v", errs) framework.Failf("Unexpected error while cleaning up ingress scale test: %v", errs)
} }
}) })
It("Creating and updating ingresses should happen promptly with small/medium/large amount of ingresses", func() { ginkgo.It("Creating and updating ingresses should happen promptly with small/medium/large amount of ingresses", func() {
if errs := scaleFramework.RunScaleTest(); len(errs) != 0 { if errs := scaleFramework.RunScaleTest(); len(errs) != 0 {
framework.Failf("Unexpected error while running ingress scale test: %v", errs) framework.Failf("Unexpected error while running ingress scale test: %v", errs)
} }

View File

@ -33,8 +33,8 @@ import (
"k8s.io/kubernetes/test/images/net/nat" "k8s.io/kubernetes/test/images/net/nat"
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
) )
var kubeProxyE2eImage = imageutils.GetE2EImage(imageutils.Net) var kubeProxyE2eImage = imageutils.GetE2EImage(imageutils.Net)
@ -49,7 +49,7 @@ var _ = SIGDescribe("Network", func() {
fr := framework.NewDefaultFramework("network") fr := framework.NewDefaultFramework("network")
It("should set TCP CLOSE_WAIT timeout", func() { ginkgo.It("should set TCP CLOSE_WAIT timeout", func() {
nodes := framework.GetReadySchedulableNodesOrDie(fr.ClientSet) nodes := framework.GetReadySchedulableNodesOrDie(fr.ClientSet)
ips := framework.CollectAddresses(nodes, v1.NodeInternalIP) ips := framework.CollectAddresses(nodes, v1.NodeInternalIP)
@ -145,21 +145,21 @@ var _ = SIGDescribe("Network", func() {
}, },
} }
By(fmt.Sprintf( ginkgo.By(fmt.Sprintf(
"Launching a server daemon on node %v (node ip: %v, image: %v)", "Launching a server daemon on node %v (node ip: %v, image: %v)",
serverNodeInfo.name, serverNodeInfo.name,
serverNodeInfo.nodeIP, serverNodeInfo.nodeIP,
kubeProxyE2eImage)) kubeProxyE2eImage))
fr.PodClient().CreateSync(serverPodSpec) fr.PodClient().CreateSync(serverPodSpec)
By(fmt.Sprintf( ginkgo.By(fmt.Sprintf(
"Launching a client daemon on node %v (node ip: %v, image: %v)", "Launching a client daemon on node %v (node ip: %v, image: %v)",
clientNodeInfo.name, clientNodeInfo.name,
clientNodeInfo.nodeIP, clientNodeInfo.nodeIP,
kubeProxyE2eImage)) kubeProxyE2eImage))
fr.PodClient().CreateSync(clientPodSpec) fr.PodClient().CreateSync(clientPodSpec)
By("Make client connect") ginkgo.By("Make client connect")
options := nat.CloseWaitClientOptions{ options := nat.CloseWaitClientOptions{
RemoteAddr: fmt.Sprintf("%v:%v", RemoteAddr: fmt.Sprintf("%v:%v",
@ -179,7 +179,7 @@ var _ = SIGDescribe("Network", func() {
<-time.After(time.Duration(1) * time.Second) <-time.After(time.Duration(1) * time.Second)
By("Checking /proc/net/nf_conntrack for the timeout") ginkgo.By("Checking /proc/net/nf_conntrack for the timeout")
// If test flakes occur here, then this check should be performed // If test flakes occur here, then this check should be performed
// in a loop as there may be a race with the client connecting. // in a loop as there may be a race with the client connecting.
e2essh.IssueSSHCommandWithResult( e2essh.IssueSSHCommandWithResult(
@ -214,8 +214,8 @@ var _ = SIGDescribe("Network", func() {
e2elog.Logf("conntrack entry timeout was: %v, expected: %v", e2elog.Logf("conntrack entry timeout was: %v, expected: %v",
timeoutSeconds, expectedTimeoutSeconds) timeoutSeconds, expectedTimeoutSeconds)
Expect(math.Abs(float64(timeoutSeconds - expectedTimeoutSeconds))).Should( gomega.Expect(math.Abs(float64(timeoutSeconds - expectedTimeoutSeconds))).Should(
BeNumerically("<", (epsilonSeconds))) gomega.BeNumerically("<", (epsilonSeconds)))
}) })
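The CLOSE_WAIT check above reads /proc/net/nf_conntrack over SSH and compares the entry's remaining timeout against the expected value within an epsilon. A small sketch of pulling that timeout out of a conntrack line; the sample line, and the assumption that the timeout is the numeric field immediately before the state name, are illustrative only:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// closeWaitTimeout scans nf_conntrack output for a CLOSE_WAIT entry and
// returns the numeric timeout field assumed to sit just before the state name.
func closeWaitTimeout(conntrack string) (int, bool) {
	for _, line := range strings.Split(conntrack, "\n") {
		if !strings.Contains(line, "CLOSE_WAIT") {
			continue
		}
		fields := strings.Fields(line)
		for i, f := range fields {
			if f == "CLOSE_WAIT" && i > 0 {
				if t, err := strconv.Atoi(fields[i-1]); err == nil {
					return t, true
				}
			}
		}
	}
	return 0, false
}

func main() {
	sample := "ipv4     2 tcp      6 3593 CLOSE_WAIT src=10.0.0.1 dst=10.0.0.2 sport=54321 dport=80"
	if t, ok := closeWaitTimeout(sample); ok {
		fmt.Printf("CLOSE_WAIT timeout: %d seconds\n", t)
	}
}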
// Regression test for #74839, where: // Regression test for #74839, where:
@ -223,7 +223,7 @@ var _ = SIGDescribe("Network", func() {
// a problem where spurious retransmits in a long-running TCP connection to a service // a problem where spurious retransmits in a long-running TCP connection to a service
// IP could result in the connection being closed with the error "Connection reset by // IP could result in the connection being closed with the error "Connection reset by
// peer" // peer"
It("should resolve connrection reset issue #74839 [Slow]", func() { ginkgo.It("should resolve connrection reset issue #74839 [Slow]", func() {
serverLabel := map[string]string{ serverLabel := map[string]string{
"app": "boom-server", "app": "boom-server",
} }
@ -265,7 +265,7 @@ var _ = SIGDescribe("Network", func() {
_, err := fr.ClientSet.CoreV1().Pods(fr.Namespace.Name).Create(serverPod) _, err := fr.ClientSet.CoreV1().Pods(fr.Namespace.Name).Create(serverPod)
framework.ExpectNoError(err) framework.ExpectNoError(err)
By("Server pod created") ginkgo.By("Server pod created")
svc := &v1.Service{ svc := &v1.Service{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
@ -284,7 +284,7 @@ var _ = SIGDescribe("Network", func() {
_, err = fr.ClientSet.CoreV1().Services(fr.Namespace.Name).Create(svc) _, err = fr.ClientSet.CoreV1().Services(fr.Namespace.Name).Create(svc)
framework.ExpectNoError(err) framework.ExpectNoError(err)
By("Server service created") ginkgo.By("Server service created")
pod := &v1.Pod{ pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
@ -319,13 +319,13 @@ var _ = SIGDescribe("Network", func() {
_, err = fr.ClientSet.CoreV1().Pods(fr.Namespace.Name).Create(pod) _, err = fr.ClientSet.CoreV1().Pods(fr.Namespace.Name).Create(pod)
framework.ExpectNoError(err) framework.ExpectNoError(err)
By("Client pod created") ginkgo.By("Client pod created")
for i := 0; i < 20; i++ { for i := 0; i < 20; i++ {
time.Sleep(3 * time.Second) time.Sleep(3 * time.Second)
resultPod, err := fr.ClientSet.CoreV1().Pods(fr.Namespace.Name).Get(serverPod.Name, metav1.GetOptions{}) resultPod, err := fr.ClientSet.CoreV1().Pods(fr.Namespace.Name).Get(serverPod.Name, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
Expect(resultPod.Status.ContainerStatuses[0].LastTerminationState.Terminated).Should(BeNil()) gomega.Expect(resultPod.Status.ContainerStatuses[0].LastTerminationState.Terminated).Should(gomega.BeNil())
} }
}) })
}) })

View File

@ -27,8 +27,8 @@ import (
"fmt" "fmt"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
) )
/* /*
@ -45,27 +45,27 @@ var _ = SIGDescribe("NetworkPolicy", func() {
var podServer *v1.Pod var podServer *v1.Pod
f := framework.NewDefaultFramework("network-policy") f := framework.NewDefaultFramework("network-policy")
Context("NetworkPolicy between server and client", func() { ginkgo.Context("NetworkPolicy between server and client", func() {
BeforeEach(func() { ginkgo.BeforeEach(func() {
By("Creating a simple server that serves on port 80 and 81.") ginkgo.By("Creating a simple server that serves on port 80 and 81.")
podServer, service = createServerPodAndService(f, f.Namespace, "server", []int{80, 81}) podServer, service = createServerPodAndService(f, f.Namespace, "server", []int{80, 81})
By("Waiting for pod ready", func() { ginkgo.By("Waiting for pod ready", func() {
err := f.WaitForPodReady(podServer.Name) err := f.WaitForPodReady(podServer.Name)
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
}) })
// Create pods, which should be able to communicate with the server on port 80 and 81. // Create pods, which should be able to communicate with the server on port 80 and 81.
By("Testing pods can connect to both ports when no policy is present.") ginkgo.By("Testing pods can connect to both ports when no policy is present.")
testCanConnect(f, f.Namespace, "client-can-connect-80", service, 80) testCanConnect(f, f.Namespace, "client-can-connect-80", service, 80)
testCanConnect(f, f.Namespace, "client-can-connect-81", service, 81) testCanConnect(f, f.Namespace, "client-can-connect-81", service, 81)
}) })
AfterEach(func() { ginkgo.AfterEach(func() {
cleanupServerPodAndService(f, podServer, service) cleanupServerPodAndService(f, podServer, service)
}) })
It("should support a 'default-deny' policy [Feature:NetworkPolicy]", func() { ginkgo.It("should support a 'default-deny' policy [Feature:NetworkPolicy]", func() {
policy := &networkingv1.NetworkPolicy{ policy := &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "deny-all", Name: "deny-all",
@ -77,7 +77,7 @@ var _ = SIGDescribe("NetworkPolicy", func() {
} }
policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy) policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy)
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
defer cleanupNetworkPolicy(f, policy) defer cleanupNetworkPolicy(f, policy)
// Create a pod with name 'client-cannot-connect', which will attempt to communicate with the server, // Create a pod with name 'client-cannot-connect', which will attempt to communicate with the server,
@ -85,8 +85,8 @@ var _ = SIGDescribe("NetworkPolicy", func() {
testCannotConnect(f, f.Namespace, "client-cannot-connect", service, 80) testCannotConnect(f, f.Namespace, "client-cannot-connect", service, 80)
}) })
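The 'default-deny' policy created above (its spec is elided in this hunk) works by selecting every pod with an empty podSelector while listing no ingress rules, so nothing is allowed in. A minimal sketch of such an object using only the standard API types:

package main

import (
	"fmt"

	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// defaultDenyIngress selects all pods in the namespace and admits nothing:
// an empty PodSelector matches every pod, and an empty Ingress list denies
// all ingress traffic for the selected pods.
func defaultDenyIngress() *networkingv1.NetworkPolicy {
	return &networkingv1.NetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "deny-all"},
		Spec: networkingv1.NetworkPolicySpec{
			PodSelector: metav1.LabelSelector{},
			Ingress:     []networkingv1.NetworkPolicyIngressRule{},
		},
	}
}

func main() {
	fmt.Println(defaultDenyIngress().Name)
}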
It("should enforce policy based on PodSelector [Feature:NetworkPolicy]", func() { ginkgo.It("should enforce policy based on PodSelector [Feature:NetworkPolicy]", func() {
By("Creating a network policy for the server which allows traffic from the pod 'client-a'.") ginkgo.By("Creating a network policy for the server which allows traffic from the pod 'client-a'.")
policy := &networkingv1.NetworkPolicy{ policy := &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "allow-client-a-via-pod-selector", Name: "allow-client-a-via-pod-selector",
@ -112,18 +112,18 @@ var _ = SIGDescribe("NetworkPolicy", func() {
} }
policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy) policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy)
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
defer cleanupNetworkPolicy(f, policy) defer cleanupNetworkPolicy(f, policy)
By("Creating client-a which should be able to contact the server.", func() { ginkgo.By("Creating client-a which should be able to contact the server.", func() {
testCanConnect(f, f.Namespace, "client-a", service, 80) testCanConnect(f, f.Namespace, "client-a", service, 80)
}) })
By("Creating client-b which should not be able to contact the server.", func() { ginkgo.By("Creating client-b which should not be able to contact the server.", func() {
testCannotConnect(f, f.Namespace, "client-b", service, 80) testCannotConnect(f, f.Namespace, "client-b", service, 80)
}) })
}) })
It("should enforce policy based on NamespaceSelector [Feature:NetworkPolicy]", func() { ginkgo.It("should enforce policy based on NamespaceSelector [Feature:NetworkPolicy]", func() {
nsA := f.Namespace nsA := f.Namespace
nsBName := f.BaseName + "-b" nsBName := f.BaseName + "-b"
// The CreateNamespace helper uses the input name as a Name Generator, so the namespace itself // The CreateNamespace helper uses the input name as a Name Generator, so the namespace itself
@ -132,15 +132,15 @@ var _ = SIGDescribe("NetworkPolicy", func() {
nsB, err := f.CreateNamespace(nsBName, map[string]string{ nsB, err := f.CreateNamespace(nsBName, map[string]string{
"ns-name": nsBName, "ns-name": nsBName,
}) })
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
// Create Server with Service in NS-B // Create Server with Service in NS-B
e2elog.Logf("Waiting for server to come up.") e2elog.Logf("Waiting for server to come up.")
err = framework.WaitForPodRunningInNamespace(f.ClientSet, podServer) err = framework.WaitForPodRunningInNamespace(f.ClientSet, podServer)
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
// Create Policy for that service that allows traffic only via namespace B // Create Policy for that service that allows traffic only via namespace B
By("Creating a network policy for the server which allows traffic from namespace-b.") ginkgo.By("Creating a network policy for the server which allows traffic from namespace-b.")
policy := &networkingv1.NetworkPolicy{ policy := &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "allow-ns-b-via-namespace-selector", Name: "allow-ns-b-via-namespace-selector",
@ -165,15 +165,15 @@ var _ = SIGDescribe("NetworkPolicy", func() {
}, },
} }
policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(nsA.Name).Create(policy) policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(nsA.Name).Create(policy)
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
defer cleanupNetworkPolicy(f, policy) defer cleanupNetworkPolicy(f, policy)
testCannotConnect(f, nsA, "client-a", service, 80) testCannotConnect(f, nsA, "client-a", service, 80)
testCanConnect(f, nsB, "client-b", service, 80) testCanConnect(f, nsB, "client-b", service, 80)
}) })
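The namespace-based policy above admits traffic only from pods in namespace B by matching the ns-name label that was attached to the namespace when it was created. A sketch of the corresponding ingress rule, with the namespace label value written out as a placeholder:

package main

import (
	"fmt"

	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// allowFromNamespace builds an ingress rule that admits traffic only from pods
// living in namespaces carrying the given ns-name label value.
func allowFromNamespace(nsName string) networkingv1.NetworkPolicyIngressRule {
	return networkingv1.NetworkPolicyIngressRule{
		From: []networkingv1.NetworkPolicyPeer{{
			NamespaceSelector: &metav1.LabelSelector{
				MatchLabels: map[string]string{"ns-name": nsName},
			},
		}},
	}
}

func main() {
	rule := allowFromNamespace("network-policy-b") // placeholder namespace label value
	fmt.Println(rule.From[0].NamespaceSelector.MatchLabels)
}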
It("should enforce policy based on Ports [Feature:NetworkPolicy]", func() { ginkgo.It("should enforce policy based on Ports [Feature:NetworkPolicy]", func() {
By("Creating a network policy for the Service which allows traffic only to one port.") ginkgo.By("Creating a network policy for the Service which allows traffic only to one port.")
policy := &networkingv1.NetworkPolicy{ policy := &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "allow-ingress-on-port-81", Name: "allow-ingress-on-port-81",
@ -194,16 +194,16 @@ var _ = SIGDescribe("NetworkPolicy", func() {
}, },
} }
policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy) policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy)
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
defer cleanupNetworkPolicy(f, policy) defer cleanupNetworkPolicy(f, policy)
By("Testing pods can connect only to the port allowed by the policy.") ginkgo.By("Testing pods can connect only to the port allowed by the policy.")
testCannotConnect(f, f.Namespace, "client-a", service, 80) testCannotConnect(f, f.Namespace, "client-a", service, 80)
testCanConnect(f, f.Namespace, "client-b", service, 81) testCanConnect(f, f.Namespace, "client-b", service, 81)
}) })
It("should enforce multiple, stacked policies with overlapping podSelectors [Feature:NetworkPolicy]", func() { ginkgo.It("should enforce multiple, stacked policies with overlapping podSelectors [Feature:NetworkPolicy]", func() {
By("Creating a network policy for the Service which allows traffic only to one port.") ginkgo.By("Creating a network policy for the Service which allows traffic only to one port.")
policy := &networkingv1.NetworkPolicy{ policy := &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "allow-ingress-on-port-80", Name: "allow-ingress-on-port-80",
@ -224,10 +224,10 @@ var _ = SIGDescribe("NetworkPolicy", func() {
}, },
} }
policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy) policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy)
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
defer cleanupNetworkPolicy(f, policy) defer cleanupNetworkPolicy(f, policy)
By("Creating a network policy for the Service which allows traffic only to another port.") ginkgo.By("Creating a network policy for the Service which allows traffic only to another port.")
policy2 := &networkingv1.NetworkPolicy{ policy2 := &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "allow-ingress-on-port-81", Name: "allow-ingress-on-port-81",
@ -248,16 +248,16 @@ var _ = SIGDescribe("NetworkPolicy", func() {
}, },
} }
policy2, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy2) policy2, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy2)
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
defer cleanupNetworkPolicy(f, policy2) defer cleanupNetworkPolicy(f, policy2)
By("Testing pods can connect to both ports when both policies are present.") ginkgo.By("Testing pods can connect to both ports when both policies are present.")
testCanConnect(f, f.Namespace, "client-a", service, 80) testCanConnect(f, f.Namespace, "client-a", service, 80)
testCanConnect(f, f.Namespace, "client-b", service, 81) testCanConnect(f, f.Namespace, "client-b", service, 81)
}) })
It("should support allow-all policy [Feature:NetworkPolicy]", func() { ginkgo.It("should support allow-all policy [Feature:NetworkPolicy]", func() {
By("Creating a network policy which allows all traffic.") ginkgo.By("Creating a network policy which allows all traffic.")
policy := &networkingv1.NetworkPolicy{ policy := &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "allow-all", Name: "allow-all",
@ -271,15 +271,15 @@ var _ = SIGDescribe("NetworkPolicy", func() {
}, },
} }
policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy) policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy)
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
defer cleanupNetworkPolicy(f, policy) defer cleanupNetworkPolicy(f, policy)
By("Testing pods can connect to both ports when an 'allow-all' policy is present.") ginkgo.By("Testing pods can connect to both ports when an 'allow-all' policy is present.")
testCanConnect(f, f.Namespace, "client-a", service, 80) testCanConnect(f, f.Namespace, "client-a", service, 80)
testCanConnect(f, f.Namespace, "client-b", service, 81) testCanConnect(f, f.Namespace, "client-b", service, 81)
}) })
It("should allow ingress access on one named port [Feature:NetworkPolicy]", func() { ginkgo.It("should allow ingress access on one named port [Feature:NetworkPolicy]", func() {
policy := &networkingv1.NetworkPolicy{ policy := &networkingv1.NetworkPolicy{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: "allow-client-a-via-named-port-ingress-rule", Name: "allow-client-a-via-named-port-ingress-rule",
@ -301,18 +301,18 @@ var _ = SIGDescribe("NetworkPolicy", func() {
} }
policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy) policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy)
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
defer cleanupNetworkPolicy(f, policy) defer cleanupNetworkPolicy(f, policy)
By("Creating client-a which should be able to contact the server.", func() { ginkgo.By("Creating client-a which should be able to contact the server.", func() {
testCanConnect(f, f.Namespace, "client-a", service, 80) testCanConnect(f, f.Namespace, "client-a", service, 80)
}) })
By("Creating client-b which should not be able to contact the server on port 81.", func() { ginkgo.By("Creating client-b which should not be able to contact the server on port 81.", func() {
testCannotConnect(f, f.Namespace, "client-b", service, 81) testCannotConnect(f, f.Namespace, "client-b", service, 81)
}) })
}) })
It("should allow egress access on one named port [Feature:NetworkPolicy]", func() { ginkgo.It("should allow egress access on one named port [Feature:NetworkPolicy]", func() {
clientPodName := "client-a" clientPodName := "client-a"
protocolUDP := v1.ProtocolUDP protocolUDP := v1.ProtocolUDP
policy := &networkingv1.NetworkPolicy{ policy := &networkingv1.NetworkPolicy{
@ -343,13 +343,13 @@ var _ = SIGDescribe("NetworkPolicy", func() {
} }
policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy) policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy)
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
defer cleanupNetworkPolicy(f, policy) defer cleanupNetworkPolicy(f, policy)
By("Creating client-a which should be able to contact the server.", func() { ginkgo.By("Creating client-a which should be able to contact the server.", func() {
testCanConnect(f, f.Namespace, clientPodName, service, 80) testCanConnect(f, f.Namespace, clientPodName, service, 80)
}) })
By("Creating client-a which should not be able to contact the server on port 81.", func() { ginkgo.By("Creating client-a which should not be able to contact the server on port 81.", func() {
testCannotConnect(f, f.Namespace, clientPodName, service, 81) testCannotConnect(f, f.Namespace, clientPodName, service, 81)
}) })
}) })
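The named-port tests above rely on NetworkPolicyPort accepting either a number or a container port name, so the policy keeps matching even if the numeric port behind the name changes. A sketch of both forms; the port name serve-80 is a placeholder for whatever the server pod's containerPort is named:

package main

import (
	"fmt"

	networkingv1 "k8s.io/api/networking/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// Ports in a NetworkPolicy rule may be numeric or named; a named port is
// resolved against the containerPort names of the selected pods.
func namedAndNumericPorts() []networkingv1.NetworkPolicyPort {
	named := intstr.FromString("serve-80") // assumed container port name
	numeric := intstr.FromInt(81)
	return []networkingv1.NetworkPolicyPort{
		{Port: &named},
		{Port: &numeric},
	}
}

func main() {
	for _, p := range namedAndNumericPorts() {
		fmt.Println(p.Port.String())
	}
}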
@ -357,10 +357,10 @@ var _ = SIGDescribe("NetworkPolicy", func() {
}) })
func testCanConnect(f *framework.Framework, ns *v1.Namespace, podName string, service *v1.Service, targetPort int) { func testCanConnect(f *framework.Framework, ns *v1.Namespace, podName string, service *v1.Service, targetPort int) {
By(fmt.Sprintf("Creating client pod %s that should successfully connect to %s.", podName, service.Name)) ginkgo.By(fmt.Sprintf("Creating client pod %s that should successfully connect to %s.", podName, service.Name))
podClient := createNetworkClientPod(f, ns, podName, service, targetPort) podClient := createNetworkClientPod(f, ns, podName, service, targetPort)
defer func() { defer func() {
By(fmt.Sprintf("Cleaning up the pod %s", podName)) ginkgo.By(fmt.Sprintf("Cleaning up the pod %s", podName))
if err := f.ClientSet.CoreV1().Pods(ns.Name).Delete(podClient.Name, nil); err != nil { if err := f.ClientSet.CoreV1().Pods(ns.Name).Delete(podClient.Name, nil); err != nil {
framework.Failf("unable to cleanup pod %v: %v", podClient.Name, err) framework.Failf("unable to cleanup pod %v: %v", podClient.Name, err)
} }
@ -368,7 +368,7 @@ func testCanConnect(f *framework.Framework, ns *v1.Namespace, podName string, se
e2elog.Logf("Waiting for %s to complete.", podClient.Name) e2elog.Logf("Waiting for %s to complete.", podClient.Name)
err := framework.WaitForPodNoLongerRunningInNamespace(f.ClientSet, podClient.Name, ns.Name) err := framework.WaitForPodNoLongerRunningInNamespace(f.ClientSet, podClient.Name, ns.Name)
Expect(err).NotTo(HaveOccurred(), "Pod did not finish as expected.") gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Pod did not finish as expected.")
e2elog.Logf("Waiting for %s to complete.", podClient.Name) e2elog.Logf("Waiting for %s to complete.", podClient.Name)
err = framework.WaitForPodSuccessInNamespace(f.ClientSet, podClient.Name, ns.Name) err = framework.WaitForPodSuccessInNamespace(f.ClientSet, podClient.Name, ns.Name)
@ -404,10 +404,10 @@ func testCanConnect(f *framework.Framework, ns *v1.Namespace, podName string, se
} }
func testCannotConnect(f *framework.Framework, ns *v1.Namespace, podName string, service *v1.Service, targetPort int) { func testCannotConnect(f *framework.Framework, ns *v1.Namespace, podName string, service *v1.Service, targetPort int) {
By(fmt.Sprintf("Creating client pod %s that should not be able to connect to %s.", podName, service.Name)) ginkgo.By(fmt.Sprintf("Creating client pod %s that should not be able to connect to %s.", podName, service.Name))
podClient := createNetworkClientPod(f, ns, podName, service, targetPort) podClient := createNetworkClientPod(f, ns, podName, service, targetPort)
defer func() { defer func() {
By(fmt.Sprintf("Cleaning up the pod %s", podName)) ginkgo.By(fmt.Sprintf("Cleaning up the pod %s", podName))
if err := f.ClientSet.CoreV1().Pods(ns.Name).Delete(podClient.Name, nil); err != nil { if err := f.ClientSet.CoreV1().Pods(ns.Name).Delete(podClient.Name, nil); err != nil {
framework.Failf("unable to cleanup pod %v: %v", podClient.Name, err) framework.Failf("unable to cleanup pod %v: %v", podClient.Name, err)
} }
@ -495,7 +495,7 @@ func createServerPodAndService(f *framework.Framework, namespace *v1.Namespace,
}) })
} }
By(fmt.Sprintf("Creating a server pod %s in namespace %s", podName, namespace.Name)) ginkgo.By(fmt.Sprintf("Creating a server pod %s in namespace %s", podName, namespace.Name))
pod, err := f.ClientSet.CoreV1().Pods(namespace.Name).Create(&v1.Pod{ pod, err := f.ClientSet.CoreV1().Pods(namespace.Name).Create(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: podName, Name: podName,
@ -508,11 +508,11 @@ func createServerPodAndService(f *framework.Framework, namespace *v1.Namespace,
RestartPolicy: v1.RestartPolicyNever, RestartPolicy: v1.RestartPolicyNever,
}, },
}) })
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
e2elog.Logf("Created pod %v", pod.ObjectMeta.Name) e2elog.Logf("Created pod %v", pod.ObjectMeta.Name)
svcName := fmt.Sprintf("svc-%s", podName) svcName := fmt.Sprintf("svc-%s", podName)
By(fmt.Sprintf("Creating a service %s for pod %s in namespace %s", svcName, podName, namespace.Name)) ginkgo.By(fmt.Sprintf("Creating a service %s for pod %s in namespace %s", svcName, podName, namespace.Name))
svc, err := f.ClientSet.CoreV1().Services(namespace.Name).Create(&v1.Service{ svc, err := f.ClientSet.CoreV1().Services(namespace.Name).Create(&v1.Service{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: svcName, Name: svcName,
@ -524,18 +524,18 @@ func createServerPodAndService(f *framework.Framework, namespace *v1.Namespace,
}, },
}, },
}) })
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
e2elog.Logf("Created service %s", svc.Name) e2elog.Logf("Created service %s", svc.Name)
return pod, svc return pod, svc
} }
func cleanupServerPodAndService(f *framework.Framework, pod *v1.Pod, service *v1.Service) { func cleanupServerPodAndService(f *framework.Framework, pod *v1.Pod, service *v1.Service) {
By("Cleaning up the server.") ginkgo.By("Cleaning up the server.")
if err := f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(pod.Name, nil); err != nil { if err := f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(pod.Name, nil); err != nil {
framework.Failf("unable to cleanup pod %v: %v", pod.Name, err) framework.Failf("unable to cleanup pod %v: %v", pod.Name, err)
} }
By("Cleaning up the server's service.") ginkgo.By("Cleaning up the server's service.")
if err := f.ClientSet.CoreV1().Services(service.Namespace).Delete(service.Name, nil); err != nil { if err := f.ClientSet.CoreV1().Services(service.Namespace).Delete(service.Name, nil); err != nil {
framework.Failf("unable to cleanup svc %v: %v", service.Name, err) framework.Failf("unable to cleanup svc %v: %v", service.Name, err)
} }
@ -569,13 +569,13 @@ func createNetworkClientPod(f *framework.Framework, namespace *v1.Namespace, pod
}, },
}) })
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
return pod return pod
} }
func cleanupNetworkPolicy(f *framework.Framework, policy *networkingv1.NetworkPolicy) { func cleanupNetworkPolicy(f *framework.Framework, policy *networkingv1.NetworkPolicy) {
By("Cleaning up the policy.") ginkgo.By("Cleaning up the policy.")
if err := f.ClientSet.NetworkingV1().NetworkPolicies(policy.Namespace).Delete(policy.Name, nil); err != nil { if err := f.ClientSet.NetworkingV1().NetworkPolicies(policy.Namespace).Delete(policy.Name, nil); err != nil {
framework.Failf("unable to cleanup policy %v: %v", policy.Name, err) framework.Failf("unable to cleanup policy %v: %v", policy.Name, err)
} }

View File

@ -33,8 +33,8 @@ import (
"k8s.io/kubernetes/test/e2e/framework/providers/gce" "k8s.io/kubernetes/test/e2e/framework/providers/gce"
gcecloud "k8s.io/legacy-cloud-providers/gce" gcecloud "k8s.io/legacy-cloud-providers/gce"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
) )
var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() { var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() {
@ -43,14 +43,14 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() {
var cs clientset.Interface var cs clientset.Interface
serviceLBNames := []string{} serviceLBNames := []string{}
BeforeEach(func() { ginkgo.BeforeEach(func() {
// This test suite requires the GCE environment. // This test suite requires the GCE environment.
framework.SkipUnlessProviderIs("gce") framework.SkipUnlessProviderIs("gce")
cs = f.ClientSet cs = f.ClientSet
}) })
AfterEach(func() { ginkgo.AfterEach(func() {
if CurrentGinkgoTestDescription().Failed { if ginkgo.CurrentGinkgoTestDescription().Failed {
framework.DescribeSvc(f.Namespace.Name) framework.DescribeSvc(f.Namespace.Name)
} }
for _, lb := range serviceLBNames { for _, lb := range serviceLBNames {
@ -60,7 +60,7 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() {
//reset serviceLBNames //reset serviceLBNames
serviceLBNames = []string{} serviceLBNames = []string{}
}) })
It("should be able to create and tear down a standard-tier load balancer [Slow]", func() { ginkgo.It("should be able to create and tear down a standard-tier load balancer [Slow]", func() {
lagTimeout := framework.LoadBalancerLagTimeoutDefault lagTimeout := framework.LoadBalancerLagTimeoutDefault
createTimeout := framework.GetServiceLoadBalancerCreationTimeout(cs) createTimeout := framework.GetServiceLoadBalancerCreationTimeout(cs)
@ -68,19 +68,19 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() {
ns := f.Namespace.Name ns := f.Namespace.Name
jig := framework.NewServiceTestJig(cs, svcName) jig := framework.NewServiceTestJig(cs, svcName)
By("creating a pod to be part of the service " + svcName) ginkgo.By("creating a pod to be part of the service " + svcName)
jig.RunOrFail(ns, nil) jig.RunOrFail(ns, nil)
// Test 1: create a standard tiered LB for the Service. // Test 1: create a standard tiered LB for the Service.
By("creating a Service of type LoadBalancer using the standard network tier") ginkgo.By("creating a Service of type LoadBalancer using the standard network tier")
svc := jig.CreateTCPServiceOrFail(ns, func(svc *v1.Service) { svc := jig.CreateTCPServiceOrFail(ns, func(svc *v1.Service) {
svc.Spec.Type = v1.ServiceTypeLoadBalancer svc.Spec.Type = v1.ServiceTypeLoadBalancer
setNetworkTier(svc, string(gcecloud.NetworkTierAnnotationStandard)) setNetworkTier(svc, string(gcecloud.NetworkTierAnnotationStandard))
}) })
// Verify that service has been updated properly. // Verify that service has been updated properly.
svcTier, err := gcecloud.GetServiceNetworkTier(svc) svcTier, err := gcecloud.GetServiceNetworkTier(svc)
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
Expect(svcTier).To(Equal(cloud.NetworkTierStandard)) gomega.Expect(svcTier).To(gomega.Equal(cloud.NetworkTierStandard))
// Record the LB name for test cleanup. // Record the LB name for test cleanup.
serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc)) serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc))
@ -88,26 +88,26 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() {
ingressIP := waitAndVerifyLBWithTier(jig, ns, svcName, "", createTimeout, lagTimeout) ingressIP := waitAndVerifyLBWithTier(jig, ns, svcName, "", createTimeout, lagTimeout)
// Test 2: re-create a LB of a different tier for the updated Service. // Test 2: re-create a LB of a different tier for the updated Service.
By("updating the Service to use the premium (default) tier") ginkgo.By("updating the Service to use the premium (default) tier")
svc = jig.UpdateServiceOrFail(ns, svcName, func(svc *v1.Service) { svc = jig.UpdateServiceOrFail(ns, svcName, func(svc *v1.Service) {
clearNetworkTier(svc) clearNetworkTier(svc)
}) })
// Verify that service has been updated properly. // Verify that service has been updated properly.
svcTier, err = gcecloud.GetServiceNetworkTier(svc) svcTier, err = gcecloud.GetServiceNetworkTier(svc)
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
Expect(svcTier).To(Equal(cloud.NetworkTierDefault)) gomega.Expect(svcTier).To(gomega.Equal(cloud.NetworkTierDefault))
// Wait until the ingress IP changes. Each tier has its own pool of // Wait until the ingress IP changes. Each tier has its own pool of
// IPs, so changing tiers implies changing IPs. // IPs, so changing tiers implies changing IPs.
ingressIP = waitAndVerifyLBWithTier(jig, ns, svcName, ingressIP, createTimeout, lagTimeout) ingressIP = waitAndVerifyLBWithTier(jig, ns, svcName, ingressIP, createTimeout, lagTimeout)
// Test 3: create a standard-tiered LB with a user-requested IP. // Test 3: create a standard-tiered LB with a user-requested IP.
By("reserving a static IP for the load balancer") ginkgo.By("reserving a static IP for the load balancer")
requestedAddrName := fmt.Sprintf("e2e-ext-lb-net-tier-%s", framework.RunID) requestedAddrName := fmt.Sprintf("e2e-ext-lb-net-tier-%s", framework.RunID)
gceCloud, err := gce.GetGCECloud() gceCloud, err := gce.GetGCECloud()
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
requestedIP, err := reserveAlphaRegionalAddress(gceCloud, requestedAddrName, cloud.NetworkTierStandard) requestedIP, err := reserveAlphaRegionalAddress(gceCloud, requestedAddrName, cloud.NetworkTierStandard)
Expect(err).NotTo(HaveOccurred(), "failed to reserve a STANDARD tiered address") gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to reserve a STANDARD tiered address")
defer func() { defer func() {
if requestedAddrName != "" { if requestedAddrName != "" {
// Release GCE static address - this is not kube-managed and will not be automatically released. // Release GCE static address - this is not kube-managed and will not be automatically released.
@ -116,19 +116,19 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() {
} }
} }
}() }()
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
e2elog.Logf("Allocated static IP to be used by the load balancer: %q", requestedIP) e2elog.Logf("Allocated static IP to be used by the load balancer: %q", requestedIP)
By("updating the Service to use the standard tier with a requested IP") ginkgo.By("updating the Service to use the standard tier with a requested IP")
svc = jig.UpdateServiceOrFail(ns, svc.Name, func(svc *v1.Service) { svc = jig.UpdateServiceOrFail(ns, svc.Name, func(svc *v1.Service) {
svc.Spec.LoadBalancerIP = requestedIP svc.Spec.LoadBalancerIP = requestedIP
setNetworkTier(svc, string(gcecloud.NetworkTierAnnotationStandard)) setNetworkTier(svc, string(gcecloud.NetworkTierAnnotationStandard))
}) })
// Verify that service has been updated properly. // Verify that service has been updated properly.
Expect(svc.Spec.LoadBalancerIP).To(Equal(requestedIP)) gomega.Expect(svc.Spec.LoadBalancerIP).To(gomega.Equal(requestedIP))
svcTier, err = gcecloud.GetServiceNetworkTier(svc) svcTier, err = gcecloud.GetServiceNetworkTier(svc)
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
Expect(svcTier).To(Equal(cloud.NetworkTierStandard)) gomega.Expect(svcTier).To(gomega.Equal(cloud.NetworkTierStandard))
// Wait until the ingress IP changes and verifies the LB. // Wait until the ingress IP changes and verifies the LB.
ingressIP = waitAndVerifyLBWithTier(jig, ns, svcName, ingressIP, createTimeout, lagTimeout) ingressIP = waitAndVerifyLBWithTier(jig, ns, svcName, ingressIP, createTimeout, lagTimeout)
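The two waitAndVerifyLBWithTier calls above block until the load balancer's ingress IP actually moves off the previous one, since each network tier draws from its own IP pool; the helper itself is shown in the next hunk. A rough sketch of that kind of wait, assuming direct use of the clientset instead of the test jig and reusing names from the surrounding test (cs, ns, svcName, existingIP, createTimeout) plus the file's existing wait/metav1/framework imports:

err := wait.PollImmediate(5*time.Second, createTimeout, func() (bool, error) {
	svc, err := cs.CoreV1().Services(ns).Get(svcName, metav1.GetOptions{})
	if err != nil {
		return false, err
	}
	if len(svc.Status.LoadBalancer.Ingress) == 0 {
		// Load balancer not provisioned yet; keep polling.
		return false, nil
	}
	// Done once the ingress IP differs from the one recorded before the update.
	return framework.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0]) != existingIP, nil
})
framework.ExpectNoError(err)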
@ -150,10 +150,10 @@ func waitAndVerifyLBWithTier(jig *framework.ServiceTestJig, ns, svcName, existin
lbIngress := &svc.Status.LoadBalancer.Ingress[0] lbIngress := &svc.Status.LoadBalancer.Ingress[0]
ingressIP := framework.GetIngressPoint(lbIngress) ingressIP := framework.GetIngressPoint(lbIngress)
By("running sanity and reachability checks") ginkgo.By("running sanity and reachability checks")
if svc.Spec.LoadBalancerIP != "" { if svc.Spec.LoadBalancerIP != "" {
// Verify that the new ingress IP is the requested IP if it's set. // Verify that the new ingress IP is the requested IP if it's set.
Expect(ingressIP).To(Equal(svc.Spec.LoadBalancerIP)) gomega.Expect(ingressIP).To(gomega.Equal(svc.Spec.LoadBalancerIP))
} }
jig.SanityCheckService(svc, v1.ServiceTypeLoadBalancer) jig.SanityCheckService(svc, v1.ServiceTypeLoadBalancer)
// If the IP has been used by previous test, sometimes we get the lingering // If the IP has been used by previous test, sometimes we get the lingering
@ -163,10 +163,10 @@ func waitAndVerifyLBWithTier(jig *framework.ServiceTestJig, ns, svcName, existin
// Verify the network tier matches the desired. // Verify the network tier matches the desired.
svcNetTier, err := gcecloud.GetServiceNetworkTier(svc) svcNetTier, err := gcecloud.GetServiceNetworkTier(svc)
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
netTier, err := getLBNetworkTierByIP(ingressIP) netTier, err := getLBNetworkTierByIP(ingressIP)
Expect(err).NotTo(HaveOccurred(), "failed to get the network tier of the load balancer") gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get the network tier of the load balancer")
Expect(netTier).To(Equal(svcNetTier)) gomega.Expect(netTier).To(gomega.Equal(svcNetTier))
return ingressIP return ingressIP
} }
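Most of the remaining hunks in this commit are the same mechanical change: golint reports "should not use dot imports" for the `. "github.com/onsi/ginkgo"` and `. "github.com/onsi/gomega"` lines, so the imports are named and every call site is qualified. A minimal before/after sketch of the pattern, with an illustrative spec rather than one of the real tests:

// Before: dot imports put It, By, Expect, etc. directly into the package scope.
//   . "github.com/onsi/ginkgo"
//   . "github.com/onsi/gomega"
// After: named imports, with each identifier qualified at the call site.
import (
	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

var _ = ginkgo.Describe("example", func() {
	ginkgo.It("qualifies every ginkgo and gomega call", func() {
		ginkgo.By("doing nothing in particular")
		var err error
		gomega.Expect(err).NotTo(gomega.HaveOccurred())
	})
})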

View File

@ -24,18 +24,18 @@ import (
"k8s.io/kubernetes/pkg/master/ports" "k8s.io/kubernetes/pkg/master/ports"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
) )
var _ = SIGDescribe("Networking", func() { var _ = SIGDescribe("Networking", func() {
var svcname = "nettest" var svcname = "nettest"
f := framework.NewDefaultFramework(svcname) f := framework.NewDefaultFramework(svcname)
BeforeEach(func() { ginkgo.BeforeEach(func() {
// Assert basic external connectivity. // Assert basic external connectivity.
// Since this is not really a test of kubernetes in any way, we // Since this is not really a test of kubernetes in any way, we
// leave it as a pre-test assertion, rather than a Ginkgo test. // leave it as a pre-test assertion, rather than a Ginkgo test.
By("Executing a successful http request from the external internet") ginkgo.By("Executing a successful http request from the external internet")
resp, err := http.Get("http://google.com") resp, err := http.Get("http://google.com")
if err != nil { if err != nil {
framework.Failf("Unable to connect/talk to the internet: %v", err) framework.Failf("Unable to connect/talk to the internet: %v", err)
@ -45,20 +45,20 @@ var _ = SIGDescribe("Networking", func() {
} }
}) })
It("should provide Internet connection for containers [Feature:Networking-IPv4]", func() { ginkgo.It("should provide Internet connection for containers [Feature:Networking-IPv4]", func() {
By("Running container which tries to ping 8.8.8.8") ginkgo.By("Running container which tries to ping 8.8.8.8")
framework.ExpectNoError( framework.ExpectNoError(
framework.CheckConnectivityToHost(f, "", "ping-test", "8.8.8.8", framework.IPv4PingCommand, 30)) framework.CheckConnectivityToHost(f, "", "ping-test", "8.8.8.8", framework.IPv4PingCommand, 30))
}) })
It("should provide Internet connection for containers [Feature:Networking-IPv6][Experimental]", func() { ginkgo.It("should provide Internet connection for containers [Feature:Networking-IPv6][Experimental]", func() {
By("Running container which tries to ping 2001:4860:4860::8888") ginkgo.By("Running container which tries to ping 2001:4860:4860::8888")
framework.ExpectNoError( framework.ExpectNoError(
framework.CheckConnectivityToHost(f, "", "ping-test", "2001:4860:4860::8888", framework.IPv6PingCommand, 30)) framework.CheckConnectivityToHost(f, "", "ping-test", "2001:4860:4860::8888", framework.IPv6PingCommand, 30))
}) })
// First test because it has no dependencies on variables created later on. // First test because it has no dependencies on variables created later on.
It("should provide unchanging, static URL paths for kubernetes api services", func() { ginkgo.It("should provide unchanging, static URL paths for kubernetes api services", func() {
tests := []struct { tests := []struct {
path string path string
}{ }{
@ -74,22 +74,22 @@ var _ = SIGDescribe("Networking", func() {
tests = append(tests, struct{ path string }{path: "/logs"}) tests = append(tests, struct{ path string }{path: "/logs"})
} }
for _, test := range tests { for _, test := range tests {
By(fmt.Sprintf("testing: %s", test.path)) ginkgo.By(fmt.Sprintf("testing: %s", test.path))
data, err := f.ClientSet.CoreV1().RESTClient().Get(). data, err := f.ClientSet.CoreV1().RESTClient().Get().
AbsPath(test.path). AbsPath(test.path).
DoRaw() DoRaw()
if err != nil { if err != nil {
framework.Failf("Failed: %v\nBody: %s", err, string(data)) framework.Failf("ginkgo.Failed: %v\nBody: %s", err, string(data))
} }
} }
}) })
It("should check kube-proxy urls", func() { ginkgo.It("should check kube-proxy urls", func() {
// TODO: this is overkill we just need the host networking pod // TODO: this is overkill we just need the host networking pod
// to hit kube-proxy urls. // to hit kube-proxy urls.
config := framework.NewNetworkingTestConfig(f) config := framework.NewNetworkingTestConfig(f)
By("checking kube-proxy URLs") ginkgo.By("checking kube-proxy URLs")
config.GetSelfURL(ports.ProxyHealthzPort, "/healthz", "200 OK") config.GetSelfURL(ports.ProxyHealthzPort, "/healthz", "200 OK")
// Verify /healthz returns the proper content. // Verify /healthz returns the proper content.
config.GetSelfURL(ports.ProxyHealthzPort, "/healthz", "lastUpdated") config.GetSelfURL(ports.ProxyHealthzPort, "/healthz", "lastUpdated")
@ -98,116 +98,116 @@ var _ = SIGDescribe("Networking", func() {
}) })
// TODO: Remove [Slow] when this has had enough bake time to prove presubmit worthiness. // TODO: Remove [Slow] when this has had enough bake time to prove presubmit worthiness.
Describe("Granular Checks: Services [Slow]", func() { ginkgo.Describe("Granular Checks: Services [Slow]", func() {
It("should function for pod-Service: http", func() { ginkgo.It("should function for pod-Service: http", func() {
config := framework.NewNetworkingTestConfig(f) config := framework.NewNetworkingTestConfig(f)
By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterHTTPPort)) ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterHTTPPort))
config.DialFromTestContainer("http", config.ClusterIP, framework.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) config.DialFromTestContainer("http", config.ClusterIP, framework.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
By(fmt.Sprintf("dialing(http) %v --> %v:%v (nodeIP)", config.TestContainerPod.Name, config.NodeIP, config.NodeHTTPPort)) ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (nodeIP)", config.TestContainerPod.Name, config.NodeIP, config.NodeHTTPPort))
config.DialFromTestContainer("http", config.NodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) config.DialFromTestContainer("http", config.NodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
}) })
It("should function for pod-Service: udp", func() { ginkgo.It("should function for pod-Service: udp", func() {
config := framework.NewNetworkingTestConfig(f) config := framework.NewNetworkingTestConfig(f)
By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterUDPPort)) ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterUDPPort))
config.DialFromTestContainer("udp", config.ClusterIP, framework.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames()) config.DialFromTestContainer("udp", config.ClusterIP, framework.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames())
By(fmt.Sprintf("dialing(udp) %v --> %v:%v (nodeIP)", config.TestContainerPod.Name, config.NodeIP, config.NodeUDPPort)) ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (nodeIP)", config.TestContainerPod.Name, config.NodeIP, config.NodeUDPPort))
config.DialFromTestContainer("udp", config.NodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames()) config.DialFromTestContainer("udp", config.NodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames())
}) })
It("should function for node-Service: http", func() { ginkgo.It("should function for node-Service: http", func() {
config := framework.NewNetworkingTestConfig(f) config := framework.NewNetworkingTestConfig(f)
By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (config.clusterIP)", config.NodeIP, config.ClusterIP, framework.ClusterHTTPPort)) ginkgo.By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (config.clusterIP)", config.NodeIP, config.ClusterIP, framework.ClusterHTTPPort))
config.DialFromNode("http", config.ClusterIP, framework.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) config.DialFromNode("http", config.ClusterIP, framework.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeHTTPPort)) ginkgo.By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeHTTPPort))
config.DialFromNode("http", config.NodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) config.DialFromNode("http", config.NodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
}) })
It("should function for node-Service: udp", func() { ginkgo.It("should function for node-Service: udp", func() {
config := framework.NewNetworkingTestConfig(f) config := framework.NewNetworkingTestConfig(f)
By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (config.clusterIP)", config.NodeIP, config.ClusterIP, framework.ClusterUDPPort)) ginkgo.By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (config.clusterIP)", config.NodeIP, config.ClusterIP, framework.ClusterUDPPort))
config.DialFromNode("udp", config.ClusterIP, framework.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames()) config.DialFromNode("udp", config.ClusterIP, framework.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames())
By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeUDPPort)) ginkgo.By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeUDPPort))
config.DialFromNode("udp", config.NodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames()) config.DialFromNode("udp", config.NodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames())
}) })
It("should function for endpoint-Service: http", func() { ginkgo.It("should function for endpoint-Service: http", func() {
config := framework.NewNetworkingTestConfig(f) config := framework.NewNetworkingTestConfig(f)
By(fmt.Sprintf("dialing(http) %v (endpoint) --> %v:%v (config.clusterIP)", config.EndpointPods[0].Name, config.ClusterIP, framework.ClusterHTTPPort)) ginkgo.By(fmt.Sprintf("dialing(http) %v (endpoint) --> %v:%v (config.clusterIP)", config.EndpointPods[0].Name, config.ClusterIP, framework.ClusterHTTPPort))
config.DialFromEndpointContainer("http", config.ClusterIP, framework.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) config.DialFromEndpointContainer("http", config.ClusterIP, framework.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
By(fmt.Sprintf("dialing(http) %v (endpoint) --> %v:%v (nodeIP)", config.EndpointPods[0].Name, config.NodeIP, config.NodeHTTPPort)) ginkgo.By(fmt.Sprintf("dialing(http) %v (endpoint) --> %v:%v (nodeIP)", config.EndpointPods[0].Name, config.NodeIP, config.NodeHTTPPort))
config.DialFromEndpointContainer("http", config.NodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) config.DialFromEndpointContainer("http", config.NodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
}) })
It("should function for endpoint-Service: udp", func() { ginkgo.It("should function for endpoint-Service: udp", func() {
config := framework.NewNetworkingTestConfig(f) config := framework.NewNetworkingTestConfig(f)
By(fmt.Sprintf("dialing(udp) %v (endpoint) --> %v:%v (config.clusterIP)", config.EndpointPods[0].Name, config.ClusterIP, framework.ClusterUDPPort)) ginkgo.By(fmt.Sprintf("dialing(udp) %v (endpoint) --> %v:%v (config.clusterIP)", config.EndpointPods[0].Name, config.ClusterIP, framework.ClusterUDPPort))
config.DialFromEndpointContainer("udp", config.ClusterIP, framework.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames()) config.DialFromEndpointContainer("udp", config.ClusterIP, framework.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames())
By(fmt.Sprintf("dialing(udp) %v (endpoint) --> %v:%v (nodeIP)", config.EndpointPods[0].Name, config.NodeIP, config.NodeUDPPort)) ginkgo.By(fmt.Sprintf("dialing(udp) %v (endpoint) --> %v:%v (nodeIP)", config.EndpointPods[0].Name, config.NodeIP, config.NodeUDPPort))
config.DialFromEndpointContainer("udp", config.NodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames()) config.DialFromEndpointContainer("udp", config.NodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames())
}) })
It("should update endpoints: http", func() { ginkgo.It("should update endpoints: http", func() {
config := framework.NewNetworkingTestConfig(f) config := framework.NewNetworkingTestConfig(f)
By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterHTTPPort)) ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterHTTPPort))
config.DialFromTestContainer("http", config.ClusterIP, framework.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) config.DialFromTestContainer("http", config.ClusterIP, framework.ClusterHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
config.DeleteNetProxyPod() config.DeleteNetProxyPod()
By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterHTTPPort)) ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterHTTPPort))
config.DialFromTestContainer("http", config.ClusterIP, framework.ClusterHTTPPort, config.MaxTries, config.MaxTries, config.EndpointHostnames()) config.DialFromTestContainer("http", config.ClusterIP, framework.ClusterHTTPPort, config.MaxTries, config.MaxTries, config.EndpointHostnames())
}) })
It("should update endpoints: udp", func() { ginkgo.It("should update endpoints: udp", func() {
config := framework.NewNetworkingTestConfig(f) config := framework.NewNetworkingTestConfig(f)
By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterUDPPort)) ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterUDPPort))
config.DialFromTestContainer("udp", config.ClusterIP, framework.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames()) config.DialFromTestContainer("udp", config.ClusterIP, framework.ClusterUDPPort, config.MaxTries, 0, config.EndpointHostnames())
config.DeleteNetProxyPod() config.DeleteNetProxyPod()
By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterUDPPort)) ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v (config.clusterIP)", config.TestContainerPod.Name, config.ClusterIP, framework.ClusterUDPPort))
config.DialFromTestContainer("udp", config.ClusterIP, framework.ClusterUDPPort, config.MaxTries, config.MaxTries, config.EndpointHostnames()) config.DialFromTestContainer("udp", config.ClusterIP, framework.ClusterUDPPort, config.MaxTries, config.MaxTries, config.EndpointHostnames())
}) })
// Slow because we confirm that the nodePort doesn't serve traffic, which requires a period of polling. // Slow because we confirm that the nodePort doesn't serve traffic, which requires a period of polling.
It("should update nodePort: http [Slow]", func() { ginkgo.It("should update nodePort: http [Slow]", func() {
config := framework.NewNetworkingTestConfig(f) config := framework.NewNetworkingTestConfig(f)
By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeHTTPPort)) ginkgo.By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeHTTPPort))
config.DialFromNode("http", config.NodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames()) config.DialFromNode("http", config.NodeIP, config.NodeHTTPPort, config.MaxTries, 0, config.EndpointHostnames())
config.DeleteNodePortService() config.DeleteNodePortService()
By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeHTTPPort)) ginkgo.By(fmt.Sprintf("dialing(http) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeHTTPPort))
config.DialFromNode("http", config.NodeIP, config.NodeHTTPPort, config.MaxTries, config.MaxTries, sets.NewString()) config.DialFromNode("http", config.NodeIP, config.NodeHTTPPort, config.MaxTries, config.MaxTries, sets.NewString())
}) })
// Slow because we confirm that the nodePort doesn't serve traffic, which requires a period of polling. // Slow because we confirm that the nodePort doesn't serve traffic, which requires a period of polling.
It("should update nodePort: udp [Slow]", func() { ginkgo.It("should update nodePort: udp [Slow]", func() {
config := framework.NewNetworkingTestConfig(f) config := framework.NewNetworkingTestConfig(f)
By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeUDPPort)) ginkgo.By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeUDPPort))
config.DialFromNode("udp", config.NodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames()) config.DialFromNode("udp", config.NodeIP, config.NodeUDPPort, config.MaxTries, 0, config.EndpointHostnames())
config.DeleteNodePortService() config.DeleteNodePortService()
By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeUDPPort)) ginkgo.By(fmt.Sprintf("dialing(udp) %v (node) --> %v:%v (nodeIP)", config.NodeIP, config.NodeIP, config.NodeUDPPort))
config.DialFromNode("udp", config.NodeIP, config.NodeUDPPort, config.MaxTries, config.MaxTries, sets.NewString()) config.DialFromNode("udp", config.NodeIP, config.NodeUDPPort, config.MaxTries, config.MaxTries, sets.NewString())
}) })
It("should function for client IP based session affinity: http", func() { ginkgo.It("should function for client IP based session affinity: http", func() {
config := framework.NewNetworkingTestConfig(f) config := framework.NewNetworkingTestConfig(f)
By(fmt.Sprintf("dialing(http) %v --> %v:%v", config.TestContainerPod.Name, config.SessionAffinityService.Spec.ClusterIP, framework.ClusterHTTPPort)) ginkgo.By(fmt.Sprintf("dialing(http) %v --> %v:%v", config.TestContainerPod.Name, config.SessionAffinityService.Spec.ClusterIP, framework.ClusterHTTPPort))
// Check if number of endpoints returned are exactly one. // Check if number of endpoints returned are exactly one.
eps, err := config.GetEndpointsFromTestContainer("http", config.SessionAffinityService.Spec.ClusterIP, framework.ClusterHTTPPort, framework.SessionAffinityChecks) eps, err := config.GetEndpointsFromTestContainer("http", config.SessionAffinityService.Spec.ClusterIP, framework.ClusterHTTPPort, framework.SessionAffinityChecks)
if err != nil { if err != nil {
framework.Failf("Failed to get endpoints from test container, error: %v", err) framework.Failf("ginkgo.Failed to get endpoints from test container, error: %v", err)
} }
if len(eps) == 0 { if len(eps) == 0 {
framework.Failf("Unexpected no endpoints return") framework.Failf("Unexpected no endpoints return")
@ -217,14 +217,14 @@ var _ = SIGDescribe("Networking", func() {
} }
}) })
It("should function for client IP based session affinity: udp", func() { ginkgo.It("should function for client IP based session affinity: udp", func() {
config := framework.NewNetworkingTestConfig(f) config := framework.NewNetworkingTestConfig(f)
By(fmt.Sprintf("dialing(udp) %v --> %v:%v", config.TestContainerPod.Name, config.SessionAffinityService.Spec.ClusterIP, framework.ClusterUDPPort)) ginkgo.By(fmt.Sprintf("dialing(udp) %v --> %v:%v", config.TestContainerPod.Name, config.SessionAffinityService.Spec.ClusterIP, framework.ClusterUDPPort))
// Check if number of endpoints returned are exactly one. // Check if number of endpoints returned are exactly one.
eps, err := config.GetEndpointsFromTestContainer("udp", config.SessionAffinityService.Spec.ClusterIP, framework.ClusterUDPPort, framework.SessionAffinityChecks) eps, err := config.GetEndpointsFromTestContainer("udp", config.SessionAffinityService.Spec.ClusterIP, framework.ClusterUDPPort, framework.SessionAffinityChecks)
if err != nil { if err != nil {
framework.Failf("Failed to get endpoints from test container, error: %v", err) framework.Failf("ginkgo.Failed to get endpoints from test container, error: %v", err)
} }
if len(eps) == 0 { if len(eps) == 0 {
framework.Failf("Unexpected no endpoints return") framework.Failf("Unexpected no endpoints return")

View File

@ -22,8 +22,8 @@ import (
"math" "math"
"time" "time"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2elog "k8s.io/kubernetes/test/e2e/framework/log"
@ -54,12 +54,12 @@ func networkingIPerfTest(isIPv6 bool) {
familyStr = "-V " familyStr = "-V "
} }
It(fmt.Sprintf("should transfer ~ 1GB onto the service endpoint %v servers (maximum of %v clients)", numServer, numClient), func() { ginkgo.It(fmt.Sprintf("should transfer ~ 1GB onto the service endpoint %v servers (maximum of %v clients)", numServer, numClient), func() {
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet) nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
totalPods := len(nodes.Items) totalPods := len(nodes.Items)
// for a single service, we expect to divide bandwidth between the network. Very crude estimate. // for a single service, we expect to divide bandwidth between the network. Very crude estimate.
expectedBandwidth := int(float64(maxBandwidthBits) / float64(totalPods)) expectedBandwidth := int(float64(maxBandwidthBits) / float64(totalPods))
Expect(totalPods).NotTo(Equal(0)) gomega.Expect(totalPods).NotTo(gomega.Equal(0))
appName := "iperf-e2e" appName := "iperf-e2e"
_, err := f.CreateServiceForSimpleAppWithPods( _, err := f.CreateServiceForSimpleAppWithPods(
8001, 8001,

View File

@ -29,8 +29,8 @@ import (
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
// . "github.com/onsi/gomega"
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
) )
@ -94,11 +94,11 @@ var (
// Produces a pod spec that passes nip as NODE_IP env var using downward API // Produces a pod spec that passes nip as NODE_IP env var using downward API
func newTestPod(nodename string, nip string) *v1.Pod { func newTestPod(nodename string, nip string) *v1.Pod {
pod := testPod pod := testPod
node_ip := v1.EnvVar{ nodeIP := v1.EnvVar{
Name: "NODE_IP", Name: "NODE_IP",
Value: nip, Value: nip,
} }
pod.Spec.Containers[0].Env = append(pod.Spec.Containers[0].Env, node_ip) pod.Spec.Containers[0].Env = append(pod.Spec.Containers[0].Env, nodeIP)
pod.Spec.NodeName = nodename pod.Spec.NodeName = nodename
return &pod return &pod
} }
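The node_ip to nodeIP rename in this hunk is the standard fix for golint's underscore check (roughly "don't use underscores in Go names; var node_ip should be nodeIP"): Go identifiers use mixedCaps. Restating the corrected lines with the lint rule that motivates them, purely as illustration:

// golint: don't use underscores in Go names; mixedCaps (nodeIP) instead of node_ip.
nodeIP := v1.EnvVar{Name: "NODE_IP", Value: nip}
pod.Spec.Containers[0].Env = append(pod.Spec.Containers[0].Env, nodeIP)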
@ -135,12 +135,12 @@ func checknosnatURL(proxy, pip string, ips []string) string {
// We use the [Feature:NoSNAT] tag so that most jobs will skip this test by default. // We use the [Feature:NoSNAT] tag so that most jobs will skip this test by default.
var _ = SIGDescribe("NoSNAT [Feature:NoSNAT] [Slow]", func() { var _ = SIGDescribe("NoSNAT [Feature:NoSNAT] [Slow]", func() {
f := framework.NewDefaultFramework("no-snat-test") f := framework.NewDefaultFramework("no-snat-test")
It("Should be able to send traffic between Pods without SNAT", func() { ginkgo.It("Should be able to send traffic between Pods without SNAT", func() {
cs := f.ClientSet cs := f.ClientSet
pc := cs.CoreV1().Pods(f.Namespace.Name) pc := cs.CoreV1().Pods(f.Namespace.Name)
nc := cs.CoreV1().Nodes() nc := cs.CoreV1().Nodes()
By("creating a test pod on each Node") ginkgo.By("creating a test pod on each Node")
nodes, err := nc.List(metav1.ListOptions{}) nodes, err := nc.List(metav1.ListOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
if len(nodes.Items) == 0 { if len(nodes.Items) == 0 {
@ -167,7 +167,7 @@ var _ = SIGDescribe("NoSNAT [Feature:NoSNAT] [Slow]", func() {
// on the master, but do allow this on the nodes. // on the master, but do allow this on the nodes.
node, err := getSchedulable(nodes.Items) node, err := getSchedulable(nodes.Items)
framework.ExpectNoError(err) framework.ExpectNoError(err)
By("creating a no-snat-test-proxy Pod on Node " + node.Name + " port " + strconv.Itoa(testProxyPort) + ginkgo.By("creating a no-snat-test-proxy Pod on Node " + node.Name + " port " + strconv.Itoa(testProxyPort) +
" so we can target our test Pods through this Node's ExternalIP") " so we can target our test Pods through this Node's ExternalIP")
extIP, err := getIP(v1.NodeExternalIP, node) extIP, err := getIP(v1.NodeExternalIP, node)
@ -177,7 +177,7 @@ var _ = SIGDescribe("NoSNAT [Feature:NoSNAT] [Slow]", func() {
_, err = pc.Create(newTestProxyPod(node.Name)) _, err = pc.Create(newTestProxyPod(node.Name))
framework.ExpectNoError(err) framework.ExpectNoError(err)
By("waiting for all of the no-snat-test pods to be scheduled and running") ginkgo.By("waiting for all of the no-snat-test pods to be scheduled and running")
err = wait.PollImmediate(10*time.Second, 1*time.Minute, func() (bool, error) { err = wait.PollImmediate(10*time.Second, 1*time.Minute, func() (bool, error) {
pods, err := pc.List(metav1.ListOptions{LabelSelector: "no-snat-test"}) pods, err := pc.List(metav1.ListOptions{LabelSelector: "no-snat-test"})
if err != nil { if err != nil {
@ -197,7 +197,7 @@ var _ = SIGDescribe("NoSNAT [Feature:NoSNAT] [Slow]", func() {
}) })
framework.ExpectNoError(err) framework.ExpectNoError(err)
By("waiting for the no-snat-test-proxy Pod to be scheduled and running") ginkgo.By("waiting for the no-snat-test-proxy Pod to be scheduled and running")
err = wait.PollImmediate(10*time.Second, 1*time.Minute, func() (bool, error) { err = wait.PollImmediate(10*time.Second, 1*time.Minute, func() (bool, error) {
pod, err := pc.Get("no-snat-test-proxy", metav1.GetOptions{}) pod, err := pc.Get("no-snat-test-proxy", metav1.GetOptions{})
if err != nil { if err != nil {
@ -213,7 +213,7 @@ var _ = SIGDescribe("NoSNAT [Feature:NoSNAT] [Slow]", func() {
}) })
framework.ExpectNoError(err) framework.ExpectNoError(err)
By("sending traffic from each pod to the others and checking that SNAT does not occur") ginkgo.By("sending traffic from each pod to the others and checking that SNAT does not occur")
pods, err := pc.List(metav1.ListOptions{LabelSelector: "no-snat-test"}) pods, err := pc.List(metav1.ListOptions{LabelSelector: "no-snat-test"})
framework.ExpectNoError(err) framework.ExpectNoError(err)

View File

@ -38,8 +38,8 @@ import (
testutils "k8s.io/kubernetes/test/utils" testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" "github.com/onsi/gomega"
) )
const ( const (
@ -55,7 +55,7 @@ const (
var _ = SIGDescribe("Proxy", func() { var _ = SIGDescribe("Proxy", func() {
version := "v1" version := "v1"
Context("version "+version, func() { ginkgo.Context("version "+version, func() {
options := framework.Options{ options := framework.Options{
ClientQPS: -1.0, ClientQPS: -1.0,
} }
@ -116,12 +116,12 @@ var _ = SIGDescribe("Proxy", func() {
}, },
}, },
}) })
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
// Make an RC with a single pod. The 'porter' image is // Make an RC with a single pod. The 'porter' image is
// a simple server which serves the values of the // a simple server which serves the values of the
// environmental variables below. // environmental variables below.
By("starting an echo server on multiple ports") ginkgo.By("starting an echo server on multiple ports")
pods := []*v1.Pod{} pods := []*v1.Pod{}
cfg := testutils.RCConfig{ cfg := testutils.RCConfig{
Client: f.ClientSet, Client: f.ClientSet,
@ -160,10 +160,10 @@ var _ = SIGDescribe("Proxy", func() {
Labels: labels, Labels: labels,
CreatedPods: &pods, CreatedPods: &pods,
} }
Expect(framework.RunRC(cfg)).NotTo(HaveOccurred()) gomega.Expect(framework.RunRC(cfg)).NotTo(gomega.HaveOccurred())
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, cfg.Name) defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, cfg.Name)
Expect(endpoints.WaitForEndpoint(f.ClientSet, f.Namespace.Name, service.Name)).NotTo(HaveOccurred()) gomega.Expect(endpoints.WaitForEndpoint(f.ClientSet, f.Namespace.Name, service.Name)).NotTo(gomega.HaveOccurred())
// table constructors // table constructors
// Try proxying through the service and directly to through the pod. // Try proxying through the service and directly to through the pod.
@ -212,7 +212,7 @@ var _ = SIGDescribe("Proxy", func() {
e2elog.Logf("setup took %v, starting test cases", d) e2elog.Logf("setup took %v, starting test cases", d)
numberTestCases := len(expectations) numberTestCases := len(expectations)
totalAttempts := numberTestCases * proxyAttempts totalAttempts := numberTestCases * proxyAttempts
By(fmt.Sprintf("running %v cases, %v attempts per case, %v total attempts", numberTestCases, proxyAttempts, totalAttempts)) ginkgo.By(fmt.Sprintf("running %v cases, %v attempts per case, %v total attempts", numberTestCases, proxyAttempts, totalAttempts))
for i := 0; i < proxyAttempts; i++ { for i := 0; i < proxyAttempts; i++ {
wg.Add(numberTestCases) wg.Add(numberTestCases)
@ -297,25 +297,25 @@ func pickNode(cs clientset.Interface) (string, error) {
func nodeProxyTest(f *framework.Framework, prefix, nodeDest string) { func nodeProxyTest(f *framework.Framework, prefix, nodeDest string) {
node, err := pickNode(f.ClientSet) node, err := pickNode(f.ClientSet)
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
// TODO: Change it to test whether all requests succeeded when requests // TODO: Change it to test whether all requests succeeded when requests
// not reaching Kubelet issue is debugged. // not reaching Kubelet issue is debugged.
serviceUnavailableErrors := 0 serviceUnavailableErrors := 0
for i := 0; i < proxyAttempts; i++ { for i := 0; i < proxyAttempts; i++ {
_, status, d, err := doProxy(f, prefix+node+nodeDest, i) _, status, d, err := doProxy(f, prefix+node+nodeDest, i)
if status == http.StatusServiceUnavailable { if status == http.StatusServiceUnavailable {
e2elog.Logf("Failed proxying node logs due to service unavailable: %v", err) e2elog.Logf("ginkgo.Failed proxying node logs due to service unavailable: %v", err)
time.Sleep(time.Second) time.Sleep(time.Second)
serviceUnavailableErrors++ serviceUnavailableErrors++
} else { } else {
Expect(err).NotTo(HaveOccurred()) gomega.Expect(err).NotTo(gomega.HaveOccurred())
Expect(status).To(Equal(http.StatusOK)) gomega.Expect(status).To(gomega.Equal(http.StatusOK))
Expect(d).To(BeNumerically("<", proxyHTTPCallTimeout)) gomega.Expect(d).To(gomega.BeNumerically("<", proxyHTTPCallTimeout))
} }
} }
if serviceUnavailableErrors > 0 { if serviceUnavailableErrors > 0 {
e2elog.Logf("error: %d requests to proxy node logs failed", serviceUnavailableErrors) e2elog.Logf("error: %d requests to proxy node logs failed", serviceUnavailableErrors)
} }
maxFailures := int(math.Floor(0.1 * float64(proxyAttempts))) maxFailures := int(math.Floor(0.1 * float64(proxyAttempts)))
Expect(serviceUnavailableErrors).To(BeNumerically("<", maxFailures)) gomega.Expect(serviceUnavailableErrors).To(gomega.BeNumerically("<", maxFailures))
} }
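For a sense of the failure budget above: maxFailures is floor(0.1 * proxyAttempts) and the assertion requires strictly fewer ServiceUnavailable responses than that, so with an illustrative proxyAttempts of 20 (the real constant is defined earlier in the file and not shown here) maxFailures would be 2 and at most one 503 response per run is tolerated.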

File diff suppressed because it is too large

View File

@ -35,7 +35,7 @@ import (
testutils "k8s.io/kubernetes/test/utils" testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
) )
type durations []time.Duration type durations []time.Duration
@ -161,7 +161,7 @@ func runServiceLatencies(f *framework.Framework, inParallel, total int, acceptab
blocker := make(chan struct{}, inParallel) blocker := make(chan struct{}, inParallel)
for i := 0; i < total; i++ { for i := 0; i < total; i++ {
go func() { go func() {
defer GinkgoRecover() defer ginkgo.GinkgoRecover()
blocker <- struct{}{} blocker <- struct{}{}
defer func() { <-blocker }() defer func() { <-blocker }()
if d, err := singleServiceLatency(f, cfg.Name, endpointQueries); err != nil { if d, err := singleServiceLatency(f, cfg.Name, endpointQueries); err != nil {

View File

@ -28,6 +28,7 @@ import (
e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2elog "k8s.io/kubernetes/test/e2e/framework/log"
) )
// IPerfResults is a struct that stores per-node iperf bandwidth results // IPerfResults is a struct that stores per-node iperf bandwidth results
type IPerfResults struct { type IPerfResults struct {
BandwidthMap map[string]int64 BandwidthMap map[string]int64
} }
@ -62,8 +63,8 @@ func (i *IPerfResults) ToTSV() string {
var buffer bytes.Buffer var buffer bytes.Buffer
for node, bandwidth := range i.BandwidthMap { for node, bandwidth := range i.BandwidthMap {
asJson, _ := json.Marshal(node) asJSON, _ := json.Marshal(node)
buffer.WriteString("\t " + string(asJson) + "\t " + fmt.Sprintf("%E", float64(bandwidth))) buffer.WriteString("\t " + string(asJSON) + "\t " + fmt.Sprintf("%E", float64(bandwidth)))
} }
return buffer.String() return buffer.String()
} }
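The asJson to asJSON rename and the new comments on IPerfResults and StrSlice answer two more common golint checks: initialisms stay fully capitalized, and every exported identifier needs a doc comment that starts with its own name. A small illustrative sketch, using a hypothetical type that is not part of the test utilities:

// BandwidthJSON is the per-node bandwidth table encoded as JSON.
// Without this comment golint reports
// "exported type BandwidthJSON should have comment or be unexported",
// and spelling it BandwidthJson would trigger the initialism check instead.
type BandwidthJSON struct {
	NodeName      string
	BitsPerSecond int64
}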
@ -88,6 +89,7 @@ func NewIPerf(csvLine string) *IPerfResult {
return &i return &i
} }
// StrSlice represents a string slice
type StrSlice []string type StrSlice []string
func (s StrSlice) get(i int) string { func (s StrSlice) get(i int) string {