Merge pull request #77901 from k-toyoda-pi/use_expect_no_error_e2e_network_1

use framework.ExpectNoError() for e2e/network
commit a7f13bcaf0 by Kubernetes Prow Robot, 2019-05-17 21:51:07 -07:00, committed by GitHub
GPG Key ID: 4AEE18F83AFDEB23 (no known key found for this signature in database)
7 changed files with 103 additions and 90 deletions
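All of the changes follow the same mechanical pattern: a raw gomega assertion of the form gomega.Expect(err).NotTo(gomega.HaveOccurred(), ...) becomes framework.ExpectNoError(err, ...), and call sites that asserted on a function's result inline are split into an explicit err = ... assignment followed by the helper. Besides being shorter, the helper logs the unexpected error and attributes the failure to the calling test line. A minimal sketch of the helper's shape, assuming it wraps gomega with a caller offset (the real implementation lives in the test/e2e/framework package and may differ in detail):

// Sketch only; Logf is the framework package's existing logging helper.
package framework

import "github.com/onsi/gomega"

// ExpectNoError fails the current spec if err is non-nil, logging the
// error together with the optional explanation arguments.
func ExpectNoError(err error, explain ...interface{}) {
	ExpectNoErrorWithOffset(1, err, explain...)
}

// ExpectNoErrorWithOffset reports the failure "offset" stack frames above
// its direct caller, so the file:line Ginkgo prints is the test code, not
// this helper.
func ExpectNoErrorWithOffset(offset int, err error, explain ...interface{}) {
	if err != nil {
		Logf("Unexpected error occurred: %v", err)
	}
	gomega.ExpectWithOffset(1+offset, err).NotTo(gomega.HaveOccurred(), explain...)
}

Because the explain arguments are passed straight through to gomega, every hand-written failure message in the diff below carries over unchanged.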

test/e2e/network/dns.go

@@ -28,7 +28,6 @@ import (
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"github.com/onsi/ginkgo"
-"github.com/onsi/gomega"
)
const dnsTestPodHostName = "dns-querier-1"
@@ -132,7 +131,7 @@ var _ = SIGDescribe("DNS", func() {
}
headlessService := framework.CreateServiceSpec(dnsTestServiceName, "", true, testServiceSelector)
_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(headlessService)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create headless service: %s", dnsTestServiceName)
+framework.ExpectNoError(err, "failed to create headless service: %s", dnsTestServiceName)
defer func() {
ginkgo.By("deleting the test headless service")
defer ginkgo.GinkgoRecover()
@@ -142,7 +141,7 @@ var _ = SIGDescribe("DNS", func() {
regularServiceName := "test-service-2"
regularService := framework.CreateServiceSpec(regularServiceName, "", false, testServiceSelector)
regularService, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(regularService)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create regular service: %s", regularServiceName)
+framework.ExpectNoError(err, "failed to create regular service: %s", regularServiceName)
defer func() {
ginkgo.By("deleting the test service")
@@ -181,7 +180,7 @@ var _ = SIGDescribe("DNS", func() {
}
headlessService := framework.CreateServiceSpec(dnsTestServiceName, "", true, testServiceSelector)
_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(headlessService)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create headless service: %s", dnsTestServiceName)
+framework.ExpectNoError(err, "failed to create headless service: %s", dnsTestServiceName)
defer func() {
ginkgo.By("deleting the test headless service")
defer ginkgo.GinkgoRecover()
@@ -191,7 +190,7 @@ var _ = SIGDescribe("DNS", func() {
regularServiceName := "test-service-2"
regularService := framework.CreateServiceSpec(regularServiceName, "", false, testServiceSelector)
regularService, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(regularService)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create regular service: %s", regularServiceName)
+framework.ExpectNoError(err, "failed to create regular service: %s", regularServiceName)
defer func() {
ginkgo.By("deleting the test service")
defer ginkgo.GinkgoRecover()
@@ -232,7 +231,7 @@ var _ = SIGDescribe("DNS", func() {
podHostname := "dns-querier-2"
headlessService := framework.CreateServiceSpec(serviceName, "", true, testServiceSelector)
_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(headlessService)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create headless service: %s", serviceName)
+framework.ExpectNoError(err, "failed to create headless service: %s", serviceName)
defer func() {
ginkgo.By("deleting the test headless service")
@@ -267,7 +266,7 @@ var _ = SIGDescribe("DNS", func() {
podHostname := "dns-querier-2"
headlessService := framework.CreateServiceSpec(serviceName, "", true, testServiceSelector)
_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(headlessService)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create headless service: %s", serviceName)
+framework.ExpectNoError(err, "failed to create headless service: %s", serviceName)
defer func() {
ginkgo.By("deleting the test headless service")
@@ -305,7 +304,7 @@ var _ = SIGDescribe("DNS", func() {
serviceName := "dns-test-service-3"
externalNameService := framework.CreateServiceSpec(serviceName, "foo.example.com", false, nil)
_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(externalNameService)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create ExternalName service: %s", serviceName)
+framework.ExpectNoError(err, "failed to create ExternalName service: %s", serviceName)
defer func() {
ginkgo.By("deleting the test externalName service")
@@ -329,7 +328,7 @@ var _ = SIGDescribe("DNS", func() {
_, err = framework.UpdateService(f.ClientSet, f.Namespace.Name, serviceName, func(s *v1.Service) {
s.Spec.ExternalName = "bar.example.com"
})
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to change externalName of service: %s", serviceName)
+framework.ExpectNoError(err, "failed to change externalName of service: %s", serviceName)
wheezyProbeCmd, wheezyFileName = createTargetedProbeCommand(hostFQDN, "CNAME", "wheezy")
jessieProbeCmd, jessieFileName = createTargetedProbeCommand(hostFQDN, "CNAME", "jessie")
ginkgo.By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
@@ -349,7 +348,7 @@ var _ = SIGDescribe("DNS", func() {
{Port: 80, Name: "http", Protocol: v1.ProtocolTCP},
}
})
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to change service type to ClusterIP for service: %s", serviceName)
+framework.ExpectNoError(err, "failed to change service type to ClusterIP for service: %s", serviceName)
wheezyProbeCmd, wheezyFileName = createTargetedProbeCommand(hostFQDN, "A", "wheezy")
jessieProbeCmd, jessieFileName = createTargetedProbeCommand(hostFQDN, "A", "jessie")
ginkgo.By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
@@ -360,7 +359,7 @@ var _ = SIGDescribe("DNS", func() {
pod3 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, dnsTestPodHostName, dnsTestServiceName)
svc, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Get(externalNameService.Name, metav1.GetOptions{})
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get service: %s", externalNameService.Name)
+framework.ExpectNoError(err, "failed to get service: %s", externalNameService.Name)
validateTargetedProbeOutput(f, pod3, []string{wheezyFileName, jessieFileName}, svc.Spec.ClusterIP)
})
@@ -376,7 +375,7 @@ var _ = SIGDescribe("DNS", func() {
Searches: []string{testSearchPath},
}
testAgnhostPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(testAgnhostPod)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create pod: %s", testAgnhostPod.Name)
+framework.ExpectNoError(err, "failed to create pod: %s", testAgnhostPod.Name)
framework.Logf("Created pod %v", testAgnhostPod)
defer func() {
framework.Logf("Deleting pod %s...", testAgnhostPod.Name)
@@ -384,7 +383,8 @@ var _ = SIGDescribe("DNS", func() {
framework.Failf("ginkgo.Failed to delete pod %s: %v", testAgnhostPod.Name, err)
}
}()
-gomega.Expect(f.WaitForPodRunning(testAgnhostPod.Name)).NotTo(gomega.HaveOccurred(), "failed to wait for pod %s to be running", testAgnhostPod.Name)
+err = f.WaitForPodRunning(testAgnhostPod.Name)
+framework.ExpectNoError(err, "failed to wait for pod %s to be running", testAgnhostPod.Name)
runCommand := func(arg string) string {
cmd := []string{"/agnhost", arg}
@@ -396,7 +396,7 @@ var _ = SIGDescribe("DNS", func() {
CaptureStdout: true,
CaptureStderr: true,
})
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to run command '/agnhost %s' on pod, stdout: %v, stderr: %v, err: %v", arg, stdout, stderr, err)
+framework.ExpectNoError(err, "failed to run command '/agnhost %s' on pod, stdout: %v, stderr: %v, err: %v", arg, stdout, stderr, err)
return stdout
}
@@ -424,7 +424,7 @@ var _ = SIGDescribe("DNS", func() {
testDNSNameFull: testInjectedIP,
})
testServerPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(testServerPod)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create pod: %s", testServerPod.Name)
+framework.ExpectNoError(err, "failed to create pod: %s", testServerPod.Name)
e2elog.Logf("Created pod %v", testServerPod)
defer func() {
e2elog.Logf("Deleting pod %s...", testServerPod.Name)
@@ -432,11 +432,12 @@ var _ = SIGDescribe("DNS", func() {
framework.Failf("ginkgo.Failed to delete pod %s: %v", testServerPod.Name, err)
}
}()
-gomega.Expect(f.WaitForPodRunning(testServerPod.Name)).NotTo(gomega.HaveOccurred(), "failed to wait for pod %s to be running", testServerPod.Name)
+err = f.WaitForPodRunning(testServerPod.Name)
+framework.ExpectNoError(err, "failed to wait for pod %s to be running", testServerPod.Name)
// Retrieve server pod IP.
testServerPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(testServerPod.Name, metav1.GetOptions{})
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get pod %v", testServerPod.Name)
+framework.ExpectNoError(err, "failed to get pod %v", testServerPod.Name)
testServerIP := testServerPod.Status.PodIP
e2elog.Logf("testServerIP is %s", testServerIP)
@@ -455,7 +456,7 @@ var _ = SIGDescribe("DNS", func() {
},
}
testUtilsPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(testUtilsPod)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create pod: %s", testUtilsPod.Name)
+framework.ExpectNoError(err, "failed to create pod: %s", testUtilsPod.Name)
e2elog.Logf("Created pod %v", testUtilsPod)
defer func() {
e2elog.Logf("Deleting pod %s...", testUtilsPod.Name)
@@ -463,7 +464,8 @@ var _ = SIGDescribe("DNS", func() {
framework.Failf("ginkgo.Failed to delete pod %s: %v", testUtilsPod.Name, err)
}
}()
-gomega.Expect(f.WaitForPodRunning(testUtilsPod.Name)).NotTo(gomega.HaveOccurred(), "failed to wait for pod %s to be running", testUtilsPod.Name)
+err = f.WaitForPodRunning(testUtilsPod.Name)
+framework.ExpectNoError(err, "failed to wait for pod %s to be running", testUtilsPod.Name)
ginkgo.By("Verifying customized DNS option is configured on pod...")
// TODO: Figure out a better way other than checking the actual resolv,conf file.
@@ -476,7 +478,7 @@ var _ = SIGDescribe("DNS", func() {
CaptureStdout: true,
CaptureStderr: true,
})
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to examine resolv,conf file on pod, stdout: %v, stderr: %v, err: %v", stdout, stderr, err)
+framework.ExpectNoError(err, "failed to examine resolv,conf file on pod, stdout: %v, stderr: %v, err: %v", stdout, stderr, err)
if !strings.Contains(stdout, "ndots:2") {
framework.Failf("customized DNS options not found in resolv.conf, got: %s", stdout)
}
@@ -508,7 +510,7 @@ var _ = SIGDescribe("DNS", func() {
return true, nil
}
err = wait.PollImmediate(5*time.Second, 3*time.Minute, digFunc)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to verify customized name server and search path")
+framework.ExpectNoError(err, "failed to verify customized name server and search path")
// TODO: Add more test cases for other DNSPolicies.
})

test/e2e/network/dns_common.go

@@ -68,7 +68,7 @@ func (t *dnsTestCommon) init() {
namespace := "kube-system"
pods, err := t.f.ClientSet.CoreV1().Pods(namespace).List(options)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to list pods in namespace: %s", namespace)
+framework.ExpectNoError(err, "failed to list pods in namespace: %s", namespace)
gomega.Expect(len(pods.Items)).Should(gomega.BeNumerically(">=", 1))
t.dnsPod = &pods.Items[0]
@@ -157,23 +157,23 @@ func (t *dnsTestCommon) setConfigMap(cm *v1.ConfigMap) {
}.AsSelector().String(),
}
cmList, err := t.c.CoreV1().ConfigMaps(t.ns).List(options)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to list ConfigMaps in namespace: %s", t.ns)
+framework.ExpectNoError(err, "failed to list ConfigMaps in namespace: %s", t.ns)
if len(cmList.Items) == 0 {
ginkgo.By(fmt.Sprintf("Creating the ConfigMap (%s:%s) %+v", t.ns, t.name, *cm))
_, err := t.c.CoreV1().ConfigMaps(t.ns).Create(cm)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create ConfigMap (%s:%s) %+v", t.ns, t.name, *cm)
+framework.ExpectNoError(err, "failed to create ConfigMap (%s:%s) %+v", t.ns, t.name, *cm)
} else {
ginkgo.By(fmt.Sprintf("Updating the ConfigMap (%s:%s) to %+v", t.ns, t.name, *cm))
_, err := t.c.CoreV1().ConfigMaps(t.ns).Update(cm)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to update ConfigMap (%s:%s) to %+v", t.ns, t.name, *cm)
+framework.ExpectNoError(err, "failed to update ConfigMap (%s:%s) to %+v", t.ns, t.name, *cm)
}
}
func (t *dnsTestCommon) fetchDNSConfigMapData() map[string]string {
if t.name == "coredns" {
pcm, err := t.c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(t.name, metav1.GetOptions{})
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get DNS ConfigMap: %s", t.name)
+framework.ExpectNoError(err, "failed to get DNS ConfigMap: %s", t.name)
return pcm.Data
}
return nil
@@ -192,7 +192,7 @@ func (t *dnsTestCommon) deleteConfigMap() {
ginkgo.By(fmt.Sprintf("Deleting the ConfigMap (%s:%s)", t.ns, t.name))
t.cm = nil
err := t.c.CoreV1().ConfigMaps(t.ns).Delete(t.name, nil)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete config map: %s", t.name)
+framework.ExpectNoError(err, "failed to delete config map: %s", t.name)
}
func (t *dnsTestCommon) createUtilPodLabel(baseName string) {
@@ -224,9 +224,10 @@ func (t *dnsTestCommon) createUtilPodLabel(baseName string) {
var err error
t.utilPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Create(t.utilPod)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create pod: %v", t.utilPod)
+framework.ExpectNoError(err, "failed to create pod: %v", t.utilPod)
e2elog.Logf("Created pod %v", t.utilPod)
-gomega.Expect(t.f.WaitForPodRunning(t.utilPod.Name)).NotTo(gomega.HaveOccurred(), "pod failed to start running: %v", t.utilPod)
+err = t.f.WaitForPodRunning(t.utilPod.Name)
+framework.ExpectNoError(err, "pod failed to start running: %v", t.utilPod)
t.utilService = &v1.Service{
TypeMeta: metav1.TypeMeta{
@@ -249,7 +250,7 @@ func (t *dnsTestCommon) createUtilPodLabel(baseName string) {
}
t.utilService, err = t.c.CoreV1().Services(t.f.Namespace.Name).Create(t.utilService)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create service: %s/%s", t.f.Namespace.Name, t.utilService.ObjectMeta.Name)
+framework.ExpectNoError(err, "failed to create service: %s/%s", t.f.Namespace.Name, t.utilService.ObjectMeta.Name)
e2elog.Logf("Created service %v", t.utilService)
}
@@ -272,7 +273,7 @@ func (t *dnsTestCommon) deleteCoreDNSPods() {
for _, pod := range pods.Items {
err = podClient.Delete(pod.Name, metav1.NewDeleteOptions(0))
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete pod: %s", pod.Name)
+framework.ExpectNoError(err, "failed to delete pod: %s", pod.Name)
}
}
@@ -315,13 +316,14 @@ func (t *dnsTestCommon) createDNSPodFromObj(pod *v1.Pod) {
var err error
t.dnsServerPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Create(t.dnsServerPod)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create pod: %v", t.dnsServerPod)
+framework.ExpectNoError(err, "failed to create pod: %v", t.dnsServerPod)
e2elog.Logf("Created pod %v", t.dnsServerPod)
-gomega.Expect(t.f.WaitForPodRunning(t.dnsServerPod.Name)).NotTo(gomega.HaveOccurred(), "pod failed to start running: %v", t.dnsServerPod)
+err = t.f.WaitForPodRunning(t.dnsServerPod.Name)
+framework.ExpectNoError(err, "pod failed to start running: %v", t.dnsServerPod)
t.dnsServerPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Get(
t.dnsServerPod.Name, metav1.GetOptions{})
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get pod: %s", t.dnsServerPod.Name)
+framework.ExpectNoError(err, "failed to get pod: %s", t.dnsServerPod.Name)
}
func (t *dnsTestCommon) createDNSServer(aRecords map[string]string) {

test/e2e/network/example_cluster_dns.go

@@ -33,7 +33,6 @@ import (
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"github.com/onsi/ginkgo"
-"github.com/onsi/gomega"
)
const (
@@ -84,7 +83,7 @@ var _ = SIGDescribe("ClusterDns [Feature:Example]", func() {
var err error
namespaceName := fmt.Sprintf("dnsexample%d", i)
namespaces[i], err = f.CreateNamespace(namespaceName, nil)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create namespace: %s", namespaceName)
+framework.ExpectNoError(err, "failed to create namespace: %s", namespaceName)
}
for _, ns := range namespaces {
@@ -106,13 +105,13 @@ var _ = SIGDescribe("ClusterDns [Feature:Example]", func() {
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": backendRcName}))
options := metav1.ListOptions{LabelSelector: label.String()}
pods, err := c.CoreV1().Pods(ns.Name).List(options)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to list pods in namespace: %s", ns.Name)
+framework.ExpectNoError(err, "failed to list pods in namespace: %s", ns.Name)
err = framework.PodsResponding(c, ns.Name, backendPodName, false, pods)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "waiting for all pods to respond")
+framework.ExpectNoError(err, "waiting for all pods to respond")
e2elog.Logf("found %d backend pods responding in namespace %s", len(pods.Items), ns.Name)
err = framework.ServiceResponding(c, ns.Name, backendSvcName)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "waiting for the service to respond")
+framework.ExpectNoError(err, "waiting for the service to respond")
}
// Now another tricky part:
@@ -134,7 +133,7 @@ var _ = SIGDescribe("ClusterDns [Feature:Example]", func() {
queryDNS := fmt.Sprintf(queryDNSPythonTemplate, backendSvcName+"."+namespaces[0].Name)
_, err = framework.LookForStringInPodExec(namespaces[0].Name, podName, []string{"python", "-c", queryDNS}, "ok", dnsReadyTimeout)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "waiting for output from pod exec")
+framework.ExpectNoError(err, "waiting for output from pod exec")
updatedPodYaml := prepareResourceWithReplacedString(frontendPodYaml, fmt.Sprintf("dns-backend.development.svc.%s", framework.TestContext.ClusterDNSDomain), fmt.Sprintf("dns-backend.%s.svc.%s", namespaces[0].Name, framework.TestContext.ClusterDNSDomain))
@@ -153,7 +152,7 @@ var _ = SIGDescribe("ClusterDns [Feature:Example]", func() {
// wait for pods to print their result
for _, ns := range namespaces {
_, err := framework.LookForStringInLog(ns.Name, frontendPodName, frontendPodContainerName, podOutput, framework.PodStartTimeout)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "pod %s failed to print result in logs", frontendPodName)
+framework.ExpectNoError(err, "pod %s failed to print result in logs", frontendPodName)
}
})
})
@@ -165,10 +164,10 @@ func getNsCmdFlag(ns *v1.Namespace) string {
// pass enough context with the 'old' parameter so that it replaces what your really intended.
func prepareResourceWithReplacedString(inputFile, old, new string) string {
f, err := os.Open(inputFile)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to open file: %s", inputFile)
+framework.ExpectNoError(err, "failed to open file: %s", inputFile)
defer f.Close()
data, err := ioutil.ReadAll(f)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to read from file: %s", inputFile)
+framework.ExpectNoError(err, "failed to read from file: %s", inputFile)
podYaml := strings.Replace(string(data), old, new, 1)
return podYaml
}

test/e2e/network/firewall.go

@@ -56,7 +56,7 @@ var _ = SIGDescribe("Firewall rule", func() {
cs = f.ClientSet
cloudConfig = framework.TestContext.CloudConfig
gceCloud, err = gce.GetGCECloud()
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
})
// This test takes around 6 minutes to run
@@ -68,7 +68,7 @@ var _ = SIGDescribe("Firewall rule", func() {
ginkgo.By("Getting cluster ID")
clusterID, err := gce.GetClusterID(cs)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
e2elog.Logf("Got cluster ID: %v", clusterID)
jig := framework.NewServiceTestJig(cs, serviceName)
@@ -90,25 +90,28 @@ var _ = SIGDescribe("Firewall rule", func() {
svc.Spec.Type = v1.ServiceTypeNodePort
svc.Spec.LoadBalancerSourceRanges = nil
})
-gomega.Expect(cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(gomega.HaveOccurred())
+err = cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil)
+framework.ExpectNoError(err)
ginkgo.By("Waiting for the local traffic health check firewall rule to be deleted")
localHCFwName := gce.MakeHealthCheckFirewallNameForLBService(clusterID, cloudprovider.DefaultLoadBalancerName(svc), false)
_, err := gce.WaitForFirewallRule(gceCloud, localHCFwName, false, framework.LoadBalancerCleanupTimeout)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
}()
svcExternalIP := svc.Status.LoadBalancer.Ingress[0].IP
ginkgo.By("Checking if service's firewall rule is correct")
lbFw := gce.ConstructFirewallForLBService(svc, cloudConfig.NodeTag)
fw, err := gceCloud.GetFirewall(lbFw.Name)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
-gomega.Expect(gce.VerifyFirewallRule(fw, lbFw, cloudConfig.Network, false)).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
+err = gce.VerifyFirewallRule(fw, lbFw, cloudConfig.Network, false)
+framework.ExpectNoError(err)
ginkgo.By("Checking if service's nodes health check firewall rule is correct")
nodesHCFw := gce.ConstructHealthCheckFirewallForLBService(clusterID, svc, cloudConfig.NodeTag, true)
fw, err = gceCloud.GetFirewall(nodesHCFw.Name)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
-gomega.Expect(gce.VerifyFirewallRule(fw, nodesHCFw, cloudConfig.Network, false)).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
+err = gce.VerifyFirewallRule(fw, nodesHCFw, cloudConfig.Network, false)
+framework.ExpectNoError(err)
// OnlyLocal service is needed to examine which exact nodes the requests are being forwarded to by the Load Balancer on GCE
ginkgo.By("Updating LoadBalancer service to ExternalTrafficPolicy=Local")
@@ -118,13 +121,14 @@ var _ = SIGDescribe("Firewall rule", func() {
ginkgo.By("Waiting for the nodes health check firewall rule to be deleted")
_, err = gce.WaitForFirewallRule(gceCloud, nodesHCFw.Name, false, framework.LoadBalancerCleanupTimeout)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
ginkgo.By("Waiting for the correct local traffic health check firewall rule to be created")
localHCFw := gce.ConstructHealthCheckFirewallForLBService(clusterID, svc, cloudConfig.NodeTag, false)
fw, err = gce.WaitForFirewallRule(gceCloud, localHCFw.Name, true, framework.LoadBalancerCreateTimeoutDefault)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
-gomega.Expect(gce.VerifyFirewallRule(fw, localHCFw, cloudConfig.Network, false)).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
+err = gce.VerifyFirewallRule(fw, localHCFw, cloudConfig.Network, false)
+framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("Creating netexec pods on at most %v nodes", framework.MaxNodesForEndpointsTests))
for i, nodeName := range nodesNames {
@@ -132,13 +136,15 @@ var _ = SIGDescribe("Firewall rule", func() {
jig.LaunchNetexecPodOnNode(f, nodeName, podName, firewallTestHTTPPort, firewallTestUDPPort, true)
defer func() {
e2elog.Logf("Cleaning up the netexec pod: %v", podName)
-gomega.Expect(cs.CoreV1().Pods(ns).Delete(podName, nil)).NotTo(gomega.HaveOccurred())
+err = cs.CoreV1().Pods(ns).Delete(podName, nil)
+framework.ExpectNoError(err)
}()
}
// Send requests from outside of the cluster because internal traffic is whitelisted
ginkgo.By("Accessing the external service ip from outside, all non-master nodes should be reached")
-gomega.Expect(framework.TestHitNodesFromOutside(svcExternalIP, firewallTestHTTPPort, framework.LoadBalancerCreateTimeoutDefault, nodesSet)).NotTo(gomega.HaveOccurred())
+err = framework.TestHitNodesFromOutside(svcExternalIP, firewallTestHTTPPort, framework.LoadBalancerCreateTimeoutDefault, nodesSet)
+framework.ExpectNoError(err)
// Check if there are overlapping tags on the firewall that extend beyond just the vms in our cluster
// by removing the tag on one vm and make sure it doesn't get any traffic. This is an imperfect
@@ -158,11 +164,13 @@ var _ = SIGDescribe("Firewall rule", func() {
nodesSet.Insert(nodesNames[0])
gce.SetInstanceTags(cloudConfig, nodesNames[0], zone, removedTags)
// Make sure traffic is recovered before exit
-gomega.Expect(framework.TestHitNodesFromOutside(svcExternalIP, firewallTestHTTPPort, framework.LoadBalancerCreateTimeoutDefault, nodesSet)).NotTo(gomega.HaveOccurred())
+err = framework.TestHitNodesFromOutside(svcExternalIP, firewallTestHTTPPort, framework.LoadBalancerCreateTimeoutDefault, nodesSet)
+framework.ExpectNoError(err)
}()
ginkgo.By("Accessing serivce through the external ip and examine got no response from the node without tags")
-gomega.Expect(framework.TestHitNodesFromOutsideWithCount(svcExternalIP, firewallTestHTTPPort, framework.LoadBalancerCreateTimeoutDefault, nodesSet, 15)).NotTo(gomega.HaveOccurred())
+err = framework.TestHitNodesFromOutsideWithCount(svcExternalIP, firewallTestHTTPPort, framework.LoadBalancerCreateTimeoutDefault, nodesSet, 15)
+framework.ExpectNoError(err)
})
ginkgo.It("should have correct firewall rules for e2e cluster", func() {
@@ -174,8 +182,9 @@ var _ = SIGDescribe("Firewall rule", func() {
ginkgo.By("Checking if e2e firewall rules are correct")
for _, expFw := range gce.GetE2eFirewalls(cloudConfig.MasterName, cloudConfig.MasterTag, cloudConfig.NodeTag, cloudConfig.Network, cloudConfig.ClusterIPRange) {
fw, err := gceCloud.GetFirewall(expFw.Name)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
-gomega.Expect(gce.VerifyFirewallRule(fw, expFw, cloudConfig.Network, false)).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
+err = gce.VerifyFirewallRule(fw, expFw, cloudConfig.Network, false)
+framework.ExpectNoError(err)
}
ginkgo.By("Checking well known ports on master and nodes are not exposed externally")

test/e2e/network/network_policy.go

@@ -28,7 +28,6 @@ import (
"fmt"
"github.com/onsi/ginkgo"
-"github.com/onsi/gomega"
)
/*
@@ -52,7 +51,7 @@ var _ = SIGDescribe("NetworkPolicy", func() {
ginkgo.By("Waiting for pod ready", func() {
err := f.WaitForPodReady(podServer.Name)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
})
// Create pods, which should be able to communicate with the server on port 80 and 81.
@@ -77,7 +76,7 @@ var _ = SIGDescribe("NetworkPolicy", func() {
}
policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
defer cleanupNetworkPolicy(f, policy)
// Create a pod with name 'client-cannot-connect', which will attempt to communicate with the server,
@@ -112,7 +111,7 @@ var _ = SIGDescribe("NetworkPolicy", func() {
}
policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
defer cleanupNetworkPolicy(f, policy)
ginkgo.By("Creating client-a which should be able to contact the server.", func() {
@@ -132,12 +131,12 @@ var _ = SIGDescribe("NetworkPolicy", func() {
nsB, err := f.CreateNamespace(nsBName, map[string]string{
"ns-name": nsBName,
})
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
// Create Server with Service in NS-B
e2elog.Logf("Waiting for server to come up.")
err = framework.WaitForPodRunningInNamespace(f.ClientSet, podServer)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
// Create Policy for that service that allows traffic only via namespace B
ginkgo.By("Creating a network policy for the server which allows traffic from namespace-b.")
@@ -165,7 +164,7 @@ var _ = SIGDescribe("NetworkPolicy", func() {
},
}
policy, err = f.ClientSet.NetworkingV1().NetworkPolicies(nsA.Name).Create(policy)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
defer cleanupNetworkPolicy(f, policy)
testCannotConnect(f, nsA, "client-a", service, 80)
@@ -194,7 +193,7 @@ var _ = SIGDescribe("NetworkPolicy", func() {
},
}
policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
defer cleanupNetworkPolicy(f, policy)
ginkgo.By("Testing pods can connect only to the port allowed by the policy.")
@@ -224,7 +223,7 @@ var _ = SIGDescribe("NetworkPolicy", func() {
},
}
policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
defer cleanupNetworkPolicy(f, policy)
ginkgo.By("Creating a network policy for the Service which allows traffic only to another port.")
@@ -248,7 +247,7 @@ var _ = SIGDescribe("NetworkPolicy", func() {
},
}
policy2, err = f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy2)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
defer cleanupNetworkPolicy(f, policy2)
ginkgo.By("Testing pods can connect to both ports when both policies are present.")
@@ -271,7 +270,7 @@ var _ = SIGDescribe("NetworkPolicy", func() {
},
}
policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
defer cleanupNetworkPolicy(f, policy)
ginkgo.By("Testing pods can connect to both ports when an 'allow-all' policy is present.")
@@ -301,7 +300,7 @@ var _ = SIGDescribe("NetworkPolicy", func() {
}
policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
defer cleanupNetworkPolicy(f, policy)
ginkgo.By("Creating client-a which should be able to contact the server.", func() {
@@ -343,7 +342,7 @@ var _ = SIGDescribe("NetworkPolicy", func() {
}
policy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(policy)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
defer cleanupNetworkPolicy(f, policy)
ginkgo.By("Creating client-a which should be able to contact the server.", func() {
@@ -368,7 +367,7 @@ func testCanConnect(f *framework.Framework, ns *v1.Namespace, podName string, se
e2elog.Logf("Waiting for %s to complete.", podClient.Name)
err := framework.WaitForPodNoLongerRunningInNamespace(f.ClientSet, podClient.Name, ns.Name)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Pod did not finish as expected.")
+framework.ExpectNoError(err, "Pod did not finish as expected.")
e2elog.Logf("Waiting for %s to complete.", podClient.Name)
err = framework.WaitForPodSuccessInNamespace(f.ClientSet, podClient.Name, ns.Name)
@@ -508,7 +507,7 @@ func createServerPodAndService(f *framework.Framework, namespace *v1.Namespace,
RestartPolicy: v1.RestartPolicyNever,
},
})
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
e2elog.Logf("Created pod %v", pod.ObjectMeta.Name)
svcName := fmt.Sprintf("svc-%s", podName)
@@ -524,7 +523,7 @@ func createServerPodAndService(f *framework.Framework, namespace *v1.Namespace,
},
},
})
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
e2elog.Logf("Created service %s", svc.Name)
return pod, svc
@@ -569,7 +568,7 @@ func createNetworkClientPod(f *framework.Framework, namespace *v1.Namespace, pod
},
})
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
return pod
}

test/e2e/network/network_tiers.go

@@ -79,7 +79,7 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() {
})
// Verify that service has been updated properly.
svcTier, err := gcecloud.GetServiceNetworkTier(svc)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
gomega.Expect(svcTier).To(gomega.Equal(cloud.NetworkTierStandard))
// Record the LB name for test cleanup.
serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc))
@@ -94,7 +94,7 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() {
})
// Verify that service has been updated properly.
svcTier, err = gcecloud.GetServiceNetworkTier(svc)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
gomega.Expect(svcTier).To(gomega.Equal(cloud.NetworkTierDefault))
// Wait until the ingress IP changes. Each tier has its own pool of
@@ -105,9 +105,9 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() {
ginkgo.By("reserving a static IP for the load balancer")
requestedAddrName := fmt.Sprintf("e2e-ext-lb-net-tier-%s", framework.RunID)
gceCloud, err := gce.GetGCECloud()
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
requestedIP, err := reserveAlphaRegionalAddress(gceCloud, requestedAddrName, cloud.NetworkTierStandard)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to reserve a STANDARD tiered address")
+framework.ExpectNoError(err, "failed to reserve a STANDARD tiered address")
defer func() {
if requestedAddrName != "" {
// Release GCE static address - this is not kube-managed and will not be automatically released.
@@ -116,7 +116,7 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() {
}
}
}()
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
e2elog.Logf("Allocated static IP to be used by the load balancer: %q", requestedIP)
ginkgo.By("updating the Service to use the standard tier with a requested IP")
@@ -127,7 +127,7 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() {
// Verify that service has been updated properly.
gomega.Expect(svc.Spec.LoadBalancerIP).To(gomega.Equal(requestedIP))
svcTier, err = gcecloud.GetServiceNetworkTier(svc)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
gomega.Expect(svcTier).To(gomega.Equal(cloud.NetworkTierStandard))
// Wait until the ingress IP changes and verifies the LB.
@@ -163,9 +163,9 @@ func waitAndVerifyLBWithTier(jig *framework.ServiceTestJig, ns, svcName, existin
// Verify the network tier matches the desired.
svcNetTier, err := gcecloud.GetServiceNetworkTier(svc)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
netTier, err := getLBNetworkTierByIP(ingressIP)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get the network tier of the load balancer")
+framework.ExpectNoError(err, "failed to get the network tier of the load balancer")
gomega.Expect(netTier).To(gomega.Equal(svcNetTier))
return ingressIP

test/e2e/network/proxy.go

@@ -116,7 +116,7 @@ var _ = SIGDescribe("Proxy", func() {
},
},
})
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
// Make an RC with a single pod. The 'porter' image is
// a simple server which serves the values of the
@@ -160,10 +160,12 @@ var _ = SIGDescribe("Proxy", func() {
Labels: labels,
CreatedPods: &pods,
}
-gomega.Expect(framework.RunRC(cfg)).NotTo(gomega.HaveOccurred())
+err = framework.RunRC(cfg)
+framework.ExpectNoError(err)
defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, cfg.Name)
-gomega.Expect(endpoints.WaitForEndpoint(f.ClientSet, f.Namespace.Name, service.Name)).NotTo(gomega.HaveOccurred())
+err = endpoints.WaitForEndpoint(f.ClientSet, f.Namespace.Name, service.Name)
+framework.ExpectNoError(err)
// table constructors
// Try proxying through the service and directly to through the pod.
@@ -297,7 +299,7 @@ func pickNode(cs clientset.Interface) (string, error) {
func nodeProxyTest(f *framework.Framework, prefix, nodeDest string) {
node, err := pickNode(f.ClientSet)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
// TODO: Change it to test whether all requests succeeded when requests
// not reaching Kubelet issue is debugged.
serviceUnavailableErrors := 0
@@ -308,7 +310,7 @@ func nodeProxyTest(f *framework.Framework, prefix, nodeDest string) {
time.Sleep(time.Second)
serviceUnavailableErrors++
} else {
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
gomega.Expect(status).To(gomega.Equal(http.StatusOK))
gomega.Expect(d).To(gomega.BeNumerically("<", proxyHTTPCallTimeout))
}