Use framework.ExpectNoError() for e2e/common

The e2e test framework provides ExpectNoError() for more readable test code.
This commit replaces Expect(err).NotTo(HaveOccurred()) with framework.ExpectNoError() in e2e/common.
Kenichi Omichi 2019-05-09 18:54:37 +00:00
parent 08afeb855f
commit b9fffd1571
3 changed files with 27 additions and 26 deletions
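
Before the per-file hunks, here is a minimal sketch of the pattern being swapped, in both styles (the describe block, pod name, and failure message are invented for illustration; the helper takes the error plus an optional printf-style explanation, just like the gomega form it replaces):

package example

import (
	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/test/e2e/framework"
)

var _ = framework.KubeDescribe("ExpectNoError example", func() {
	f := framework.NewDefaultFramework("expect-no-error-example")

	ginkgo.It("should report errors through the framework helper", func() {
		// Hypothetical call; any operation returning an error is checked the same way.
		pod, err := f.PodClient().Get("example-pod", metav1.GetOptions{})

		// Old style, as removed by this commit:
		gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to get pod %q", "example-pod")

		// New style, as introduced by this commit:
		framework.ExpectNoError(err, "Failed to get pod %q", "example-pod")

		_ = pod
	})
})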


@@ -67,7 +67,7 @@ func testHostIP(podClient *framework.PodClient, pod *v1.Pod) {
t := time.Now()
for {
p, err := podClient.Get(pod.Name, metav1.GetOptions{})
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to get pod %q", pod.Name)
+framework.ExpectNoError(err, "Failed to get pod %q", pod.Name)
if p.Status.HostIP != "" {
e2elog.Logf("Pod %s has hostIP: %s", p.Name, p.Status.HostIP)
break
@@ -218,7 +218,7 @@ var _ = framework.KubeDescribe("Pods", func() {
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options := metav1.ListOptions{LabelSelector: selector.String()}
pods, err := podClient.List(options)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pods")
+framework.ExpectNoError(err, "failed to query for pods")
gomega.Expect(len(pods.Items)).To(gomega.Equal(0))
options = metav1.ListOptions{
LabelSelector: selector.String(),
@@ -256,7 +256,7 @@ var _ = framework.KubeDescribe("Pods", func() {
selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options = metav1.ListOptions{LabelSelector: selector.String()}
pods, err = podClient.List(options)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pods")
+framework.ExpectNoError(err, "failed to query for pods")
gomega.Expect(len(pods.Items)).To(gomega.Equal(1))
ginkgo.By("verifying pod creation was observed")
@@ -279,11 +279,11 @@ var _ = framework.KubeDescribe("Pods", func() {
framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
// save the running pod
pod, err = podClient.Get(pod.Name, metav1.GetOptions{})
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to GET scheduled pod")
+framework.ExpectNoError(err, "failed to GET scheduled pod")
ginkgo.By("deleting the pod gracefully")
err = podClient.Delete(pod.Name, metav1.NewDeleteOptions(30))
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete pod")
+framework.ExpectNoError(err, "failed to delete pod")
ginkgo.By("verifying the kubelet observed the termination notice")
gomega.Expect(wait.Poll(time.Second*5, time.Second*30, func() (bool, error) {
@@ -335,7 +335,7 @@ var _ = framework.KubeDescribe("Pods", func() {
selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options = metav1.ListOptions{LabelSelector: selector.String()}
pods, err = podClient.List(options)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pods")
+framework.ExpectNoError(err, "failed to query for pods")
gomega.Expect(len(pods.Items)).To(gomega.Equal(0))
})
@@ -373,7 +373,7 @@ var _ = framework.KubeDescribe("Pods", func() {
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options := metav1.ListOptions{LabelSelector: selector.String()}
pods, err := podClient.List(options)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pods")
+framework.ExpectNoError(err, "failed to query for pods")
gomega.Expect(len(pods.Items)).To(gomega.Equal(1))
ginkgo.By("updating the pod")
@@ -388,7 +388,7 @@ var _ = framework.KubeDescribe("Pods", func() {
selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options = metav1.ListOptions{LabelSelector: selector.String()}
pods, err = podClient.List(options)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pods")
+framework.ExpectNoError(err, "failed to query for pods")
gomega.Expect(len(pods.Items)).To(gomega.Equal(1))
e2elog.Logf("Pod update OK")
})
@@ -427,7 +427,7 @@ var _ = framework.KubeDescribe("Pods", func() {
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options := metav1.ListOptions{LabelSelector: selector.String()}
pods, err := podClient.List(options)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pods")
+framework.ExpectNoError(err, "failed to query for pods")
gomega.Expect(len(pods.Items)).To(gomega.Equal(1))
ginkgo.By("updating the pod")
@@ -491,7 +491,7 @@ var _ = framework.KubeDescribe("Pods", func() {
},
}
_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(svc)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create service")
+framework.ExpectNoError(err, "failed to create service")
// Make a client pod that verifies that it has the service environment variables.
podName := "client-envvars-" + string(uuid.NewUUID())
@@ -538,7 +538,7 @@ var _ = framework.KubeDescribe("Pods", func() {
*/
framework.ConformanceIt("should support remote command execution over websockets [NodeConformance]", func() {
config, err := framework.LoadConfig()
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unable to get base config")
+framework.ExpectNoError(err, "unable to get base config")
ginkgo.By("creating the pod")
name := "pod-exec-websocket-" + string(uuid.NewUUID())
@@ -620,7 +620,7 @@ var _ = framework.KubeDescribe("Pods", func() {
*/
framework.ConformanceIt("should support retrieving logs from the container over websockets [NodeConformance]", func() {
config, err := framework.LoadConfig()
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unable to get base config")
+framework.ExpectNoError(err, "unable to get base config")
ginkgo.By("creating the pod")
name := "pod-logs-websocket-" + string(uuid.NewUUID())
@@ -798,14 +798,15 @@ var _ = framework.KubeDescribe("Pods", func() {
}
validatePodReadiness := func(expectReady bool) {
-gomega.Expect(wait.Poll(time.Second, maxReadyStatusUpdateTolerance, func() (bool, error) {
+err := wait.Poll(time.Second, maxReadyStatusUpdateTolerance, func() (bool, error) {
podReady := podClient.PodIsReady(podName)
res := expectReady == podReady
if !res {
e2elog.Logf("Expect the Ready condition of pod %q to be %v, but got %v", podName, expectReady, podReady)
}
return res, nil
-})).NotTo(gomega.HaveOccurred())
+})
+framework.ExpectNoError(err)
}
ginkgo.By("submitting the pod to kubernetes")


@@ -114,7 +114,7 @@ while true; do sleep 1; done
Expect(terminateContainer.IsReady()).Should(Equal(testCase.Ready))
status, err := terminateContainer.GetStatus()
-Expect(err).ShouldNot(HaveOccurred())
+framework.ExpectNoError(err)
By(fmt.Sprintf("Container '%s': should get the expected 'State'", testContainer.Name))
Expect(GetContainerState(status.State)).To(Equal(testCase.State))
@@ -148,7 +148,7 @@ while true; do sleep 1; done
By("get the container status")
status, err := c.GetStatus()
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
By("the container should be terminated")
Expect(GetContainerState(status.State)).To(Equal(ContainerStateTerminated))
@@ -286,7 +286,7 @@ while true; do sleep 1; done
secret.Name = "image-pull-secret-" + string(uuid.NewUUID())
By("create image pull secret")
_, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
defer f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(secret.Name, nil)
container.ImagePullSecrets = []string{secret.Name}
}


@@ -77,7 +77,7 @@ var _ = framework.KubeDescribe("Sysctls [NodeFeature:Sysctls]", func() {
// failed pods without running containers. This would create a race as the pod
// might have already been deleted here.
ev, err := f.PodClient().WaitForErrorEventOrSuccess(pod)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
if ev != nil && ev.Reason == sysctl.UnsupportedReason {
framework.Skipf("No sysctl support in Docker <1.12")
}
@@ -85,16 +85,16 @@ var _ = framework.KubeDescribe("Sysctls [NodeFeature:Sysctls]", func() {
By("Waiting for pod completion")
err = f.WaitForPodNoLongerRunning(pod.Name)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
pod, err = podClient.Get(pod.Name, metav1.GetOptions{})
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
By("Checking that the pod succeeded")
Expect(pod.Status.Phase).To(Equal(v1.PodSucceeded))
By("Getting logs from the pod")
log, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
By("Checking that the sysctl is actually updated")
Expect(log).To(ContainSubstring("kernel.shm_rmid_forced = 1"))
@@ -120,7 +120,7 @@ var _ = framework.KubeDescribe("Sysctls [NodeFeature:Sysctls]", func() {
// failed pods without running containers. This would create a race as the pod
// might have already been deleted here.
ev, err := f.PodClient().WaitForErrorEventOrSuccess(pod)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
if ev != nil && ev.Reason == sysctl.UnsupportedReason {
framework.Skipf("No sysctl support in Docker <1.12")
}
@@ -128,16 +128,16 @@ var _ = framework.KubeDescribe("Sysctls [NodeFeature:Sysctls]", func() {
By("Waiting for pod completion")
err = f.WaitForPodNoLongerRunning(pod.Name)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
pod, err = podClient.Get(pod.Name, metav1.GetOptions{})
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
By("Checking that the pod succeeded")
Expect(pod.Status.Phase).To(Equal(v1.PodSucceeded))
By("Getting logs from the pod")
log, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
By("Checking that the sysctl is actually updated")
Expect(log).To(ContainSubstring("kernel.shm_rmid_forced = 1"))
@@ -197,7 +197,7 @@ var _ = framework.KubeDescribe("Sysctls [NodeFeature:Sysctls]", func() {
// failed pods without running containers. This would create a race as the pod
// might have already been deleted here.
ev, err := f.PodClient().WaitForErrorEventOrSuccess(pod)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
if ev != nil && ev.Reason == sysctl.UnsupportedReason {
framework.Skipf("No sysctl support in Docker <1.12")
}