Merge pull request #77692 from oomichi/use-ExpectNoError-common

Use framework.ExpectNoError() for e2e/common
Kubernetes Prow Robot 2019-05-16 01:10:34 -07:00 committed by GitHub
commit b7bc5dc9db
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 27 additions and 26 deletions
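The change itself is mechanical: every gomega.Expect(err).NotTo(gomega.HaveOccurred(), ...) assertion in the touched files becomes framework.ExpectNoError(err, ...). The helper funnels the same check through one entry point, so the e2e tests no longer need a direct gomega import just to assert on errors. A minimal sketch of such a wrapper (an illustration of the idea, not the framework's exact source):

package framework

import "github.com/onsi/gomega"

// ExpectNoError fails the running test when err is non-nil.
// Sketch only: the real helper also logs the error, but the core
// assertion is the same gomega check the tests used inline before.
// ExpectWithOffset(1, ...) makes the reported failure point at the
// caller rather than at this wrapper.
func ExpectNoError(err error, explain ...interface{}) {
	gomega.ExpectWithOffset(1, err).NotTo(gomega.HaveOccurred(), explain...)
}

With the assertion centralized, any later change to failure handling (extra logging, richer messages) lands in one function instead of dozens of call sites.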

View File

@@ -67,7 +67,7 @@ func testHostIP(podClient *framework.PodClient, pod *v1.Pod) {
t := time.Now()
for {
p, err := podClient.Get(pod.Name, metav1.GetOptions{})
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to get pod %q", pod.Name)
+framework.ExpectNoError(err, "Failed to get pod %q", pod.Name)
if p.Status.HostIP != "" {
e2elog.Logf("Pod %s has hostIP: %s", p.Name, p.Status.HostIP)
break
@@ -218,7 +218,7 @@ var _ = framework.KubeDescribe("Pods", func() {
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options := metav1.ListOptions{LabelSelector: selector.String()}
pods, err := podClient.List(options)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pods")
+framework.ExpectNoError(err, "failed to query for pods")
gomega.Expect(len(pods.Items)).To(gomega.Equal(0))
options = metav1.ListOptions{
LabelSelector: selector.String(),
@@ -256,7 +256,7 @@ var _ = framework.KubeDescribe("Pods", func() {
selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options = metav1.ListOptions{LabelSelector: selector.String()}
pods, err = podClient.List(options)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pods")
+framework.ExpectNoError(err, "failed to query for pods")
gomega.Expect(len(pods.Items)).To(gomega.Equal(1))
ginkgo.By("verifying pod creation was observed")
@@ -279,11 +279,11 @@ var _ = framework.KubeDescribe("Pods", func() {
framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
// save the running pod
pod, err = podClient.Get(pod.Name, metav1.GetOptions{})
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to GET scheduled pod")
+framework.ExpectNoError(err, "failed to GET scheduled pod")
ginkgo.By("deleting the pod gracefully")
err = podClient.Delete(pod.Name, metav1.NewDeleteOptions(30))
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete pod")
+framework.ExpectNoError(err, "failed to delete pod")
ginkgo.By("verifying the kubelet observed the termination notice")
gomega.Expect(wait.Poll(time.Second*5, time.Second*30, func() (bool, error) {
@@ -335,7 +335,7 @@ var _ = framework.KubeDescribe("Pods", func() {
selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options = metav1.ListOptions{LabelSelector: selector.String()}
pods, err = podClient.List(options)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pods")
+framework.ExpectNoError(err, "failed to query for pods")
gomega.Expect(len(pods.Items)).To(gomega.Equal(0))
})
@@ -373,7 +373,7 @@ var _ = framework.KubeDescribe("Pods", func() {
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options := metav1.ListOptions{LabelSelector: selector.String()}
pods, err := podClient.List(options)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pods")
+framework.ExpectNoError(err, "failed to query for pods")
gomega.Expect(len(pods.Items)).To(gomega.Equal(1))
ginkgo.By("updating the pod")
@@ -388,7 +388,7 @@ var _ = framework.KubeDescribe("Pods", func() {
selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options = metav1.ListOptions{LabelSelector: selector.String()}
pods, err = podClient.List(options)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pods")
+framework.ExpectNoError(err, "failed to query for pods")
gomega.Expect(len(pods.Items)).To(gomega.Equal(1))
e2elog.Logf("Pod update OK")
})
@@ -427,7 +427,7 @@ var _ = framework.KubeDescribe("Pods", func() {
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options := metav1.ListOptions{LabelSelector: selector.String()}
pods, err := podClient.List(options)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pods")
+framework.ExpectNoError(err, "failed to query for pods")
gomega.Expect(len(pods.Items)).To(gomega.Equal(1))
ginkgo.By("updating the pod")
@@ -491,7 +491,7 @@ var _ = framework.KubeDescribe("Pods", func() {
},
}
_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(svc)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create service")
+framework.ExpectNoError(err, "failed to create service")
// Make a client pod that verifies that it has the service environment variables.
podName := "client-envvars-" + string(uuid.NewUUID())
@@ -538,7 +538,7 @@ var _ = framework.KubeDescribe("Pods", func() {
*/
framework.ConformanceIt("should support remote command execution over websockets [NodeConformance]", func() {
config, err := framework.LoadConfig()
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unable to get base config")
+framework.ExpectNoError(err, "unable to get base config")
ginkgo.By("creating the pod")
name := "pod-exec-websocket-" + string(uuid.NewUUID())
@@ -620,7 +620,7 @@ var _ = framework.KubeDescribe("Pods", func() {
*/
framework.ConformanceIt("should support retrieving logs from the container over websockets [NodeConformance]", func() {
config, err := framework.LoadConfig()
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unable to get base config")
+framework.ExpectNoError(err, "unable to get base config")
ginkgo.By("creating the pod")
name := "pod-logs-websocket-" + string(uuid.NewUUID())
@@ -798,14 +798,15 @@ var _ = framework.KubeDescribe("Pods", func() {
}
validatePodReadiness := func(expectReady bool) {
-gomega.Expect(wait.Poll(time.Second, maxReadyStatusUpdateTolerance, func() (bool, error) {
+err := wait.Poll(time.Second, maxReadyStatusUpdateTolerance, func() (bool, error) {
podReady := podClient.PodIsReady(podName)
res := expectReady == podReady
if !res {
e2elog.Logf("Expect the Ready condition of pod %q to be %v, but got %v", podName, expectReady, podReady)
}
return res, nil
-})).NotTo(gomega.HaveOccurred())
+})
+framework.ExpectNoError(err)
}
ginkgo.By("submitting the pod to kubernetes")

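The one hunk above that is more than a one-line swap is validatePodReadiness: instead of wrapping the wait.Poll call itself in a gomega assertion, the poll's error is captured in a local variable and checked once, which is why the diff nets one extra line (27 additions, 26 deletions overall). The same shape in isolation (podIsReady and the timeout here are illustrative, not taken from the diff):

// Poll until the condition reports true or the timeout elapses;
// wait.Poll returns a timeout error if the condition never holds.
err := wait.Poll(time.Second, 30*time.Second, func() (bool, error) {
	return podIsReady(), nil // illustrative condition
})
framework.ExpectNoError(err, "pod readiness never reached the expected state")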
View File

@@ -114,7 +114,7 @@ while true; do sleep 1; done
Expect(terminateContainer.IsReady()).Should(Equal(testCase.Ready))
status, err := terminateContainer.GetStatus()
-Expect(err).ShouldNot(HaveOccurred())
+framework.ExpectNoError(err)
By(fmt.Sprintf("Container '%s': should get the expected 'State'", testContainer.Name))
Expect(GetContainerState(status.State)).To(Equal(testCase.State))
@@ -148,7 +148,7 @@ while true; do sleep 1; done
By("get the container status")
status, err := c.GetStatus()
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
By("the container should be terminated")
Expect(GetContainerState(status.State)).To(Equal(ContainerStateTerminated))
@@ -286,7 +286,7 @@ while true; do sleep 1; done
secret.Name = "image-pull-secret-" + string(uuid.NewUUID())
By("create image pull secret")
_, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
defer f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(secret.Name, nil)
container.ImagePullSecrets = []string{secret.Name}
}

View File

@@ -77,7 +77,7 @@ var _ = framework.KubeDescribe("Sysctls [NodeFeature:Sysctls]", func() {
// failed pods without running containers. This would create a race as the pod
// might have already been deleted here.
ev, err := f.PodClient().WaitForErrorEventOrSuccess(pod)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
if ev != nil && ev.Reason == sysctl.UnsupportedReason {
framework.Skipf("No sysctl support in Docker <1.12")
}
@@ -85,16 +85,16 @@ var _ = framework.KubeDescribe("Sysctls [NodeFeature:Sysctls]", func() {
By("Waiting for pod completion")
err = f.WaitForPodNoLongerRunning(pod.Name)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
pod, err = podClient.Get(pod.Name, metav1.GetOptions{})
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
By("Checking that the pod succeeded")
Expect(pod.Status.Phase).To(Equal(v1.PodSucceeded))
By("Getting logs from the pod")
log, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
By("Checking that the sysctl is actually updated")
Expect(log).To(ContainSubstring("kernel.shm_rmid_forced = 1"))
@@ -120,7 +120,7 @@ var _ = framework.KubeDescribe("Sysctls [NodeFeature:Sysctls]", func() {
// failed pods without running containers. This would create a race as the pod
// might have already been deleted here.
ev, err := f.PodClient().WaitForErrorEventOrSuccess(pod)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
if ev != nil && ev.Reason == sysctl.UnsupportedReason {
framework.Skipf("No sysctl support in Docker <1.12")
}
@@ -128,16 +128,16 @@ var _ = framework.KubeDescribe("Sysctls [NodeFeature:Sysctls]", func() {
By("Waiting for pod completion")
err = f.WaitForPodNoLongerRunning(pod.Name)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
pod, err = podClient.Get(pod.Name, metav1.GetOptions{})
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
By("Checking that the pod succeeded")
Expect(pod.Status.Phase).To(Equal(v1.PodSucceeded))
By("Getting logs from the pod")
log, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
By("Checking that the sysctl is actually updated")
Expect(log).To(ContainSubstring("kernel.shm_rmid_forced = 1"))
@@ -197,7 +197,7 @@ var _ = framework.KubeDescribe("Sysctls [NodeFeature:Sysctls]", func() {
// failed pods without running containers. This would create a race as the pod
// might have already been deleted here.
ev, err := f.PodClient().WaitForErrorEventOrSuccess(pod)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
if ev != nil && ev.Reason == sysctl.UnsupportedReason {
framework.Skipf("No sysctl support in Docker <1.12")
}