Use framework.ExpectNoError() for e2e/node

s-ito-ts 2019-05-15 05:31:38 +00:00
parent b066e0d783
commit f2a7650ede
6 changed files with 44 additions and 43 deletions
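
The change is mechanical throughout e2e/node: assertions of the form gomega.Expect(err).NotTo(gomega.HaveOccurred(), ...) become framework.ExpectNoError(err, ...), which fails the spec the same way while letting files drop their direct gomega import where nothing else needs it. A minimal, self-contained sketch of the simple one-to-one case (the helper name and package are invented for illustration, not taken from this commit):

    // Hypothetical helper showing the post-commit style; names are illustrative.
    package node

    import (
        v1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        clientset "k8s.io/client-go/kubernetes"
        "k8s.io/kubernetes/test/e2e/framework"
    )

    // getPodOrFail fetches a pod and fails the running spec on error.
    func getPodOrFail(c clientset.Interface, ns, name string) *v1.Pod {
        pod, err := c.CoreV1().Pods(ns).Get(name, metav1.GetOptions{})
        // Before this commit: gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pod")
        framework.ExpectNoError(err, "failed to query for pod")
        return pod
    }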

View File

@@ -166,13 +166,13 @@ func createPodUsingNfs(f *framework.Framework, c clientset.Interface, ns, nfsIP,
 },
 }
 rtnPod, err := c.CoreV1().Pods(ns).Create(pod)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 err = f.WaitForPodReady(rtnPod.Name) // running & ready
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 rtnPod, err = c.CoreV1().Pods(ns).Get(rtnPod.Name, metav1.GetOptions{}) // return fresh pod
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 return rtnPod
 }
@@ -189,7 +189,7 @@ func checkPodCleanup(c clientset.Interface, pod *v1.Pod, expectClean bool) {
 mountDir := filepath.Join(podDir, "volumes", "kubernetes.io~nfs")
 // use ip rather than hostname in GCE
 nodeIP, err := framework.GetHostExternalAddress(c, pod)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 condMsg := "deleted"
 if !expectClean {
@@ -216,7 +216,7 @@ func checkPodCleanup(c clientset.Interface, pod *v1.Pod, expectClean bool) {
 e2elog.Logf("Wait up to %v for host's (%v) %q to be %v", timeout, nodeIP, test.feature, condMsg)
 err = wait.Poll(poll, timeout, func() (bool, error) {
 result, err := e2essh.NodeExec(nodeIP, test.cmd, framework.TestContext.Provider)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 e2essh.LogResult(result)
 ok := (result.Code == 0 && len(result.Stdout) > 0 && len(result.Stderr) == 0)
 if expectClean && ok { // keep trying
@@ -227,7 +227,7 @@ func checkPodCleanup(c clientset.Interface, pod *v1.Pod, expectClean bool) {
 }
 return true, nil // done, host is as expected
 })
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), fmt.Sprintf("Host (%v) cleanup error: %v. Expected %q to be %v", nodeIP, err, test.feature, condMsg))
+framework.ExpectNoError(err, fmt.Sprintf("Host (%v) cleanup error: %v. Expected %q to be %v", nodeIP, err, test.feature, condMsg))
 }
 if expectClean {
@@ -317,20 +317,21 @@ var _ = SIGDescribe("kubelet", func() {
 ginkgo.By(fmt.Sprintf("Creating a RC of %d pods and wait until all pods of this RC are running", totalPods))
 rcName := fmt.Sprintf("cleanup%d-%s", totalPods, string(uuid.NewUUID()))
-gomega.Expect(framework.RunRC(testutils.RCConfig{
+err := framework.RunRC(testutils.RCConfig{
 Client: f.ClientSet,
 Name: rcName,
 Namespace: f.Namespace.Name,
 Image: imageutils.GetPauseImageName(),
 Replicas: totalPods,
 NodeSelector: nodeLabels,
-})).NotTo(gomega.HaveOccurred())
+})
+framework.ExpectNoError(err)
 // Perform a sanity check so that we know all desired pods are
 // running on the nodes according to kubelet. The timeout is set to
 // only 30 seconds here because framework.RunRC already waited for all pods to
 // transition to the running status.
-gomega.Expect(waitTillNPodsRunningOnNodes(f.ClientSet, nodeNames, rcName, ns, totalPods,
-time.Second*30)).NotTo(gomega.HaveOccurred())
+err = waitTillNPodsRunningOnNodes(f.ClientSet, nodeNames, rcName, ns, totalPods, time.Second*30)
+framework.ExpectNoError(err)
 if resourceMonitor != nil {
 resourceMonitor.LogLatest()
 }
@@ -345,8 +346,8 @@ var _ = SIGDescribe("kubelet", func() {
 // - a bug in graceful termination (if it is enabled)
 // - docker slow to delete pods (or resource problems causing slowness)
 start := time.Now()
-gomega.Expect(waitTillNPodsRunningOnNodes(f.ClientSet, nodeNames, rcName, ns, 0,
-itArg.timeout)).NotTo(gomega.HaveOccurred())
+err = waitTillNPodsRunningOnNodes(f.ClientSet, nodeNames, rcName, ns, 0, itArg.timeout)
+framework.ExpectNoError(err)
 e2elog.Logf("Deleting %d pods on %d nodes completed in %v after the RC was deleted", totalPods, len(nodeNames),
 time.Since(start))
 if resourceMonitor != nil {
@@ -396,9 +397,9 @@ var _ = SIGDescribe("kubelet", func() {
 ginkgo.AfterEach(func() {
 err := framework.DeletePodWithWait(f, c, pod)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "AfterEach: Failed to delete client pod ", pod.Name)
+framework.ExpectNoError(err, "AfterEach: Failed to delete client pod ", pod.Name)
 err = framework.DeletePodWithWait(f, c, nfsServerPod)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "AfterEach: Failed to delete server pod ", nfsServerPod.Name)
+framework.ExpectNoError(err, "AfterEach: Failed to delete server pod ", nfsServerPod.Name)
 })
 // execute It blocks from above table of tests
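
The RC-creation and polling hunks above show the second shape of the conversion: where the old code wrapped a whole call expression in gomega.Expect(...).NotTo(gomega.HaveOccurred()), the returned error is now captured in a variable first and then passed to framework.ExpectNoError, which is why those hunks grow by one line. A rough sketch of that shape, with an invented helper name:

    // Hypothetical helper; mirrors the framework.RunRC conversion above.
    package node

    import (
        "k8s.io/kubernetes/test/e2e/framework"
        testutils "k8s.io/kubernetes/test/utils"
        imageutils "k8s.io/kubernetes/test/utils/image"
    )

    // startPauseRC starts an RC of pause pods and fails the spec if RunRC errors.
    func startPauseRC(f *framework.Framework, name string, replicas int) {
        // Before: gomega.Expect(framework.RunRC(...)).NotTo(gomega.HaveOccurred())
        err := framework.RunRC(testutils.RCConfig{
            Client:    f.ClientSet,
            Name:      name,
            Namespace: f.Namespace.Name,
            Image:     imageutils.GetPauseImageName(),
            Replicas:  replicas,
        })
        framework.ExpectNoError(err)
    }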

View File

@@ -31,7 +31,6 @@ import (
 imageutils "k8s.io/kubernetes/test/utils/image"
 "github.com/onsi/ginkgo"
-"github.com/onsi/gomega"
 )
 const (
@@ -70,13 +69,14 @@ func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames
 rcName := fmt.Sprintf("resource%d-%s", totalPods, string(uuid.NewUUID()))
 // TODO: Use a more realistic workload
-gomega.Expect(framework.RunRC(testutils.RCConfig{
+err := framework.RunRC(testutils.RCConfig{
 Client: f.ClientSet,
 Name: rcName,
 Namespace: f.Namespace.Name,
 Image: imageutils.GetPauseImageName(),
 Replicas: totalPods,
-})).NotTo(gomega.HaveOccurred())
+})
+framework.ExpectNoError(err)
 // Log once and flush the stats.
 rm.LogLatest()
@@ -103,7 +103,7 @@ func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames
 ginkgo.By("Reporting overall resource usage")
 logPodsOnNodes(f.ClientSet, nodeNames.List())
 usageSummary, err := rm.GetLatest()
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 // TODO(random-liu): Remove the original log when we migrate to new perfdash
 e2elog.Logf("%s", rm.FormatResourceUsage(usageSummary))
 // Log perf result

View File

@@ -79,7 +79,7 @@ var _ = SIGDescribe("Pods Extended", func() {
 selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
 options := metav1.ListOptions{LabelSelector: selector.String()}
 pods, err := podClient.List(options)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pod")
+framework.ExpectNoError(err, "failed to query for pod")
 gomega.Expect(len(pods.Items)).To(gomega.Equal(0))
 options = metav1.ListOptions{
 LabelSelector: selector.String(),
@@ -93,7 +93,7 @@ var _ = SIGDescribe("Pods Extended", func() {
 selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
 options = metav1.ListOptions{LabelSelector: selector.String()}
 pods, err = podClient.List(options)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pod")
+framework.ExpectNoError(err, "failed to query for pod")
 gomega.Expect(len(pods.Items)).To(gomega.Equal(1))
 // We need to wait for the pod to be running, otherwise the deletion
@@ -101,25 +101,25 @@ var _ = SIGDescribe("Pods Extended", func() {
 framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
 // save the running pod
 pod, err = podClient.Get(pod.Name, metav1.GetOptions{})
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to GET scheduled pod")
+framework.ExpectNoError(err, "failed to GET scheduled pod")
 // start local proxy, so we can send graceful deletion over query string, rather than body parameter
 cmd := framework.KubectlCmd("proxy", "-p", "0")
 stdout, stderr, err := framework.StartCmdAndStreamOutput(cmd)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to start up proxy")
+framework.ExpectNoError(err, "failed to start up proxy")
 defer stdout.Close()
 defer stderr.Close()
 defer framework.TryKill(cmd)
 buf := make([]byte, 128)
 var n int
 n, err = stdout.Read(buf)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to read from kubectl proxy stdout")
+framework.ExpectNoError(err, "failed to read from kubectl proxy stdout")
 output := string(buf[:n])
 proxyRegexp := regexp.MustCompile("Starting to serve on 127.0.0.1:([0-9]+)")
 match := proxyRegexp.FindStringSubmatch(output)
 gomega.Expect(len(match)).To(gomega.Equal(2))
 port, err := strconv.Atoi(match[1])
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to convert port into string")
+framework.ExpectNoError(err, "failed to convert port into string")
 endpoint := fmt.Sprintf("http://localhost:%d/api/v1/namespaces/%s/pods/%s?gracePeriodSeconds=30", port, pod.Namespace, pod.Name)
 tr := &http.Transport{
@@ -127,21 +127,21 @@ var _ = SIGDescribe("Pods Extended", func() {
 }
 client := &http.Client{Transport: tr}
 req, err := http.NewRequest("DELETE", endpoint, nil)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create http request")
+framework.ExpectNoError(err, "failed to create http request")
 ginkgo.By("deleting the pod gracefully")
 rsp, err := client.Do(req)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to use http client to send delete")
+framework.ExpectNoError(err, "failed to use http client to send delete")
 gomega.Expect(rsp.StatusCode).Should(gomega.Equal(http.StatusOK), "failed to delete gracefully by client request")
 var lastPod v1.Pod
 err = json.NewDecoder(rsp.Body).Decode(&lastPod)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to decode graceful termination proxy response")
+framework.ExpectNoError(err, "failed to decode graceful termination proxy response")
 defer rsp.Body.Close()
 ginkgo.By("verifying the kubelet observed the termination notice")
-gomega.Expect(wait.Poll(time.Second*5, time.Second*30, func() (bool, error) {
+err = wait.Poll(time.Second*5, time.Second*30, func() (bool, error) {
 podList, err := framework.GetKubeletPods(f.ClientSet, pod.Spec.NodeName)
 if err != nil {
 e2elog.Logf("Unable to retrieve kubelet pods for node %v: %v", pod.Spec.NodeName, err)
@@ -159,7 +159,8 @@ var _ = SIGDescribe("Pods Extended", func() {
 }
 e2elog.Logf("no pod exists with the name we were looking for, assuming the termination request was observed and completed")
 return true, nil
-})).NotTo(gomega.HaveOccurred(), "kubelet never observed the termination notice")
+})
+framework.ExpectNoError(err, "kubelet never observed the termination notice")
 gomega.Expect(lastPod.DeletionTimestamp).ToNot(gomega.BeNil())
 gomega.Expect(lastPod.Spec.TerminationGracePeriodSeconds).ToNot(gomega.BeZero())
@@ -167,7 +168,7 @@ var _ = SIGDescribe("Pods Extended", func() {
 selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
 options = metav1.ListOptions{LabelSelector: selector.String()}
 pods, err = podClient.List(options)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pods")
+framework.ExpectNoError(err, "failed to query for pods")
 gomega.Expect(len(pods.Items)).To(gomega.Equal(0))
 })
@@ -218,7 +219,7 @@ var _ = SIGDescribe("Pods Extended", func() {
 ginkgo.By("verifying QOS class is set on the pod")
 pod, err := podClient.Get(name, metav1.GetOptions{})
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to query for pod")
+framework.ExpectNoError(err, "failed to query for pod")
 gomega.Expect(pod.Status.QOSClass == v1.PodQOSGuaranteed)
 })
 })
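
The wait.Poll conversions in this file follow the same pattern: the poll's returned error is assigned to a variable and then asserted with a descriptive message instead of being wrapped in a gomega matcher. A minimal sketch, with an invented helper, condition function, and message:

    // Hypothetical helper; mirrors the wait.Poll conversion above.
    package node

    import (
        "time"

        "k8s.io/apimachinery/pkg/util/wait"
        "k8s.io/kubernetes/test/e2e/framework"
    )

    // waitForCondition polls cond and turns a timeout into a spec failure.
    func waitForCondition(cond func() (bool, error)) {
        // Before: gomega.Expect(wait.Poll(...)).NotTo(gomega.HaveOccurred(), msg)
        err := wait.Poll(time.Second*5, time.Second*30, cond)
        framework.ExpectNoError(err, "condition was not met before the timeout")
    }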

View File

@@ -33,7 +33,6 @@ import (
 imageutils "k8s.io/kubernetes/test/utils/image"
 "github.com/onsi/ginkgo"
-"github.com/onsi/gomega"
 )
 // State partially cloned from webserver.go
@@ -192,11 +191,11 @@ var _ = SIGDescribe("PreStop", func() {
 var err error
 pod, err = podClient.Get(pod.Name, metav1.GetOptions{})
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to GET scheduled pod")
+framework.ExpectNoError(err, "failed to GET scheduled pod")
 ginkgo.By("deleting the pod gracefully")
 err = podClient.Delete(pod.Name, metav1.NewDeleteOptions(gracefulTerminationPeriodSeconds))
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete pod")
+framework.ExpectNoError(err, "failed to delete pod")
 //wait up to graceful termination period seconds
 time.Sleep(30 * time.Second)
@@ -205,9 +204,9 @@ var _ = SIGDescribe("PreStop", func() {
 result := &v1.PodList{}
 err = wait.Poll(time.Second*5, time.Second*60, func() (bool, error) {
 client, err := framework.NodeProxyRequest(f.ClientSet, pod.Spec.NodeName, "pods", ports.KubeletPort)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get the pods of the node")
+framework.ExpectNoError(err, "failed to get the pods of the node")
 err = client.Into(result)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to parse the pods of the node")
+framework.ExpectNoError(err, "failed to parse the pods of the node")
 for _, kubeletPod := range result.Items {
 if pod.Name != kubeletPod.Name {

View File

@@ -218,7 +218,7 @@ func testPodSELinuxLabeling(f *framework.Framework, hostIPC bool, hostPID bool)
 gomega.Expect(content).To(gomega.ContainSubstring(testContent))
 foundPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 // Confirm that the file can be accessed from a second
 // pod using host_path with the same MCS label

View File

@@ -50,11 +50,11 @@ func cleanupJob(f *framework.Framework, job *batch.Job) {
 j.ObjectMeta.Finalizers = slice.RemoveString(j.ObjectMeta.Finalizers, dummyFinalizer, nil)
 }
 _, err := jobutil.UpdateJobWithRetries(c, ns, job.Name, removeFinalizerFunc)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 jobutil.WaitForJobGone(c, ns, job.Name, wait.ForeverTestTimeout)
 err = jobutil.WaitForAllJobPodsGone(c, ns, job.Name)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 }
 func testFinishedJob(f *framework.Framework) {
@@ -73,19 +73,19 @@ func testFinishedJob(f *framework.Framework) {
 e2elog.Logf("Create a Job %s/%s with TTL", ns, job.Name)
 job, err := jobutil.CreateJob(c, ns, job)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 e2elog.Logf("Wait for the Job to finish")
 err = jobutil.WaitForJobFinish(c, ns, job.Name)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 e2elog.Logf("Wait for TTL after finished controller to delete the Job")
 err = jobutil.WaitForJobDeleting(c, ns, job.Name)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 e2elog.Logf("Check Job's deletionTimestamp and compare with the time when the Job finished")
 job, err = jobutil.GetJob(c, ns, job.Name)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 finishTime := jobutil.FinishTime(job)
 finishTimeUTC := finishTime.UTC()
 gomega.Expect(finishTime.IsZero()).NotTo(gomega.BeTrue())