Merge pull request #77123 from k-toyoda-pi/use_expect_no_error_e2e_apps_1

use framework.ExpectNoError() instead of gomega.Expect(err).NotTo(gomega.HaveOccurred()) for e2e/apps
Kubernetes Prow Robot authored 2019-05-03 12:35:38 -07:00, committed by GitHub
commit 217e08d735
8 changed files with 139 additions and 142 deletions
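
The change itself is mechanical: every gomega.Expect(err).NotTo(gomega.HaveOccurred(), ...) call in these test files is replaced by framework.ExpectNoError(err, ...), keeping the same optional printf-style explanation arguments. The sketch below is not part of this commit; expectNoError and the example namespace are illustrative assumptions that only approximate what the framework helper provides, namely a nil-error assertion that attaches a formatted explanation on failure.

// Minimal, self-contained sketch of the pattern this PR applies; it assumes
// only the public gomega API. expectNoError approximates, and is not, the
// upstream framework.ExpectNoError implementation.
package main

import (
	"fmt"

	"github.com/onsi/gomega"
)

// expectNoError fails the current assertion context if err is non-nil,
// attaching the optional formatted explanation to the failure message.
func expectNoError(err error, explain ...interface{}) {
	gomega.ExpectWithOffset(1, err).NotTo(gomega.HaveOccurred(), explain...)
}

func main() {
	// gomega needs a fail handler outside of Ginkgo; panicking is enough here.
	gomega.RegisterFailHandler(func(message string, callerSkip ...int) {
		panic(message)
	})

	ns := "e2e-tests-example" // hypothetical namespace, for illustration only
	var err error             // nil, so the assertion below passes

	// Old style, seen on the first line of each changed pair in this diff:
	//   gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to create CronJob in namespace %s", ns)
	// New style, introduced by this PR:
	expectNoError(err, "Failed to create CronJob in namespace %s", ns)

	fmt.Println("assertion passed for namespace", ns)
}

Funneling error assertions through a single framework helper means later changes to failure reporting (caller offsets, log capture, and so on) can be made in one place instead of in every test file.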


@ -61,21 +61,21 @@ var _ = SIGDescribe("CronJob", func() {
cronJob := newTestCronJob("concurrent", "*/1 * * * ?", batchv1beta1.AllowConcurrent,
sleepCommand, nil)
cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to create CronJob in namespace %s", f.Namespace.Name)
framework.ExpectNoError(err, "Failed to create CronJob in namespace %s", f.Namespace.Name)
ginkgo.By("Ensuring more than one job is running at a time")
err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, 2)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to wait for active jobs in CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
framework.ExpectNoError(err, "Failed to wait for active jobs in CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
ginkgo.By("Ensuring at least two running jobs exists by listing jobs explicitly")
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to list the CronJobs in namespace %s", f.Namespace.Name)
framework.ExpectNoError(err, "Failed to list the CronJobs in namespace %s", f.Namespace.Name)
activeJobs, _ := filterActiveJobs(jobs)
gomega.Expect(len(activeJobs) >= 2).To(gomega.BeTrue())
ginkgo.By("Removing cronjob")
err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
framework.ExpectNoError(err, "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
})
// suspended should not schedule jobs
@ -86,7 +86,7 @@ var _ = SIGDescribe("CronJob", func() {
t := true
cronJob.Spec.Suspend = &t
cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to create CronJob in namespace %s", f.Namespace.Name)
framework.ExpectNoError(err, "Failed to create CronJob in namespace %s", f.Namespace.Name)
ginkgo.By("Ensuring no jobs are scheduled")
err = waitForNoJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, false)
@ -94,12 +94,12 @@ var _ = SIGDescribe("CronJob", func() {
ginkgo.By("Ensuring no job exists by listing jobs explicitly")
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to list the CronJobs in namespace %s", f.Namespace.Name)
framework.ExpectNoError(err, "Failed to list the CronJobs in namespace %s", f.Namespace.Name)
gomega.Expect(jobs.Items).To(gomega.HaveLen(0))
ginkgo.By("Removing cronjob")
err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
framework.ExpectNoError(err, "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
})
// only single active job is allowed for ForbidConcurrent
@ -108,20 +108,20 @@ var _ = SIGDescribe("CronJob", func() {
cronJob := newTestCronJob("forbid", "*/1 * * * ?", batchv1beta1.ForbidConcurrent,
sleepCommand, nil)
cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to create CronJob in namespace %s", f.Namespace.Name)
framework.ExpectNoError(err, "Failed to create CronJob in namespace %s", f.Namespace.Name)
ginkgo.By("Ensuring a job is scheduled")
err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, 1)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to schedule CronJob %s", cronJob.Name)
framework.ExpectNoError(err, "Failed to schedule CronJob %s", cronJob.Name)
ginkgo.By("Ensuring exactly one is scheduled")
cronJob, err = getCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to get CronJob %s", cronJob.Name)
framework.ExpectNoError(err, "Failed to get CronJob %s", cronJob.Name)
gomega.Expect(cronJob.Status.Active).Should(gomega.HaveLen(1))
ginkgo.By("Ensuring exactly one running job exists by listing jobs explicitly")
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to list the CronJobs in namespace %s", f.Namespace.Name)
framework.ExpectNoError(err, "Failed to list the CronJobs in namespace %s", f.Namespace.Name)
activeJobs, _ := filterActiveJobs(jobs)
gomega.Expect(activeJobs).To(gomega.HaveLen(1))
@ -131,7 +131,7 @@ var _ = SIGDescribe("CronJob", func() {
ginkgo.By("Removing cronjob")
err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
framework.ExpectNoError(err, "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
})
// only single active job is allowed for ReplaceConcurrent
@ -140,30 +140,30 @@ var _ = SIGDescribe("CronJob", func() {
cronJob := newTestCronJob("replace", "*/1 * * * ?", batchv1beta1.ReplaceConcurrent,
sleepCommand, nil)
cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to create CronJob in namespace %s", f.Namespace.Name)
framework.ExpectNoError(err, "Failed to create CronJob in namespace %s", f.Namespace.Name)
ginkgo.By("Ensuring a job is scheduled")
err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, 1)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to schedule CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
framework.ExpectNoError(err, "Failed to schedule CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
ginkgo.By("Ensuring exactly one is scheduled")
cronJob, err = getCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to get CronJob %s", cronJob.Name)
framework.ExpectNoError(err, "Failed to get CronJob %s", cronJob.Name)
gomega.Expect(cronJob.Status.Active).Should(gomega.HaveLen(1))
ginkgo.By("Ensuring exactly one running job exists by listing jobs explicitly")
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to list the jobs in namespace %s", f.Namespace.Name)
framework.ExpectNoError(err, "Failed to list the jobs in namespace %s", f.Namespace.Name)
activeJobs, _ := filterActiveJobs(jobs)
gomega.Expect(activeJobs).To(gomega.HaveLen(1))
ginkgo.By("Ensuring the job is replaced with a new one")
err = waitForJobReplaced(f.ClientSet, f.Namespace.Name, jobs.Items[0].Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to replace CronJob %s in namespace %s", jobs.Items[0].Name, f.Namespace.Name)
framework.ExpectNoError(err, "Failed to replace CronJob %s in namespace %s", jobs.Items[0].Name, f.Namespace.Name)
ginkgo.By("Removing cronjob")
err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
framework.ExpectNoError(err, "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
})
// shouldn't give us unexpected warnings
@ -172,13 +172,13 @@ var _ = SIGDescribe("CronJob", func() {
cronJob := newTestCronJob("concurrent", "*/1 * * * ?", batchv1beta1.AllowConcurrent,
nil, nil)
cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to create CronJob in namespace %s", f.Namespace.Name)
framework.ExpectNoError(err, "Failed to create CronJob in namespace %s", f.Namespace.Name)
ginkgo.By("Ensuring at least two jobs and at least one finished job exists by listing jobs explicitly")
err = waitForJobsAtLeast(f.ClientSet, f.Namespace.Name, 2)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to ensure at least two job exists in namespace %s", f.Namespace.Name)
framework.ExpectNoError(err, "Failed to ensure at least two job exists in namespace %s", f.Namespace.Name)
err = waitForAnyFinishedJob(f.ClientSet, f.Namespace.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to ensure at least on finished job exists in namespace %s", f.Namespace.Name)
framework.ExpectNoError(err, "Failed to ensure at least on finished job exists in namespace %s", f.Namespace.Name)
ginkgo.By("Ensuring no unexpected event has happened")
err = waitForEventWithReason(f.ClientSet, f.Namespace.Name, cronJob.Name, []string{"MissingJob", "UnexpectedJob"})
@ -186,7 +186,7 @@ var _ = SIGDescribe("CronJob", func() {
ginkgo.By("Removing cronjob")
err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
framework.ExpectNoError(err, "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
})
// deleted jobs should be removed from the active list
@ -195,15 +195,15 @@ var _ = SIGDescribe("CronJob", func() {
cronJob := newTestCronJob("forbid", "*/1 * * * ?", batchv1beta1.ForbidConcurrent,
sleepCommand, nil)
cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to create CronJob in namespace %s", f.Namespace.Name)
framework.ExpectNoError(err, "Failed to create CronJob in namespace %s", f.Namespace.Name)
ginkgo.By("Ensuring a job is scheduled")
err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, 1)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to ensure a %s cronjob is scheduled in namespace %s", cronJob.Name, f.Namespace.Name)
framework.ExpectNoError(err, "Failed to ensure a %s cronjob is scheduled in namespace %s", cronJob.Name, f.Namespace.Name)
ginkgo.By("Ensuring exactly one is scheduled")
cronJob, err = getCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to ensure exactly one %s cronjob is scheduled in namespace %s", cronJob.Name, f.Namespace.Name)
framework.ExpectNoError(err, "Failed to ensure exactly one %s cronjob is scheduled in namespace %s", cronJob.Name, f.Namespace.Name)
gomega.Expect(cronJob.Status.Active).Should(gomega.HaveLen(1))
ginkgo.By("Deleting the job")
@ -217,15 +217,15 @@ var _ = SIGDescribe("CronJob", func() {
ginkgo.By("Ensuring the job is not in the cronjob active list")
err = waitForJobNotActive(f.ClientSet, f.Namespace.Name, cronJob.Name, job.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to ensure the %s cronjob is not in active list in namespace %s", cronJob.Name, f.Namespace.Name)
framework.ExpectNoError(err, "Failed to ensure the %s cronjob is not in active list in namespace %s", cronJob.Name, f.Namespace.Name)
ginkgo.By("Ensuring MissingJob event has occurred")
err = waitForEventWithReason(f.ClientSet, f.Namespace.Name, cronJob.Name, []string{"MissingJob"})
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to ensure missing job event has occurred for %s cronjob in namespace %s", cronJob.Name, f.Namespace.Name)
framework.ExpectNoError(err, "Failed to ensure missing job event has occurred for %s cronjob in namespace %s", cronJob.Name, f.Namespace.Name)
ginkgo.By("Removing cronjob")
err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to remove %s cronjob in namespace %s", cronJob.Name, f.Namespace.Name)
framework.ExpectNoError(err, "Failed to remove %s cronjob in namespace %s", cronJob.Name, f.Namespace.Name)
})
// cleanup of successful finished jobs, with limit of one successful job
@ -235,37 +235,37 @@ var _ = SIGDescribe("CronJob", func() {
cronJob := newTestCronJob("concurrent-limit", "*/1 * * * ?", batchv1beta1.AllowConcurrent,
successCommand, &successLimit)
cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to create allowconcurrent cronjob with custom history limits in namespace %s", f.Namespace.Name)
framework.ExpectNoError(err, "Failed to create allowconcurrent cronjob with custom history limits in namespace %s", f.Namespace.Name)
// Job is going to complete instantly: do not check for an active job
// as we are most likely to miss it
ginkgo.By("Ensuring a finished job exists")
err = waitForAnyFinishedJob(f.ClientSet, f.Namespace.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to ensure a finished cronjob exists in namespace %s", f.Namespace.Name)
framework.ExpectNoError(err, "Failed to ensure a finished cronjob exists in namespace %s", f.Namespace.Name)
ginkgo.By("Ensuring a finished job exists by listing jobs explicitly")
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to ensure a finished cronjob exists by listing jobs explicitly in namespace %s", f.Namespace.Name)
framework.ExpectNoError(err, "Failed to ensure a finished cronjob exists by listing jobs explicitly in namespace %s", f.Namespace.Name)
_, finishedJobs := filterActiveJobs(jobs)
gomega.Expect(len(finishedJobs) == 1).To(gomega.BeTrue())
// Job should get deleted when the next job finishes the next minute
ginkgo.By("Ensuring this job and its pods does not exist anymore")
err = waitForJobToDisappear(f.ClientSet, f.Namespace.Name, finishedJobs[0])
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to ensure that job does not exists anymore in namespace %s", f.Namespace.Name)
framework.ExpectNoError(err, "Failed to ensure that job does not exists anymore in namespace %s", f.Namespace.Name)
err = waitForJobsPodToDisappear(f.ClientSet, f.Namespace.Name, finishedJobs[0])
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to ensure that pods for job does not exists anymore in namespace %s", f.Namespace.Name)
framework.ExpectNoError(err, "Failed to ensure that pods for job does not exists anymore in namespace %s", f.Namespace.Name)
ginkgo.By("Ensuring there is 1 finished job by listing jobs explicitly")
jobs, err = f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to ensure there is one finished job by listing job explicitly in namespace %s", f.Namespace.Name)
framework.ExpectNoError(err, "Failed to ensure there is one finished job by listing job explicitly in namespace %s", f.Namespace.Name)
_, finishedJobs = filterActiveJobs(jobs)
gomega.Expect(len(finishedJobs) == 1).To(gomega.BeTrue())
ginkgo.By("Removing cronjob")
err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to remove the %s cronjob in namespace %s", cronJob.Name, f.Namespace.Name)
framework.ExpectNoError(err, "Failed to remove the %s cronjob in namespace %s", cronJob.Name, f.Namespace.Name)
})
})


@ -37,7 +37,6 @@ import (
imageutils "k8s.io/kubernetes/test/utils/image"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
// This test primarily checks 2 things:
@ -115,7 +114,7 @@ func (r *RestartDaemonConfig) waitUp() {
func (r *RestartDaemonConfig) kill() {
framework.Logf("Killing %v", r)
_, err := framework.NodeExec(r.nodeName, fmt.Sprintf("pgrep %v | xargs -I {} sudo kill {}", r.daemonName))
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
}
// Restart checks if the daemon is up, kills it, and waits till it comes back up
@ -208,7 +207,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
Replicas: numPods,
CreatedPods: &[]*v1.Pod{},
}
gomega.Expect(framework.RunRC(config)).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(framework.RunRC(config))
replacePods(*config.CreatedPods, existingPods)
stopCh = make(chan struct{})


@ -72,7 +72,7 @@ var _ = SIGDescribe("DisruptionController", func() {
}
return pdb.Status.PodDisruptionsAllowed > 0, nil
})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
})
evictionCases := []struct {
@ -179,7 +179,7 @@ var _ = SIGDescribe("DisruptionController", func() {
return false, nil
})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
e := &policy.Eviction{
ObjectMeta: metav1.ObjectMeta{
@ -210,7 +210,7 @@ var _ = SIGDescribe("DisruptionController", func() {
}
return true, nil
})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
}
})
}
@ -228,7 +228,7 @@ func createPDBMinAvailableOrDie(cs kubernetes.Interface, ns string, minAvailable
},
}
_, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Create(&pdb)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
}
func createPDBMaxUnavailableOrDie(cs kubernetes.Interface, ns string, maxUnavailable intstr.IntOrString) {
@ -243,7 +243,7 @@ func createPDBMaxUnavailableOrDie(cs kubernetes.Interface, ns string, maxUnavail
},
}
_, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Create(&pdb)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
}
func createPodsOrDie(cs kubernetes.Interface, ns string, n int) {


@ -42,11 +42,11 @@ var _ = SIGDescribe("Job", func() {
ginkgo.By("Creating a job")
job := jobutil.NewTestJob("succeed", "all-succeed", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
job, err := jobutil.CreateJob(f.ClientSet, f.Namespace.Name, job)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create job in namespace: %s", f.Namespace.Name)
framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
ginkgo.By("Ensuring job reaches completions")
err = jobutil.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, completions)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to ensure job completion in namespace: %s", f.Namespace.Name)
framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name)
})
// Pods sometimes fail, but eventually succeed.
@ -61,11 +61,11 @@ var _ = SIGDescribe("Job", func() {
// test timeout.
job := jobutil.NewTestJob("failOnce", "fail-once-local", v1.RestartPolicyOnFailure, parallelism, completions, nil, backoffLimit)
job, err := jobutil.CreateJob(f.ClientSet, f.Namespace.Name, job)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create job in namespace: %s", f.Namespace.Name)
framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
ginkgo.By("Ensuring job reaches completions")
err = jobutil.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, completions)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to ensure job completion in namespace: %s", f.Namespace.Name)
framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name)
})
// Pods sometimes fail, but eventually succeed, after pod restarts
@ -82,11 +82,11 @@ var _ = SIGDescribe("Job", func() {
// test less flaky, for now.
job := jobutil.NewTestJob("randomlySucceedOrFail", "rand-non-local", v1.RestartPolicyNever, parallelism, 3, nil, 999)
job, err := jobutil.CreateJob(f.ClientSet, f.Namespace.Name, job)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create job in namespace: %s", f.Namespace.Name)
framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
ginkgo.By("Ensuring job reaches completions")
err = jobutil.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, *job.Spec.Completions)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to ensure job completion in namespace: %s", f.Namespace.Name)
framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name)
})
ginkgo.It("should exceed active deadline", func() {
@ -94,10 +94,10 @@ var _ = SIGDescribe("Job", func() {
var activeDeadlineSeconds int64 = 1
job := jobutil.NewTestJob("notTerminate", "exceed-active-deadline", v1.RestartPolicyNever, parallelism, completions, &activeDeadlineSeconds, backoffLimit)
job, err := jobutil.CreateJob(f.ClientSet, f.Namespace.Name, job)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create job in namespace: %s", f.Namespace.Name)
framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
ginkgo.By("Ensuring job past active deadline")
err = jobutil.WaitForJobFailure(f.ClientSet, f.Namespace.Name, job.Name, time.Duration(activeDeadlineSeconds+10)*time.Second, "DeadlineExceeded")
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to ensure job past active deadline in namespace: %s", f.Namespace.Name)
framework.ExpectNoError(err, "failed to ensure job past active deadline in namespace: %s", f.Namespace.Name)
})
/*
@ -109,11 +109,11 @@ var _ = SIGDescribe("Job", func() {
ginkgo.By("Creating a job")
job := jobutil.NewTestJob("notTerminate", "foo", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
job, err := jobutil.CreateJob(f.ClientSet, f.Namespace.Name, job)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create job in namespace: %s", f.Namespace.Name)
framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
ginkgo.By("Ensuring active pods == parallelism")
err = jobutil.WaitForAllJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, parallelism)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to ensure active pods == parallelism in namespace: %s", f.Namespace.Name)
framework.ExpectNoError(err, "failed to ensure active pods == parallelism in namespace: %s", f.Namespace.Name)
ginkgo.By("delete a job")
framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(f.ClientSet, batchinternal.Kind("Job"), f.Namespace.Name, job.Name))
@ -131,16 +131,16 @@ var _ = SIGDescribe("Job", func() {
// Save Kind since it won't be populated in the returned job.
kind := job.Kind
job, err := jobutil.CreateJob(f.ClientSet, f.Namespace.Name, job)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create job in namespace: %s", f.Namespace.Name)
framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
job.Kind = kind
ginkgo.By("Ensuring active pods == parallelism")
err = jobutil.WaitForAllJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, parallelism)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to ensure active pods == parallelism in namespace: %s", f.Namespace.Name)
framework.ExpectNoError(err, "failed to ensure active pods == parallelism in namespace: %s", f.Namespace.Name)
ginkgo.By("Orphaning one of the Job's Pods")
pods, err := jobutil.GetJobPods(f.ClientSet, f.Namespace.Name, job.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get PodList for job %s in namespace: %s", job.Name, f.Namespace.Name)
framework.ExpectNoError(err, "failed to get PodList for job %s in namespace: %s", job.Name, f.Namespace.Name)
gomega.Expect(pods.Items).To(gomega.HaveLen(int(parallelism)))
pod := pods.Items[0]
f.PodClient().Update(pod.Name, func(pod *v1.Pod) {
@ -183,15 +183,15 @@ var _ = SIGDescribe("Job", func() {
backoff := 1
job := jobutil.NewTestJob("fail", "backofflimit", v1.RestartPolicyNever, 1, 1, nil, int32(backoff))
job, err := jobutil.CreateJob(f.ClientSet, f.Namespace.Name, job)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create job in namespace: %s", f.Namespace.Name)
framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
ginkgo.By("Ensuring job exceed backofflimit")
err = jobutil.WaitForJobFailure(f.ClientSet, f.Namespace.Name, job.Name, jobutil.JobTimeout, "BackoffLimitExceeded")
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to ensure job exceed backofflimit in namespace: %s", f.Namespace.Name)
framework.ExpectNoError(err, "failed to ensure job exceed backofflimit in namespace: %s", f.Namespace.Name)
ginkgo.By(fmt.Sprintf("Checking that %d pod created and status is failed", backoff+1))
pods, err := jobutil.GetJobPods(f.ClientSet, f.Namespace.Name, job.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get PodList for job %s in namespace: %s", job.Name, f.Namespace.Name)
framework.ExpectNoError(err, "failed to get PodList for job %s in namespace: %s", job.Name, f.Namespace.Name)
// gomega.Expect(pods.Items).To(gomega.HaveLen(backoff + 1))
// due to NumRequeus not being stable enough, especially with failed status
// updates we need to allow more than backoff+1


@ -112,7 +112,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
c = f.ClientSet
ns = f.Namespace.Name
_, err := framework.GetPodsInNamespace(c, ns, map[string]string{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
// TODO(foxish): Re-enable testing on gce after kubernetes#56787 is fixed.
framework.SkipUnlessProviderIs("gke", "aws")
@ -139,7 +139,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
var podOpts metav1.ListOptions
nodeOpts := metav1.ListOptions{}
nodes, err := c.CoreV1().Nodes().List(nodeOpts)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
framework.FilterNodes(nodes, func(node v1.Node) bool {
if !framework.IsNodeConditionSetAsExpected(&node, v1.NodeReady, true) {
return false
@ -243,17 +243,17 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
replicas := int32(numNodes)
common.NewRCByName(c, ns, name, replicas, nil)
err = framework.VerifyPods(c, ns, name, true, replicas)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Each pod should start running and responding")
framework.ExpectNoError(err, "Each pod should start running and responding")
ginkgo.By("choose a node with at least one pod - we will block some network traffic on this node")
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
options := metav1.ListOptions{LabelSelector: label.String()}
pods, err := c.CoreV1().Pods(ns).List(options) // list pods after all have been scheduled
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
nodeName := pods.Items[0].Spec.NodeName
node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
// This creates a temporary network partition, verifies that 'podNameToDisappear',
// that belongs to replication controller 'rcName', really disappeared (because its
@ -264,11 +264,11 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
framework.TestUnderTemporaryNetworkFailure(c, ns, node, func() {
framework.Logf("Waiting for pod %s to be removed", pods.Items[0].Name)
err := framework.WaitForRCPodToDisappear(c, ns, name, pods.Items[0].Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
ginkgo.By("verifying whether the pod from the unreachable node is recreated")
err = framework.VerifyPods(c, ns, name, true, replicas)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
})
framework.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
@ -284,14 +284,14 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
// since we have no guarantees the pod will be scheduled on our node.
additionalPod := "additionalpod"
err = newPodOnNode(c, ns, additionalPod, node.Name)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
err = framework.VerifyPods(c, ns, additionalPod, true, 1)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
// verify that it is really on the requested node
{
pod, err := c.CoreV1().Pods(ns).Get(additionalPod, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
if pod.Spec.NodeName != node.Name {
framework.Logf("Pod %s found on invalid node: %s instead of %s", pod.Name, pod.Spec.NodeName, node.Name)
}
@ -310,17 +310,17 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
replicas := int32(numNodes)
common.NewRCByName(c, ns, name, replicas, &gracePeriod)
err = framework.VerifyPods(c, ns, name, true, replicas)
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Each pod should start running and responding")
framework.ExpectNoError(err, "Each pod should start running and responding")
ginkgo.By("choose a node with at least one pod - we will block some network traffic on this node")
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
options := metav1.ListOptions{LabelSelector: label.String()}
pods, err := c.CoreV1().Pods(ns).List(options) // list pods after all have been scheduled
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
nodeName := pods.Items[0].Spec.NodeName
node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
// This creates a temporary network partition, verifies that 'podNameToDisappear',
// that belongs to replication controller 'rcName', did not disappear (because its
@ -335,7 +335,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
ginkgo.By(fmt.Sprintf("verifying that there are %v running pods during partition", replicas))
_, err = framework.PodsCreated(c, ns, name, replicas)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
})
framework.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
@ -376,7 +376,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
podMounts := []v1.VolumeMount{{Name: "home", MountPath: "/home"}}
ps := framework.NewStatefulSet(psName, ns, headlessSvcName, 3, petMounts, podMounts, labels)
_, err := c.AppsV1().StatefulSets(ns).Create(ps)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
pst := framework.NewStatefulSetTester(c)
@ -393,7 +393,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
ginkgo.It("should not reschedule stateful pods if there is a network partition [Slow] [Disruptive]", func() {
ps := framework.NewStatefulSet(psName, ns, headlessSvcName, 3, []v1.VolumeMount{}, []v1.VolumeMount{}, labels)
_, err := c.AppsV1().StatefulSets(ns).Create(ps)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
pst := framework.NewStatefulSetTester(c)
pst.WaitForRunningAndReady(*ps.Spec.Replicas, ps)
@ -430,21 +430,21 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
job := jobutil.NewTestJob("notTerminate", "network-partition", v1.RestartPolicyNever,
parallelism, completions, nil, backoffLimit)
job, err := jobutil.CreateJob(f.ClientSet, f.Namespace.Name, job)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
label := labels.SelectorFromSet(labels.Set(map[string]string{jobutil.JobSelectorKey: job.Name}))
ginkgo.By(fmt.Sprintf("verifying that there are now %v running pods", parallelism))
_, err = framework.PodsCreatedByLabel(c, ns, job.Name, parallelism, label)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
ginkgo.By("choose a node with at least one pod - we will block some network traffic on this node")
options := metav1.ListOptions{LabelSelector: label.String()}
pods, err := c.CoreV1().Pods(ns).List(options) // list pods after all have been scheduled
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
nodeName := pods.Items[0].Spec.NodeName
node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
// This creates a temporary network partition, verifies that the job has 'parallelism' number of
// running pods after the node-controller detects node unreachable.
@ -456,7 +456,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
ginkgo.By(fmt.Sprintf("verifying that there are now %v running pods", parallelism))
_, err = framework.PodsCreatedByLabel(c, ns, job.Name, parallelism, label)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
})
framework.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)


@ -32,7 +32,6 @@ import (
imageutils "k8s.io/kubernetes/test/utils/image"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
var _ = SIGDescribe("ReplicationController", func() {
@ -125,12 +124,12 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri
newRC := newRC(name, replicas, map[string]string{"name": name}, name, image)
newRC.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{{ContainerPort: 9376}}
_, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(newRC)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
// Check that pods for the new RC were created.
// TODO: Maybe switch PodsCreated to just check owner references.
pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicas)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
// Wait for the pods to enter the running state. Waiting loops until the pods
// are running so non-running pods cause a timeout for this test.
@ -149,14 +148,14 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri
err = fmt.Errorf("Pod %q never run: %v", pod.Name, err)
}
}
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
framework.Logf("Pod %q is running (conditions: %+v)", pod.Name, pod.Status.Conditions)
running++
}
// Sanity check
if running != replicas {
gomega.Expect(fmt.Errorf("unexpected number of running pods: %+v", pods.Items)).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(fmt.Errorf("unexpected number of running pods: %+v", pods.Items))
}
// Verify that something is listening.
@ -182,7 +181,7 @@ func testReplicationControllerConditionCheck(f *framework.Framework) {
framework.Logf("Creating quota %q that allows only two pods to run in the current namespace", name)
quota := newPodQuota(name, "2")
_, err := c.CoreV1().ResourceQuotas(namespace).Create(quota)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
quota, err = c.CoreV1().ResourceQuotas(namespace).Get(name, metav1.GetOptions{})
@ -196,12 +195,12 @@ func testReplicationControllerConditionCheck(f *framework.Framework) {
if err == wait.ErrWaitTimeout {
err = fmt.Errorf("resource quota %q never synced", name)
}
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("Creating rc %q that asks for more than the allowed pod quota", name))
rc := newRC(name, 3, map[string]string{"name": name}, NginxImageName, NginxImage)
rc, err = c.CoreV1().ReplicationControllers(namespace).Create(rc)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("Checking rc %q has the desired failure condition set", name))
generation := rc.Generation
@ -223,14 +222,14 @@ func testReplicationControllerConditionCheck(f *framework.Framework) {
if err == wait.ErrWaitTimeout {
err = fmt.Errorf("rc manager never added the failure condition for rc %q: %#v", name, conditions)
}
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("Scaling down rc %q to satisfy pod quota", name))
rc, err = framework.UpdateReplicationControllerWithRetries(c, namespace, name, func(update *v1.ReplicationController) {
x := int32(2)
update.Spec.Replicas = &x
})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("Checking rc %q has no failure condition set", name))
generation = rc.Generation
@ -252,7 +251,7 @@ func testReplicationControllerConditionCheck(f *framework.Framework) {
if err == wait.ErrWaitTimeout {
err = fmt.Errorf("rc manager never removed the failure condition for rc %q: %#v", name, conditions)
}
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
}
func testRCAdoptMatchingOrphans(f *framework.Framework) {
@ -280,7 +279,7 @@ func testRCAdoptMatchingOrphans(f *framework.Framework) {
rcSt := newRC(name, replicas, map[string]string{"name": name}, name, NginxImage)
rcSt.Spec.Selector = map[string]string{"name": name}
rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(rcSt)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
ginkgo.By("Then the orphan pod is adopted")
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
@ -289,7 +288,7 @@ func testRCAdoptMatchingOrphans(f *framework.Framework) {
if errors.IsNotFound(err) {
return true, nil
}
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
for _, owner := range p2.OwnerReferences {
if *owner.Controller && owner.UID == rc.UID {
// pod adopted
@ -299,7 +298,7 @@ func testRCAdoptMatchingOrphans(f *framework.Framework) {
// pod still not adopted
return false, nil
})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
}
func testRCReleaseControlledNotMatching(f *framework.Framework) {
@ -309,16 +308,16 @@ func testRCReleaseControlledNotMatching(f *framework.Framework) {
rcSt := newRC(name, replicas, map[string]string{"name": name}, name, NginxImage)
rcSt.Spec.Selector = map[string]string{"name": name}
rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(rcSt)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
ginkgo.By("When the matched label of one of its pods change")
pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, rc.Name, replicas)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
p := pods.Items[0]
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
pod.Labels = map[string]string{"name": "not-matching-name"}
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(pod)
@ -330,12 +329,12 @@ func testRCReleaseControlledNotMatching(f *framework.Framework) {
}
return true, nil
})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
ginkgo.By("Then the pod is released")
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
for _, owner := range p2.OwnerReferences {
if *owner.Controller && owner.UID == rc.UID {
// pod still belonging to the replication controller
@ -345,5 +344,5 @@ func testRCReleaseControlledNotMatching(f *framework.Framework) {
// pod already released
return true, nil
})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
}


@ -33,7 +33,6 @@ import (
replicasetutil "k8s.io/kubernetes/test/e2e/framework/replicaset"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@ -127,12 +126,12 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s
newRS := newRS(name, replicas, map[string]string{"name": name}, name, image)
newRS.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{{ContainerPort: 9376}}
_, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(newRS)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
// Check that pods for the new RS were created.
// TODO: Maybe switch PodsCreated to just check owner references.
pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicas)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
// Wait for the pods to enter the running state. Waiting loops until the pods
// are running so non-running pods cause a timeout for this test.
@ -151,14 +150,14 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s
err = fmt.Errorf("Pod %q never run: %v", pod.Name, err)
}
}
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
framework.Logf("Pod %q is running (conditions: %+v)", pod.Name, pod.Status.Conditions)
running++
}
// Sanity check
if running != replicas {
gomega.Expect(fmt.Errorf("unexpected number of running pods: %+v", pods.Items)).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(fmt.Errorf("unexpected number of running pods: %+v", pods.Items))
}
// Verify that something is listening.
@ -184,7 +183,7 @@ func testReplicaSetConditionCheck(f *framework.Framework) {
ginkgo.By(fmt.Sprintf("Creating quota %q that allows only two pods to run in the current namespace", name))
quota := newPodQuota(name, "2")
_, err := c.CoreV1().ResourceQuotas(namespace).Create(quota)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
quota, err = c.CoreV1().ResourceQuotas(namespace).Get(name, metav1.GetOptions{})
@ -198,12 +197,12 @@ func testReplicaSetConditionCheck(f *framework.Framework) {
if err == wait.ErrWaitTimeout {
err = fmt.Errorf("resource quota %q never synced", name)
}
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("Creating replica set %q that asks for more than the allowed pod quota", name))
rs := newRS(name, 3, map[string]string{"name": name}, NginxImageName, NginxImage)
rs, err = c.AppsV1().ReplicaSets(namespace).Create(rs)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("Checking replica set %q has the desired failure condition set", name))
generation := rs.Generation
@ -226,14 +225,14 @@ func testReplicaSetConditionCheck(f *framework.Framework) {
if err == wait.ErrWaitTimeout {
err = fmt.Errorf("rs controller never added the failure condition for replica set %q: %#v", name, conditions)
}
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("Scaling down replica set %q to satisfy pod quota", name))
rs, err = replicasetutil.UpdateReplicaSetWithRetries(c, namespace, name, func(update *apps.ReplicaSet) {
x := int32(2)
update.Spec.Replicas = &x
})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("Checking replica set %q has no failure condition set", name))
generation = rs.Generation
@ -255,7 +254,7 @@ func testReplicaSetConditionCheck(f *framework.Framework) {
if err == wait.ErrWaitTimeout {
err = fmt.Errorf("rs controller never removed the failure condition for rs %q: %#v", name, conditions)
}
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
}
func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) {
@ -283,7 +282,7 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) {
rsSt := newRS(name, replicas, map[string]string{"name": name}, name, NginxImage)
rsSt.Spec.Selector = &metav1.LabelSelector{MatchLabels: map[string]string{"name": name}}
rs, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(rsSt)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
ginkgo.By("Then the orphan pod is adopted")
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
@ -292,7 +291,7 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) {
if errors.IsNotFound(err) {
return true, nil
}
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
for _, owner := range p2.OwnerReferences {
if *owner.Controller && owner.UID == rs.UID {
// pod adopted
@ -302,16 +301,16 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) {
// pod still not adopted
return false, nil
})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
ginkgo.By("When the matched label of one of its pods change")
pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, rs.Name, replicas)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
p = &pods.Items[0]
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
pod.Labels = map[string]string{"name": "not-matching-name"}
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(pod)
@ -323,12 +322,12 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) {
}
return true, nil
})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
ginkgo.By("Then the pod is released")
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
for _, owner := range p2.OwnerReferences {
if *owner.Controller && owner.UID == rs.UID {
// pod still belonging to the replicaset
@ -338,5 +337,5 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) {
// pod already released
return true, nil
})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
}


@ -79,7 +79,7 @@ var _ = SIGDescribe("StatefulSet", func() {
ginkgo.By("Creating service " + headlessSvcName + " in namespace " + ns)
headlessService := framework.CreateServiceSpec(headlessSvcName, "", true, labels)
_, err := c.CoreV1().Services(ns).Create(headlessService)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
})
ginkgo.AfterEach(func() {
@ -100,7 +100,7 @@ var _ = SIGDescribe("StatefulSet", func() {
sst.PauseNewPods(ss)
_, err := c.AppsV1().StatefulSets(ns).Create(ss)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
ginkgo.By("Saturating stateful set " + ss.Name)
sst.Saturate(ss)
@ -143,7 +143,7 @@ var _ = SIGDescribe("StatefulSet", func() {
// Save Kind since it won't be populated in the returned ss.
kind := ss.Kind
ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
ss.Kind = kind
ginkgo.By("Saturating stateful set " + ss.Name)
@ -226,7 +226,7 @@ var _ = SIGDescribe("StatefulSet", func() {
sst.PauseNewPods(ss)
_, err := c.AppsV1().StatefulSets(ns).Create(ss)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
sst.WaitForRunning(1, 0, ss)
@ -294,7 +294,7 @@ var _ = SIGDescribe("StatefulSet", func() {
}(),
}
ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
ss = sst.WaitForStatus(ss)
currentRevision, updateRevision := ss.Status.CurrentRevision, ss.Status.UpdateRevision
@ -318,7 +318,7 @@ var _ = SIGDescribe("StatefulSet", func() {
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *apps.StatefulSet) {
update.Spec.Template.Spec.Containers[0].Image = newImage
})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
ginkgo.By("Creating a new revision")
ss = sst.WaitForStatus(ss)
@ -365,7 +365,7 @@ var _ = SIGDescribe("StatefulSet", func() {
}(),
}
})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
ss, pods = sst.WaitForPartitionedRollingUpdate(ss)
for i := range pods.Items {
if i < int(*ss.Spec.UpdateStrategy.RollingUpdate.Partition) {
@ -446,7 +446,7 @@ var _ = SIGDescribe("StatefulSet", func() {
}(),
}
})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
ss, pods = sst.WaitForPartitionedRollingUpdate(ss)
for i := range pods.Items {
if i < int(*ss.Spec.UpdateStrategy.RollingUpdate.Partition) {
@ -498,7 +498,7 @@ var _ = SIGDescribe("StatefulSet", func() {
Type: apps.OnDeleteStatefulSetStrategyType,
}
ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
ss = sst.WaitForStatus(ss)
currentRevision, updateRevision := ss.Status.CurrentRevision, ss.Status.UpdateRevision
@ -538,7 +538,7 @@ var _ = SIGDescribe("StatefulSet", func() {
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *apps.StatefulSet) {
update.Spec.Template.Spec.Containers[0].Image = newImage
})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
ginkgo.By("Creating a new revision")
ss = sst.WaitForStatus(ss)
@ -580,14 +580,14 @@ var _ = SIGDescribe("StatefulSet", func() {
watcher, err := f.ClientSet.CoreV1().Pods(ns).Watch(metav1.ListOptions{
LabelSelector: psLabels.AsSelector().String(),
})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
ginkgo.By("Creating stateful set " + ssName + " in namespace " + ns)
ss := framework.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, psLabels)
sst := framework.NewStatefulSetTester(c)
sst.SetHTTPProbe(ss)
ss, err = c.AppsV1().StatefulSets(ns).Create(ss)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
ginkgo.By("Waiting until all stateful set " + ssName + " replicas will be running in namespace " + ns)
sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
@ -618,13 +618,13 @@ var _ = SIGDescribe("StatefulSet", func() {
return len(expectedOrder) == 0, nil
})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
ginkgo.By("Scale down will halt with unhealthy stateful pod")
watcher, err = f.ClientSet.CoreV1().Pods(ns).Watch(metav1.ListOptions{
LabelSelector: psLabels.AsSelector().String(),
})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
sst.BreakHTTPProbe(ss)
sst.WaitForStatusReadyReplicas(ss, 0)
@ -651,7 +651,7 @@ var _ = SIGDescribe("StatefulSet", func() {
return len(expectedOrder) == 0, nil
})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
})
/*
@ -668,7 +668,7 @@ var _ = SIGDescribe("StatefulSet", func() {
sst := framework.NewStatefulSetTester(c)
sst.SetHTTPProbe(ss)
ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
ginkgo.By("Waiting until all stateful set " + ssName + " replicas will be running in namespace " + ns)
sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
@ -794,7 +794,7 @@ var _ = SIGDescribe("StatefulSet", func() {
sst := framework.NewStatefulSetTester(c)
sst.SetHTTPProbe(ss)
ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
ss = sst.WaitForStatus(ss)
@ -1089,7 +1089,7 @@ func rollbackTest(c clientset.Interface, ns string, ss *apps.StatefulSet) {
sst := framework.NewStatefulSetTester(c)
sst.SetHTTPProbe(ss)
ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
ss = sst.WaitForStatus(ss)
currentRevision, updateRevision := ss.Status.CurrentRevision, ss.Status.UpdateRevision
@ -1107,7 +1107,7 @@ func rollbackTest(c clientset.Interface, ns string, ss *apps.StatefulSet) {
}
sst.SortStatefulPods(pods)
err = sst.BreakPodHTTPProbe(ss, &pods.Items[1])
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
ss, pods = sst.WaitForPodNotReady(ss, pods.Items[1].Name)
newImage := NewNginxImage
oldImage := ss.Spec.Template.Spec.Containers[0].Image
@ -1117,7 +1117,7 @@ func rollbackTest(c clientset.Interface, ns string, ss *apps.StatefulSet) {
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *apps.StatefulSet) {
update.Spec.Template.Spec.Containers[0].Image = newImage
})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
ginkgo.By("Creating a new revision")
ss = sst.WaitForStatus(ss)
@ -1129,7 +1129,7 @@ func rollbackTest(c clientset.Interface, ns string, ss *apps.StatefulSet) {
pods = sst.GetPodList(ss)
sst.SortStatefulPods(pods)
err = sst.RestorePodHTTPProbe(ss, &pods.Items[1])
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
ss, pods = sst.WaitForPodReady(ss, pods.Items[1].Name)
ss, pods = sst.WaitForRollingUpdate(ss)
gomega.Expect(ss.Status.CurrentRevision).To(gomega.Equal(updateRevision),
@ -1155,14 +1155,14 @@ func rollbackTest(c clientset.Interface, ns string, ss *apps.StatefulSet) {
ginkgo.By("Rolling back to a previous revision")
err = sst.BreakPodHTTPProbe(ss, &pods.Items[1])
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
ss, pods = sst.WaitForPodNotReady(ss, pods.Items[1].Name)
priorRevision := currentRevision
currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *apps.StatefulSet) {
update.Spec.Template.Spec.Containers[0].Image = oldImage
})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
framework.ExpectNoError(err)
ss = sst.WaitForStatus(ss)
currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
gomega.Expect(currentRevision).NotTo(gomega.Equal(updateRevision),