use framework.ExpectNoError() for e2e/apps
commit 2c098b37e3
parent 1c5eed4233
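The change applied across these e2e/apps suites is mechanical: every direct gomega error assertion of the form gomega.Expect(err).NotTo(gomega.HaveOccurred(), ...) becomes a call to the test framework helper framework.ExpectNoError(err, ...), and files left with no other gomega usage also drop the "github.com/onsi/gomega" import. As a rough sketch of what the helper does, assuming it is a thin wrapper over the same gomega assertion (the wrapper body below is an approximation for illustration, not the framework's actual source):

    package framework

    import "github.com/onsi/gomega"

    // ExpectNoError (sketch): fail the running spec when err is non-nil,
    // attaching the optional explanation, and report the failure at the
    // caller's line via the offset rather than inside this wrapper.
    func ExpectNoError(err error, explain ...interface{}) {
        gomega.ExpectWithOffset(1, err).NotTo(gomega.HaveOccurred(), explain...)
    }

    // Before (removed throughout this diff):
    //   gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to create CronJob in namespace %s", ns)
    // After (added throughout this diff):
    //   framework.ExpectNoError(err, "Failed to create CronJob in namespace %s", ns)

Behavior at the call sites is unchanged; routing error checks through the framework helper keeps failure reporting consistent across the e2e suites, which is also why two of the import blocks in this diff can drop the gomega import entirely.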
@@ -61,21 +61,21 @@ var _ = SIGDescribe("CronJob", func() {
 cronJob := newTestCronJob("concurrent", "*/1 * * * ?", batchv1beta1.AllowConcurrent,
 sleepCommand, nil)
 cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to create CronJob in namespace %s", f.Namespace.Name)
+framework.ExpectNoError(err, "Failed to create CronJob in namespace %s", f.Namespace.Name)

 ginkgo.By("Ensuring more than one job is running at a time")
 err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, 2)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to wait for active jobs in CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
+framework.ExpectNoError(err, "Failed to wait for active jobs in CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)

 ginkgo.By("Ensuring at least two running jobs exists by listing jobs explicitly")
 jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to list the CronJobs in namespace %s", f.Namespace.Name)
+framework.ExpectNoError(err, "Failed to list the CronJobs in namespace %s", f.Namespace.Name)
 activeJobs, _ := filterActiveJobs(jobs)
 gomega.Expect(len(activeJobs) >= 2).To(gomega.BeTrue())

 ginkgo.By("Removing cronjob")
 err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
+framework.ExpectNoError(err, "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
 })

 // suspended should not schedule jobs
@@ -86,7 +86,7 @@ var _ = SIGDescribe("CronJob", func() {
 t := true
 cronJob.Spec.Suspend = &t
 cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to create CronJob in namespace %s", f.Namespace.Name)
+framework.ExpectNoError(err, "Failed to create CronJob in namespace %s", f.Namespace.Name)

 ginkgo.By("Ensuring no jobs are scheduled")
 err = waitForNoJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, false)
@@ -94,12 +94,12 @@ var _ = SIGDescribe("CronJob", func() {

 ginkgo.By("Ensuring no job exists by listing jobs explicitly")
 jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to list the CronJobs in namespace %s", f.Namespace.Name)
+framework.ExpectNoError(err, "Failed to list the CronJobs in namespace %s", f.Namespace.Name)
 gomega.Expect(jobs.Items).To(gomega.HaveLen(0))

 ginkgo.By("Removing cronjob")
 err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
+framework.ExpectNoError(err, "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
 })

 // only single active job is allowed for ForbidConcurrent
@@ -108,20 +108,20 @@ var _ = SIGDescribe("CronJob", func() {
 cronJob := newTestCronJob("forbid", "*/1 * * * ?", batchv1beta1.ForbidConcurrent,
 sleepCommand, nil)
 cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to create CronJob in namespace %s", f.Namespace.Name)
+framework.ExpectNoError(err, "Failed to create CronJob in namespace %s", f.Namespace.Name)

 ginkgo.By("Ensuring a job is scheduled")
 err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, 1)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to schedule CronJob %s", cronJob.Name)
+framework.ExpectNoError(err, "Failed to schedule CronJob %s", cronJob.Name)

 ginkgo.By("Ensuring exactly one is scheduled")
 cronJob, err = getCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to get CronJob %s", cronJob.Name)
+framework.ExpectNoError(err, "Failed to get CronJob %s", cronJob.Name)
 gomega.Expect(cronJob.Status.Active).Should(gomega.HaveLen(1))

 ginkgo.By("Ensuring exactly one running job exists by listing jobs explicitly")
 jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to list the CronJobs in namespace %s", f.Namespace.Name)
+framework.ExpectNoError(err, "Failed to list the CronJobs in namespace %s", f.Namespace.Name)
 activeJobs, _ := filterActiveJobs(jobs)
 gomega.Expect(activeJobs).To(gomega.HaveLen(1))

@@ -131,7 +131,7 @@ var _ = SIGDescribe("CronJob", func() {

 ginkgo.By("Removing cronjob")
 err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
+framework.ExpectNoError(err, "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
 })

 // only single active job is allowed for ReplaceConcurrent
@@ -140,30 +140,30 @@ var _ = SIGDescribe("CronJob", func() {
 cronJob := newTestCronJob("replace", "*/1 * * * ?", batchv1beta1.ReplaceConcurrent,
 sleepCommand, nil)
 cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to create CronJob in namespace %s", f.Namespace.Name)
+framework.ExpectNoError(err, "Failed to create CronJob in namespace %s", f.Namespace.Name)

 ginkgo.By("Ensuring a job is scheduled")
 err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, 1)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to schedule CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
+framework.ExpectNoError(err, "Failed to schedule CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)

 ginkgo.By("Ensuring exactly one is scheduled")
 cronJob, err = getCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to get CronJob %s", cronJob.Name)
+framework.ExpectNoError(err, "Failed to get CronJob %s", cronJob.Name)
 gomega.Expect(cronJob.Status.Active).Should(gomega.HaveLen(1))

 ginkgo.By("Ensuring exactly one running job exists by listing jobs explicitly")
 jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to list the jobs in namespace %s", f.Namespace.Name)
+framework.ExpectNoError(err, "Failed to list the jobs in namespace %s", f.Namespace.Name)
 activeJobs, _ := filterActiveJobs(jobs)
 gomega.Expect(activeJobs).To(gomega.HaveLen(1))

 ginkgo.By("Ensuring the job is replaced with a new one")
 err = waitForJobReplaced(f.ClientSet, f.Namespace.Name, jobs.Items[0].Name)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to replace CronJob %s in namespace %s", jobs.Items[0].Name, f.Namespace.Name)
+framework.ExpectNoError(err, "Failed to replace CronJob %s in namespace %s", jobs.Items[0].Name, f.Namespace.Name)

 ginkgo.By("Removing cronjob")
 err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
+framework.ExpectNoError(err, "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
 })

 // shouldn't give us unexpected warnings
@@ -172,13 +172,13 @@ var _ = SIGDescribe("CronJob", func() {
 cronJob := newTestCronJob("concurrent", "*/1 * * * ?", batchv1beta1.AllowConcurrent,
 nil, nil)
 cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to create CronJob in namespace %s", f.Namespace.Name)
+framework.ExpectNoError(err, "Failed to create CronJob in namespace %s", f.Namespace.Name)

 ginkgo.By("Ensuring at least two jobs and at least one finished job exists by listing jobs explicitly")
 err = waitForJobsAtLeast(f.ClientSet, f.Namespace.Name, 2)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to ensure at least two job exists in namespace %s", f.Namespace.Name)
+framework.ExpectNoError(err, "Failed to ensure at least two job exists in namespace %s", f.Namespace.Name)
 err = waitForAnyFinishedJob(f.ClientSet, f.Namespace.Name)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to ensure at least on finished job exists in namespace %s", f.Namespace.Name)
+framework.ExpectNoError(err, "Failed to ensure at least on finished job exists in namespace %s", f.Namespace.Name)

 ginkgo.By("Ensuring no unexpected event has happened")
 err = waitForEventWithReason(f.ClientSet, f.Namespace.Name, cronJob.Name, []string{"MissingJob", "UnexpectedJob"})
@@ -186,7 +186,7 @@ var _ = SIGDescribe("CronJob", func() {

 ginkgo.By("Removing cronjob")
 err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
+framework.ExpectNoError(err, "Failed to delete CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
 })

 // deleted jobs should be removed from the active list
@@ -195,15 +195,15 @@ var _ = SIGDescribe("CronJob", func() {
 cronJob := newTestCronJob("forbid", "*/1 * * * ?", batchv1beta1.ForbidConcurrent,
 sleepCommand, nil)
 cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to create CronJob in namespace %s", f.Namespace.Name)
+framework.ExpectNoError(err, "Failed to create CronJob in namespace %s", f.Namespace.Name)

 ginkgo.By("Ensuring a job is scheduled")
 err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, 1)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to ensure a %s cronjob is scheduled in namespace %s", cronJob.Name, f.Namespace.Name)
+framework.ExpectNoError(err, "Failed to ensure a %s cronjob is scheduled in namespace %s", cronJob.Name, f.Namespace.Name)

 ginkgo.By("Ensuring exactly one is scheduled")
 cronJob, err = getCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to ensure exactly one %s cronjob is scheduled in namespace %s", cronJob.Name, f.Namespace.Name)
+framework.ExpectNoError(err, "Failed to ensure exactly one %s cronjob is scheduled in namespace %s", cronJob.Name, f.Namespace.Name)
 gomega.Expect(cronJob.Status.Active).Should(gomega.HaveLen(1))

 ginkgo.By("Deleting the job")
@@ -217,15 +217,15 @@ var _ = SIGDescribe("CronJob", func() {

 ginkgo.By("Ensuring the job is not in the cronjob active list")
 err = waitForJobNotActive(f.ClientSet, f.Namespace.Name, cronJob.Name, job.Name)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to ensure the %s cronjob is not in active list in namespace %s", cronJob.Name, f.Namespace.Name)
+framework.ExpectNoError(err, "Failed to ensure the %s cronjob is not in active list in namespace %s", cronJob.Name, f.Namespace.Name)

 ginkgo.By("Ensuring MissingJob event has occurred")
 err = waitForEventWithReason(f.ClientSet, f.Namespace.Name, cronJob.Name, []string{"MissingJob"})
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to ensure missing job event has occurred for %s cronjob in namespace %s", cronJob.Name, f.Namespace.Name)
+framework.ExpectNoError(err, "Failed to ensure missing job event has occurred for %s cronjob in namespace %s", cronJob.Name, f.Namespace.Name)

 ginkgo.By("Removing cronjob")
 err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to remove %s cronjob in namespace %s", cronJob.Name, f.Namespace.Name)
+framework.ExpectNoError(err, "Failed to remove %s cronjob in namespace %s", cronJob.Name, f.Namespace.Name)
 })

 // cleanup of successful finished jobs, with limit of one successful job
@@ -235,37 +235,37 @@ var _ = SIGDescribe("CronJob", func() {
 cronJob := newTestCronJob("concurrent-limit", "*/1 * * * ?", batchv1beta1.AllowConcurrent,
 successCommand, &successLimit)
 cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to create allowconcurrent cronjob with custom history limits in namespace %s", f.Namespace.Name)
+framework.ExpectNoError(err, "Failed to create allowconcurrent cronjob with custom history limits in namespace %s", f.Namespace.Name)

 // Job is going to complete instantly: do not check for an active job
 // as we are most likely to miss it

 ginkgo.By("Ensuring a finished job exists")
 err = waitForAnyFinishedJob(f.ClientSet, f.Namespace.Name)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to ensure a finished cronjob exists in namespace %s", f.Namespace.Name)
+framework.ExpectNoError(err, "Failed to ensure a finished cronjob exists in namespace %s", f.Namespace.Name)

 ginkgo.By("Ensuring a finished job exists by listing jobs explicitly")
 jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to ensure a finished cronjob exists by listing jobs explicitly in namespace %s", f.Namespace.Name)
+framework.ExpectNoError(err, "Failed to ensure a finished cronjob exists by listing jobs explicitly in namespace %s", f.Namespace.Name)
 _, finishedJobs := filterActiveJobs(jobs)
 gomega.Expect(len(finishedJobs) == 1).To(gomega.BeTrue())

 // Job should get deleted when the next job finishes the next minute
 ginkgo.By("Ensuring this job and its pods does not exist anymore")
 err = waitForJobToDisappear(f.ClientSet, f.Namespace.Name, finishedJobs[0])
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to ensure that job does not exists anymore in namespace %s", f.Namespace.Name)
+framework.ExpectNoError(err, "Failed to ensure that job does not exists anymore in namespace %s", f.Namespace.Name)
 err = waitForJobsPodToDisappear(f.ClientSet, f.Namespace.Name, finishedJobs[0])
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to ensure that pods for job does not exists anymore in namespace %s", f.Namespace.Name)
+framework.ExpectNoError(err, "Failed to ensure that pods for job does not exists anymore in namespace %s", f.Namespace.Name)

 ginkgo.By("Ensuring there is 1 finished job by listing jobs explicitly")
 jobs, err = f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to ensure there is one finished job by listing job explicitly in namespace %s", f.Namespace.Name)
+framework.ExpectNoError(err, "Failed to ensure there is one finished job by listing job explicitly in namespace %s", f.Namespace.Name)
 _, finishedJobs = filterActiveJobs(jobs)
 gomega.Expect(len(finishedJobs) == 1).To(gomega.BeTrue())

 ginkgo.By("Removing cronjob")
 err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to remove the %s cronjob in namespace %s", cronJob.Name, f.Namespace.Name)
+framework.ExpectNoError(err, "Failed to remove the %s cronjob in namespace %s", cronJob.Name, f.Namespace.Name)
 })
 })

@@ -37,7 +37,6 @@ import (
 imageutils "k8s.io/kubernetes/test/utils/image"

 "github.com/onsi/ginkgo"
-"github.com/onsi/gomega"
 )

 // This test primarily checks 2 things:
@@ -116,7 +115,7 @@ func (r *RestartDaemonConfig) waitUp() {
 func (r *RestartDaemonConfig) kill() {
 framework.Logf("Killing %v", r)
 _, err := framework.NodeExec(r.nodeName, fmt.Sprintf("pgrep %v | xargs -I {} sudo kill {}", r.daemonName))
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 }

 // Restart checks if the daemon is up, kills it, and waits till it comes back up
@@ -209,7 +208,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
 Replicas: numPods,
 CreatedPods: &[]*v1.Pod{},
 }
-gomega.Expect(framework.RunRC(config)).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(framework.RunRC(config))
 replacePods(*config.CreatedPods, existingPods)

 stopCh = make(chan struct{})
@@ -72,7 +72,7 @@ var _ = SIGDescribe("DisruptionController", func() {
 }
 return pdb.Status.PodDisruptionsAllowed > 0, nil
 })
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 })

 evictionCases := []struct {
@@ -179,7 +179,7 @@ var _ = SIGDescribe("DisruptionController", func() {

 return false, nil
 })
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)

 e := &policy.Eviction{
 ObjectMeta: metav1.ObjectMeta{
@@ -210,7 +210,7 @@ var _ = SIGDescribe("DisruptionController", func() {
 }
 return true, nil
 })
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 }
 })
 }
@@ -228,7 +228,7 @@ func createPDBMinAvailableOrDie(cs kubernetes.Interface, ns string, minAvailable
 },
 }
 _, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Create(&pdb)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 }

 func createPDBMaxUnavailableOrDie(cs kubernetes.Interface, ns string, maxUnavailable intstr.IntOrString) {
@@ -243,7 +243,7 @@ func createPDBMaxUnavailableOrDie(cs kubernetes.Interface, ns string, maxUnavail
 },
 }
 _, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Create(&pdb)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 }

 func createPodsOrDie(cs kubernetes.Interface, ns string, n int) {
@@ -42,11 +42,11 @@ var _ = SIGDescribe("Job", func() {
 ginkgo.By("Creating a job")
 job := jobutil.NewTestJob("succeed", "all-succeed", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
 job, err := jobutil.CreateJob(f.ClientSet, f.Namespace.Name, job)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create job in namespace: %s", f.Namespace.Name)
+framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)

 ginkgo.By("Ensuring job reaches completions")
 err = jobutil.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, completions)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to ensure job completion in namespace: %s", f.Namespace.Name)
+framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name)
 })

 // Pods sometimes fail, but eventually succeed.
@@ -61,11 +61,11 @@ var _ = SIGDescribe("Job", func() {
 // test timeout.
 job := jobutil.NewTestJob("failOnce", "fail-once-local", v1.RestartPolicyOnFailure, parallelism, completions, nil, backoffLimit)
 job, err := jobutil.CreateJob(f.ClientSet, f.Namespace.Name, job)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create job in namespace: %s", f.Namespace.Name)
+framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)

 ginkgo.By("Ensuring job reaches completions")
 err = jobutil.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, completions)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to ensure job completion in namespace: %s", f.Namespace.Name)
+framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name)
 })

 // Pods sometimes fail, but eventually succeed, after pod restarts
@@ -82,11 +82,11 @@ var _ = SIGDescribe("Job", func() {
 // test less flaky, for now.
 job := jobutil.NewTestJob("randomlySucceedOrFail", "rand-non-local", v1.RestartPolicyNever, parallelism, 3, nil, 999)
 job, err := jobutil.CreateJob(f.ClientSet, f.Namespace.Name, job)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create job in namespace: %s", f.Namespace.Name)
+framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)

 ginkgo.By("Ensuring job reaches completions")
 err = jobutil.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, *job.Spec.Completions)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to ensure job completion in namespace: %s", f.Namespace.Name)
+framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name)
 })

 ginkgo.It("should exceed active deadline", func() {
@@ -94,10 +94,10 @@ var _ = SIGDescribe("Job", func() {
 var activeDeadlineSeconds int64 = 1
 job := jobutil.NewTestJob("notTerminate", "exceed-active-deadline", v1.RestartPolicyNever, parallelism, completions, &activeDeadlineSeconds, backoffLimit)
 job, err := jobutil.CreateJob(f.ClientSet, f.Namespace.Name, job)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create job in namespace: %s", f.Namespace.Name)
+framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
 ginkgo.By("Ensuring job past active deadline")
 err = jobutil.WaitForJobFailure(f.ClientSet, f.Namespace.Name, job.Name, time.Duration(activeDeadlineSeconds+10)*time.Second, "DeadlineExceeded")
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to ensure job past active deadline in namespace: %s", f.Namespace.Name)
+framework.ExpectNoError(err, "failed to ensure job past active deadline in namespace: %s", f.Namespace.Name)
 })

 /*
@@ -109,11 +109,11 @@ var _ = SIGDescribe("Job", func() {
 ginkgo.By("Creating a job")
 job := jobutil.NewTestJob("notTerminate", "foo", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
 job, err := jobutil.CreateJob(f.ClientSet, f.Namespace.Name, job)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create job in namespace: %s", f.Namespace.Name)
+framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)

 ginkgo.By("Ensuring active pods == parallelism")
 err = jobutil.WaitForAllJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, parallelism)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to ensure active pods == parallelism in namespace: %s", f.Namespace.Name)
+framework.ExpectNoError(err, "failed to ensure active pods == parallelism in namespace: %s", f.Namespace.Name)

 ginkgo.By("delete a job")
 framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(f.ClientSet, batchinternal.Kind("Job"), f.Namespace.Name, job.Name))
@@ -131,16 +131,16 @@ var _ = SIGDescribe("Job", func() {
 // Save Kind since it won't be populated in the returned job.
 kind := job.Kind
 job, err := jobutil.CreateJob(f.ClientSet, f.Namespace.Name, job)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create job in namespace: %s", f.Namespace.Name)
+framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
 job.Kind = kind

 ginkgo.By("Ensuring active pods == parallelism")
 err = jobutil.WaitForAllJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, parallelism)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to ensure active pods == parallelism in namespace: %s", f.Namespace.Name)
+framework.ExpectNoError(err, "failed to ensure active pods == parallelism in namespace: %s", f.Namespace.Name)

 ginkgo.By("Orphaning one of the Job's Pods")
 pods, err := jobutil.GetJobPods(f.ClientSet, f.Namespace.Name, job.Name)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get PodList for job %s in namespace: %s", job.Name, f.Namespace.Name)
+framework.ExpectNoError(err, "failed to get PodList for job %s in namespace: %s", job.Name, f.Namespace.Name)
 gomega.Expect(pods.Items).To(gomega.HaveLen(int(parallelism)))
 pod := pods.Items[0]
 f.PodClient().Update(pod.Name, func(pod *v1.Pod) {
@@ -183,15 +183,15 @@ var _ = SIGDescribe("Job", func() {
 backoff := 1
 job := jobutil.NewTestJob("fail", "backofflimit", v1.RestartPolicyNever, 1, 1, nil, int32(backoff))
 job, err := jobutil.CreateJob(f.ClientSet, f.Namespace.Name, job)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create job in namespace: %s", f.Namespace.Name)
+framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
 ginkgo.By("Ensuring job exceed backofflimit")

 err = jobutil.WaitForJobFailure(f.ClientSet, f.Namespace.Name, job.Name, jobutil.JobTimeout, "BackoffLimitExceeded")
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to ensure job exceed backofflimit in namespace: %s", f.Namespace.Name)
+framework.ExpectNoError(err, "failed to ensure job exceed backofflimit in namespace: %s", f.Namespace.Name)

 ginkgo.By(fmt.Sprintf("Checking that %d pod created and status is failed", backoff+1))
 pods, err := jobutil.GetJobPods(f.ClientSet, f.Namespace.Name, job.Name)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get PodList for job %s in namespace: %s", job.Name, f.Namespace.Name)
+framework.ExpectNoError(err, "failed to get PodList for job %s in namespace: %s", job.Name, f.Namespace.Name)
 // gomega.Expect(pods.Items).To(gomega.HaveLen(backoff + 1))
 // due to NumRequeus not being stable enough, especially with failed status
 // updates we need to allow more than backoff+1
@@ -112,7 +112,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
 c = f.ClientSet
 ns = f.Namespace.Name
 _, err := framework.GetPodsInNamespace(c, ns, map[string]string{})
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)

 // TODO(foxish): Re-enable testing on gce after kubernetes#56787 is fixed.
 framework.SkipUnlessProviderIs("gke", "aws")
@@ -139,7 +139,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
 var podOpts metav1.ListOptions
 nodeOpts := metav1.ListOptions{}
 nodes, err := c.CoreV1().Nodes().List(nodeOpts)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 framework.FilterNodes(nodes, func(node v1.Node) bool {
 if !framework.IsNodeConditionSetAsExpected(&node, v1.NodeReady, true) {
 return false
@@ -243,17 +243,17 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
 replicas := int32(numNodes)
 common.NewRCByName(c, ns, name, replicas, nil)
 err = framework.VerifyPods(c, ns, name, true, replicas)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Each pod should start running and responding")
+framework.ExpectNoError(err, "Each pod should start running and responding")

 ginkgo.By("choose a node with at least one pod - we will block some network traffic on this node")
 label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
 options := metav1.ListOptions{LabelSelector: label.String()}
 pods, err := c.CoreV1().Pods(ns).List(options) // list pods after all have been scheduled
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 nodeName := pods.Items[0].Spec.NodeName

 node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)

 // This creates a temporary network partition, verifies that 'podNameToDisappear',
 // that belongs to replication controller 'rcName', really disappeared (because its
@@ -264,11 +264,11 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
 framework.TestUnderTemporaryNetworkFailure(c, ns, node, func() {
 framework.Logf("Waiting for pod %s to be removed", pods.Items[0].Name)
 err := framework.WaitForRCPodToDisappear(c, ns, name, pods.Items[0].Name)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)

 ginkgo.By("verifying whether the pod from the unreachable node is recreated")
 err = framework.VerifyPods(c, ns, name, true, replicas)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 })

 framework.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
@@ -284,14 +284,14 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
 // since we have no guarantees the pod will be scheduled on our node.
 additionalPod := "additionalpod"
 err = newPodOnNode(c, ns, additionalPod, node.Name)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 err = framework.VerifyPods(c, ns, additionalPod, true, 1)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)

 // verify that it is really on the requested node
 {
 pod, err := c.CoreV1().Pods(ns).Get(additionalPod, metav1.GetOptions{})
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 if pod.Spec.NodeName != node.Name {
 framework.Logf("Pod %s found on invalid node: %s instead of %s", pod.Name, pod.Spec.NodeName, node.Name)
 }
@@ -310,17 +310,17 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
 replicas := int32(numNodes)
 common.NewRCByName(c, ns, name, replicas, &gracePeriod)
 err = framework.VerifyPods(c, ns, name, true, replicas)
-gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Each pod should start running and responding")
+framework.ExpectNoError(err, "Each pod should start running and responding")

 ginkgo.By("choose a node with at least one pod - we will block some network traffic on this node")
 label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
 options := metav1.ListOptions{LabelSelector: label.String()}
 pods, err := c.CoreV1().Pods(ns).List(options) // list pods after all have been scheduled
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 nodeName := pods.Items[0].Spec.NodeName

 node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)

 // This creates a temporary network partition, verifies that 'podNameToDisappear',
 // that belongs to replication controller 'rcName', did not disappear (because its
@@ -335,7 +335,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {

 ginkgo.By(fmt.Sprintf("verifying that there are %v running pods during partition", replicas))
 _, err = framework.PodsCreated(c, ns, name, replicas)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 })

 framework.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
@@ -376,7 +376,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
 podMounts := []v1.VolumeMount{{Name: "home", MountPath: "/home"}}
 ps := framework.NewStatefulSet(psName, ns, headlessSvcName, 3, petMounts, podMounts, labels)
 _, err := c.AppsV1().StatefulSets(ns).Create(ps)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)

 pst := framework.NewStatefulSetTester(c)

@@ -393,7 +393,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
 ginkgo.It("should not reschedule stateful pods if there is a network partition [Slow] [Disruptive]", func() {
 ps := framework.NewStatefulSet(psName, ns, headlessSvcName, 3, []v1.VolumeMount{}, []v1.VolumeMount{}, labels)
 _, err := c.AppsV1().StatefulSets(ns).Create(ps)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)

 pst := framework.NewStatefulSetTester(c)
 pst.WaitForRunningAndReady(*ps.Spec.Replicas, ps)
@@ -430,21 +430,21 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
 job := jobutil.NewTestJob("notTerminate", "network-partition", v1.RestartPolicyNever,
 parallelism, completions, nil, backoffLimit)
 job, err := jobutil.CreateJob(f.ClientSet, f.Namespace.Name, job)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 label := labels.SelectorFromSet(labels.Set(map[string]string{jobutil.JobSelectorKey: job.Name}))

 ginkgo.By(fmt.Sprintf("verifying that there are now %v running pods", parallelism))
 _, err = framework.PodsCreatedByLabel(c, ns, job.Name, parallelism, label)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)

 ginkgo.By("choose a node with at least one pod - we will block some network traffic on this node")
 options := metav1.ListOptions{LabelSelector: label.String()}
 pods, err := c.CoreV1().Pods(ns).List(options) // list pods after all have been scheduled
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 nodeName := pods.Items[0].Spec.NodeName

 node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)

 // This creates a temporary network partition, verifies that the job has 'parallelism' number of
 // running pods after the node-controller detects node unreachable.
@@ -456,7 +456,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {

 ginkgo.By(fmt.Sprintf("verifying that there are now %v running pods", parallelism))
 _, err = framework.PodsCreatedByLabel(c, ns, job.Name, parallelism, label)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 })

 framework.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
@ -32,7 +32,6 @@ import (
|
|||||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||||
|
|
||||||
"github.com/onsi/ginkgo"
|
"github.com/onsi/ginkgo"
|
||||||
"github.com/onsi/gomega"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var _ = SIGDescribe("ReplicationController", func() {
|
var _ = SIGDescribe("ReplicationController", func() {
|
||||||
@ -125,12 +124,12 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri
|
|||||||
newRC := newRC(name, replicas, map[string]string{"name": name}, name, image)
|
newRC := newRC(name, replicas, map[string]string{"name": name}, name, image)
|
||||||
newRC.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{{ContainerPort: 9376}}
|
newRC.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{{ContainerPort: 9376}}
|
||||||
_, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(newRC)
|
_, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(newRC)
|
||||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
framework.ExpectNoError(err)
|
||||||
|
|
||||||
// Check that pods for the new RC were created.
|
// Check that pods for the new RC were created.
|
||||||
// TODO: Maybe switch PodsCreated to just check owner references.
|
// TODO: Maybe switch PodsCreated to just check owner references.
|
||||||
pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicas)
|
pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicas)
|
||||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
framework.ExpectNoError(err)
|
||||||
|
|
||||||
// Wait for the pods to enter the running state. Waiting loops until the pods
|
// Wait for the pods to enter the running state. Waiting loops until the pods
|
||||||
// are running so non-running pods cause a timeout for this test.
|
// are running so non-running pods cause a timeout for this test.
|
||||||
@ -149,14 +148,14 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri
|
|||||||
err = fmt.Errorf("Pod %q never run: %v", pod.Name, err)
|
err = fmt.Errorf("Pod %q never run: %v", pod.Name, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
framework.ExpectNoError(err)
|
||||||
framework.Logf("Pod %q is running (conditions: %+v)", pod.Name, pod.Status.Conditions)
|
framework.Logf("Pod %q is running (conditions: %+v)", pod.Name, pod.Status.Conditions)
|
||||||
running++
|
running++
|
||||||
}
|
}
|
||||||
|
|
||||||
// Sanity check
|
// Sanity check
|
||||||
if running != replicas {
|
if running != replicas {
|
||||||
gomega.Expect(fmt.Errorf("unexpected number of running pods: %+v", pods.Items)).NotTo(gomega.HaveOccurred())
|
framework.ExpectNoError(fmt.Errorf("unexpected number of running pods: %+v", pods.Items))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Verify that something is listening.
|
// Verify that something is listening.
|
||||||
@@ -182,7 +181,7 @@ func testReplicationControllerConditionCheck(f *framework.Framework) {
 framework.Logf("Creating quota %q that allows only two pods to run in the current namespace", name)
 quota := newPodQuota(name, "2")
 _, err := c.CoreV1().ResourceQuotas(namespace).Create(quota)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)

 err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
 quota, err = c.CoreV1().ResourceQuotas(namespace).Get(name, metav1.GetOptions{})
@@ -196,12 +195,12 @@ func testReplicationControllerConditionCheck(f *framework.Framework) {
 if err == wait.ErrWaitTimeout {
 err = fmt.Errorf("resource quota %q never synced", name)
 }
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)

 ginkgo.By(fmt.Sprintf("Creating rc %q that asks for more than the allowed pod quota", name))
 rc := newRC(name, 3, map[string]string{"name": name}, NginxImageName, NginxImage)
 rc, err = c.CoreV1().ReplicationControllers(namespace).Create(rc)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)

 ginkgo.By(fmt.Sprintf("Checking rc %q has the desired failure condition set", name))
 generation := rc.Generation
@@ -223,14 +222,14 @@ func testReplicationControllerConditionCheck(f *framework.Framework) {
 if err == wait.ErrWaitTimeout {
 err = fmt.Errorf("rc manager never added the failure condition for rc %q: %#v", name, conditions)
 }
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)

 ginkgo.By(fmt.Sprintf("Scaling down rc %q to satisfy pod quota", name))
 rc, err = framework.UpdateReplicationControllerWithRetries(c, namespace, name, func(update *v1.ReplicationController) {
 x := int32(2)
 update.Spec.Replicas = &x
 })
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)

 ginkgo.By(fmt.Sprintf("Checking rc %q has no failure condition set", name))
 generation = rc.Generation
@@ -252,7 +251,7 @@ func testReplicationControllerConditionCheck(f *framework.Framework) {
 if err == wait.ErrWaitTimeout {
 err = fmt.Errorf("rc manager never removed the failure condition for rc %q: %#v", name, conditions)
 }
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 }

 func testRCAdoptMatchingOrphans(f *framework.Framework) {
@@ -280,7 +279,7 @@ func testRCAdoptMatchingOrphans(f *framework.Framework) {
 rcSt := newRC(name, replicas, map[string]string{"name": name}, name, NginxImage)
 rcSt.Spec.Selector = map[string]string{"name": name}
 rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(rcSt)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)

 ginkgo.By("Then the orphan pod is adopted")
 err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
@@ -289,7 +288,7 @@ func testRCAdoptMatchingOrphans(f *framework.Framework) {
 if errors.IsNotFound(err) {
 return true, nil
 }
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 for _, owner := range p2.OwnerReferences {
 if *owner.Controller && owner.UID == rc.UID {
 // pod adopted
@@ -299,7 +298,7 @@ func testRCAdoptMatchingOrphans(f *framework.Framework) {
 // pod still not adopted
 return false, nil
 })
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 }

 func testRCReleaseControlledNotMatching(f *framework.Framework) {
@@ -309,16 +308,16 @@ func testRCReleaseControlledNotMatching(f *framework.Framework) {
 rcSt := newRC(name, replicas, map[string]string{"name": name}, name, NginxImage)
 rcSt.Spec.Selector = map[string]string{"name": name}
 rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(rcSt)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)

 ginkgo.By("When the matched label of one of its pods change")
 pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, rc.Name, replicas)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)

 p := pods.Items[0]
 err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
 pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{})
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)

 pod.Labels = map[string]string{"name": "not-matching-name"}
 _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(pod)
@@ -330,12 +329,12 @@ func testRCReleaseControlledNotMatching(f *framework.Framework) {
 }
 return true, nil
 })
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)

 ginkgo.By("Then the pod is released")
 err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
 p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{})
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 for _, owner := range p2.OwnerReferences {
 if *owner.Controller && owner.UID == rc.UID {
 // pod still belonging to the replication controller
@@ -345,5 +344,5 @@ func testRCReleaseControlledNotMatching(f *framework.Framework) {
 // pod already released
 return true, nil
 })
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 }
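The adoption and release tests above (testRCAdoptMatchingOrphans, testRCReleaseControlledNotMatching) both poll the pod's owner references until the expected ownership state is observed. A self-contained sketch of that polling loop follows; the helper name is hypothetical, and the client calls use the context-free client-go signatures that this tree uses:

package sketch

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// waitForControllerRef polls until the pod's controller owner reference with
// the given UID is present (want=true, adoption) or absent (want=false, release).
func waitForControllerRef(c clientset.Interface, ns, podName string, uid types.UID, want bool) error {
	return wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
		pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		owned := false
		for _, owner := range pod.OwnerReferences {
			if owner.Controller != nil && *owner.Controller && owner.UID == uid {
				owned = true
				break
			}
		}
		return owned == want, nil
	})
}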
@@ -33,7 +33,6 @@ import (
 replicasetutil "k8s.io/kubernetes/test/e2e/framework/replicaset"

 "github.com/onsi/ginkgo"
-"github.com/onsi/gomega"
 imageutils "k8s.io/kubernetes/test/utils/image"
 )

@@ -127,12 +126,12 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s
 newRS := newRS(name, replicas, map[string]string{"name": name}, name, image)
 newRS.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{{ContainerPort: 9376}}
 _, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(newRS)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)

 // Check that pods for the new RS were created.
 // TODO: Maybe switch PodsCreated to just check owner references.
 pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicas)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)

 // Wait for the pods to enter the running state. Waiting loops until the pods
 // are running so non-running pods cause a timeout for this test.
@@ -151,14 +150,14 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s
 err = fmt.Errorf("Pod %q never run: %v", pod.Name, err)
 }
 }
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 framework.Logf("Pod %q is running (conditions: %+v)", pod.Name, pod.Status.Conditions)
 running++
 }

 // Sanity check
 if running != replicas {
-gomega.Expect(fmt.Errorf("unexpected number of running pods: %+v", pods.Items)).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(fmt.Errorf("unexpected number of running pods: %+v", pods.Items))
 }

 // Verify that something is listening.
@@ -184,7 +183,7 @@ func testReplicaSetConditionCheck(f *framework.Framework) {
 ginkgo.By(fmt.Sprintf("Creating quota %q that allows only two pods to run in the current namespace", name))
 quota := newPodQuota(name, "2")
 _, err := c.CoreV1().ResourceQuotas(namespace).Create(quota)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)

 err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
 quota, err = c.CoreV1().ResourceQuotas(namespace).Get(name, metav1.GetOptions{})
@@ -198,12 +197,12 @@ func testReplicaSetConditionCheck(f *framework.Framework) {
 if err == wait.ErrWaitTimeout {
 err = fmt.Errorf("resource quota %q never synced", name)
 }
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)

 ginkgo.By(fmt.Sprintf("Creating replica set %q that asks for more than the allowed pod quota", name))
 rs := newRS(name, 3, map[string]string{"name": name}, NginxImageName, NginxImage)
 rs, err = c.AppsV1().ReplicaSets(namespace).Create(rs)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)

 ginkgo.By(fmt.Sprintf("Checking replica set %q has the desired failure condition set", name))
 generation := rs.Generation
@@ -226,14 +225,14 @@ func testReplicaSetConditionCheck(f *framework.Framework) {
 if err == wait.ErrWaitTimeout {
 err = fmt.Errorf("rs controller never added the failure condition for replica set %q: %#v", name, conditions)
 }
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)

 ginkgo.By(fmt.Sprintf("Scaling down replica set %q to satisfy pod quota", name))
 rs, err = replicasetutil.UpdateReplicaSetWithRetries(c, namespace, name, func(update *apps.ReplicaSet) {
 x := int32(2)
 update.Spec.Replicas = &x
 })
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)

 ginkgo.By(fmt.Sprintf("Checking replica set %q has no failure condition set", name))
 generation = rs.Generation
@@ -255,7 +254,7 @@ func testReplicaSetConditionCheck(f *framework.Framework) {
 if err == wait.ErrWaitTimeout {
 err = fmt.Errorf("rs controller never removed the failure condition for rs %q: %#v", name, conditions)
 }
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 }

 func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) {
@@ -283,7 +282,7 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) {
 rsSt := newRS(name, replicas, map[string]string{"name": name}, name, NginxImage)
 rsSt.Spec.Selector = &metav1.LabelSelector{MatchLabels: map[string]string{"name": name}}
 rs, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(rsSt)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)

 ginkgo.By("Then the orphan pod is adopted")
 err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
@@ -292,7 +291,7 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) {
 if errors.IsNotFound(err) {
 return true, nil
 }
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 for _, owner := range p2.OwnerReferences {
 if *owner.Controller && owner.UID == rs.UID {
 // pod adopted
@@ -302,16 +301,16 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) {
 // pod still not adopted
 return false, nil
 })
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)

 ginkgo.By("When the matched label of one of its pods change")
 pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, rs.Name, replicas)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)

 p = &pods.Items[0]
 err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
 pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{})
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)

 pod.Labels = map[string]string{"name": "not-matching-name"}
 _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(pod)
@@ -323,12 +322,12 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) {
 }
 return true, nil
 })
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)

 ginkgo.By("Then the pod is released")
 err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
 p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{})
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 for _, owner := range p2.OwnerReferences {
 if *owner.Controller && owner.UID == rs.UID {
 // pod still belonging to the replicaset
@@ -338,5 +337,5 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) {
 // pod already released
 return true, nil
 })
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 }
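testReplicaSetConditionCheck (hunks above) creates a two-pod quota, asks for three replicas, and waits for the replica set to report a failure condition before scaling it back down. A small sketch of the condition check that such a wait loops on; the helper is hypothetical, while the condition type is the real apps/v1 constant:

package sketch

import (
	apps "k8s.io/api/apps/v1"
)

// hasReplicaFailure reports whether the replica set carries the ReplicaFailure
// condition that the quota-starved test above waits for.
func hasReplicaFailure(rs *apps.ReplicaSet) bool {
	for _, cond := range rs.Status.Conditions {
		if cond.Type == apps.ReplicaSetReplicaFailure {
			return true
		}
	}
	return false
}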
@@ -79,7 +79,7 @@ var _ = SIGDescribe("StatefulSet", func() {
 ginkgo.By("Creating service " + headlessSvcName + " in namespace " + ns)
 headlessService := framework.CreateServiceSpec(headlessSvcName, "", true, labels)
 _, err := c.CoreV1().Services(ns).Create(headlessService)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 })

 ginkgo.AfterEach(func() {
@@ -100,7 +100,7 @@ var _ = SIGDescribe("StatefulSet", func() {
 sst.PauseNewPods(ss)

 _, err := c.AppsV1().StatefulSets(ns).Create(ss)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)

 ginkgo.By("Saturating stateful set " + ss.Name)
 sst.Saturate(ss)
@@ -143,7 +143,7 @@ var _ = SIGDescribe("StatefulSet", func() {
 // Save Kind since it won't be populated in the returned ss.
 kind := ss.Kind
 ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 ss.Kind = kind

 ginkgo.By("Saturating stateful set " + ss.Name)
@@ -226,7 +226,7 @@ var _ = SIGDescribe("StatefulSet", func() {
 sst.PauseNewPods(ss)

 _, err := c.AppsV1().StatefulSets(ns).Create(ss)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)

 sst.WaitForRunning(1, 0, ss)

@@ -294,7 +294,7 @@ var _ = SIGDescribe("StatefulSet", func() {
 }(),
 }
 ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
 ss = sst.WaitForStatus(ss)
 currentRevision, updateRevision := ss.Status.CurrentRevision, ss.Status.UpdateRevision
@@ -318,7 +318,7 @@ var _ = SIGDescribe("StatefulSet", func() {
 ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *apps.StatefulSet) {
 update.Spec.Template.Spec.Containers[0].Image = newImage
 })
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)

 ginkgo.By("Creating a new revision")
 ss = sst.WaitForStatus(ss)
@@ -365,7 +365,7 @@ var _ = SIGDescribe("StatefulSet", func() {
 }(),
 }
 })
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 ss, pods = sst.WaitForPartitionedRollingUpdate(ss)
 for i := range pods.Items {
 if i < int(*ss.Spec.UpdateStrategy.RollingUpdate.Partition) {
@@ -446,7 +446,7 @@ var _ = SIGDescribe("StatefulSet", func() {
 }(),
 }
 })
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 ss, pods = sst.WaitForPartitionedRollingUpdate(ss)
 for i := range pods.Items {
 if i < int(*ss.Spec.UpdateStrategy.RollingUpdate.Partition) {
@@ -498,7 +498,7 @@ var _ = SIGDescribe("StatefulSet", func() {
 Type: apps.OnDeleteStatefulSetStrategyType,
 }
 ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
 ss = sst.WaitForStatus(ss)
 currentRevision, updateRevision := ss.Status.CurrentRevision, ss.Status.UpdateRevision
@@ -538,7 +538,7 @@ var _ = SIGDescribe("StatefulSet", func() {
 ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *apps.StatefulSet) {
 update.Spec.Template.Spec.Containers[0].Image = newImage
 })
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)

 ginkgo.By("Creating a new revision")
 ss = sst.WaitForStatus(ss)
@@ -580,14 +580,14 @@ var _ = SIGDescribe("StatefulSet", func() {
 watcher, err := f.ClientSet.CoreV1().Pods(ns).Watch(metav1.ListOptions{
 LabelSelector: psLabels.AsSelector().String(),
 })
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)

 ginkgo.By("Creating stateful set " + ssName + " in namespace " + ns)
 ss := framework.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, psLabels)
 sst := framework.NewStatefulSetTester(c)
 sst.SetHTTPProbe(ss)
 ss, err = c.AppsV1().StatefulSets(ns).Create(ss)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)

 ginkgo.By("Waiting until all stateful set " + ssName + " replicas will be running in namespace " + ns)
 sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
@@ -618,13 +618,13 @@ var _ = SIGDescribe("StatefulSet", func() {
 return len(expectedOrder) == 0, nil

 })
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)

 ginkgo.By("Scale down will halt with unhealthy stateful pod")
 watcher, err = f.ClientSet.CoreV1().Pods(ns).Watch(metav1.ListOptions{
 LabelSelector: psLabels.AsSelector().String(),
 })
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)

 sst.BreakHTTPProbe(ss)
 sst.WaitForStatusReadyReplicas(ss, 0)
@@ -651,7 +651,7 @@ var _ = SIGDescribe("StatefulSet", func() {
 return len(expectedOrder) == 0, nil

 })
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 })

 /*
@@ -668,7 +668,7 @@ var _ = SIGDescribe("StatefulSet", func() {
 sst := framework.NewStatefulSetTester(c)
 sst.SetHTTPProbe(ss)
 ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)

 ginkgo.By("Waiting until all stateful set " + ssName + " replicas will be running in namespace " + ns)
 sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
@@ -794,7 +794,7 @@ var _ = SIGDescribe("StatefulSet", func() {
 sst := framework.NewStatefulSetTester(c)
 sst.SetHTTPProbe(ss)
 ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
 ss = sst.WaitForStatus(ss)

@@ -1089,7 +1089,7 @@ func rollbackTest(c clientset.Interface, ns string, ss *apps.StatefulSet) {
 sst := framework.NewStatefulSetTester(c)
 sst.SetHTTPProbe(ss)
 ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 sst.WaitForRunningAndReady(*ss.Spec.Replicas, ss)
 ss = sst.WaitForStatus(ss)
 currentRevision, updateRevision := ss.Status.CurrentRevision, ss.Status.UpdateRevision
@@ -1107,7 +1107,7 @@ func rollbackTest(c clientset.Interface, ns string, ss *apps.StatefulSet) {
 }
 sst.SortStatefulPods(pods)
 err = sst.BreakPodHTTPProbe(ss, &pods.Items[1])
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 ss, pods = sst.WaitForPodNotReady(ss, pods.Items[1].Name)
 newImage := NewNginxImage
 oldImage := ss.Spec.Template.Spec.Containers[0].Image
@@ -1117,7 +1117,7 @@ func rollbackTest(c clientset.Interface, ns string, ss *apps.StatefulSet) {
 ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *apps.StatefulSet) {
 update.Spec.Template.Spec.Containers[0].Image = newImage
 })
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)

 ginkgo.By("Creating a new revision")
 ss = sst.WaitForStatus(ss)
@@ -1129,7 +1129,7 @@ func rollbackTest(c clientset.Interface, ns string, ss *apps.StatefulSet) {
 pods = sst.GetPodList(ss)
 sst.SortStatefulPods(pods)
 err = sst.RestorePodHTTPProbe(ss, &pods.Items[1])
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 ss, pods = sst.WaitForPodReady(ss, pods.Items[1].Name)
 ss, pods = sst.WaitForRollingUpdate(ss)
 gomega.Expect(ss.Status.CurrentRevision).To(gomega.Equal(updateRevision),
@@ -1155,14 +1155,14 @@ func rollbackTest(c clientset.Interface, ns string, ss *apps.StatefulSet) {

 ginkgo.By("Rolling back to a previous revision")
 err = sst.BreakPodHTTPProbe(ss, &pods.Items[1])
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 ss, pods = sst.WaitForPodNotReady(ss, pods.Items[1].Name)
 priorRevision := currentRevision
 currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
 ss, err = framework.UpdateStatefulSetWithRetries(c, ns, ss.Name, func(update *apps.StatefulSet) {
 update.Spec.Template.Spec.Containers[0].Image = oldImage
 })
-gomega.Expect(err).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(err)
 ss = sst.WaitForStatus(ss)
 currentRevision, updateRevision = ss.Status.CurrentRevision, ss.Status.UpdateRevision
 gomega.Expect(currentRevision).NotTo(gomega.Equal(updateRevision),
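In statefulset.go only the error checks are converted, which is why its gomega import stays: value assertions such as the revision comparisons in rollbackTest remain gomega matchers. An illustrative sketch of the resulting mix (the function and its arguments are assumed, not part of this diff):

package sketch

import (
	"github.com/onsi/gomega"

	apps "k8s.io/api/apps/v1"
	"k8s.io/kubernetes/test/e2e/framework"
)

// checkRolledForward mixes the two assertion styles left in statefulset.go:
// framework.ExpectNoError for error checks, gomega matchers for value checks.
func checkRolledForward(ss *apps.StatefulSet, updateRevision string, err error) {
	framework.ExpectNoError(err)
	gomega.Expect(ss.Status.CurrentRevision).To(gomega.Equal(updateRevision))
}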