From 794a8668fa8bfc7d23033acdb7a7c911bacd4f1a Mon Sep 17 00:00:00 2001 From: tanjunchen <2799194073@qq.com> Date: Thu, 26 Dec 2019 17:35:18 +0800 Subject: [PATCH] move funcs from test/e2e/framework/job to test/e2e/apps --- test/e2e/apps/job.go | 25 +++++++- .../autoscaling/autoscaling_utils.go | 6 -- test/e2e/framework/job/fixtures.go | 11 ---- test/e2e/framework/job/rest.go | 27 -------- test/e2e/framework/job/wait.go | 31 ---------- test/e2e/node/ttlafterfinished.go | 62 +++++++++++++++++-- 6 files changed, 81 insertions(+), 81 deletions(-) diff --git a/test/e2e/apps/job.go b/test/e2e/apps/job.go index e3ee4497155..0b7943d1d35 100644 --- a/test/e2e/apps/job.go +++ b/test/e2e/apps/job.go @@ -20,9 +20,12 @@ import ( "fmt" "time" + batchv1 "k8s.io/api/batch/v1" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + clientset "k8s.io/client-go/kubernetes" batchinternal "k8s.io/kubernetes/pkg/apis/batch" "k8s.io/kubernetes/test/e2e/framework" jobutil "k8s.io/kubernetes/test/e2e/framework/job" @@ -137,7 +140,7 @@ var _ = SIGDescribe("Job", func() { job, err := jobutil.CreateJob(f.ClientSet, f.Namespace.Name, job) framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name) ginkgo.By("Ensuring job past active deadline") - err = jobutil.WaitForJobFailure(f.ClientSet, f.Namespace.Name, job.Name, time.Duration(activeDeadlineSeconds+10)*time.Second, "DeadlineExceeded") + err = waitForJobFailure(f.ClientSet, f.Namespace.Name, job.Name, time.Duration(activeDeadlineSeconds+10)*time.Second, "DeadlineExceeded") framework.ExpectNoError(err, "failed to ensure job past active deadline in namespace: %s", f.Namespace.Name) }) @@ -234,7 +237,7 @@ var _ = SIGDescribe("Job", func() { framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name) ginkgo.By("Ensuring job exceed backofflimit") - err = jobutil.WaitForJobFailure(f.ClientSet, f.Namespace.Name, job.Name, jobutil.JobTimeout, "BackoffLimitExceeded") + err = waitForJobFailure(f.ClientSet, f.Namespace.Name, job.Name, jobutil.JobTimeout, "BackoffLimitExceeded") framework.ExpectNoError(err, "failed to ensure job exceed backofflimit in namespace: %s", f.Namespace.Name) ginkgo.By(fmt.Sprintf("Checking that %d pod created and status is failed", backoff+1)) @@ -252,3 +255,21 @@ var _ = SIGDescribe("Job", func() { } }) }) + +// waitForJobFailure uses c to wait for up to timeout for the Job named jobName in namespace ns to fail. 
+func waitForJobFailure(c clientset.Interface, ns, jobName string, timeout time.Duration, reason string) error { + return wait.Poll(framework.Poll, timeout, func() (bool, error) { + curr, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{}) + if err != nil { + return false, err + } + for _, c := range curr.Status.Conditions { + if c.Type == batchv1.JobFailed && c.Status == v1.ConditionTrue { + if reason == "" || reason == c.Reason { + return true, nil + } + } + } + return false, nil + }) +} diff --git a/test/e2e/framework/autoscaling/autoscaling_utils.go b/test/e2e/framework/autoscaling/autoscaling_utils.go index 867b31a1e34..313713b6614 100644 --- a/test/e2e/framework/autoscaling/autoscaling_utils.go +++ b/test/e2e/framework/autoscaling/autoscaling_utils.go @@ -112,12 +112,6 @@ func NewDynamicResourceConsumer(name, nsName string, kind schema.GroupVersionKin dynamicRequestSizeInMillicores, dynamicRequestSizeInMegabytes, dynamicRequestSizeCustomMetric, cpuLimit, memLimit, clientset, scaleClient, nil, nil) } -// NewMetricExporter is a wrapper to create a new ResourceConsumer for metrics exporter -func NewMetricExporter(name, nsName string, podAnnotations, serviceAnnotations map[string]string, metricValue int, clientset clientset.Interface, scaleClient scaleclient.ScalesGetter) *ResourceConsumer { - return newResourceConsumer(name, nsName, KindDeployment, 1, 0, 0, metricValue, dynamicConsumptionTimeInSeconds, - dynamicRequestSizeInMillicores, dynamicRequestSizeInMegabytes, dynamicRequestSizeCustomMetric, 100, 100, clientset, scaleClient, podAnnotations, serviceAnnotations) -} - /* NewResourceConsumer creates new ResourceConsumer initCPUTotal argument is in millicores diff --git a/test/e2e/framework/job/fixtures.go b/test/e2e/framework/job/fixtures.go index 641aa31ed4c..e6f13cc56bf 100644 --- a/test/e2e/framework/job/fixtures.go +++ b/test/e2e/framework/job/fixtures.go @@ -97,14 +97,3 @@ func NewTestJob(behavior, name string, rPol v1.RestartPolicy, parallelism, compl } return job } - -// FinishTime returns finish time of the specified job. -func FinishTime(finishedJob *batchv1.Job) metav1.Time { - var finishTime metav1.Time - for _, c := range finishedJob.Status.Conditions { - if (c.Type == batchv1.JobComplete || c.Type == batchv1.JobFailed) && c.Status == v1.ConditionTrue { - return c.LastTransitionTime - } - } - return finishTime -} diff --git a/test/e2e/framework/job/rest.go b/test/e2e/framework/job/rest.go index eebb80672db..13417ddae76 100644 --- a/test/e2e/framework/job/rest.go +++ b/test/e2e/framework/job/rest.go @@ -17,15 +17,11 @@ limitations under the License. package job import ( - "fmt" - batchv1 "k8s.io/api/batch/v1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" - "k8s.io/kubernetes/test/e2e/framework" ) // GetJob uses c to get the Job in namespace ns named name. If the returned error is nil, the returned Job is valid. @@ -52,29 +48,6 @@ func UpdateJob(c clientset.Interface, ns string, job *batchv1.Job) (*batchv1.Job return c.BatchV1().Jobs(ns).Update(job) } -// UpdateJobWithRetries updates job with retries. 
-func UpdateJobWithRetries(c clientset.Interface, namespace, name string, applyUpdate func(*batchv1.Job)) (job *batchv1.Job, err error) { - jobs := c.BatchV1().Jobs(namespace) - var updateErr error - pollErr := wait.PollImmediate(framework.Poll, JobTimeout, func() (bool, error) { - if job, err = jobs.Get(name, metav1.GetOptions{}); err != nil { - return false, err - } - // Apply the update, then attempt to push it to the apiserver. - applyUpdate(job) - if job, err = jobs.Update(job); err == nil { - framework.Logf("Updating job %s", name) - return true, nil - } - updateErr = err - return false, nil - }) - if pollErr == wait.ErrWaitTimeout { - pollErr = fmt.Errorf("couldn't apply the provided updated to job %q: %v", name, updateErr) - } - return job, pollErr -} - // DeleteJob uses c to delete the Job named name in namespace ns. If the returned error is nil, the Job has been // deleted. func DeleteJob(c clientset.Interface, ns, name string) error { diff --git a/test/e2e/framework/job/wait.go b/test/e2e/framework/job/wait.go index b975460087f..8eb5decfd45 100644 --- a/test/e2e/framework/job/wait.go +++ b/test/e2e/framework/job/wait.go @@ -19,7 +19,6 @@ package job import ( "time" - batchv1 "k8s.io/api/batch/v1" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -69,24 +68,6 @@ func WaitForJobFinish(c clientset.Interface, ns, jobName string) error { }) } -// WaitForJobFailure uses c to wait for up to timeout for the Job named jobName in namespace ns to fail. -func WaitForJobFailure(c clientset.Interface, ns, jobName string, timeout time.Duration, reason string) error { - return wait.Poll(framework.Poll, timeout, func() (bool, error) { - curr, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{}) - if err != nil { - return false, err - } - for _, c := range curr.Status.Conditions { - if c.Type == batchv1.JobFailed && c.Status == v1.ConditionTrue { - if reason == "" || reason == c.Reason { - return true, nil - } - } - } - return false, nil - }) -} - // WaitForJobGone uses c to wait for up to timeout for the Job named jobName in namespace ns to be removed. func WaitForJobGone(c clientset.Interface, ns, jobName string, timeout time.Duration) error { return wait.Poll(framework.Poll, timeout, func() (bool, error) { @@ -109,15 +90,3 @@ func WaitForAllJobPodsGone(c clientset.Interface, ns, jobName string) error { return len(pods.Items) == 0, nil }) } - -// WaitForJobDeleting uses c to wait for the Job jobName in namespace ns to have -// a non-nil deletionTimestamp (i.e. being deleted). -func WaitForJobDeleting(c clientset.Interface, ns, jobName string) error { - return wait.PollImmediate(framework.Poll, JobTimeout, func() (bool, error) { - curr, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{}) - if err != nil { - return false, err - } - return curr.ObjectMeta.DeletionTimestamp != nil, nil - }) -} diff --git a/test/e2e/node/ttlafterfinished.go b/test/e2e/node/ttlafterfinished.go index 136908cf5f2..7f5fd1e9d2e 100644 --- a/test/e2e/node/ttlafterfinished.go +++ b/test/e2e/node/ttlafterfinished.go @@ -17,11 +17,14 @@ limitations under the License. 
 package node
 
 import (
+	"fmt"
 	"time"
 
 	batchv1 "k8s.io/api/batch/v1"
 	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
+	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/pkg/util/slice"
 	"k8s.io/kubernetes/test/e2e/framework"
 	jobutil "k8s.io/kubernetes/test/e2e/framework/job"
@@ -29,7 +32,12 @@ import (
 	"github.com/onsi/ginkgo"
 )
 
-const dummyFinalizer = "k8s.io/dummy-finalizer"
+const (
+	dummyFinalizer = "k8s.io/dummy-finalizer"
+
+	// JobTimeout is how long to wait for a job to finish.
+	JobTimeout = 15 * time.Minute
+)
 
 var _ = framework.KubeDescribe("[Feature:TTLAfterFinished][NodeAlphaFeature:TTLAfterFinished]", func() {
 	f := framework.NewDefaultFramework("ttlafterfinished")
@@ -47,7 +55,7 @@ func cleanupJob(f *framework.Framework, job *batchv1.Job) {
 	removeFinalizerFunc := func(j *batchv1.Job) {
 		j.ObjectMeta.Finalizers = slice.RemoveString(j.ObjectMeta.Finalizers, dummyFinalizer, nil)
 	}
-	_, err := jobutil.UpdateJobWithRetries(c, ns, job.Name, removeFinalizerFunc)
+	_, err := updateJobWithRetries(c, ns, job.Name, removeFinalizerFunc)
 	framework.ExpectNoError(err)
 
 	jobutil.WaitForJobGone(c, ns, job.Name, wait.ForeverTestTimeout)
@@ -78,13 +86,13 @@
 	framework.ExpectNoError(err)
 
 	framework.Logf("Wait for TTL after finished controller to delete the Job")
-	err = jobutil.WaitForJobDeleting(c, ns, job.Name)
+	err = waitForJobDeleting(c, ns, job.Name)
 	framework.ExpectNoError(err)
 
 	framework.Logf("Check Job's deletionTimestamp and compare with the time when the Job finished")
 	job, err = jobutil.GetJob(c, ns, job.Name)
 	framework.ExpectNoError(err)
-	finishTime := jobutil.FinishTime(job)
+	finishTime := FinishTime(job)
 	finishTimeUTC := finishTime.UTC()
 	framework.ExpectNotEqual(finishTime.IsZero(), true)
 
@@ -94,3 +102,49 @@
 	expireAtUTC := finishTimeUTC.Add(time.Duration(ttl) * time.Second)
 	framework.ExpectEqual(deleteAtUTC.Before(expireAtUTC), false)
 }
+
+// FinishTime returns the finish time of the specified job.
+func FinishTime(finishedJob *batchv1.Job) metav1.Time {
+	var finishTime metav1.Time
+	for _, c := range finishedJob.Status.Conditions {
+		if (c.Type == batchv1.JobComplete || c.Type == batchv1.JobFailed) && c.Status == v1.ConditionTrue {
+			return c.LastTransitionTime
+		}
+	}
+	return finishTime
+}
+
+// updateJobWithRetries updates job with retries.
+func updateJobWithRetries(c clientset.Interface, namespace, name string, applyUpdate func(*batchv1.Job)) (job *batchv1.Job, err error) {
+	jobs := c.BatchV1().Jobs(namespace)
+	var updateErr error
+	pollErr := wait.PollImmediate(framework.Poll, JobTimeout, func() (bool, error) {
+		if job, err = jobs.Get(name, metav1.GetOptions{}); err != nil {
+			return false, err
+		}
+		// Apply the update, then attempt to push it to the apiserver.
+		applyUpdate(job)
+		if job, err = jobs.Update(job); err == nil {
+			framework.Logf("Updating job %s", name)
+			return true, nil
+		}
+		updateErr = err
+		return false, nil
+	})
+	if pollErr == wait.ErrWaitTimeout {
+		pollErr = fmt.Errorf("couldn't apply the provided update to job %q: %v", name, updateErr)
+	}
+	return job, pollErr
+}
+
+// waitForJobDeleting uses c to wait for the Job jobName in namespace ns to have
+// a non-nil deletionTimestamp (i.e. being deleted).
+func waitForJobDeleting(c clientset.Interface, ns, jobName string) error { + return wait.PollImmediate(framework.Poll, JobTimeout, func() (bool, error) { + curr, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{}) + if err != nil { + return false, err + } + return curr.ObjectMeta.DeletionTimestamp != nil, nil + }) +}