Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-20 02:11:09 +00:00.

Merge pull request #86640 from tanjunchen/move-code-test/e2e/framework/job

move funcs from test/e2e/framework/job to test/e2e/apps

Commit de3be348a6
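At the call sites the change is mechanical: the exported framework helper is swapped for an unexported copy that lives in the test package itself. A before/after sketch using names taken from the diff below:

    // Before: the test reached into the shared framework package.
    err = jobutil.WaitForJobFailure(f.ClientSet, f.Namespace.Name, job.Name, jobutil.JobTimeout, "BackoffLimitExceeded")

    // After: the helper is an unexported func in the test package.
    err = waitForJobFailure(f.ClientSet, f.Namespace.Name, job.Name, jobutil.JobTimeout, "BackoffLimitExceeded")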
@@ -20,9 +20,12 @@ import (
	"fmt"
	"time"

	batchv1 "k8s.io/api/batch/v1"
	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	batchinternal "k8s.io/kubernetes/pkg/apis/batch"
	"k8s.io/kubernetes/test/e2e/framework"
	jobutil "k8s.io/kubernetes/test/e2e/framework/job"
@@ -137,7 +140,7 @@ var _ = SIGDescribe("Job", func() {
		job, err := jobutil.CreateJob(f.ClientSet, f.Namespace.Name, job)
		framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
		ginkgo.By("Ensuring job past active deadline")
-		err = jobutil.WaitForJobFailure(f.ClientSet, f.Namespace.Name, job.Name, time.Duration(activeDeadlineSeconds+10)*time.Second, "DeadlineExceeded")
+		err = waitForJobFailure(f.ClientSet, f.Namespace.Name, job.Name, time.Duration(activeDeadlineSeconds+10)*time.Second, "DeadlineExceeded")
		framework.ExpectNoError(err, "failed to ensure job past active deadline in namespace: %s", f.Namespace.Name)
	})
@@ -234,7 +237,7 @@ var _ = SIGDescribe("Job", func() {
		framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
		ginkgo.By("Ensuring job exceed backofflimit")
-		err = jobutil.WaitForJobFailure(f.ClientSet, f.Namespace.Name, job.Name, jobutil.JobTimeout, "BackoffLimitExceeded")
+		err = waitForJobFailure(f.ClientSet, f.Namespace.Name, job.Name, jobutil.JobTimeout, "BackoffLimitExceeded")
		framework.ExpectNoError(err, "failed to ensure job exceed backofflimit in namespace: %s", f.Namespace.Name)

		ginkgo.By(fmt.Sprintf("Checking that %d pod created and status is failed", backoff+1))
@@ -252,3 +255,21 @@ var _ = SIGDescribe("Job", func() {
		}
	})
})
+
+// waitForJobFailure uses c to wait for up to timeout for the Job named jobName in namespace ns to fail.
+func waitForJobFailure(c clientset.Interface, ns, jobName string, timeout time.Duration, reason string) error {
+	return wait.Poll(framework.Poll, timeout, func() (bool, error) {
+		curr, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{})
+		if err != nil {
+			return false, err
+		}
+		for _, c := range curr.Status.Conditions {
+			if c.Type == batchv1.JobFailed && c.Status == v1.ConditionTrue {
+				if reason == "" || reason == c.Reason {
+					return true, nil
+				}
+			}
+		}
+		return false, nil
+	})
+}
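The helper added above is a straight wait.Poll loop: the condition func is invoked every interval until it returns (true, nil) for success, returns a non-nil error to abort immediately, or the timeout elapses and wait.Poll returns wait.ErrWaitTimeout. A minimal self-contained sketch of that contract (interval and timeout values are illustrative, not from this commit):

    package main

    import (
    	"fmt"
    	"time"

    	"k8s.io/apimachinery/pkg/util/wait"
    )

    func main() {
    	start := time.Now()
    	// Polled once per second; succeeds once three seconds have passed.
    	err := wait.Poll(1*time.Second, 10*time.Second, func() (bool, error) {
    		return time.Since(start) > 3*time.Second, nil
    	})
    	fmt.Println(err) // <nil> once the condition has held
    }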
@@ -97,14 +97,3 @@ func NewTestJob(behavior, name string, rPol v1.RestartPolicy, parallelism, compl
	}
	return job
}
-
-// FinishTime returns finish time of the specified job.
-func FinishTime(finishedJob *batchv1.Job) metav1.Time {
-	var finishTime metav1.Time
-	for _, c := range finishedJob.Status.Conditions {
-		if (c.Type == batchv1.JobComplete || c.Type == batchv1.JobFailed) && c.Status == v1.ConditionTrue {
-			return c.LastTransitionTime
-		}
-	}
-	return finishTime
-}
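FinishTime, removed here and re-added later in this commit to the TTL test file, scans the Job's conditions for a terminal state (Complete or Failed with status True) and returns its transition time; a zero time means the job has not finished. A quick standalone exercise of that logic, with a synthetic Job and an illustrative timestamp:

    package main

    import (
    	"fmt"
    	"time"

    	batchv1 "k8s.io/api/batch/v1"
    	v1 "k8s.io/api/core/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    // finishTime mirrors the moved helper: the first Complete/Failed
    // condition with status True wins; the zero value means "not finished".
    func finishTime(j *batchv1.Job) metav1.Time {
    	for _, c := range j.Status.Conditions {
    		if (c.Type == batchv1.JobComplete || c.Type == batchv1.JobFailed) && c.Status == v1.ConditionTrue {
    			return c.LastTransitionTime
    		}
    	}
    	return metav1.Time{}
    }

    func main() {
    	done := metav1.NewTime(time.Date(2020, 1, 2, 12, 0, 0, 0, time.UTC))
    	j := &batchv1.Job{Status: batchv1.JobStatus{Conditions: []batchv1.JobCondition{
    		{Type: batchv1.JobComplete, Status: v1.ConditionTrue, LastTransitionTime: done},
    	}}}
    	fmt.Println(finishTime(j)) // prints the completion time
    }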
@@ -17,15 +17,11 @@ limitations under the License.
package job

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
)

// GetJob uses c to get the Job in namespace ns named name. If the returned error is nil, the returned Job is valid.
@@ -52,29 +48,6 @@ func UpdateJob(c clientset.Interface, ns string, job *batchv1.Job) (*batchv1.Job
	return c.BatchV1().Jobs(ns).Update(job)
}
-
-// UpdateJobWithRetries updates job with retries.
-func UpdateJobWithRetries(c clientset.Interface, namespace, name string, applyUpdate func(*batchv1.Job)) (job *batchv1.Job, err error) {
-	jobs := c.BatchV1().Jobs(namespace)
-	var updateErr error
-	pollErr := wait.PollImmediate(framework.Poll, JobTimeout, func() (bool, error) {
-		if job, err = jobs.Get(name, metav1.GetOptions{}); err != nil {
-			return false, err
-		}
-		// Apply the update, then attempt to push it to the apiserver.
-		applyUpdate(job)
-		if job, err = jobs.Update(job); err == nil {
-			framework.Logf("Updating job %s", name)
-			return true, nil
-		}
-		updateErr = err
-		return false, nil
-	})
-	if pollErr == wait.ErrWaitTimeout {
-		pollErr = fmt.Errorf("couldn't apply the provided update to job %q: %v", name, updateErr)
-	}
-	return job, pollErr
-}

// DeleteJob uses c to delete the Job named name in namespace ns. If the returned error is nil, the Job has been
// deleted.
func DeleteJob(c clientset.Interface, ns, name string) error {
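The helper removed above is the classic get-mutate-update loop: re-fetch the object on every attempt so that an update conflict retries against the latest resourceVersion. client-go also ships this pattern as retry.RetryOnConflict; a hedged sketch of the same idea (not part of this commit, and updateJobLabels is a hypothetical helper name):

    package jobutil

    import (
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	clientset "k8s.io/client-go/kubernetes"
    	"k8s.io/client-go/util/retry"
    )

    // updateJobLabels illustrates the get-mutate-update retry pattern:
    // each attempt fetches a fresh copy, mutates it, and pushes the update;
    // on a conflict error, retry.RetryOnConflict runs the loop again.
    func updateJobLabels(c clientset.Interface, ns, name, key, value string) error {
    	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
    		job, err := c.BatchV1().Jobs(ns).Get(name, metav1.GetOptions{})
    		if err != nil {
    			return err
    		}
    		if job.Labels == nil {
    			job.Labels = map[string]string{}
    		}
    		job.Labels[key] = value
    		_, err = c.BatchV1().Jobs(ns).Update(job)
    		return err
    	})
    }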
@@ -19,7 +19,6 @@ package job

import (
	"time"

	batchv1 "k8s.io/api/batch/v1"
	"k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -69,24 +68,6 @@ func WaitForJobFinish(c clientset.Interface, ns, jobName string) error {
	})
}
-
-// WaitForJobFailure uses c to wait for up to timeout for the Job named jobName in namespace ns to fail.
-func WaitForJobFailure(c clientset.Interface, ns, jobName string, timeout time.Duration, reason string) error {
-	return wait.Poll(framework.Poll, timeout, func() (bool, error) {
-		curr, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{})
-		if err != nil {
-			return false, err
-		}
-		for _, c := range curr.Status.Conditions {
-			if c.Type == batchv1.JobFailed && c.Status == v1.ConditionTrue {
-				if reason == "" || reason == c.Reason {
-					return true, nil
-				}
-			}
-		}
-		return false, nil
-	})
-}

// WaitForJobGone uses c to wait for up to timeout for the Job named jobName in namespace ns to be removed.
func WaitForJobGone(c clientset.Interface, ns, jobName string, timeout time.Duration) error {
	return wait.Poll(framework.Poll, timeout, func() (bool, error) {
@@ -109,15 +90,3 @@ func WaitForAllJobPodsGone(c clientset.Interface, ns, jobName string) error {
		return len(pods.Items) == 0, nil
	})
}
-
-// WaitForJobDeleting uses c to wait for the Job jobName in namespace ns to have
-// a non-nil deletionTimestamp (i.e. being deleted).
-func WaitForJobDeleting(c clientset.Interface, ns, jobName string) error {
-	return wait.PollImmediate(framework.Poll, JobTimeout, func() (bool, error) {
-		curr, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{})
-		if err != nil {
-			return false, err
-		}
-		return curr.ObjectMeta.DeletionTimestamp != nil, nil
-	})
-}
@@ -17,11 +17,14 @@ limitations under the License.
package node

import (
	"fmt"
	"time"

	batchv1 "k8s.io/api/batch/v1"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/pkg/util/slice"
	"k8s.io/kubernetes/test/e2e/framework"
	jobutil "k8s.io/kubernetes/test/e2e/framework/job"
@@ -29,7 +32,12 @@ import (
	"github.com/onsi/ginkgo"
)

-const dummyFinalizer = "k8s.io/dummy-finalizer"
+const (
+	dummyFinalizer = "k8s.io/dummy-finalizer"
+
+	// JobTimeout is how long to wait for a job to finish.
+	JobTimeout = 15 * time.Minute
+)

var _ = framework.KubeDescribe("[Feature:TTLAfterFinished][NodeAlphaFeature:TTLAfterFinished]", func() {
	f := framework.NewDefaultFramework("ttlafterfinished")
@@ -47,7 +55,7 @@ func cleanupJob(f *framework.Framework, job *batchv1.Job) {
	removeFinalizerFunc := func(j *batchv1.Job) {
		j.ObjectMeta.Finalizers = slice.RemoveString(j.ObjectMeta.Finalizers, dummyFinalizer, nil)
	}
-	_, err := jobutil.UpdateJobWithRetries(c, ns, job.Name, removeFinalizerFunc)
+	_, err := updateJobWithRetries(c, ns, job.Name, removeFinalizerFunc)
	framework.ExpectNoError(err)
	jobutil.WaitForJobGone(c, ns, job.Name, wait.ForeverTestTimeout)
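cleanupJob strips the dummy finalizer so the Job can actually leave the API server: an object with a pending finalizer is not purged on delete, only stamped with a deletionTimestamp until its finalizer list is emptied. A sketch of the add/remove round-trip (helper names are illustrative; slice.RemoveString and slice.ContainsString are the real k8s.io/kubernetes/pkg/util/slice functions, the former used by the diff itself):

    package finalizers

    import (
    	batchv1 "k8s.io/api/batch/v1"
    	"k8s.io/kubernetes/pkg/util/slice"
    )

    const dummyFinalizer = "k8s.io/dummy-finalizer"

    // addFinalizer blocks deletion of the Job until the finalizer is removed.
    func addFinalizer(j *batchv1.Job) {
    	if !slice.ContainsString(j.ObjectMeta.Finalizers, dummyFinalizer, nil) {
    		j.ObjectMeta.Finalizers = append(j.ObjectMeta.Finalizers, dummyFinalizer)
    	}
    }

    // removeFinalizer unblocks deletion; once the list is empty the
    // API server can purge the already-deleted object.
    func removeFinalizer(j *batchv1.Job) {
    	j.ObjectMeta.Finalizers = slice.RemoveString(j.ObjectMeta.Finalizers, dummyFinalizer, nil)
    }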
@@ -78,13 +86,13 @@ func testFinishedJob(f *framework.Framework) {
	framework.ExpectNoError(err)

	framework.Logf("Wait for TTL after finished controller to delete the Job")
-	err = jobutil.WaitForJobDeleting(c, ns, job.Name)
+	err = waitForJobDeleting(c, ns, job.Name)
	framework.ExpectNoError(err)

	framework.Logf("Check Job's deletionTimestamp and compare with the time when the Job finished")
	job, err = jobutil.GetJob(c, ns, job.Name)
	framework.ExpectNoError(err)
-	finishTime := jobutil.FinishTime(job)
+	finishTime := FinishTime(job)
	finishTimeUTC := finishTime.UTC()
	framework.ExpectNotEqual(finishTime.IsZero(), true)
@@ -94,3 +102,49 @@ func testFinishedJob(f *framework.Framework) {
	expireAtUTC := finishTimeUTC.Add(time.Duration(ttl) * time.Second)
	framework.ExpectEqual(deleteAtUTC.Before(expireAtUTC), false)
}
+
+// FinishTime returns finish time of the specified job.
+func FinishTime(finishedJob *batchv1.Job) metav1.Time {
+	var finishTime metav1.Time
+	for _, c := range finishedJob.Status.Conditions {
+		if (c.Type == batchv1.JobComplete || c.Type == batchv1.JobFailed) && c.Status == v1.ConditionTrue {
+			return c.LastTransitionTime
+		}
+	}
+	return finishTime
+}
+
+// updateJobWithRetries updates job with retries.
+func updateJobWithRetries(c clientset.Interface, namespace, name string, applyUpdate func(*batchv1.Job)) (job *batchv1.Job, err error) {
+	jobs := c.BatchV1().Jobs(namespace)
+	var updateErr error
+	pollErr := wait.PollImmediate(framework.Poll, JobTimeout, func() (bool, error) {
+		if job, err = jobs.Get(name, metav1.GetOptions{}); err != nil {
+			return false, err
+		}
+		// Apply the update, then attempt to push it to the apiserver.
+		applyUpdate(job)
+		if job, err = jobs.Update(job); err == nil {
+			framework.Logf("Updating job %s", name)
+			return true, nil
+		}
+		updateErr = err
+		return false, nil
+	})
+	if pollErr == wait.ErrWaitTimeout {
+		pollErr = fmt.Errorf("couldn't apply the provided update to job %q: %v", name, updateErr)
+	}
+	return job, pollErr
+}
+
+// waitForJobDeleting uses c to wait for the Job jobName in namespace ns to have
+// a non-nil deletionTimestamp (i.e. being deleted).
+func waitForJobDeleting(c clientset.Interface, ns, jobName string) error {
+	return wait.PollImmediate(framework.Poll, JobTimeout, func() (bool, error) {
+		curr, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{})
+		if err != nil {
+			return false, err
+		}
+		return curr.ObjectMeta.DeletionTimestamp != nil, nil
+	})
+}
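The assertion at the top of this hunk encodes the TTLAfterFinished contract: the controller must not delete the Job before finishTime + ttl seconds, so the stamped deletionTimestamp may never precede that expiry. A tiny standalone sketch of the same time arithmetic (all values illustrative):

    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	// Suppose the Job finished at 12:00:00 with ttlSecondsAfterFinished = 10.
    	finish := time.Date(2020, 1, 2, 12, 0, 0, 0, time.UTC)
    	ttl := int32(10)
    	expireAt := finish.Add(time.Duration(ttl) * time.Second) // 12:00:10

    	// The deletionTimestamp the controller stamps must not precede expireAt.
    	deleteAt := time.Date(2020, 1, 2, 12, 0, 11, 0, time.UTC)
    	fmt.Println(deleteAt.Before(expireAt)) // false, i.e. the contract holds
    }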