Merge pull request #87303 from k-toyoda-pi/modify_alias_e2ejob

Modify alias of e2e/framework/job to e2ejob

Commit: acf64f4501
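The change is purely mechanical: the package `k8s.io/kubernetes/test/e2e/framework/job` keeps its path, and only the local import alias used to refer to it moves from `jobutil` to `e2ejob`, so every call site is renamed with it. A minimal sketch of the pattern (illustration only, with a hypothetical package name — not code from the PR):

```go
// Illustration of the rename this PR applies across the e2e tests.
package e2esketch // hypothetical package name for this sketch

import (
	// Before this PR: jobutil "k8s.io/kubernetes/test/e2e/framework/job"
	e2ejob "k8s.io/kubernetes/test/e2e/framework/job"
)

// A helper referenced through the new alias; prior to this PR the same
// reference read jobutil.CreateJob.
var _ = e2ejob.CreateJob
```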
@@ -51,6 +51,7 @@
 "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1": "kubeletstatsv1alpha1",
 "k8s.io/kubernetes/pkg/proxy/apis/config/v1alpha1": "proxyconfigv1alpha1",
 "k8s.io/kubernetes/pkg/scheduler/apis/config/v1alpha1": "schedulerconfigv1alpha1",
+"k8s.io/kubernetes/test/e2e/framework/job": "e2ejob",
 "k8s.io/kubernetes/test/e2e/framework/kubectl": "e2ekubectl",
 "k8s.io/kubernetes/test/e2e/framework/node": "e2enode",
 "k8s.io/kubernetes/test/e2e/framework/pod": "e2epod",
@@ -34,7 +34,7 @@ import (
 batchinternal "k8s.io/kubernetes/pkg/apis/batch"
 "k8s.io/kubernetes/pkg/controller/job"
 "k8s.io/kubernetes/test/e2e/framework"
-jobutil "k8s.io/kubernetes/test/e2e/framework/job"
+e2ejob "k8s.io/kubernetes/test/e2e/framework/job"
 e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 imageutils "k8s.io/kubernetes/test/utils/image"
 )
@@ -213,7 +213,7 @@ var _ = SIGDescribe("CronJob", func() {
 framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(f.ClientSet, batchinternal.Kind("Job"), f.Namespace.Name, job.Name))

 ginkgo.By("Ensuring job was deleted")
-_, err = jobutil.GetJob(f.ClientSet, f.Namespace.Name, job.Name)
+_, err = e2ejob.GetJob(f.ClientSet, f.Namespace.Name, job.Name)
 framework.ExpectError(err)
 framework.ExpectEqual(apierrors.IsNotFound(err), true)

@@ -28,7 +28,7 @@ import (
 clientset "k8s.io/client-go/kubernetes"
 batchinternal "k8s.io/kubernetes/pkg/apis/batch"
 "k8s.io/kubernetes/test/e2e/framework"
-jobutil "k8s.io/kubernetes/test/e2e/framework/job"
+e2ejob "k8s.io/kubernetes/test/e2e/framework/job"
 e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"

@@ -45,16 +45,16 @@ var _ = SIGDescribe("Job", func() {
 // Simplest case: N pods succeed
 ginkgo.It("should run a job to completion when tasks succeed", func() {
 ginkgo.By("Creating a job")
-job := jobutil.NewTestJob("succeed", "all-succeed", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
-job, err := jobutil.CreateJob(f.ClientSet, f.Namespace.Name, job)
+job := e2ejob.NewTestJob("succeed", "all-succeed", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
+job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job)
 framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)

 ginkgo.By("Ensuring job reaches completions")
-err = jobutil.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, completions)
+err = e2ejob.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, completions)
 framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name)

 ginkgo.By("Ensuring pods for job exist")
-pods, err := jobutil.GetJobPods(f.ClientSet, f.Namespace.Name, job.Name)
+pods, err := e2ejob.GetJobPods(f.ClientSet, f.Namespace.Name, job.Name)
 framework.ExpectNoError(err, "failed to get pod list for job in namespace: %s", f.Namespace.Name)
 successes := int32(0)
 for _, pod := range pods.Items {
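The hunk above shows the typical helper flow the renamed alias exposes: build a test Job, create it, wait for completion, then inspect its Pods. Below is a condensed, self-contained sketch of that flow using only the calls visible in this diff; the package name, Describe/It strings, and the literal parallelism/completions values are hypothetical, not taken from the PR.

```go
package e2esketch // hypothetical package name for this sketch

import (
	"github.com/onsi/ginkgo"
	v1 "k8s.io/api/core/v1"

	"k8s.io/kubernetes/test/e2e/framework"
	e2ejob "k8s.io/kubernetes/test/e2e/framework/job" // alias pinned by this PR
)

var _ = ginkgo.Describe("[sketch] e2ejob helpers", func() {
	f := framework.NewDefaultFramework("e2ejob-sketch")

	ginkgo.It("runs a job to completion", func() {
		parallelism, completions, backoffLimit := int32(2), int32(4), int32(6)

		// Build and create a test Job through the e2ejob helpers, as the diff does.
		job := e2ejob.NewTestJob("succeed", "all-succeed", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
		job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job)
		framework.ExpectNoError(err)

		// Wait for the Job to reach its completion count, then list its Pods.
		err = e2ejob.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, completions)
		framework.ExpectNoError(err)

		pods, err := e2ejob.GetJobPods(f.ClientSet, f.Namespace.Name, job.Name)
		framework.ExpectNoError(err)
		framework.Logf("job %q produced %d pods", job.Name, len(pods.Items))
	})
})
```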
@@ -72,12 +72,12 @@ var _ = SIGDescribe("Job", func() {
 */
 ginkgo.It("should remove pods when job is deleted", func() {
 ginkgo.By("Creating a job")
-job := jobutil.NewTestJob("notTerminate", "all-pods-removed", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
-job, err := jobutil.CreateJob(f.ClientSet, f.Namespace.Name, job)
+job := e2ejob.NewTestJob("notTerminate", "all-pods-removed", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
+job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job)
 framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)

 ginkgo.By("Ensure pods equal to paralellism count is attached to the job")
-err = jobutil.WaitForAllJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, parallelism)
+err = e2ejob.WaitForAllJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, parallelism)
 framework.ExpectNoError(err, "failed to ensure number of pods associated with job %s is equal to parallelism count in namespace: %s", job.Name, f.Namespace.Name)

 ginkgo.By("Delete the job")
@@ -85,7 +85,7 @@ var _ = SIGDescribe("Job", func() {
 framework.ExpectNoError(err, "failed to delete the job in namespace: %s", f.Namespace.Name)

 ginkgo.By("Ensure the pods associated with the job are also deleted")
-err = jobutil.WaitForAllJobPodsGone(f.ClientSet, f.Namespace.Name, job.Name)
+err = e2ejob.WaitForAllJobPodsGone(f.ClientSet, f.Namespace.Name, job.Name)
 framework.ExpectNoError(err, "failed to get PodList for job %s in namespace: %s", job.Name, f.Namespace.Name)
 })

@@ -102,12 +102,12 @@ var _ = SIGDescribe("Job", func() {
 // throttle frequently failing containers in a given pod, ramping
 // up to 5 minutes between restarts, making test timeout due to
 // successive failures too likely with a reasonable test timeout.
-job := jobutil.NewTestJob("failOnce", "fail-once-local", v1.RestartPolicyOnFailure, parallelism, completions, nil, backoffLimit)
-job, err := jobutil.CreateJob(f.ClientSet, f.Namespace.Name, job)
+job := e2ejob.NewTestJob("failOnce", "fail-once-local", v1.RestartPolicyOnFailure, parallelism, completions, nil, backoffLimit)
+job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job)
 framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)

 ginkgo.By("Ensuring job reaches completions")
-err = jobutil.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, completions)
+err = e2ejob.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, completions)
 framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name)
 })

@@ -125,20 +125,20 @@ var _ = SIGDescribe("Job", func() {
 framework.ExpectNoError(err)

 ginkgo.By("Creating a job")
-job := jobutil.NewTestJobOnNode("failOnce", "fail-once-non-local", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit, node.Name)
-job, err = jobutil.CreateJob(f.ClientSet, f.Namespace.Name, job)
+job := e2ejob.NewTestJobOnNode("failOnce", "fail-once-non-local", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit, node.Name)
+job, err = e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job)
 framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)

 ginkgo.By("Ensuring job reaches completions")
-err = jobutil.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, *job.Spec.Completions)
+err = e2ejob.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, *job.Spec.Completions)
 framework.ExpectNoError(err, "failed to ensure job completion in namespace: %s", f.Namespace.Name)
 })

 ginkgo.It("should fail when exceeds active deadline", func() {
 ginkgo.By("Creating a job")
 var activeDeadlineSeconds int64 = 1
-job := jobutil.NewTestJob("notTerminate", "exceed-active-deadline", v1.RestartPolicyNever, parallelism, completions, &activeDeadlineSeconds, backoffLimit)
-job, err := jobutil.CreateJob(f.ClientSet, f.Namespace.Name, job)
+job := e2ejob.NewTestJob("notTerminate", "exceed-active-deadline", v1.RestartPolicyNever, parallelism, completions, &activeDeadlineSeconds, backoffLimit)
+job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job)
 framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
 ginkgo.By("Ensuring job past active deadline")
 err = waitForJobFailure(f.ClientSet, f.Namespace.Name, job.Name, time.Duration(activeDeadlineSeconds+10)*time.Second, "DeadlineExceeded")
@@ -152,19 +152,19 @@ var _ = SIGDescribe("Job", func() {
 */
 framework.ConformanceIt("should delete a job", func() {
 ginkgo.By("Creating a job")
-job := jobutil.NewTestJob("notTerminate", "foo", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
-job, err := jobutil.CreateJob(f.ClientSet, f.Namespace.Name, job)
+job := e2ejob.NewTestJob("notTerminate", "foo", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
+job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job)
 framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)

 ginkgo.By("Ensuring active pods == parallelism")
-err = jobutil.WaitForAllJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, parallelism)
+err = e2ejob.WaitForAllJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, parallelism)
 framework.ExpectNoError(err, "failed to ensure active pods == parallelism in namespace: %s", f.Namespace.Name)

 ginkgo.By("delete a job")
 framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(f.ClientSet, batchinternal.Kind("Job"), f.Namespace.Name, job.Name))

 ginkgo.By("Ensuring job was deleted")
-_, err = jobutil.GetJob(f.ClientSet, f.Namespace.Name, job.Name)
+_, err = e2ejob.GetJob(f.ClientSet, f.Namespace.Name, job.Name)
 framework.ExpectError(err, "failed to ensure job %s was deleted in namespace: %s", job.Name, f.Namespace.Name)
 framework.ExpectEqual(apierrors.IsNotFound(err), true)
 })
@@ -178,20 +178,20 @@ var _ = SIGDescribe("Job", func() {
 */
 framework.ConformanceIt("should adopt matching orphans and release non-matching pods", func() {
 ginkgo.By("Creating a job")
-job := jobutil.NewTestJob("notTerminate", "adopt-release", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
+job := e2ejob.NewTestJob("notTerminate", "adopt-release", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
 // Replace job with the one returned from Create() so it has the UID.
 // Save Kind since it won't be populated in the returned job.
 kind := job.Kind
-job, err := jobutil.CreateJob(f.ClientSet, f.Namespace.Name, job)
+job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job)
 framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
 job.Kind = kind

 ginkgo.By("Ensuring active pods == parallelism")
-err = jobutil.WaitForAllJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, parallelism)
+err = e2ejob.WaitForAllJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, parallelism)
 framework.ExpectNoError(err, "failed to ensure active pods == parallelism in namespace: %s", f.Namespace.Name)

 ginkgo.By("Orphaning one of the Job's Pods")
-pods, err := jobutil.GetJobPods(f.ClientSet, f.Namespace.Name, job.Name)
+pods, err := e2ejob.GetJobPods(f.ClientSet, f.Namespace.Name, job.Name)
 framework.ExpectNoError(err, "failed to get PodList for job %s in namespace: %s", job.Name, f.Namespace.Name)
 gomega.Expect(pods.Items).To(gomega.HaveLen(int(parallelism)))
 pod := pods.Items[0]
@@ -200,7 +200,7 @@ var _ = SIGDescribe("Job", func() {
 })

 ginkgo.By("Checking that the Job readopts the Pod")
-gomega.Expect(e2epod.WaitForPodCondition(f.ClientSet, pod.Namespace, pod.Name, "adopted", jobutil.JobTimeout,
+gomega.Expect(e2epod.WaitForPodCondition(f.ClientSet, pod.Namespace, pod.Name, "adopted", e2ejob.JobTimeout,
 func(pod *v1.Pod) (bool, error) {
 controllerRef := metav1.GetControllerOf(pod)
 if controllerRef == nil {
@@ -219,7 +219,7 @@ var _ = SIGDescribe("Job", func() {
 })

 ginkgo.By("Checking that the Job releases the Pod")
-gomega.Expect(e2epod.WaitForPodCondition(f.ClientSet, pod.Namespace, pod.Name, "released", jobutil.JobTimeout,
+gomega.Expect(e2epod.WaitForPodCondition(f.ClientSet, pod.Namespace, pod.Name, "released", e2ejob.JobTimeout,
 func(pod *v1.Pod) (bool, error) {
 controllerRef := metav1.GetControllerOf(pod)
 if controllerRef != nil {
@@ -233,16 +233,16 @@ var _ = SIGDescribe("Job", func() {
 ginkgo.It("should fail to exceed backoffLimit", func() {
 ginkgo.By("Creating a job")
 backoff := 1
-job := jobutil.NewTestJob("fail", "backofflimit", v1.RestartPolicyNever, 1, 1, nil, int32(backoff))
-job, err := jobutil.CreateJob(f.ClientSet, f.Namespace.Name, job)
+job := e2ejob.NewTestJob("fail", "backofflimit", v1.RestartPolicyNever, 1, 1, nil, int32(backoff))
+job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job)
 framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
 ginkgo.By("Ensuring job exceed backofflimit")

-err = waitForJobFailure(f.ClientSet, f.Namespace.Name, job.Name, jobutil.JobTimeout, "BackoffLimitExceeded")
+err = waitForJobFailure(f.ClientSet, f.Namespace.Name, job.Name, e2ejob.JobTimeout, "BackoffLimitExceeded")
 framework.ExpectNoError(err, "failed to ensure job exceed backofflimit in namespace: %s", f.Namespace.Name)

 ginkgo.By(fmt.Sprintf("Checking that %d pod created and status is failed", backoff+1))
-pods, err := jobutil.GetJobPods(f.ClientSet, f.Namespace.Name, job.Name)
+pods, err := e2ejob.GetJobPods(f.ClientSet, f.Namespace.Name, job.Name)
 framework.ExpectNoError(err, "failed to get PodList for job %s in namespace: %s", job.Name, f.Namespace.Name)
 // gomega.Expect(pods.Items).To(gomega.HaveLen(backoff + 1))
 // due to NumRequeus not being stable enough, especially with failed status
@@ -36,7 +36,7 @@ import (
 nodepkg "k8s.io/kubernetes/pkg/controller/nodelifecycle"
 "k8s.io/kubernetes/test/e2e/common"
 "k8s.io/kubernetes/test/e2e/framework"
-jobutil "k8s.io/kubernetes/test/e2e/framework/job"
+e2ejob "k8s.io/kubernetes/test/e2e/framework/job"
 e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
 e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
@@ -438,11 +438,11 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
 completions := int32(4)
 backoffLimit := int32(6) // default value

-job := jobutil.NewTestJob("notTerminate", "network-partition", v1.RestartPolicyNever,
+job := e2ejob.NewTestJob("notTerminate", "network-partition", v1.RestartPolicyNever,
 parallelism, completions, nil, backoffLimit)
-job, err := jobutil.CreateJob(f.ClientSet, f.Namespace.Name, job)
+job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job)
 framework.ExpectNoError(err)
-label := labels.SelectorFromSet(labels.Set(map[string]string{jobutil.JobSelectorKey: job.Name}))
+label := labels.SelectorFromSet(labels.Set(map[string]string{e2ejob.JobSelectorKey: job.Name}))

 ginkgo.By(fmt.Sprintf("verifying that there are now %v running pods", parallelism))
 _, err = e2epod.PodsCreatedByLabel(c, ns, job.Name, parallelism, label)
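Besides the helper functions, the alias also covers exported constants such as `e2ejob.JobSelectorKey`, used in the hunk above (and in `ensureAllJobPodsRunning` later in this diff) to build a label selector for a Job's Pods. A small sketch of that pattern, with a hypothetical helper name and package, built only from calls visible in this diff:

```go
package e2esketch // hypothetical package name for this sketch

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	clientset "k8s.io/client-go/kubernetes"

	e2ejob "k8s.io/kubernetes/test/e2e/framework/job"
)

// listJobPods (hypothetical) lists the Pods belonging to a test Job by the
// label the e2e helpers attach, keyed by e2ejob.JobSelectorKey.
func listJobPods(c clientset.Interface, ns, jobName string) (*v1.PodList, error) {
	label := labels.SelectorFromSet(labels.Set(map[string]string{e2ejob.JobSelectorKey: jobName}))
	options := metav1.ListOptions{LabelSelector: label.String()}
	return c.CoreV1().Pods(ns).List(options)
}
```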
@@ -21,7 +21,7 @@ import (
 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/kubernetes/test/e2e/framework"
-jobutil "k8s.io/kubernetes/test/e2e/framework/job"
+e2ejob "k8s.io/kubernetes/test/e2e/framework/job"
 e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"

 "github.com/onsi/ginkgo"
@@ -55,11 +55,11 @@ var _ = SIGDescribe("Metadata Concealment", func() {
 },
 },
 }
-job, err := jobutil.CreateJob(f.ClientSet, f.Namespace.Name, job)
+job, err := e2ejob.CreateJob(f.ClientSet, f.Namespace.Name, job)
 framework.ExpectNoError(err, "failed to create job (%s:%s)", f.Namespace.Name, job.Name)

 ginkgo.By("Ensuring job reaches completions")
-err = jobutil.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, int32(1))
+err = e2ejob.WaitForJobComplete(f.ClientSet, f.Namespace.Name, job.Name, int32(1))
 framework.ExpectNoError(err, "failed to ensure job completion (%s:%s)", f.Namespace.Name, job.Name)
 })
 })
@@ -70,7 +70,7 @@ import (
 "k8s.io/kubernetes/test/e2e/framework"
 "k8s.io/kubernetes/test/e2e/framework/auth"
 e2eendpoints "k8s.io/kubernetes/test/e2e/framework/endpoints"
-jobutil "k8s.io/kubernetes/test/e2e/framework/job"
+e2ejob "k8s.io/kubernetes/test/e2e/framework/job"
 e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
@@ -1964,7 +1964,7 @@ metadata:
 gomega.Expect(runOutput).To(gomega.ContainSubstring("abcd1234"))
 gomega.Expect(runOutput).To(gomega.ContainSubstring("stdin closed"))

-err := jobutil.WaitForJobGone(c, ns, jobName, wait.ForeverTestTimeout)
+err := e2ejob.WaitForJobGone(c, ns, jobName, wait.ForeverTestTimeout)
 framework.ExpectNoError(err)

 ginkgo.By("verifying the job " + jobName + " was deleted")
@@ -27,7 +27,7 @@ import (
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/pkg/util/slice"
 "k8s.io/kubernetes/test/e2e/framework"
-jobutil "k8s.io/kubernetes/test/e2e/framework/job"
+e2ejob "k8s.io/kubernetes/test/e2e/framework/job"

 "github.com/onsi/ginkgo"
 )
@@ -57,9 +57,9 @@ func cleanupJob(f *framework.Framework, job *batchv1.Job) {
 }
 _, err := updateJobWithRetries(c, ns, job.Name, removeFinalizerFunc)
 framework.ExpectNoError(err)
-jobutil.WaitForJobGone(c, ns, job.Name, wait.ForeverTestTimeout)
+e2ejob.WaitForJobGone(c, ns, job.Name, wait.ForeverTestTimeout)

-err = jobutil.WaitForAllJobPodsGone(c, ns, job.Name)
+err = e2ejob.WaitForAllJobPodsGone(c, ns, job.Name)
 framework.ExpectNoError(err)
 }

@@ -72,17 +72,17 @@ func testFinishedJob(f *framework.Framework) {
 backoffLimit := int32(2)
 ttl := int32(10)

-job := jobutil.NewTestJob("randomlySucceedOrFail", "rand-non-local", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
+job := e2ejob.NewTestJob("randomlySucceedOrFail", "rand-non-local", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
 job.Spec.TTLSecondsAfterFinished = &ttl
 job.ObjectMeta.Finalizers = []string{dummyFinalizer}
 defer cleanupJob(f, job)

 framework.Logf("Create a Job %s/%s with TTL", ns, job.Name)
-job, err := jobutil.CreateJob(c, ns, job)
+job, err := e2ejob.CreateJob(c, ns, job)
 framework.ExpectNoError(err)

 framework.Logf("Wait for the Job to finish")
-err = jobutil.WaitForJobFinish(c, ns, job.Name)
+err = e2ejob.WaitForJobFinish(c, ns, job.Name)
 framework.ExpectNoError(err)

 framework.Logf("Wait for TTL after finished controller to delete the Job")
@@ -90,7 +90,7 @@ func testFinishedJob(f *framework.Framework) {
 framework.ExpectNoError(err)

 framework.Logf("Check Job's deletionTimestamp and compare with the time when the Job finished")
-job, err = jobutil.GetJob(c, ns, job.Name)
+job, err = e2ejob.GetJob(c, ns, job.Name)
 framework.ExpectNoError(err)
 finishTime := FinishTime(job)
 finishTimeUTC := finishTime.UTC()
@@ -28,7 +28,7 @@ import (
 extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
 "k8s.io/kubernetes/test/e2e/framework"
 "k8s.io/kubernetes/test/e2e/framework/gpu"
-jobutil "k8s.io/kubernetes/test/e2e/framework/job"
+e2ejob "k8s.io/kubernetes/test/e2e/framework/job"
 e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 "k8s.io/kubernetes/test/e2e/framework/providers/gce"
@@ -224,11 +224,11 @@ func testNvidiaGPUsJob(f *framework.Framework) {
 ginkgo.By("Starting GPU job")
 StartJob(f, completions)

-job, err := jobutil.GetJob(f.ClientSet, f.Namespace.Name, "cuda-add")
+job, err := e2ejob.GetJob(f.ClientSet, f.Namespace.Name, "cuda-add")
 framework.ExpectNoError(err)

 // make sure job is running by waiting for its first pod to start running
-err = jobutil.WaitForAllJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, 1)
+err = e2ejob.WaitForAllJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, 1)
 framework.ExpectNoError(err)

 numNodes, err := e2enode.TotalRegistered(f.ClientSet)
@@ -242,7 +242,7 @@ func testNvidiaGPUsJob(f *framework.Framework) {
 ginkgo.By("Done recreating nodes")

 ginkgo.By("Waiting for gpu job to finish")
-err = jobutil.WaitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name)
+err = e2ejob.WaitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name)
 framework.ExpectNoError(err)
 ginkgo.By("Done with gpu job")

@@ -254,7 +254,7 @@ func testNvidiaGPUsJob(f *framework.Framework) {
 // StartJob starts a simple CUDA job that requests gpu and the specified number of completions
 func StartJob(f *framework.Framework, completions int32) {
 var activeSeconds int64 = 3600
-testJob := jobutil.NewTestJob("succeed", "cuda-add", v1.RestartPolicyAlways, 1, completions, &activeSeconds, 6)
+testJob := e2ejob.NewTestJob("succeed", "cuda-add", v1.RestartPolicyAlways, 1, completions, &activeSeconds, 6)
 testJob.Spec.Template.Spec = v1.PodSpec{
 RestartPolicy: v1.RestartPolicyOnFailure,
 Containers: []v1.Container{
@@ -271,7 +271,7 @@ func StartJob(f *framework.Framework, completions int32) {
 },
 }
 ns := f.Namespace.Name
-_, err := jobutil.CreateJob(f.ClientSet, ns, testJob)
+_, err := e2ejob.CreateJob(f.ClientSet, ns, testJob)
 framework.ExpectNoError(err)
 framework.Logf("Created job %v", testJob)
 }
@@ -279,7 +279,7 @@ func StartJob(f *framework.Framework, completions int32) {
 // VerifyJobNCompletions verifies that the job has completions number of successful pods
 func VerifyJobNCompletions(f *framework.Framework, completions int32) {
 ns := f.Namespace.Name
-pods, err := jobutil.GetJobPods(f.ClientSet, f.Namespace.Name, "cuda-add")
+pods, err := e2ejob.GetJobPods(f.ClientSet, f.Namespace.Name, "cuda-add")
 framework.ExpectNoError(err)
 createdPods := pods.Items
 createdPodNames := podNames(createdPods)
@@ -26,7 +26,7 @@ import (
 "k8s.io/apimachinery/pkg/labels"
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/test/e2e/framework"
-jobutil "k8s.io/kubernetes/test/e2e/framework/job"
+e2ejob "k8s.io/kubernetes/test/e2e/framework/job"
 "k8s.io/kubernetes/test/e2e/upgrades"

 "github.com/onsi/ginkgo"
@@ -46,13 +46,13 @@ func (t *JobUpgradeTest) Setup(f *framework.Framework) {
 t.namespace = f.Namespace.Name

 ginkgo.By("Creating a job")
-t.job = jobutil.NewTestJob("notTerminate", "foo", v1.RestartPolicyOnFailure, 2, 2, nil, 6)
-job, err := jobutil.CreateJob(f.ClientSet, t.namespace, t.job)
+t.job = e2ejob.NewTestJob("notTerminate", "foo", v1.RestartPolicyOnFailure, 2, 2, nil, 6)
+job, err := e2ejob.CreateJob(f.ClientSet, t.namespace, t.job)
 t.job = job
 framework.ExpectNoError(err)

 ginkgo.By("Ensuring active pods == parallelism")
-err = jobutil.WaitForAllJobPodsRunning(f.ClientSet, t.namespace, job.Name, 2)
+err = e2ejob.WaitForAllJobPodsRunning(f.ClientSet, t.namespace, job.Name, 2)
 framework.ExpectNoError(err)
 }

@@ -73,7 +73,7 @@ func (t *JobUpgradeTest) Teardown(f *framework.Framework) {
 // is running, returning an error if the expected parallelism is not
 // satisfied.
 func ensureAllJobPodsRunning(c clientset.Interface, ns, jobName string, parallelism int32) error {
-label := labels.SelectorFromSet(labels.Set(map[string]string{jobutil.JobSelectorKey: jobName}))
+label := labels.SelectorFromSet(labels.Set(map[string]string{e2ejob.JobSelectorKey: jobName}))
 options := metav1.ListOptions{LabelSelector: label.String()}
 pods, err := c.CoreV1().Pods(ns).List(options)
 if err != nil {
@@ -18,7 +18,7 @@ package upgrades

 import (
 "k8s.io/kubernetes/test/e2e/framework"
-jobutil "k8s.io/kubernetes/test/e2e/framework/job"
+e2ejob "k8s.io/kubernetes/test/e2e/framework/job"
 "k8s.io/kubernetes/test/e2e/scheduling"

 "github.com/onsi/ginkgo"
@@ -51,7 +51,7 @@ func (t *NvidiaGPUUpgradeTest) Test(f *framework.Framework, done <-chan struct{}
 scheduling.VerifyJobNCompletions(f, completions)
 if upgrade == MasterUpgrade || upgrade == ClusterUpgrade {
 // MasterUpgrade should be totally hitless.
-job, err := jobutil.GetJob(f.ClientSet, f.Namespace.Name, "cuda-add")
+job, err := e2ejob.GetJob(f.ClientSet, f.Namespace.Name, "cuda-add")
 framework.ExpectNoError(err)
 framework.ExpectEqual(job.Status.Failed, 0, "Job pods failed during master upgrade: %v", job.Status.Failed)
 }