e2e test for checking ready of job status

parent ba7feccfa3
commit a913abe191
@@ -50,6 +50,7 @@ import (
 	"k8s.io/kubernetes/test/e2e/scheduling"
 	admissionapi "k8s.io/pod-security-admission/api"
 	"k8s.io/utils/pointer"
+	"k8s.io/utils/ptr"

 	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
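The import block above pulls in k8s.io/utils/ptr alongside the older k8s.io/utils/pointer package. As a quick reference (not part of the diff), here is a minimal sketch of the generic helpers the new test and wait helper rely on, assuming the current k8s.io/utils/ptr API:

package main

import (
	"fmt"

	"k8s.io/utils/ptr"
)

func main() {
	suspend := ptr.To[bool](true) // *bool pointing at true
	ready := ptr.To[int32](0)     // *int32 pointing at 0

	// Equal reports true when both pointers are nil, or when both are
	// non-nil and the pointed-to values match.
	fmt.Println(ptr.Equal(ready, ptr.To[int32](0))) // true
	fmt.Println(ptr.Equal[int32](ready, nil))       // false

	// Deref unwraps a pointer, falling back to the given default when nil.
	fmt.Println(ptr.Deref(suspend, false)) // true
}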
@@ -817,8 +818,45 @@ var _ = SIGDescribe("Job", func() {
 		gomega.Expect(jobs.Items).To(gomega.BeEmpty(), "Found job %v", jobName)
 	})

+	ginkgo.It("should update the status ready field", func(ctx context.Context) {
+		ginkgo.By("Creating a job with suspend=true")
+		job := e2ejob.NewTestJob("notTerminate", "all-ready", v1.RestartPolicyNever, parallelism, completions, nil, backoffLimit)
+		job.Spec.Suspend = ptr.To[bool](true)
+		job, err := e2ejob.CreateJob(ctx, f.ClientSet, f.Namespace.Name, job)
+		framework.ExpectNoError(err, "failed to create job in namespace: %s", f.Namespace.Name)
+
+		ginkgo.By("Ensure the job controller updates the status.ready field")
+		err = e2ejob.WaitForJobReady(ctx, f.ClientSet, f.Namespace.Name, job.Name, ptr.To[int32](0))
+		framework.ExpectNoError(err, "failed to ensure job status ready field in namespace: %s", f.Namespace.Name)
+
+		ginkgo.By("Updating the job with suspend=false")
+		err = updateJobSuspendWithRetries(ctx, f, job, ptr.To[bool](false))
+		framework.ExpectNoError(err, "failed to update job in namespace: %s", f.Namespace.Name)
+
+		ginkgo.By("Ensure the job controller updates the status.ready field")
+		err = e2ejob.WaitForJobReady(ctx, f.ClientSet, f.Namespace.Name, job.Name, &parallelism)
+		framework.ExpectNoError(err, "failed to ensure job status ready field in namespace: %s", f.Namespace.Name)
+
+		ginkgo.By("Updating the job with suspend=true")
+		err = updateJobSuspendWithRetries(ctx, f, job, ptr.To[bool](true))
+		framework.ExpectNoError(err, "failed to update job in namespace: %s", f.Namespace.Name)
+
+		ginkgo.By("Ensure the job controller updates the status.ready field")
+		err = e2ejob.WaitForJobReady(ctx, f.ClientSet, f.Namespace.Name, job.Name, ptr.To[int32](0))
+		framework.ExpectNoError(err, "failed to ensure job status ready field in namespace: %s", f.Namespace.Name)
+	})
 })

+func updateJobSuspendWithRetries(ctx context.Context, f *framework.Framework, job *batchv1.Job, suspend *bool) error {
+	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
+		job, err := e2ejob.GetJob(ctx, f.ClientSet, f.Namespace.Name, job.Name)
+		framework.ExpectNoError(err, "unable to get job %s in namespace %s", job.Name, f.Namespace.Name)
+		job.Spec.Suspend = suspend
+		_, err = e2ejob.UpdateJob(ctx, f.ClientSet, f.Namespace.Name, job)
+		return err
+	})
+}
+
 // waitForJobEvent is used to track and log Job events.
 // As delivery of events is not actually guaranteed we
 // will not return an error if we miss the required event.
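updateJobSuspendWithRetries wraps the read-modify-write in retry.RetryOnConflict because the job controller mutates the same object concurrently, so an Update built on a stale resourceVersion can fail with a 409 Conflict. A rough standalone sketch of the same pattern against a bare clientset (the function and package names are illustrative, not part of the commit):

package jobutil

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/util/retry"
	"k8s.io/utils/ptr"
)

// setJobSuspend flips .spec.suspend on a live Job. The object is re-read on
// every attempt so that a conflict caused by a stale resourceVersion only
// costs one retry instead of failing the whole update.
func setJobSuspend(ctx context.Context, c clientset.Interface, ns, name string, suspend bool) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		job, err := c.BatchV1().Jobs(ns).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		job.Spec.Suspend = ptr.To[bool](suspend)
		_, err = c.BatchV1().Jobs(ns).Update(ctx, job, metav1.UpdateOptions{})
		return err
	})
}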
@@ -18,6 +18,7 @@ package job

 import (
 	"context"
+	"fmt"
 	"time"

 	batchv1 "k8s.io/api/batch/v1"
@@ -27,8 +28,17 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
+	"k8s.io/kubernetes/test/utils/format"
+	"k8s.io/utils/ptr"
 )

+// JobState is used to verify if Job matches a particular condition.
+// If it matches, an empty string is returned.
+// Otherwise, the string explains why the condition is not matched.
+// This should be a short string. A dump of the job object will
+// get added by the caller.
+type JobState func(job *batchv1.Job) string
+
 // WaitForJobPodsRunning wait for all pods for the Job named JobName in namespace ns to become Running. Only use
 // when pods will run for a long time, or it will be racy.
 func WaitForJobPodsRunning(ctx context.Context, c clientset.Interface, ns, jobName string, expectedCount int32) error {
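The JobState contract above is: return an empty string when the condition matched, otherwise a short reason that the caller logs together with a dump of the Job. A hypothetical condition written against that contract, suitable for passing to the WaitForJobState helper added later in this commit (illustration only):

package jobutil

import (
	batchv1 "k8s.io/api/batch/v1"
)

// jobIsSuspended is a JobState-style condition: it returns "" once
// .spec.suspend is set to true, and a short explanation otherwise.
func jobIsSuspended(job *batchv1.Job) string {
	if job.Spec.Suspend != nil && *job.Spec.Suspend {
		return ""
	}
	return "job is not suspended"
}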
@@ -68,6 +78,16 @@ func WaitForJobComplete(ctx context.Context, c clientset.Interface, ns, jobName
 	})
 }

+// WaitForJobReady waits for particular value of the Job .status.ready field
+func WaitForJobReady(ctx context.Context, c clientset.Interface, ns, jobName string, ready *int32) error {
+	return WaitForJobState(ctx, c, ns, jobName, JobTimeout, func(job *batchv1.Job) string {
+		if ptr.Equal(ready, job.Status.Ready) {
+			return ""
+		}
+		return "job does not match intended ready status"
+	})
+}
+
 // WaitForJobFailed uses c to wait for the Job jobName in namespace ns to fail
 func WaitForJobFailed(c clientset.Interface, ns, jobName string) error {
 	return wait.PollImmediate(framework.Poll, JobTimeout, func() (bool, error) {
@@ -133,3 +153,20 @@ func WaitForAllJobPodsGone(ctx context.Context, c clientset.Interface, ns, jobNa
 		return len(pods.Items) == 0, nil
 	})
 }
+
+// WaitForJobState waits for a job to be matched to the given condition.
+// The condition callback may use gomega.StopTrying to abort early.
+func WaitForJobState(ctx context.Context, c clientset.Interface, ns, jobName string, timeout time.Duration, state JobState) error {
+	return framework.Gomega().
+		Eventually(ctx, framework.RetryNotFound(framework.GetObject(c.BatchV1().Jobs(ns).Get, jobName, metav1.GetOptions{}))).
+		WithTimeout(timeout).
+		Should(framework.MakeMatcher(func(job *batchv1.Job) (func() string, error) {
+			matches := state(job)
+			if matches == "" {
+				return nil, nil
+			}
+			return func() string {
+				return fmt.Sprintf("%v\n%s", matches, format.Object(job, 1))
+			}, nil
+		}))
+}
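The comment on WaitForJobState notes that a condition callback may call gomega.StopTrying to abort polling early. A hedged sketch of such a condition (names are illustrative, not part of the commit): it matches once the Job reports at least one ready pod, and gives up immediately if the Job has already failed.

package jobutil

import (
	"github.com/onsi/gomega"

	batchv1 "k8s.io/api/batch/v1"
	v1 "k8s.io/api/core/v1"
	"k8s.io/utils/ptr"
)

// readyOrBail matches once the Job reports at least one ready pod. If the
// Job has already failed there is no point in polling further, so the
// condition aborts the surrounding Eventually via gomega.StopTrying.
func readyOrBail(job *batchv1.Job) string {
	for _, c := range job.Status.Conditions {
		if c.Type == batchv1.JobFailed && c.Status == v1.ConditionTrue {
			gomega.StopTrying("job failed before any pod became ready").Now()
		}
	}
	if ptr.Deref(job.Status.Ready, 0) > 0 {
		return ""
	}
	return "job has no ready pods yet"
}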