Merge pull request #122643 from soltysh/never_terminate

Add a new neverTerminate job behavior just for upgrade

Commit 4142dda1e9
test/e2e/framework/job/fixtures.go
@@ -92,6 +92,12 @@ func NewTestJobOnNode(behavior, name string, rPol v1.RestartPolicy, parallelism,
 		}
 	}
 	switch behavior {
+	case "neverTerminate":
+		// this job is used by an upgrade test, see test/e2e/upgrades/apps/job.go;
+		// it should never be optimized, as it always has to restart during an upgrade
+		// and continue running
+		job.Spec.Template.Spec.Containers[0].Command = []string{"sleep", "1000000"}
+		job.Spec.Template.Spec.TerminationGracePeriodSeconds = ptr.To(int64(1))
 	case "notTerminate":
 		job.Spec.Template.Spec.Containers[0].Image = imageutils.GetPauseImageName()
 	case "fail":
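For orientation, here is a minimal standalone sketch of the Job that the new "neverTerminate" behavior effectively produces: the pod sleeps practically forever, and the 1-second grace period keeps pod deletions during the upgrade fast. The function name, container name, and busybox image are illustrative assumptions, not the framework's actual fixture code.

package sketch

import (
	batchv1 "k8s.io/api/batch/v1"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/utils/ptr"
)

// neverTerminateJob approximates what the fixture builds for this behavior.
func neverTerminateJob(name string, parallelism, completions int32) *batchv1.Job {
	return &batchv1.Job{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec: batchv1.JobSpec{
			Parallelism: ptr.To(parallelism),
			Completions: ptr.To(completions),
			Template: v1.PodTemplateSpec{
				Spec: v1.PodSpec{
					RestartPolicy: v1.RestartPolicyOnFailure,
					// a short grace period so upgrade-time pod deletions do not stall
					TerminationGracePeriodSeconds: ptr.To(int64(1)),
					Containers: []v1.Container{{
						Name:    "c",
						Image:   "busybox", // illustrative; the real fixture picks its own image
						Command: []string{"sleep", "1000000"},
					}},
				},
			},
		},
	}
}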
test/e2e/upgrades/apps/job.go
@@ -47,7 +47,7 @@ func (t *JobUpgradeTest) Setup(ctx context.Context, f *framework.Framework) {
 	t.namespace = f.Namespace.Name
 
 	ginkgo.By("Creating a job")
-	t.job = e2ejob.NewTestJob("notTerminate", "foo", v1.RestartPolicyOnFailure, 2, 2, nil, 6)
+	t.job = e2ejob.NewTestJob("neverTerminate", "foo", v1.RestartPolicyOnFailure, 2, 2, nil, 6)
 	job, err := e2ejob.CreateJob(ctx, f.ClientSet, t.namespace, t.job)
 	t.job = job
 	framework.ExpectNoError(err)
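The fixture call's positional arguments, annotated for readability; the first five parameter names come from the NewTestJobOnNode signature in the hunk above, while the last two (activeDeadlineSeconds, backoffLimit) are assumptions inferred from the values passed:

t.job = e2ejob.NewTestJob(
	"neverTerminate",          // behavior
	"foo",                     // name
	v1.RestartPolicyOnFailure, // rPol
	2,                         // parallelism
	2,                         // completions
	nil,                       // activeDeadlineSeconds (assumed)
	6,                         // backoffLimit (assumed)
)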
@@ -60,8 +60,11 @@ func (t *JobUpgradeTest) Setup(ctx context.Context, f *framework.Framework) {
 
 // Test verifies that the Job's Pods are running after an upgrade
 func (t *JobUpgradeTest) Test(ctx context.Context, f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) {
 	<-done
+	ginkgo.By("Ensuring job is running")
+	err := ensureJobRunning(ctx, f.ClientSet, t.namespace, t.job.Name)
+	framework.ExpectNoError(err)
 	ginkgo.By("Ensuring active pods == parallelism")
-	err := ensureAllJobPodsRunning(ctx, f.ClientSet, t.namespace, t.job.Name, 2)
+	err = ensureAllJobPodsRunning(ctx, f.ClientSet, t.namespace, t.job.Name, 2)
 	framework.ExpectNoError(err)
 }
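The <-done receive at the top of Test is what defers the verification until the upgrade finishes: the harness closes the channel once the upgrade completes, unblocking every waiting test. A minimal runnable sketch of that gating pattern (all names hypothetical, not the framework's harness code):

package main

import "fmt"

// runAfter blocks until done is closed, then runs the check, mirroring
// the <-done gate at the top of Test.
func runAfter(done <-chan struct{}, check func() error) error {
	<-done
	return check()
}

func main() {
	done := make(chan struct{})
	go close(done) // stands in for the upgrade finishing
	err := runAfter(done, func() error {
		fmt.Println("verifying the job after the upgrade")
		return nil
	})
	fmt.Println("check error:", err)
}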
@@ -70,7 +73,7 @@ func (t *JobUpgradeTest) Teardown(ctx context.Context, f *framework.Framework) {
 	// rely on the namespace deletion to clean up everything
 }
 
-// ensureAllJobPodsRunning uses c to check in the Job named jobName in ns
+// ensureAllJobPodsRunning uses c to check if the Job named jobName in ns
 // is running, returning an error if the expected parallelism is not
 // satisfied.
 func ensureAllJobPodsRunning(ctx context.Context, c clientset.Interface, ns, jobName string, parallelism int32) error {
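The body of ensureAllJobPodsRunning sits outside this hunk. Here is a plausible sketch of such a check, assuming the Job's pods are found via the job-name label and counted by phase; the helper name and the label selector are assumptions, not the test's exact code:

package sketch

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// countRunningJobPods lists the Job's pods and errors out unless exactly
// `parallelism` of them are in the Running phase.
func countRunningJobPods(ctx context.Context, c clientset.Interface, ns, jobName string, parallelism int32) error {
	pods, err := c.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{
		LabelSelector: "job-name=" + jobName, // assumed label
	})
	if err != nil {
		return err
	}
	var running int32
	for _, p := range pods.Items {
		if p.Status.Phase == v1.PodRunning {
			running++
		}
	}
	if running != parallelism {
		return fmt.Errorf("expected %d running pods for job %q, found %d", parallelism, jobName, running)
	}
	return nil
}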
@@ -93,3 +96,19 @@ func ensureAllJobPodsRunning(ctx context.Context, c clientset.Interface, ns, jobName string, parallelism int32) error {
 	}
 	return nil
 }
+
+// ensureJobRunning uses c to check if the Job named jobName in ns is running
+// (not completed, nor failed, nor suspended), returning an error if it can't
+// read the job or when it's not running
+func ensureJobRunning(ctx context.Context, c clientset.Interface, ns, jobName string) error {
+	job, err := e2ejob.GetJob(ctx, c, ns, jobName)
+	if err != nil {
+		return err
+	}
+	for _, c := range job.Status.Conditions {
+		if (c.Type == batchv1.JobComplete || c.Type == batchv1.JobFailed || c.Type == batchv1.JobSuspended) && c.Status == v1.ConditionTrue {
+			return fmt.Errorf("job is not running %#v", job)
+		}
+	}
+	return nil
+}
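The new helper treats a Job as running as long as none of the terminal or pausing conditions (Complete, Failed, Suspended) is True. A runnable sketch of that condition logic applied to an in-memory Job, rather than one fetched from the API:

package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	v1 "k8s.io/api/core/v1"
)

// isRunning reproduces the condition check from ensureJobRunning.
func isRunning(job *batchv1.Job) bool {
	for _, c := range job.Status.Conditions {
		if (c.Type == batchv1.JobComplete || c.Type == batchv1.JobFailed || c.Type == batchv1.JobSuspended) && c.Status == v1.ConditionTrue {
			return false
		}
	}
	return true
}

func main() {
	job := &batchv1.Job{}
	fmt.Println(isRunning(job)) // true: no terminal condition yet

	job.Status.Conditions = append(job.Status.Conditions, batchv1.JobCondition{
		Type:   batchv1.JobComplete,
		Status: v1.ConditionTrue,
	})
	fmt.Println(isRunning(job)) // false: the job completed
}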