Implements an upgrade test for Job. Common Job functionality is refactored into the framework package to allow code sharing between the e2e and upgrade tests.
@@ -34,6 +34,7 @@ var upgradeTests = []upgrades.Test{
 	&upgrades.SecretUpgradeTest{},
 	&upgrades.StatefulSetUpgradeTest{},
 	&upgrades.DeploymentUpgradeTest{},
+	&upgrades.JobUpgradeTest{},
 	&upgrades.ConfigMapUpgradeTest{},
 	&upgrades.HPAUpgradeTest{},
 	&upgrades.PersistentVolumeUpgradeTest{},
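Each entry in this list implements the upgrades.Test interface. Judging from the Setup/Test/Teardown methods that JobUpgradeTest defines later in this commit, the interface presumably looks like the following sketch; the real definition lives elsewhere in test/e2e/upgrades and is not part of this diff:

// Hypothetical reconstruction of the upgrades.Test interface, inferred
// from the JobUpgradeTest methods added below; not part of this diff.
type Test interface {
	// Setup creates the resources the test will verify across the upgrade.
	Setup(f *framework.Framework)
	// Test runs while the upgrade proceeds; done is closed when it finishes.
	Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType)
	// Teardown cleans up anything Setup created.
	Teardown(f *framework.Framework)
}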
@@ -89,7 +89,8 @@ var _ = framework.KubeDescribe("CronJob", func() {
 		By("Creating a suspended cronjob")
 		cronJob := newTestCronJob("suspended", "*/1 * * * ?", batch.AllowConcurrent,
 			sleepCommand, nil)
-		cronJob.Spec.Suspend = newBool(true)
+		t := true
+		cronJob.Spec.Suspend = &t
 		cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
 		Expect(err).NotTo(HaveOccurred())
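Suspend is a *bool rather than a bool so that "unset" can be distinguished from an explicit "false". The package-local newBool helper is deleted from test/e2e/job.go by this commit (an unexported copy now lives in the framework package, out of reach of the e2e package), so the CronJob test inlines the pointer construction instead. An equivalent generic helper would look like this; the name is illustrative, not part of the commit:

// boolPtr returns a pointer to a copy of b, for populating optional
// (pointer-typed) API fields; equivalent to the removed newBool helper.
func boolPtr(b bool) *bool { return &b }

// usage: cronJob.Spec.Suspend = boolPtr(true)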
@@ -220,7 +221,7 @@ var _ = framework.KubeDescribe("CronJob", func() {
 		Expect(err).NotTo(HaveOccurred())

 		By("Ensuring job was deleted")
-		_, err = getJob(f.ClientSet, f.Namespace.Name, job.Name)
+		_, err = framework.GetJob(f.ClientSet, f.Namespace.Name, job.Name)
 		Expect(err).To(HaveOccurred())
 		Expect(errors.IsNotFound(err)).To(BeTrue())
@@ -18,6 +18,7 @@ go_library(
         "framework.go",
         "get-kubemark-resource-usage.go",
         "google_compute.go",
+        "jobs_util.go",
         "kubelet_stats.go",
         "log_size_monitoring.go",
         "metrics_util.go",

test/e2e/framework/jobs_util.go (new file, 198 lines)
@@ -0,0 +1,198 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package framework

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/kubernetes/pkg/api/v1"
	batch "k8s.io/kubernetes/pkg/apis/batch/v1"
	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
)

const (
	// How long to wait for a job to finish.
	JobTimeout = 15 * time.Minute

	// Job selector name
	JobSelectorKey = "job"
)
// NewTestJob returns a Job which does one of several testing behaviors. notTerminate starts a Job that will run
// effectively forever. fail starts a Job that will fail immediately. succeed starts a Job that will succeed
// immediately. randomlySucceedOrFail starts a Job that will succeed or fail randomly. failOnce fails the Job the
// first time it is run and succeeds subsequently. name is the Name of the Job. rPol is the RestartPolicy of the
// Pod's containers. parallelism is the Job's parallelism, and completions is the Job's required number of
// completions.
func NewTestJob(behavior, name string, rPol v1.RestartPolicy, parallelism, completions int32) *batch.Job {
	job := &batch.Job{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
		},
		Spec: batch.JobSpec{
			Parallelism:    &parallelism,
			Completions:    &completions,
			ManualSelector: newBool(false),
			Template: v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{JobSelectorKey: name},
				},
				Spec: v1.PodSpec{
					RestartPolicy: rPol,
					Volumes: []v1.Volume{
						{
							Name: "data",
							VolumeSource: v1.VolumeSource{
								EmptyDir: &v1.EmptyDirVolumeSource{},
							},
						},
					},
					Containers: []v1.Container{
						{
							Name:    "c",
							Image:   "gcr.io/google_containers/busybox:1.24",
							Command: []string{},
							VolumeMounts: []v1.VolumeMount{
								{
									MountPath: "/data",
									Name:      "data",
								},
							},
						},
					},
				},
			},
		},
	}
	switch behavior {
	case "notTerminate":
		job.Spec.Template.Spec.Containers[0].Command = []string{"sleep", "1000000"}
	case "fail":
		job.Spec.Template.Spec.Containers[0].Command = []string{"/bin/sh", "-c", "exit 1"}
	case "succeed":
		job.Spec.Template.Spec.Containers[0].Command = []string{"/bin/sh", "-c", "exit 0"}
	case "randomlySucceedOrFail":
		// Bash's $RANDOM generates pseudorandom int in range 0 - 32767.
		// Dividing by 16384 gives roughly 50/50 chance of success.
		job.Spec.Template.Spec.Containers[0].Command = []string{"/bin/sh", "-c", "exit $(( $RANDOM / 16384 ))"}
	case "failOnce":
		// Fail the first time the container of the pod is run, and
		// succeed the second time. Checks for a file on the emptyDir.
		// If present, succeed.  If not, create the file but fail.
		// Note that this cannot be used with RestartPolicyNever because
		// it always fails the first time for a pod.
		job.Spec.Template.Spec.Containers[0].Command = []string{"/bin/sh", "-c", "if [[ -r /data/foo ]] ; then exit 0 ; else touch /data/foo ; exit 1 ; fi"}
	}
	return job
}
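Taken together with the helpers below, a typical call sequence for this constructor, mirroring the e2e tests later in this commit, looks like:

// Sketch of typical usage, mirroring the e2e tests in this commit:
// build a Job with a given behavior, create it, and wait for it.
job := framework.NewTestJob("succeed", "all-succeed", v1.RestartPolicyNever, 2, 4)
job, err := framework.CreateJob(f.ClientSet, f.Namespace.Name, job)
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, 4)
Expect(err).NotTo(HaveOccurred())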
// GetJob uses c to get the Job in namespace ns named name. If the returned error is nil, the returned Job is valid.
func GetJob(c clientset.Interface, ns, name string) (*batch.Job, error) {
	return c.Batch().Jobs(ns).Get(name, metav1.GetOptions{})
}

// CreateJob uses c to create job in namespace ns. If the returned error is nil, the returned Job is valid and has
// been created.
func CreateJob(c clientset.Interface, ns string, job *batch.Job) (*batch.Job, error) {
	return c.Batch().Jobs(ns).Create(job)
}

// UpdateJob uses c to update job in namespace ns. If the returned error is nil, the returned Job is valid and has
// been updated.
func UpdateJob(c clientset.Interface, ns string, job *batch.Job) (*batch.Job, error) {
	return c.Batch().Jobs(ns).Update(job)
}

// DeleteJob uses c to delete the Job named name in namespace ns. If the returned error is nil, the Job has been
// deleted.
func DeleteJob(c clientset.Interface, ns, name string) error {
	return c.Batch().Jobs(ns).Delete(name, nil)
}
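These are thin wrappers over the generated clientset, so an update follows the usual read-modify-write shape. A sketch, not code from this commit (a production caller would also retry on conflict errors):

// Read-modify-write with the helpers above; values are illustrative.
job, err := framework.GetJob(f.ClientSet, f.Namespace.Name, "foo")
Expect(err).NotTo(HaveOccurred())
parallelism := int32(4)
job.Spec.Parallelism = &parallelism
job, err = framework.UpdateJob(f.ClientSet, f.Namespace.Name, job)
Expect(err).NotTo(HaveOccurred())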
// WaitForAllJobPodsRunning waits for all pods of the Job named jobName in namespace ns to become Running.  Only use
// when pods will run for a long time, or it will be racy.
func WaitForAllJobPodsRunning(c clientset.Interface, ns, jobName string, parallelism int32) error {
	label := labels.SelectorFromSet(labels.Set(map[string]string{JobSelectorKey: jobName}))
	return wait.Poll(Poll, JobTimeout, func() (bool, error) {
		options := metav1.ListOptions{LabelSelector: label.String()}
		pods, err := c.Core().Pods(ns).List(options)
		if err != nil {
			return false, err
		}
		count := int32(0)
		for _, p := range pods.Items {
			if p.Status.Phase == v1.PodRunning {
				count++
			}
		}
		return count == parallelism, nil
	})
}

// WaitForJobFinish uses c to wait for the Job named jobName in namespace ns to reach completions completions.
func WaitForJobFinish(c clientset.Interface, ns, jobName string, completions int32) error {
	return wait.Poll(Poll, JobTimeout, func() (bool, error) {
		curr, err := c.Batch().Jobs(ns).Get(jobName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		return curr.Status.Succeeded == completions, nil
	})
}
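Both waiters above are built on wait.Poll, which invokes the condition function every Poll interval (the framework's standard polling period) until the condition returns true, returns an error, or the timeout elapses. The same shape works for any custom condition, for example:

// Sketch of a custom condition using the same wait.Poll shape; the
// condition (waiting for zero active pods) is illustrative only.
err := wait.Poll(Poll, JobTimeout, func() (bool, error) {
	curr, err := c.Batch().Jobs(ns).Get(jobName, metav1.GetOptions{})
	if err != nil {
		return false, err // a non-nil error aborts the poll early
	}
	return curr.Status.Active == 0, nil // done when no pods are active
})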
// WaitForJobFailure uses c to wait for up to timeout for the Job named jobName in namespace ns to fail.
func WaitForJobFailure(c clientset.Interface, ns, jobName string, timeout time.Duration) error {
	return wait.Poll(Poll, timeout, func() (bool, error) {
		curr, err := c.Batch().Jobs(ns).Get(jobName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		for _, c := range curr.Status.Conditions {
			if c.Type == batch.JobFailed && c.Status == v1.ConditionTrue {
				return true, nil
			}
		}
		return false, nil
	})
}
// CheckForAllJobPodsRunning uses c to check whether the Job named jobName in ns is running, i.e. whether parallelism
// of its pods are in phase Running. If the returned error is nil, the returned bool reports whether the Job is
// running.
func CheckForAllJobPodsRunning(c clientset.Interface, ns, jobName string, parallelism int32) (bool, error) {
	label := labels.SelectorFromSet(labels.Set(map[string]string{JobSelectorKey: jobName}))
	options := metav1.ListOptions{LabelSelector: label.String()}
	pods, err := c.Core().Pods(ns).List(options)
	if err != nil {
		return false, err
	}
	count := int32(0)
	for _, p := range pods.Items {
		if p.Status.Phase == v1.PodRunning {
			count++
		}
	}
	return count == parallelism, nil
}
func newBool(val bool) *bool {
	p := new(bool)
	*p = val
	return p
}

test/e2e/job.go (171 changed lines)
@@ -1,5 +1,5 @@
 /*
-Copyright 2015 The Kubernetes Authors.
+Copyright 2017 The Kubernetes Authors.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -21,12 +21,8 @@ import (

 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/labels"
-	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/kubernetes/pkg/api/v1"
 	batchinternal "k8s.io/kubernetes/pkg/apis/batch"
-	batch "k8s.io/kubernetes/pkg/apis/batch/v1"
-	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
 	"k8s.io/kubernetes/pkg/kubectl"
 	"k8s.io/kubernetes/test/e2e/framework"

@@ -34,14 +30,6 @@ import (
 	. "github.com/onsi/gomega"
 )

-const (
-	// How long to wait for a job to finish.
-	jobTimeout = 15 * time.Minute
-
-	// Job selector name
-	jobSelectorKey = "job"
-)
-
 var _ = framework.KubeDescribe("Job", func() {
 	f := framework.NewDefaultFramework("job")
 	parallelism := int32(2)
@@ -50,12 +38,12 @@ var _ = framework.KubeDescribe("Job", func() {
 	// Simplest case: all pods succeed promptly
 	It("should run a job to completion when tasks succeed", func() {
 		By("Creating a job")
-		job := newTestJob("succeed", "all-succeed", v1.RestartPolicyNever, parallelism, completions)
-		job, err := createJob(f.ClientSet, f.Namespace.Name, job)
+		job := framework.NewTestJob("succeed", "all-succeed", v1.RestartPolicyNever, parallelism, completions)
+		job, err := framework.CreateJob(f.ClientSet, f.Namespace.Name, job)
 		Expect(err).NotTo(HaveOccurred())

 		By("Ensuring job reaches completions")
-		err = waitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, completions)
+		err = framework.WaitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, completions)
 		Expect(err).NotTo(HaveOccurred())
 	})

@@ -69,12 +57,12 @@ var _ = framework.KubeDescribe("Job", func() {
 		// up to 5 minutes between restarts, making test timeouts
 		// due to successive failures too likely with a reasonable
 		// test timeout.
-		job := newTestJob("failOnce", "fail-once-local", v1.RestartPolicyOnFailure, parallelism, completions)
-		job, err := createJob(f.ClientSet, f.Namespace.Name, job)
+		job := framework.NewTestJob("failOnce", "fail-once-local", v1.RestartPolicyOnFailure, parallelism, completions)
+		job, err := framework.CreateJob(f.ClientSet, f.Namespace.Name, job)
 		Expect(err).NotTo(HaveOccurred())

 		By("Ensuring job reaches completions")
-		err = waitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, completions)
+		err = framework.WaitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, completions)
 		Expect(err).NotTo(HaveOccurred())
 	})

@@ -87,23 +75,23 @@ var _ = framework.KubeDescribe("Job", func() {
 		// Worst case analysis: 15 failures, each taking 1 minute to
 		// run due to some slowness, 1 in 2^15 chance of happening,
 		// causing test flake.  Should be very rare.
-		job := newTestJob("randomlySucceedOrFail", "rand-non-local", v1.RestartPolicyNever, parallelism, completions)
-		job, err := createJob(f.ClientSet, f.Namespace.Name, job)
+		job := framework.NewTestJob("randomlySucceedOrFail", "rand-non-local", v1.RestartPolicyNever, parallelism, completions)
+		job, err := framework.CreateJob(f.ClientSet, f.Namespace.Name, job)
 		Expect(err).NotTo(HaveOccurred())

 		By("Ensuring job reaches completions")
-		err = waitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, completions)
+		err = framework.WaitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, completions)
 		Expect(err).NotTo(HaveOccurred())
 	})

 	It("should delete a job", func() {
 		By("Creating a job")
-		job := newTestJob("notTerminate", "foo", v1.RestartPolicyNever, parallelism, completions)
-		job, err := createJob(f.ClientSet, f.Namespace.Name, job)
+		job := framework.NewTestJob("notTerminate", "foo", v1.RestartPolicyNever, parallelism, completions)
+		job, err := framework.CreateJob(f.ClientSet, f.Namespace.Name, job)
 		Expect(err).NotTo(HaveOccurred())

 		By("Ensuring active pods == parallelism")
-		err = waitForAllPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, parallelism)
+		err = framework.WaitForAllJobPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, parallelism)
 		Expect(err).NotTo(HaveOccurred())

 		By("delete a job")
@@ -114,139 +102,8 @@ var _ = framework.KubeDescribe("Job", func() {
 		Expect(err).NotTo(HaveOccurred())

 		By("Ensuring job was deleted")
-		_, err = getJob(f.ClientSet, f.Namespace.Name, job.Name)
+		_, err = framework.GetJob(f.ClientSet, f.Namespace.Name, job.Name)
 		Expect(err).To(HaveOccurred())
 		Expect(errors.IsNotFound(err)).To(BeTrue())
 	})
 })
-
-// newTestJob returns a job which does one of several testing behaviors.
-func newTestJob(behavior, name string, rPol v1.RestartPolicy, parallelism, completions int32) *batch.Job {
-	job := &batch.Job{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: name,
-		},
-		Spec: batch.JobSpec{
-			Parallelism:    &parallelism,
-			Completions:    &completions,
-			ManualSelector: newBool(false),
-			Template: v1.PodTemplateSpec{
-				ObjectMeta: metav1.ObjectMeta{
-					Labels: map[string]string{jobSelectorKey: name},
-				},
-				Spec: v1.PodSpec{
-					RestartPolicy: rPol,
-					Volumes: []v1.Volume{
-						{
-							Name: "data",
-							VolumeSource: v1.VolumeSource{
-								EmptyDir: &v1.EmptyDirVolumeSource{},
-							},
-						},
-					},
-					Containers: []v1.Container{
-						{
-							Name:    "c",
-							Image:   "gcr.io/google_containers/busybox:1.24",
-							Command: []string{},
-							VolumeMounts: []v1.VolumeMount{
-								{
-									MountPath: "/data",
-									Name:      "data",
-								},
-							},
-						},
-					},
-				},
-			},
-		},
-	}
-	switch behavior {
-	case "notTerminate":
-		job.Spec.Template.Spec.Containers[0].Command = []string{"sleep", "1000000"}
-	case "fail":
-		job.Spec.Template.Spec.Containers[0].Command = []string{"/bin/sh", "-c", "exit 1"}
-	case "succeed":
-		job.Spec.Template.Spec.Containers[0].Command = []string{"/bin/sh", "-c", "exit 0"}
-	case "randomlySucceedOrFail":
-		// Bash's $RANDOM generates pseudorandom int in range 0 - 32767.
-		// Dividing by 16384 gives roughly 50/50 chance of success.
-		job.Spec.Template.Spec.Containers[0].Command = []string{"/bin/sh", "-c", "exit $(( $RANDOM / 16384 ))"}
-	case "failOnce":
-		// Fail the first the container of the pod is run, and
-		// succeed the second time. Checks for file on emptydir.
-		// If present, succeed.  If not, create but fail.
-		// Note that this cannot be used with RestartNever because
-		// it always fails the first time for a pod.
-		job.Spec.Template.Spec.Containers[0].Command = []string{"/bin/sh", "-c", "if [[ -r /data/foo ]] ; then exit 0 ; else touch /data/foo ; exit 1 ; fi"}
-	}
-	return job
-}
-
-func getJob(c clientset.Interface, ns, name string) (*batch.Job, error) {
-	return c.Batch().Jobs(ns).Get(name, metav1.GetOptions{})
-}
-
-func createJob(c clientset.Interface, ns string, job *batch.Job) (*batch.Job, error) {
-	return c.Batch().Jobs(ns).Create(job)
-}
-
-func updateJob(c clientset.Interface, ns string, job *batch.Job) (*batch.Job, error) {
-	return c.Batch().Jobs(ns).Update(job)
-}
-
-func deleteJob(c clientset.Interface, ns, name string) error {
-	return c.Batch().Jobs(ns).Delete(name, nil)
-}
-
-// Wait for all pods to become Running.  Only use when pods will run for a long time, or it will be racy.
-func waitForAllPodsRunning(c clientset.Interface, ns, jobName string, parallelism int32) error {
-	label := labels.SelectorFromSet(labels.Set(map[string]string{jobSelectorKey: jobName}))
-	return wait.Poll(framework.Poll, jobTimeout, func() (bool, error) {
-		options := metav1.ListOptions{LabelSelector: label.String()}
-		pods, err := c.Core().Pods(ns).List(options)
-		if err != nil {
-			return false, err
-		}
-		count := int32(0)
-		for _, p := range pods.Items {
-			if p.Status.Phase == v1.PodRunning {
-				count++
-			}
-		}
-		return count == parallelism, nil
-	})
-}
-
-// Wait for job to reach completions.
-func waitForJobFinish(c clientset.Interface, ns, jobName string, completions int32) error {
-	return wait.Poll(framework.Poll, jobTimeout, func() (bool, error) {
-		curr, err := c.Batch().Jobs(ns).Get(jobName, metav1.GetOptions{})
-		if err != nil {
-			return false, err
-		}
-		return curr.Status.Succeeded == completions, nil
-	})
-}
-
-// Wait for job fail.
-func waitForJobFail(c clientset.Interface, ns, jobName string, timeout time.Duration) error {
-	return wait.Poll(framework.Poll, timeout, func() (bool, error) {
-		curr, err := c.Batch().Jobs(ns).Get(jobName, metav1.GetOptions{})
-		if err != nil {
-			return false, err
-		}
-		for _, c := range curr.Status.Conditions {
-			if c.Type == batch.JobFailed && c.Status == v1.ConditionTrue {
-				return true, nil
-			}
-		}
-		return false, nil
-	})
-}
-
-func newBool(val bool) *bool {
-	p := new(bool)
-	*p = val
-	return p
-}
@@ -442,10 +442,11 @@ var _ = framework.KubeDescribe("Network Partition [Disruptive] [Slow]", func() {
 			parallelism := int32(2)
 			completions := int32(4)

-			job := newTestJob("notTerminate", "network-partition", v1.RestartPolicyNever, parallelism, completions)
-			job, err := createJob(f.ClientSet, f.Namespace.Name, job)
+			job := framework.NewTestJob("notTerminate", "network-partition", v1.RestartPolicyNever,
+				parallelism, completions)
+			job, err := framework.CreateJob(f.ClientSet, f.Namespace.Name, job)
 			Expect(err).NotTo(HaveOccurred())
-			label := labels.SelectorFromSet(labels.Set(map[string]string{jobSelectorKey: job.Name}))
+			label := labels.SelectorFromSet(labels.Set(map[string]string{framework.JobSelectorKey: job.Name}))

 			By(fmt.Sprintf("verifying that there are now %v running pods", parallelism))
 			_, err = framework.PodsCreatedByLabel(c, ns, job.Name, parallelism, label)
@@ -13,6 +13,7 @@ go_library(
         "configmaps.go",
         "deployments.go",
        "horizontal_pod_autoscalers.go",
+        "job.go",
         "persistent_volumes.go",
         "secrets.go",
         "services.go",
@@ -24,6 +25,7 @@ go_library(
     deps = [
         "//pkg/api/v1:go_default_library",
         "//pkg/apis/apps/v1beta1:go_default_library",
+        "//pkg/apis/batch/v1:go_default_library",
         "//pkg/apis/extensions/v1beta1:go_default_library",
         "//pkg/controller/deployment/util:go_default_library",
         "//pkg/kubelet/sysctl:go_default_library",

test/e2e/upgrades/job.go (new file, 63 lines)
@@ -0,0 +1,63 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package upgrades

import (
	"k8s.io/kubernetes/pkg/api/v1"
	batch "k8s.io/kubernetes/pkg/apis/batch/v1"
	"k8s.io/kubernetes/test/e2e/framework"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

// JobUpgradeTest is a test harness for batch Jobs.
type JobUpgradeTest struct {
	job       *batch.Job
	namespace string
}

// Setup starts a Job with parallelism 2 and 2 completions.
func (t *JobUpgradeTest) Setup(f *framework.Framework) {
	ns, err := f.CreateNamespace("service-upgrade", nil)
	Expect(err).NotTo(HaveOccurred())
	t.namespace = ns.Name

	By("Creating a job")
	t.job = framework.NewTestJob("notTerminate", "foo", v1.RestartPolicyOnFailure, 2, 2)
	job, err := framework.CreateJob(f.ClientSet, t.namespace, t.job)
	t.job = job
	Expect(err).NotTo(HaveOccurred())

	By("Ensuring active pods == parallelism")
	err = framework.WaitForAllJobPodsRunning(f.ClientSet, t.namespace, job.Name, 2)
	Expect(err).NotTo(HaveOccurred())
}

// Test verifies that the Job's pods are running after an upgrade.
func (t *JobUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType) {
	<-done
	By("Ensuring active pods == parallelism")
	running, err := framework.CheckForAllJobPodsRunning(f.ClientSet, t.namespace, t.job.Name, 2)
	Expect(err).NotTo(HaveOccurred())
	Expect(running).To(BeTrue())
}

// Teardown cleans up any remaining resources.
func (t *JobUpgradeTest) Teardown(f *framework.Framework) {
	// rely on the namespace deletion to clean up everything
}
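For orientation, the upgrade runner presumably drives these hooks in the order Setup, then Test while the upgrade proceeds, then Teardown. A rough sketch of that control flow; illustrative only, since the real runner is not part of this diff and doUpgrade/upgradeType are hypothetical stand-ins:

// Rough sketch of how the upgrade framework presumably drives a Test.
var test upgrades.Test = &upgrades.JobUpgradeTest{}
test.Setup(f) // create the Job before the upgrade starts
done := make(chan struct{})
go func() {
	defer close(done) // unblocks Test once the upgrade finishes
	doUpgrade()       // hypothetical stand-in for the real master/node upgrade
}()
test.Test(f, done, upgradeType) // blocks on <-done, then verifies the pods
test.Teardown(f)                // namespace deletion cleans everything up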