From dbbe68601f7d4006647a698184c592b7d6bac446 Mon Sep 17 00:00:00 2001
From: Mario Valderrama
Date: Wed, 12 Jun 2019 20:09:57 +0200
Subject: [PATCH 1/2] Fix golint errors in pkg/controller/job

---
 cmd/kube-controller-manager/app/batch.go  |  2 +-
 hack/.golint_failures                     |  1 -
 pkg/controller/job/job_controller.go      | 38 +++++++--------
 pkg/controller/job/job_controller_test.go | 56 +++++++++++------------
 pkg/controller/job/utils.go               |  2 +
 test/integration/cronjob/cronjob_test.go  |  4 +-
 6 files changed, 53 insertions(+), 50 deletions(-)

diff --git a/cmd/kube-controller-manager/app/batch.go b/cmd/kube-controller-manager/app/batch.go
index ceb11e6388b..e4528a99500 100644
--- a/cmd/kube-controller-manager/app/batch.go
+++ b/cmd/kube-controller-manager/app/batch.go
@@ -34,7 +34,7 @@ func startJobController(ctx ControllerContext) (http.Handler, bool, error) {
 	if !ctx.AvailableResources[schema.GroupVersionResource{Group: "batch", Version: "v1", Resource: "jobs"}] {
 		return nil, false, nil
 	}
-	go job.NewJobController(
+	go job.NewController(
 		ctx.InformerFactory.Core().V1().Pods(),
 		ctx.InformerFactory.Batch().V1().Jobs(),
 		ctx.ClientBuilder.ClientOrDie("job-controller"),
diff --git a/hack/.golint_failures b/hack/.golint_failures
index 795f37bf204..fb9a90f2aef 100644
--- a/hack/.golint_failures
+++ b/hack/.golint_failures
@@ -68,7 +68,6 @@ pkg/controller/endpoint
 pkg/controller/endpoint/config/v1alpha1
 pkg/controller/garbagecollector
 pkg/controller/garbagecollector/config/v1alpha1
-pkg/controller/job
 pkg/controller/job/config/v1alpha1
 pkg/controller/namespace
 pkg/controller/namespace/config/v1alpha1
diff --git a/pkg/controller/job/job_controller.go b/pkg/controller/job/job_controller.go
index 665c726f77b..ac462cb7d8c 100644
--- a/pkg/controller/job/job_controller.go
+++ b/pkg/controller/job/job_controller.go
@@ -60,7 +60,8 @@ var (
 	MaxJobBackOff = 360 * time.Second
 )
 
-type JobController struct {
+// Controller is a controller for Jobs.
+type Controller struct {
 	kubeClient clientset.Interface
 	podControl controller.PodControlInterface
 
@@ -89,7 +90,8 @@ type JobController struct {
 	recorder record.EventRecorder
 }
 
-func NewJobController(podInformer coreinformers.PodInformer, jobInformer batchinformers.JobInformer, kubeClient clientset.Interface) *JobController {
+// NewController creates a new Controller.
+func NewController(podInformer coreinformers.PodInformer, jobInformer batchinformers.JobInformer, kubeClient clientset.Interface) *Controller {
 	eventBroadcaster := record.NewBroadcaster()
 	eventBroadcaster.StartLogging(klog.Infof)
 	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
@@ -98,7 +100,7 @@ func NewJobController(podInformer coreinformers.PodInformer, jobInformer batchin
 		metrics.RegisterMetricAndTrackRateLimiterUsage("job_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter())
 	}
 
-	jm := &JobController{
+	jm := &Controller{
 		kubeClient: kubeClient,
 		podControl: controller.RealPodControl{
 			KubeClient: kubeClient,
@@ -136,7 +138,7 @@ func NewJobController(podInformer coreinformers.PodInformer, jobInformer batchin
 }
 
 // Run the main goroutine responsible for watching and syncing jobs.
-func (jm *JobController) Run(workers int, stopCh <-chan struct{}) {
+func (jm *Controller) Run(workers int, stopCh <-chan struct{}) {
 	defer utilruntime.HandleCrash()
 	defer jm.queue.ShutDown()
 
@@ -155,7 +157,7 @@ func (jm *JobController) Run(workers int, stopCh <-chan struct{}) {
 }
 
 // getPodJobs returns a list of Jobs that potentially match a Pod.
-func (jm *JobController) getPodJobs(pod *v1.Pod) []*batch.Job {
+func (jm *Controller) getPodJobs(pod *v1.Pod) []*batch.Job {
 	jobs, err := jm.jobLister.GetPodJobs(pod)
 	if err != nil {
 		return nil
@@ -175,7 +177,7 @@ func (jm *JobController) getPodJobs(pod *v1.Pod) []*batch.Job {
 // resolveControllerRef returns the controller referenced by a ControllerRef,
 // or nil if the ControllerRef could not be resolved to a matching controller
 // of the correct Kind.
-func (jm *JobController) resolveControllerRef(namespace string, controllerRef *metav1.OwnerReference) *batch.Job {
+func (jm *Controller) resolveControllerRef(namespace string, controllerRef *metav1.OwnerReference) *batch.Job {
 	// We can't look up by UID, so look up by Name and then verify UID.
 	// Don't even try to look up by Name if it's the wrong Kind.
 	if controllerRef.Kind != controllerKind.Kind {
@@ -194,7 +196,7 @@ func (jm *JobController) resolveControllerRef(namespace string, controllerRef *m
 }
 
 // When a pod is created, enqueue the controller that manages it and update it's expectations.
-func (jm *JobController) addPod(obj interface{}) {
+func (jm *Controller) addPod(obj interface{}) {
 	pod := obj.(*v1.Pod)
 	if pod.DeletionTimestamp != nil {
 		// on a restart of the controller controller, it's possible a new pod shows up in a state that
@@ -230,7 +232,7 @@ func (jm *JobController) addPod(obj interface{}) {
 // When a pod is updated, figure out what job/s manage it and wake them up.
 // If the labels of the pod have changed we need to awaken both the old
 // and new job. old and cur must be *v1.Pod types.
-func (jm *JobController) updatePod(old, cur interface{}) {
+func (jm *Controller) updatePod(old, cur interface{}) {
 	curPod := cur.(*v1.Pod)
 	oldPod := old.(*v1.Pod)
 	if curPod.ResourceVersion == oldPod.ResourceVersion {
@@ -282,7 +284,7 @@ func (jm *JobController) updatePod(old, cur interface{}) {
 // When a pod is deleted, enqueue the job that manages the pod and update its expectations.
 // obj could be an *v1.Pod, or a DeletionFinalStateUnknown marker item.
-func (jm *JobController) deletePod(obj interface{}) {
+func (jm *Controller) deletePod(obj interface{}) {
 	pod, ok := obj.(*v1.Pod)
 
 	// When a delete is dropped, the relist will notice a pod in the store not
@@ -319,7 +321,7 @@ func (jm *JobController) deletePod(obj interface{}) {
 	jm.enqueueController(job, true)
 }
 
-func (jm *JobController) updateJob(old, cur interface{}) {
+func (jm *Controller) updateJob(old, cur interface{}) {
 	oldJob := old.(*batch.Job)
 	curJob := cur.(*batch.Job)
 
@@ -351,7 +353,7 @@ func (jm *JobController) updateJob(old, cur interface{}) {
 // obj could be an *batch.Job, or a DeletionFinalStateUnknown marker item,
 // immediate tells the controller to update the status right away, and should
 // happen ONLY when there was a successful pod run.
-func (jm *JobController) enqueueController(obj interface{}, immediate bool) {
+func (jm *Controller) enqueueController(obj interface{}, immediate bool) {
 	key, err := controller.KeyFunc(obj)
 	if err != nil {
 		utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %+v: %v", obj, err))
@@ -374,12 +376,12 @@ func (jm *JobController) enqueueController(obj interface{}, immediate bool) {
 
 // worker runs a worker thread that just dequeues items, processes them, and marks them done.
-func (jm *JobController) worker() {
+func (jm *Controller) worker() {
 	for jm.processNextWorkItem() {
 	}
 }
 
-func (jm *JobController) processNextWorkItem() bool {
+func (jm *Controller) processNextWorkItem() bool {
 	key, quit := jm.queue.Get()
 	if quit {
 		return false
@@ -403,7 +405,7 @@ func (jm *JobController) processNextWorkItem() bool {
 // getPodsForJob returns the set of pods that this Job should manage.
 // It also reconciles ControllerRef by adopting/orphaning.
 // Note that the returned Pods are pointers into the cache.
-func (jm *JobController) getPodsForJob(j *batch.Job) ([]*v1.Pod, error) {
+func (jm *Controller) getPodsForJob(j *batch.Job) ([]*v1.Pod, error) {
 	selector, err := metav1.LabelSelectorAsSelector(j.Spec.Selector)
 	if err != nil {
 		return nil, fmt.Errorf("couldn't convert Job selector: %v", err)
@@ -433,7 +435,7 @@ func (jm *JobController) getPodsForJob(j *batch.Job) ([]*v1.Pod, error) {
 // syncJob will sync the job with the given key if it has had its expectations fulfilled, meaning
 // it did not expect to see any more of its pods created or deleted. This function is not meant to be invoked
 // concurrently with the same key.
-func (jm *JobController) syncJob(key string) (bool, error) {
+func (jm *Controller) syncJob(key string) (bool, error) {
 	startTime := time.Now()
 	defer func() {
 		klog.V(4).Infof("Finished syncing job %q (%v)", key, time.Since(startTime))
@@ -599,7 +601,7 @@ func (jm *JobController) syncJob(key string) (bool, error) {
 	return forget, manageJobErr
 }
 
-func (jm *JobController) deleteJobPods(job *batch.Job, pods []*v1.Pod, errCh chan<- error) {
+func (jm *Controller) deleteJobPods(job *batch.Job, pods []*v1.Pod, errCh chan<- error) {
 	// TODO: below code should be replaced with pod termination resulting in
 	// pod failures, rather than killing pods. Unfortunately none such solution
 	// exists ATM. There's an open discussion in the topic in
@@ -681,7 +683,7 @@ func getStatus(pods []*v1.Pod) (succeeded, failed int32) {
 // manageJob is the core method responsible for managing the number of running
 // pods according to what is specified in the job.Spec.
 // Does NOT modify <activePods>.
-func (jm *JobController) manageJob(activePods []*v1.Pod, succeeded int32, job *batch.Job) (int32, error) {
+func (jm *Controller) manageJob(activePods []*v1.Pod, succeeded int32, job *batch.Job) (int32, error) {
 	var activeLock sync.Mutex
 	active := int32(len(activePods))
 	parallelism := *job.Spec.Parallelism
@@ -820,7 +822,7 @@ func (jm *JobController) manageJob(activePods []*v1.Pod, succeeded int32, job *b
 	return active, nil
 }
 
-func (jm *JobController) updateJobStatus(job *batch.Job) error {
+func (jm *Controller) updateJobStatus(job *batch.Job) error {
 	jobClient := jm.kubeClient.BatchV1().Jobs(job.Namespace)
 	var err error
 	for i := 0; i <= statusUpdateRetries; i = i + 1 {
diff --git a/pkg/controller/job/job_controller_test.go b/pkg/controller/job/job_controller_test.go
index bc5a493cefb..59c95f487d8 100644
--- a/pkg/controller/job/job_controller_test.go
+++ b/pkg/controller/job/job_controller_test.go
@@ -88,9 +88,9 @@ func newJob(parallelism, completions, backoffLimit int32) *batch.Job {
 	return j
 }
 
-func newJobControllerFromClient(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc) (*JobController, informers.SharedInformerFactory) {
+func newControllerFromClient(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc) (*Controller, informers.SharedInformerFactory) {
 	sharedInformers := informers.NewSharedInformerFactory(kubeClient, resyncPeriod())
-	jm := NewJobController(sharedInformers.Core().V1().Pods(), sharedInformers.Batch().V1().Jobs(), kubeClient)
+	jm := NewController(sharedInformers.Core().V1().Pods(), sharedInformers.Batch().V1().Jobs(), kubeClient)
 	jm.podControl = &controller.FakePodControl{}
 
 	return jm, sharedInformers
@@ -272,7 +272,7 @@ func TestControllerSyncJob(t *testing.T) {
 	for name, tc := range testCases {
 		// job manager setup
 		clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
-		manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
+		manager, sharedInformerFactory := newControllerFromClient(clientset, controller.NoResyncPeriodFunc)
 		fakePodControl := controller.FakePodControl{Err: tc.podControllerError, CreateLimit: tc.podLimit}
 		manager.podControl = &fakePodControl
 		manager.podStoreSynced = alwaysReady
@@ -413,7 +413,7 @@ func TestSyncJobPastDeadline(t *testing.T) {
 	for name, tc := range testCases {
 		// job manager setup
 		clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
-		manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
+		manager, sharedInformerFactory := newControllerFromClient(clientset, controller.NoResyncPeriodFunc)
 		fakePodControl := controller.FakePodControl{}
 		manager.podControl = &fakePodControl
 		manager.podStoreSynced = alwaysReady
@@ -479,7 +479,7 @@ func getCondition(job *batch.Job, condition batch.JobConditionType, reason strin
 
 func TestSyncPastDeadlineJobFinished(t *testing.T) {
 	clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
-	manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
+	manager, sharedInformerFactory := newControllerFromClient(clientset, controller.NoResyncPeriodFunc)
 	fakePodControl := controller.FakePodControl{}
 	manager.podControl = &fakePodControl
 	manager.podStoreSynced = alwaysReady
@@ -517,7 +517,7 @@ func TestSyncPastDeadlineJobFinished(t *testing.T) {
 
 func TestSyncJobComplete(t *testing.T) {
 	clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
-	manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
+	manager, sharedInformerFactory := newControllerFromClient(clientset, controller.NoResyncPeriodFunc)
 	fakePodControl := controller.FakePodControl{}
 	manager.podControl = &fakePodControl
 	manager.podStoreSynced = alwaysReady
@@ -545,7 +545,7 @@ func TestSyncJobComplete(t *testing.T) {
 
 func TestSyncJobDeleted(t *testing.T) {
 	clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
-	manager, _ := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
+	manager, _ := newControllerFromClient(clientset, controller.NoResyncPeriodFunc)
 	fakePodControl := controller.FakePodControl{}
 	manager.podControl = &fakePodControl
 	manager.podStoreSynced = alwaysReady
@@ -570,7 +570,7 @@ func TestSyncJobDeleted(t *testing.T) {
 
 func TestSyncJobUpdateRequeue(t *testing.T) {
 	clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
 	DefaultJobBackOff = time.Duration(0) // overwrite the default value for testing
-	manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
+	manager, sharedInformerFactory := newControllerFromClient(clientset, controller.NoResyncPeriodFunc)
 	fakePodControl := controller.FakePodControl{}
 	manager.podControl = &fakePodControl
 	manager.podStoreSynced = alwaysReady
@@ -599,7 +599,7 @@ func TestSyncJobUpdateRequeue(t *testing.T) {
 
 func TestJobPodLookup(t *testing.T) {
 	clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
-	manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
+	manager, sharedInformerFactory := newControllerFromClient(clientset, controller.NoResyncPeriodFunc)
 	manager.podStoreSynced = alwaysReady
 	manager.jobStoreSynced = alwaysReady
 	testCases := []struct {
@@ -681,7 +681,7 @@ func TestJobPodLookup(t *testing.T) {
 
 func TestGetPodsForJob(t *testing.T) {
 	clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
-	jm, informer := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
+	jm, informer := newControllerFromClient(clientset, controller.NoResyncPeriodFunc)
 	jm.podStoreSynced = alwaysReady
 	jm.jobStoreSynced = alwaysReady
 
@@ -729,7 +729,7 @@ func TestGetPodsForJobAdopt(t *testing.T) {
 	job1 := newJob(1, 1, 6)
 	job1.Name = "job1"
 	clientset := fake.NewSimpleClientset(job1)
-	jm, informer := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
+	jm, informer := newControllerFromClient(clientset, controller.NoResyncPeriodFunc)
 	jm.podStoreSynced = alwaysReady
 	jm.jobStoreSynced = alwaysReady
 
@@ -756,7 +756,7 @@ func TestGetPodsForJobNoAdoptIfBeingDeleted(t *testing.T) {
 	job1.Name = "job1"
 	job1.DeletionTimestamp = &metav1.Time{}
 	clientset := fake.NewSimpleClientset(job1)
-	jm, informer := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
+	jm, informer := newControllerFromClient(clientset, controller.NoResyncPeriodFunc)
 	jm.podStoreSynced = alwaysReady
 	jm.jobStoreSynced = alwaysReady
 
@@ -787,7 +787,7 @@ func TestGetPodsForJobNoAdoptIfBeingDeletedRace(t *testing.T) {
 	// The up-to-date object says it's being deleted.
 	job1.DeletionTimestamp = &metav1.Time{}
 	clientset := fake.NewSimpleClientset(job1)
-	jm, informer := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
+	jm, informer := newControllerFromClient(clientset, controller.NoResyncPeriodFunc)
 	jm.podStoreSynced = alwaysReady
 	jm.jobStoreSynced = alwaysReady
 
@@ -817,7 +817,7 @@ func TestGetPodsForJobNoAdoptIfBeingDeletedRace(t *testing.T) {
 
 func TestGetPodsForJobRelease(t *testing.T) {
 	clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
-	jm, informer := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
+	jm, informer := newControllerFromClient(clientset, controller.NoResyncPeriodFunc)
 	jm.podStoreSynced = alwaysReady
 	jm.jobStoreSynced = alwaysReady
 
@@ -846,7 +846,7 @@ func TestGetPodsForJobRelease(t *testing.T) {
 
 func TestAddPod(t *testing.T) {
 	clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
-	jm, informer := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
+	jm, informer := newControllerFromClient(clientset, controller.NoResyncPeriodFunc)
 	jm.podStoreSynced = alwaysReady
 	jm.jobStoreSynced = alwaysReady
 
@@ -891,7 +891,7 @@ func TestAddPod(t *testing.T) {
 
 func TestAddPodOrphan(t *testing.T) {
 	clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
-	jm, informer := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
+	jm, informer := newControllerFromClient(clientset, controller.NoResyncPeriodFunc)
 	jm.podStoreSynced = alwaysReady
 	jm.jobStoreSynced = alwaysReady
 
@@ -919,7 +919,7 @@ func TestAddPodOrphan(t *testing.T) {
 
 func TestUpdatePod(t *testing.T) {
 	clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
-	jm, informer := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
+	jm, informer := newControllerFromClient(clientset, controller.NoResyncPeriodFunc)
 	jm.podStoreSynced = alwaysReady
 	jm.jobStoreSynced = alwaysReady
 
@@ -968,7 +968,7 @@ func TestUpdatePod(t *testing.T) {
 
 func TestUpdatePodOrphanWithNewLabels(t *testing.T) {
 	clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
-	jm, informer := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
+	jm, informer := newControllerFromClient(clientset, controller.NoResyncPeriodFunc)
 	jm.podStoreSynced = alwaysReady
 	jm.jobStoreSynced = alwaysReady
 
@@ -995,7 +995,7 @@ func TestUpdatePodOrphanWithNewLabels(t *testing.T) {
 
 func TestUpdatePodChangeControllerRef(t *testing.T) {
 	clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
-	jm, informer := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
+	jm, informer := newControllerFromClient(clientset, controller.NoResyncPeriodFunc)
 	jm.podStoreSynced = alwaysReady
 	jm.jobStoreSynced = alwaysReady
 
@@ -1021,7 +1021,7 @@ func TestUpdatePodChangeControllerRef(t *testing.T) {
 
 func TestUpdatePodRelease(t *testing.T) {
 	clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
-	jm, informer := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
+	jm, informer := newControllerFromClient(clientset, controller.NoResyncPeriodFunc)
 	jm.podStoreSynced = alwaysReady
 	jm.jobStoreSynced = alwaysReady
 
@@ -1047,7 +1047,7 @@ func TestUpdatePodRelease(t *testing.T) {
 
 func TestDeletePod(t *testing.T) {
 	clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
-	jm, informer := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
+	jm, informer := newControllerFromClient(clientset, controller.NoResyncPeriodFunc)
 	jm.podStoreSynced = alwaysReady
 	jm.jobStoreSynced = alwaysReady
 
@@ -1092,7 +1092,7 @@ func TestDeletePod(t *testing.T) {
 
 func TestDeletePodOrphan(t *testing.T) {
 	clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
-	jm, informer := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
+	jm, informer := newControllerFromClient(clientset, controller.NoResyncPeriodFunc)
 	jm.podStoreSynced = alwaysReady
 	jm.jobStoreSynced = alwaysReady
 
@@ -1132,7 +1132,7 @@ func (fe FakeJobExpectations) SatisfiedExpectations(controllerKey string) bool {
 // and checking expectations.
 func TestSyncJobExpectations(t *testing.T) {
 	clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
-	manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
+	manager, sharedInformerFactory := newControllerFromClient(clientset, controller.NoResyncPeriodFunc)
 	fakePodControl := controller.FakePodControl{}
 	manager.podControl = &fakePodControl
 	manager.podStoreSynced = alwaysReady
@@ -1166,7 +1166,7 @@ func TestWatchJobs(t *testing.T) {
 	clientset := fake.NewSimpleClientset()
 	fakeWatch := watch.NewFake()
 	clientset.PrependWatchReactor("jobs", core.DefaultWatchReactor(fakeWatch, nil))
-	manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
+	manager, sharedInformerFactory := newControllerFromClient(clientset, controller.NoResyncPeriodFunc)
 	manager.podStoreSynced = alwaysReady
 	manager.jobStoreSynced = alwaysReady
 
@@ -1211,7 +1211,7 @@ func TestWatchPods(t *testing.T) {
 	clientset := fake.NewSimpleClientset(testJob)
 	fakeWatch := watch.NewFake()
 	clientset.PrependWatchReactor("pods", core.DefaultWatchReactor(fakeWatch, nil))
-	manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
+	manager, sharedInformerFactory := newControllerFromClient(clientset, controller.NoResyncPeriodFunc)
 	manager.podStoreSynced = alwaysReady
 	manager.jobStoreSynced = alwaysReady
 
@@ -1294,7 +1294,7 @@ func TestJobBackoffReset(t *testing.T) {
 	for name, tc := range testCases {
 		clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
 		DefaultJobBackOff = time.Duration(0) // overwrite the default value for testing
-		manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
+		manager, sharedInformerFactory := newControllerFromClient(clientset, controller.NoResyncPeriodFunc)
 		fakePodControl := controller.FakePodControl{}
 		manager.podControl = &fakePodControl
 		manager.podStoreSynced = alwaysReady
@@ -1382,7 +1382,7 @@ func TestJobBackoff(t *testing.T) {
 	for name, tc := range testCases {
 		t.Run(name, func(t *testing.T) {
 			clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
-			manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
+			manager, sharedInformerFactory := newControllerFromClient(clientset, controller.NoResyncPeriodFunc)
 			fakePodControl := controller.FakePodControl{}
 			manager.podControl = &fakePodControl
 			manager.podStoreSynced = alwaysReady
@@ -1474,7 +1474,7 @@ func TestJobBackoffForOnFailure(t *testing.T) {
 		t.Run(name, func(t *testing.T) {
 			// job manager setup
 			clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
-			manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
+			manager, sharedInformerFactory := newControllerFromClient(clientset, controller.NoResyncPeriodFunc)
			fakePodControl := controller.FakePodControl{}
 			manager.podControl = &fakePodControl
 			manager.podStoreSynced = alwaysReady
diff --git a/pkg/controller/job/utils.go b/pkg/controller/job/utils.go
index 25c3d08c908..571f8ebd4ab 100644
--- a/pkg/controller/job/utils.go
+++ b/pkg/controller/job/utils.go
@@ -21,6 +21,8 @@ import (
 	"k8s.io/api/core/v1"
 )
 
+// IsJobFinished checks whether the given Job has finished execution.
+// It does not discriminate between successful and failed terminations.
 func IsJobFinished(j *batch.Job) bool {
 	for _, c := range j.Status.Conditions {
 		if (c.Type == batch.JobComplete || c.Type == batch.JobFailed) && c.Status == v1.ConditionTrue {
diff --git a/test/integration/cronjob/cronjob_test.go b/test/integration/cronjob/cronjob_test.go
index 6f5d0116bdd..2cc2daf581c 100644
--- a/test/integration/cronjob/cronjob_test.go
+++ b/test/integration/cronjob/cronjob_test.go
@@ -36,7 +36,7 @@ import (
 	"k8s.io/kubernetes/test/integration/framework"
 )
 
-func setup(t *testing.T) (*httptest.Server, framework.CloseFunc, *cronjob.Controller, *job.JobController, informers.SharedInformerFactory, clientset.Interface, restclient.Config) {
+func setup(t *testing.T) (*httptest.Server, framework.CloseFunc, *cronjob.Controller, *job.Controller, informers.SharedInformerFactory, clientset.Interface, restclient.Config) {
 	masterConfig := framework.NewIntegrationTestMasterConfig()
 	_, server, closeFn := framework.RunAMaster(masterConfig)
 
@@ -51,7 +51,7 @@ func setup(t *testing.T) (*httptest.Server, framework.CloseFunc, *cronjob.Contro
 	if err != nil {
 		t.Fatalf("Error creating CronJob controller: %v", err)
 	}
-	jc := job.NewJobController(informerSet.Core().V1().Pods(), informerSet.Batch().V1().Jobs(), clientSet)
+	jc := job.NewController(informerSet.Core().V1().Pods(), informerSet.Batch().V1().Jobs(), clientSet)
 
 	return server, closeFn, cjc, jc, informerSet, clientSet, config
 }

From 6ac7421535c9232ed276617aefd99ebc0efec764 Mon Sep 17 00:00:00 2001
From: Mario Valderrama
Date: Fri, 14 Jun 2019 14:23:13 +0200
Subject: [PATCH 2/2] Update comments

---
 pkg/controller/job/job_controller.go | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/pkg/controller/job/job_controller.go b/pkg/controller/job/job_controller.go
index ac462cb7d8c..bd9b50bfd35 100644
--- a/pkg/controller/job/job_controller.go
+++ b/pkg/controller/job/job_controller.go
@@ -60,7 +60,8 @@ var (
 	MaxJobBackOff = 360 * time.Second
 )
 
-// Controller is a controller for Jobs.
+// Controller ensures that all Job objects have corresponding pods to
+// run their configured workload.
 type Controller struct {
 	kubeClient clientset.Interface
 	podControl controller.PodControlInterface
@@ -90,7 +91,8 @@ type Controller struct {
 	recorder record.EventRecorder
 }
 
-// NewController creates a new Controller.
+// NewController creates a new Job controller that keeps the relevant pods
+// in sync with their corresponding Job objects.
 func NewController(podInformer coreinformers.PodInformer, jobInformer batchinformers.JobInformer, kubeClient clientset.Interface) *Controller {
 	eventBroadcaster := record.NewBroadcaster()
 	eventBroadcaster.StartLogging(klog.Infof)