diff --git a/pkg/controller/daemon/controller.go b/pkg/controller/daemon/daemoncontroller.go
similarity index 99%
rename from pkg/controller/daemon/controller.go
rename to pkg/controller/daemon/daemoncontroller.go
index 773ec4e3acd..b5063f79766 100644
--- a/pkg/controller/daemon/controller.go
+++ b/pkg/controller/daemon/daemoncontroller.go
@@ -661,7 +661,7 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
 		return err
 	}
 	dsNeedsSync := dsc.expectations.SatisfiedExpectations(dsKey)
-	if dsNeedsSync {
+	if dsNeedsSync && ds.DeletionTimestamp == nil {
 		dsc.manage(ds)
 	}
 
diff --git a/pkg/controller/daemon/controller_test.go b/pkg/controller/daemon/daemoncontroller_test.go
similarity index 96%
rename from pkg/controller/daemon/controller_test.go
rename to pkg/controller/daemon/daemoncontroller_test.go
index 090ae26aad4..5dd4048aa7d 100644
--- a/pkg/controller/daemon/controller_test.go
+++ b/pkg/controller/daemon/daemoncontroller_test.go
@@ -289,6 +289,24 @@ func TestSufficentCapacityNodeDaemonLaunchesPod(t *testing.T) {
 	syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0)
 }
 
+// DaemonSets should not take any actions when being deleted
+func TestDontDoAnythingIfBeingDeleted(t *testing.T) {
+	podSpec := resourcePodSpec("not-too-much-mem", "75M", "75m")
+	manager, podControl := newTestController()
+	node := newNode("not-too-much-mem", nil)
+	node.Status.Allocatable = allocatableResources("200M", "200m")
+	manager.nodeStore.Add(node)
+	manager.podStore.Add(&api.Pod{
+		Spec: podSpec,
+	})
+	ds := newDaemonSet("foo")
+	ds.Spec.Template.Spec = podSpec
+	now := unversioned.Now()
+	ds.DeletionTimestamp = &now
+	manager.dsStore.Add(ds)
+	syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
+}
+
 // DaemonSets should not place onto nodes that would cause port conflicts
 func TestPortConflictNodeDaemonDoesNotLaunchPod(t *testing.T) {
 	podSpec := api.PodSpec{
diff --git a/pkg/controller/deployment/deployment_controller.go b/pkg/controller/deployment/deployment_controller.go
index 4e1cc0206c0..8587334dacc 100644
--- a/pkg/controller/deployment/deployment_controller.go
+++ b/pkg/controller/deployment/deployment_controller.go
@@ -484,6 +484,10 @@ func (dc *DeploymentController) syncDeployment(key string) error {
 		return err
 	}
 
+	if d.DeletionTimestamp != nil {
+		return dc.syncStatusOnly(d)
+	}
+
 	if d.Spec.Paused {
 		return dc.sync(d)
 	}
diff --git a/pkg/controller/deployment/deployment_controller_test.go b/pkg/controller/deployment/deployment_controller_test.go
index 9ade955fff7..16d655dd2bb 100644
--- a/pkg/controller/deployment/deployment_controller_test.go
+++ b/pkg/controller/deployment/deployment_controller_test.go
@@ -252,6 +252,17 @@ func TestSyncDeploymentCreatesReplicaSet(t *testing.T) {
 	f.run(getKey(d, t))
 }
 
+func TestSyncDeploymentDontDoAnythingDuringDeletion(t *testing.T) {
+	f := newFixture(t)
+
+	d := newDeployment(1, nil)
+	now := unversioned.Now()
+	d.DeletionTimestamp = &now
+	f.dStore = append(f.dStore, d)
+
+	f.run(getKey(d, t))
+}
+
 // issue: https://github.com/kubernetes/kubernetes/issues/23218
 func TestDeploymentController_dontSyncDeploymentsWithEmptyPodSelector(t *testing.T) {
 	fake := &fake.Clientset{}
diff --git a/pkg/controller/deployment/sync.go b/pkg/controller/deployment/sync.go
index f9b071217a5..a9f97b2757f 100644
--- a/pkg/controller/deployment/sync.go
+++ b/pkg/controller/deployment/sync.go
@@ -35,6 +35,17 @@ import (
 	rsutil "k8s.io/kubernetes/pkg/util/replicaset"
 )
 
+// syncStatusOnly only updates the Deployment's Status and doesn't take any mutating actions.
+func (dc *DeploymentController) syncStatusOnly(deployment *extensions.Deployment) error {
+	newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(deployment, false)
+	if err != nil {
+		return err
+	}
+
+	allRSs := append(oldRSs, newRS)
+	return dc.syncDeploymentStatus(allRSs, newRS, deployment)
+}
+
 // sync is responsible for reconciling deployments on scaling events or when they
 // are paused.
 func (dc *DeploymentController) sync(deployment *extensions.Deployment) error {
diff --git a/pkg/controller/job/controller.go b/pkg/controller/job/jobcontroller.go
similarity index 99%
rename from pkg/controller/job/controller.go
rename to pkg/controller/job/jobcontroller.go
index a310c424f1c..46156d68794 100644
--- a/pkg/controller/job/controller.go
+++ b/pkg/controller/job/jobcontroller.go
@@ -374,7 +374,7 @@ func (jm *JobController) syncJob(key string) error {
 		job.Status.Conditions = append(job.Status.Conditions, newCondition(batch.JobFailed, "DeadlineExceeded", "Job was active longer than specified deadline"))
 		jm.recorder.Event(&job, api.EventTypeNormal, "DeadlineExceeded", "Job was active longer than specified deadline")
 	} else {
-		if jobNeedsSync {
+		if jobNeedsSync && job.DeletionTimestamp == nil {
 			active = jm.manageJob(activePods, succeeded, &job)
 		}
 		completions := succeeded
diff --git a/pkg/controller/job/controller_test.go b/pkg/controller/job/jobcontroller_test.go
similarity index 97%
rename from pkg/controller/job/controller_test.go
rename to pkg/controller/job/jobcontroller_test.go
index 8a27478227f..bef44dd84b0 100644
--- a/pkg/controller/job/controller_test.go
+++ b/pkg/controller/job/jobcontroller_test.go
@@ -107,6 +107,7 @@ func TestControllerSyncJob(t *testing.T) {
 		// job setup
 		parallelism int32
 		completions int32
+		deleting    bool
 
 		// pod setup
 		podControllerError error
@@ -124,90 +125,95 @@ func TestControllerSyncJob(t *testing.T) {
 		expectedComplete  bool
 	}{
 		"job start": {
-			2, 5,
+			2, 5, false,
 			nil, 0, 0, 0, 0,
 			2, 0, 2, 0, 0, false,
 		},
 		"WQ job start": {
-			2, -1,
+			2, -1, false,
 			nil, 0, 0, 0, 0,
 			2, 0, 2, 0, 0, false,
 		},
 		"pending pods": {
-			2, 5,
+			2, 5, false,
 			nil, 2, 0, 0, 0,
 			0, 0, 2, 0, 0, false,
 		},
 		"correct # of pods": {
-			2, 5,
+			2, 5, false,
 			nil, 0, 2, 0, 0,
 			0, 0, 2, 0, 0, false,
 		},
 		"WQ job: correct # of pods": {
-			2, -1,
+			2, -1, false,
 			nil, 0, 2, 0, 0,
 			0, 0, 2, 0, 0, false,
 		},
 		"too few active pods": {
-			2, 5,
+			2, 5, false,
 			nil, 0, 1, 1, 0,
 			1, 0, 2, 1, 0, false,
 		},
 		"too few active pods with a dynamic job": {
-			2, -1,
+			2, -1, false,
 			nil, 0, 1, 0, 0,
 			1, 0, 2, 0, 0, false,
 		},
 		"too few active pods, with controller error": {
-			2, 5,
+			2, 5, false,
 			fmt.Errorf("Fake error"), 0, 1, 1, 0,
 			0, 0, 1, 1, 0, false,
 		},
 		"too many active pods": {
-			2, 5,
+			2, 5, false,
 			nil, 0, 3, 0, 0,
 			0, 1, 2, 0, 0, false,
 		},
 		"too many active pods, with controller error": {
-			2, 5,
+			2, 5, false,
 			fmt.Errorf("Fake error"), 0, 3, 0, 0,
 			0, 0, 3, 0, 0, false,
 		},
 		"failed pod": {
-			2, 5,
+			2, 5, false,
 			nil, 0, 1, 1, 1,
 			1, 0, 2, 1, 1, false,
 		},
 		"job finish": {
-			2, 5,
+			2, 5, false,
 			nil, 0, 0, 5, 0,
 			0, 0, 0, 5, 0, true,
 		},
 		"WQ job finishing": {
-			2, -1,
+			2, -1, false,
 			nil, 0, 1, 1, 0,
 			0, 0, 1, 1, 0, false,
 		},
 		"WQ job all finished": {
-			2, -1,
+			2, -1, false,
 			nil, 0, 0, 2, 0,
 			0, 0, 0, 2, 0, true,
 		},
 		"WQ job all finished despite one failure": {
-			2, -1,
+			2, -1, false,
 			nil, 0, 0, 1, 1,
 			0, 0, 0, 1, 1, true,
 		},
 		"more active pods than completions": {
-			2, 5,
+			2, 5, false,
 			nil, 0, 10, 0, 0,
 			0, 8, 2, 0, 0, false,
 		},
 		"status change": {
-			2, 5,
+			2, 5, false,
 			nil, 0, 2, 2, 0,
 			0, 0, 2, 2, 0, false,
 		},
+		"deleting job": {
+			2, 5, true,
+			nil, 1, 1, 1, 0,
+			0, 0, 2, 1, 0, false,
+		},
 	}
 
 	for name, tc := range testCases {
@@ -225,6 +231,10 @@ func TestControllerSyncJob(t *testing.T) {
 
 		// job & pods setup
 		job := newJob(tc.parallelism, tc.completions)
+		if tc.deleting {
+			now := unversioned.Now()
+			job.DeletionTimestamp = &now
+		}
 		manager.jobStore.Store.Add(job)
 		for _, pod := range newPodList(tc.pendingPods, api.PodPending, job) {
 			manager.podStore.Indexer.Add(&pod)
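
The three controllers above all gain the same guard, so a condensed, self-contained sketch of the shared pattern may be easier to review than the individual hunks. Nothing below is Kubernetes code: object, syncObject, manage, and updateStatus are hypothetical stand-ins that only model the idea of skipping mutating work once DeletionTimestamp is set while still reporting status.

// Hypothetical, self-contained sketch of the guard added in this PR; none of
// these types or functions exist in Kubernetes -- they only model the pattern.
package main

import (
	"fmt"
	"time"
)

// object stands in for a DaemonSet, Deployment, or Job.
type object struct {
	Name              string
	DeletionTimestamp *time.Time // set by the API server once deletion begins
}

// manage stands in for the mutating half of a sync (creating or deleting
// pods, scaling replica sets, and so on).
func manage(o *object) {
	fmt.Printf("mutating actions for %q\n", o.Name)
}

// updateStatus stands in for the status-only half of a sync.
func updateStatus(o *object) {
	fmt.Printf("status update for %q\n", o.Name)
}

// syncObject mirrors the shape of syncDaemonSet/syncDeployment/syncJob after
// this change: skip mutations while the object is being deleted, but keep
// reporting status so observers still see the terminal state.
func syncObject(o *object, needsSync bool) {
	if needsSync && o.DeletionTimestamp == nil {
		manage(o)
	}
	updateStatus(o)
}

func main() {
	live := &object{Name: "live"}
	now := time.Now()
	deleting := &object{Name: "deleting", DeletionTimestamp: &now}

	syncObject(live, true)     // mutates and updates status
	syncObject(deleting, true) // only updates status
}

Run standalone, the second call prints only the status line, which mirrors what the new tests assert for a DaemonSet, Deployment, or Job carrying a deletion timestamp.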