Merge pull request #27438 from gmarek/controllerDeletion

Automatic merge from submit-queue

Controllers don't take any actions when being deleted.

I started doing this for the other controllers, but it's not always clear to me how each one should behave. I'll be adding the other ones as separate commits to this PR.

cc @caesarxuchao @lavalamp
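
The pattern across these controllers is the same: the sync path checks the object's DeletionTimestamp and, when it is set, skips all mutating work (creating or deleting pods) and at most reconciles status. Below is a minimal, self-contained Go sketch of that guard; the type and function names are illustrative placeholders, not code from this diff:

package main

import (
	"fmt"
	"time"
)

// Object stands in for a controller-managed API object. As with Kubernetes'
// ObjectMeta, a non-nil DeletionTimestamp means deletion is in progress.
type Object struct {
	Name              string
	DeletionTimestamp *time.Time
}

// syncObject sketches the guard this PR applies: while the object is being
// deleted, take no mutating actions and only reconcile status.
func syncObject(obj *Object) {
	if obj.DeletionTimestamp != nil {
		fmt.Printf("%s: being deleted, syncing status only\n", obj.Name)
		return
	}
	fmt.Printf("%s: syncing normally, may create or delete pods\n", obj.Name)
}

func main() {
	now := time.Now()
	syncObject(&Object{Name: "live"})
	syncObject(&Object{Name: "terminating", DeletionTimestamp: &now})
}

In the diffs below, the deployment controller gets a dedicated syncStatusOnly path for the deletion case, while the DaemonSet and Job controllers simply guard their manage step with a DeletionTimestamp check.
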
k8s-merge-robot 2016-07-12 12:37:09 -07:00 committed by GitHub
commit d90cc907c4
7 changed files with 73 additions and 19 deletions


@@ -661,7 +661,7 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
 		return err
 	}
 
 	dsNeedsSync := dsc.expectations.SatisfiedExpectations(dsKey)
-	if dsNeedsSync {
+	if dsNeedsSync && ds.DeletionTimestamp == nil {
 		dsc.manage(ds)
 	}


@@ -289,6 +289,24 @@ func TestSufficentCapacityNodeDaemonLaunchesPod(t *testing.T) {
 	syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0)
 }
 
+// DaemonSets should not take any actions when being deleted
+func TestDontDoAnythingIfBeingDeleted(t *testing.T) {
+	podSpec := resourcePodSpec("not-too-much-mem", "75M", "75m")
+	manager, podControl := newTestController()
+	node := newNode("not-too-much-mem", nil)
+	node.Status.Allocatable = allocatableResources("200M", "200m")
+	manager.nodeStore.Add(node)
+	manager.podStore.Add(&api.Pod{
+		Spec: podSpec,
+	})
+	ds := newDaemonSet("foo")
+	ds.Spec.Template.Spec = podSpec
+	now := unversioned.Now()
+	ds.DeletionTimestamp = &now
+	manager.dsStore.Add(ds)
+	syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
+}
+
 // DaemonSets should not place onto nodes that would cause port conflicts
 func TestPortConflictNodeDaemonDoesNotLaunchPod(t *testing.T) {
 	podSpec := api.PodSpec{


@@ -484,6 +484,10 @@ func (dc *DeploymentController) syncDeployment(key string) error {
 		return err
 	}
 
+	if d.DeletionTimestamp != nil {
+		return dc.syncStatusOnly(d)
+	}
+
 	if d.Spec.Paused {
 		return dc.sync(d)
 	}


@@ -252,6 +252,17 @@ func TestSyncDeploymentCreatesReplicaSet(t *testing.T) {
 	f.run(getKey(d, t))
 }
 
+func TestSyncDeploymentDontDoAnythingDuringDeletion(t *testing.T) {
+	f := newFixture(t)
+
+	d := newDeployment(1, nil)
+	now := unversioned.Now()
+	d.DeletionTimestamp = &now
+	f.dStore = append(f.dStore, d)
+
+	f.run(getKey(d, t))
+}
+
 // issue: https://github.com/kubernetes/kubernetes/issues/23218
 func TestDeploymentController_dontSyncDeploymentsWithEmptyPodSelector(t *testing.T) {
 	fake := &fake.Clientset{}


@@ -35,6 +35,17 @@ import (
 	rsutil "k8s.io/kubernetes/pkg/util/replicaset"
 )
 
+// syncStatusOnly only updates a Deployment's Status and doesn't take any mutating actions.
+func (dc *DeploymentController) syncStatusOnly(deployment *extensions.Deployment) error {
+	newRS, oldRSs, err := dc.getAllReplicaSetsAndSyncRevision(deployment, false)
+	if err != nil {
+		return err
+	}
+
+	allRSs := append(oldRSs, newRS)
+	return dc.syncDeploymentStatus(allRSs, newRS, deployment)
+}
+
 // sync is responsible for reconciling deployments on scaling events or when they
 // are paused.
 func (dc *DeploymentController) sync(deployment *extensions.Deployment) error {


@@ -374,7 +374,7 @@ func (jm *JobController) syncJob(key string) error {
 			job.Status.Conditions = append(job.Status.Conditions, newCondition(batch.JobFailed, "DeadlineExceeded", "Job was active longer than specified deadline"))
 			jm.recorder.Event(&job, api.EventTypeNormal, "DeadlineExceeded", "Job was active longer than specified deadline")
 		} else {
-			if jobNeedsSync {
+			if jobNeedsSync && job.DeletionTimestamp == nil {
 				active = jm.manageJob(activePods, succeeded, &job)
 			}
 			completions := succeeded


@@ -107,6 +107,7 @@ func TestControllerSyncJob(t *testing.T) {
 		// job setup
 		parallelism int32
 		completions int32
+		deleting    bool
 
 		// pod setup
 		podControllerError error
@@ -124,90 +125,95 @@ func TestControllerSyncJob(t *testing.T) {
 		expectedComplete bool
 	}{
 		"job start": {
-			2, 5,
+			2, 5, false,
 			nil, 0, 0, 0, 0,
 			2, 0, 2, 0, 0, false,
 		},
 		"WQ job start": {
-			2, -1,
+			2, -1, false,
 			nil, 0, 0, 0, 0,
 			2, 0, 2, 0, 0, false,
 		},
 		"pending pods": {
-			2, 5,
+			2, 5, false,
 			nil, 2, 0, 0, 0,
 			0, 0, 2, 0, 0, false,
 		},
 		"correct # of pods": {
-			2, 5,
+			2, 5, false,
 			nil, 0, 2, 0, 0,
 			0, 0, 2, 0, 0, false,
 		},
 		"WQ job: correct # of pods": {
-			2, -1,
+			2, -1, false,
 			nil, 0, 2, 0, 0,
 			0, 0, 2, 0, 0, false,
 		},
 		"too few active pods": {
-			2, 5,
+			2, 5, false,
 			nil, 0, 1, 1, 0,
 			1, 0, 2, 1, 0, false,
 		},
 		"too few active pods with a dynamic job": {
-			2, -1,
+			2, -1, false,
 			nil, 0, 1, 0, 0,
 			1, 0, 2, 0, 0, false,
 		},
 		"too few active pods, with controller error": {
-			2, 5,
+			2, 5, false,
 			fmt.Errorf("Fake error"), 0, 1, 1, 0,
 			0, 0, 1, 1, 0, false,
 		},
 		"too many active pods": {
-			2, 5,
+			2, 5, false,
 			nil, 0, 3, 0, 0,
 			0, 1, 2, 0, 0, false,
 		},
 		"too many active pods, with controller error": {
-			2, 5,
+			2, 5, false,
 			fmt.Errorf("Fake error"), 0, 3, 0, 0,
 			0, 0, 3, 0, 0, false,
 		},
 		"failed pod": {
-			2, 5,
+			2, 5, false,
 			nil, 0, 1, 1, 1,
 			1, 0, 2, 1, 1, false,
 		},
 		"job finish": {
-			2, 5,
+			2, 5, false,
 			nil, 0, 0, 5, 0,
 			0, 0, 0, 5, 0, true,
 		},
 		"WQ job finishing": {
-			2, -1,
+			2, -1, false,
 			nil, 0, 1, 1, 0,
 			0, 0, 1, 1, 0, false,
 		},
 		"WQ job all finished": {
-			2, -1,
+			2, -1, false,
 			nil, 0, 0, 2, 0,
 			0, 0, 0, 2, 0, true,
 		},
 		"WQ job all finished despite one failure": {
-			2, -1,
+			2, -1, false,
 			nil, 0, 0, 1, 1,
 			0, 0, 0, 1, 1, true,
 		},
 		"more active pods than completions": {
-			2, 5,
+			2, 5, false,
 			nil, 0, 10, 0, 0,
 			0, 8, 2, 0, 0, false,
 		},
 		"status change": {
-			2, 5,
+			2, 5, false,
 			nil, 0, 2, 2, 0,
 			0, 0, 2, 2, 0, false,
 		},
+		"deleting job": {
+			2, 5, true,
+			nil, 1, 1, 1, 0,
+			0, 0, 2, 1, 0, false,
+		},
 	}
 
 	for name, tc := range testCases {
@@ -225,6 +231,10 @@ func TestControllerSyncJob(t *testing.T) {
 
 		// job & pods setup
 		job := newJob(tc.parallelism, tc.completions)
+		if tc.deleting {
+			now := unversioned.Now()
+			job.DeletionTimestamp = &now
+		}
 		manager.jobStore.Store.Add(job)
 		for _, pod := range newPodList(tc.pendingPods, api.PodPending, job) {
 			manager.podStore.Indexer.Add(&pod)