Merge pull request #38613 from kargakis/do-not-adopt-when-deleted
Automatic merge from submit-queue
controller: adopt pods only when controller is not deleted
When a replica set is deleted, it keeps trying to adopt pods, driving the worker that handles it into erroring out, because the adoption attempt is [always cancelled](59c313730c/pkg/controller/controller_ref_manager.go (L110)) in the controller reference manager.
```
E1212 14:40:31.245773 7964 replica_set.go:616] cancel the adopt attempt for pod e2e-tests-deployment-2rr3m_test-rollover-deployment-1981456318-73c3m_791e16cb-c070-11e6-a234-68f72840e7df because the controlller is being deleted
E1212 14:40:31.258462 7964 replica_set.go:616] cancel the adopt attempt for pod e2e-tests-deployment-2rr3m_test-rollover-deployment-1981456318-73c3m_791e16cb-c070-11e6-a234-68f72840e7df because the controlller is being deleted
E1212 14:40:31.259131 7964 replica_set.go:616] cancel the adopt attempt for pod e2e-tests-deployment-2rr3m_test-rollover-deployment-1981456318-73c3m_791e16cb-c070-11e6-a234-68f72840e7df because the controlller is being deleted
E1212 14:40:31.259149 7964 replica_set.go:616] cancel the adopt attempt for pod e2e-tests-deployment-2rr3m_test-rollover-deployment-1981456318-wrmt8_791e3d46-c070-11e6-a234-68f72840e7df because the controlller is being deleted
I1212 14:40:31.268012 7964 deployment_controller.go:314] Error syncing deployment e2e-tests-deployment-2rr3m/test-rollover-deployment: Operation cannot be fulfilled on deployments.extensions "test-rollover-deployment": the object has been modified; please apply your changes to the latest version and try again
E1212 14:40:31.277252 7964 replica_set.go:616] cancel the adopt attempt for pod e2e-tests-deployment-2rr3m_test-rollover-deployment-1981456318-73c3m_791e16cb-c070-11e6-a234-68f72840e7df because the controlller is being deleted
E1212 14:40:31.277276 7964 replica_set.go:616] cancel the adopt attempt for pod e2e-tests-deployment-2rr3m_test-rollover-deployment-1981456318-wrmt8_791e3d46-c070-11e6-a234-68f72840e7df because the controlller is being deleted
E1212 14:40:31.277287 7964 replica_set.go:616] cancel the adopt attempt for pod e2e-tests-deployment-2rr3m_test-rollover-deployment-1981456318-bmqpn_81482114-c070-11e6-a234-68f72840e7df because the controlller is being deleted
E1212 14:40:31.289148 7964 replica_set.go:616] cancel the adopt attempt for pod e2e-tests-deployment-2rr3m_test-rollover-deployment-1981456318-b6s4x_82fa8343-c070-11e6-a234-68f72840e7df because the controlller is being deleted
E1212 14:40:31.289169 7964 replica_set.go:616] cancel the adopt attempt for pod e2e-tests-deployment-2rr3m_test-rollover-deployment-1981456318-73c3m_791e16cb-c070-11e6-a234-68f72840e7df because the controlller is being deleted
E1212 14:40:31.289176 7964 replica_set.go:616] cancel the adopt attempt for pod e2e-tests-deployment-2rr3m_test-rollover-deployment-1981456318-wrmt8_791e3d46-c070-11e6-a234-68f72840e7df because the controlller is being deleted
E1212 14:40:31.289181 7964 replica_set.go:616] cancel the adopt attempt for pod e2e-tests-deployment-2rr3m_test-rollover-deployment-1981456318-bmqpn_81482114-c070-11e6-a234-68f72840e7df because the controlller is being deleted
```
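
For reference, the cancellation the description links to looks roughly like the sketch below: a minimal, self-contained stand-in for the guard at the top of the controller reference manager's `AdoptPod` (toy types and a tidied message — the real method patches an OwnerReference onto the pod via pod control):

```go
package main

import (
	"fmt"
	"time"
)

// Toy stand-ins: the real manager is constructed with the controller's
// ObjectMeta (see NewPodControllerRefManager in the diff below).
type objectMeta struct {
	Name              string
	DeletionTimestamp *time.Time
}

type pod struct{ Namespace, Name string }

type podControllerRefManager struct{ controllerObject objectMeta }

// adoptPod sketches AdoptPod: before patching an OwnerReference onto the
// pod, it cancels outright once the controller carries a deletion
// timestamp. This branch produced the E1212 lines above, once per orphan,
// for as long as the dying replica set kept being synced.
func (m *podControllerRefManager) adoptPod(p pod) error {
	if m.controllerObject.DeletionTimestamp != nil {
		return fmt.Errorf("cancel the adopt attempt for pod %s_%s because the controller is being deleted", p.Namespace, p.Name)
	}
	// ... the real code issues the owner-reference patch here ...
	return nil
}

func main() {
	now := time.Now()
	m := podControllerRefManager{objectMeta{"test-rollover-deployment-1981456318", &now}}
	fmt.Println(m.adoptPod(pod{Namespace: "e2e-tests-deployment-2rr3m", Name: "pod-x"}))
}
```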
@kubernetes/deployment @caesarxuchao
This commit is contained in: commit 15f9572b8c
```diff
@@ -578,15 +578,6 @@ func (rsc *ReplicaSetController) syncReplicaSet(key string) error {
 	}
 	rs := *obj.(*extensions.ReplicaSet)
 
-	// Check the expectations of the ReplicaSet before counting active pods, otherwise a new pod can sneak
-	// in and update the expectations after we've retrieved active pods from the store. If a new pod enters
-	// the store after we've checked the expectation, the ReplicaSet sync is just deferred till the next
-	// relist.
-	if err != nil {
-		utilruntime.HandleError(fmt.Errorf("Couldn't get key for ReplicaSet %#v: %v", rs, err))
-		// Explicitly return nil to avoid re-enqueue bad key
-		return nil
-	}
 	rsNeedsSync := rsc.expectations.SatisfiedExpectations(key)
 	selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector)
 	if err != nil {
@@ -607,16 +598,19 @@ func (rsc *ReplicaSetController) syncReplicaSet(key string) error {
 	}
 	cm := controller.NewPodControllerRefManager(rsc.podControl, rs.ObjectMeta, selector, getRSKind())
 	matchesAndControlled, matchesNeedsController, controlledDoesNotMatch := cm.Classify(pods)
-	for _, pod := range matchesNeedsController {
-		err := cm.AdoptPod(pod)
-		// continue to next pod if adoption fails.
-		if err != nil {
-			// If the pod no longer exists, don't even log the error.
-			if !errors.IsNotFound(err) {
-				utilruntime.HandleError(err)
+	// Adopt pods only if this replica set is not going to be deleted.
+	if rs.DeletionTimestamp == nil {
+		for _, pod := range matchesNeedsController {
+			err := cm.AdoptPod(pod)
+			// continue to next pod if adoption fails.
+			if err != nil {
+				// If the pod no longer exists, don't even log the error.
+				if !errors.IsNotFound(err) {
+					utilruntime.HandleError(err)
+				}
+			} else {
+				matchesAndControlled = append(matchesAndControlled, pod)
 			}
-		} else {
-			matchesAndControlled = append(matchesAndControlled, pod)
 		}
 	}
 	filteredPods = matchesAndControlled
```
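
Read as a unit, the fixed control flow is: classify pods first, then fold the orphans into the controlled set only while the owner is live; the replication-controller hunks below make the identical change. A runnable toy of the same shape (hypothetical names standing in for the ReplicaSet types):

```go
package main

import "fmt"

// rsLike stands in for a ReplicaSet: a nil deletionTimestamp means live.
type rsLike struct {
	name              string
	deletionTimestamp *string
}

// filterPods mirrors the new logic: pods that match the selector but lack
// a controller are adopted (counted as controlled) only if the owner is live.
func filterPods(rs rsLike, matchesAndControlled, matchesNeedsController []string) []string {
	if rs.deletionTimestamp == nil {
		matchesAndControlled = append(matchesAndControlled, matchesNeedsController...)
	}
	return matchesAndControlled
}

func main() {
	ts := "2016-12-12T14:40:31Z"
	live := rsLike{name: "rs-live"}
	dying := rsLike{name: "rs-dying", deletionTimestamp: &ts}
	fmt.Println(filterPods(live, []string{"pod-a"}, []string{"pod-b"}))  // [pod-a pod-b]
	fmt.Println(filterPods(dying, []string{"pod-a"}, []string{"pod-b"})) // [pod-a]
}
```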
```diff
@@ -667,16 +667,8 @@ func (rm *ReplicationManager) syncReplicationController(key string) error {
 	}
 	rc := *obj.(*v1.ReplicationController)
 
-	// Check the expectations of the rc before counting active pods, otherwise a new pod can sneak in
-	// and update the expectations after we've retrieved active pods from the store. If a new pod enters
-	// the store after we've checked the expectation, the rc sync is just deferred till the next relist.
-	rcKey, err := controller.KeyFunc(&rc)
-	if err != nil {
-		glog.Errorf("Couldn't get key for replication controller %#v: %v", rc, err)
-		return err
-	}
 	trace.Step("ReplicationController restored")
-	rcNeedsSync := rm.expectations.SatisfiedExpectations(rcKey)
+	rcNeedsSync := rm.expectations.SatisfiedExpectations(key)
 	trace.Step("Expectations restored")
 
 	// NOTE: filteredPods are pointing to objects from cache - if you need to
```
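
The hunk above is a related cleanup rather than part of the adoption fix: `syncReplicationController` already receives the workqueue key as its argument, so recomputing it with `controller.KeyFunc(&rc)`, and the error path around it, was redundant (the first replica-set hunk drops the equivalent dead error check; the matching adoption guard for the replication controller follows in the last hunk). A minimal sketch of the worker loop that supplies that key, assuming the usual client-go workqueue pattern (trimmed type, simplified names — not the exact upstream code):

```go
package controller

import "k8s.io/client-go/util/workqueue"

// Trimmed to what the sketch needs; the real manager also holds informers,
// expectations, and pod control.
type replicationManagerSketch struct {
	queue       workqueue.RateLimitingInterface
	syncHandler func(key string) error
}

// worker drains the queue. Each item is a "namespace/name" key produced by
// cache.MetaNamespaceKeyFunc when the event was enqueued, which is why the
// sync function can trust its key argument instead of recomputing it.
func (rm *replicationManagerSketch) worker() {
	for {
		key, quit := rm.queue.Get()
		if quit {
			return
		}
		if err := rm.syncHandler(key.(string)); err != nil {
			rm.queue.AddRateLimited(key) // retry the sync later
		} else {
			rm.queue.Forget(key) // clear the rate limiter's failure count
		}
		rm.queue.Done(key)
	}
}
```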
```diff
@@ -694,16 +686,19 @@ func (rm *ReplicationManager) syncReplicationController(key string) error {
 	}
 	cm := controller.NewPodControllerRefManager(rm.podControl, rc.ObjectMeta, labels.Set(rc.Spec.Selector).AsSelectorPreValidated(), getRCKind())
 	matchesAndControlled, matchesNeedsController, controlledDoesNotMatch := cm.Classify(pods)
-	for _, pod := range matchesNeedsController {
-		err := cm.AdoptPod(pod)
-		// continue to next pod if adoption fails.
-		if err != nil {
-			// If the pod no longer exists, don't even log the error.
-			if !errors.IsNotFound(err) {
-				utilruntime.HandleError(err)
+	// Adopt pods only if this replication controller is not going to be deleted.
+	if rc.DeletionTimestamp == nil {
+		for _, pod := range matchesNeedsController {
+			err := cm.AdoptPod(pod)
+			// continue to next pod if adoption fails.
+			if err != nil {
+				// If the pod no longer exists, don't even log the error.
+				if !errors.IsNotFound(err) {
+					utilruntime.HandleError(err)
+				}
+			} else {
+				matchesAndControlled = append(matchesAndControlled, pod)
 			}
-		} else {
-			matchesAndControlled = append(matchesAndControlled, pod)
 		}
 	}
 	filteredPods = matchesAndControlled
```