mirror of https://github.com/k3s-io/kubernetes.git
Deployment controller's cleanupUnhealthyReplicas should respect minReadySeconds
parent a3f09700e5
commit 726ba45b59
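Summary: cleanupUnhealthyReplicas previously hard-coded 0 as the minReadySeconds argument to deploymentutil.GetAvailablePodsForReplicaSets, so a pod counted as available the moment it became ready. The function now takes minReadySeconds as a parameter, the controller passes deployment.Spec.MinReadySeconds at the call site, and readyPodCount is renamed to availablePodCount to match the new semantics.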
@@ -1047,7 +1047,7 @@ func (dc *DeploymentController) reconcileOldReplicaSets(allRSs []*extensions.Rep
 	// Clean up unhealthy replicas first, otherwise unhealthy replicas will block deployment
 	// and cause timeout. See https://github.com/kubernetes/kubernetes/issues/16737
-	oldRSs, cleanupCount, err := dc.cleanupUnhealthyReplicas(oldRSs, deployment, maxScaledDown)
+	oldRSs, cleanupCount, err := dc.cleanupUnhealthyReplicas(oldRSs, deployment, deployment.Spec.MinReadySeconds, maxScaledDown)
 	if err != nil {
 		return false, nil
 	}
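Why threading minReadySeconds through matters: a pod that is Ready but has not stayed Ready for at least minReadySeconds should not count as available, so it remains a candidate for cleanup. Below is a minimal, self-contained sketch of that gating; the podCondition type and available function are illustrative stand-ins, not the controller's actual helpers (the real filtering happens inside GetAvailablePodsForReplicaSets).

package main

import (
	"fmt"
	"time"
)

// podCondition is a hypothetical stand-in for the readiness data the
// controller inspects on a real api.Pod.
type podCondition struct {
	ready              bool
	lastTransitionTime time.Time
}

// available reports whether a ready pod counts as available under a
// minReadySeconds gate: it must have been ready for at least that long.
func available(c podCondition, minReadySeconds int32, now time.Time) bool {
	if !c.ready {
		return false
	}
	return now.Sub(c.lastTransitionTime) >= time.Duration(minReadySeconds)*time.Second
}

func main() {
	now := time.Now()
	justReady := podCondition{ready: true, lastTransitionTime: now.Add(-2 * time.Second)}
	fmt.Println(available(justReady, 0, now))  // true: the old hard-coded 0 counted it immediately
	fmt.Println(available(justReady, 10, now)) // false: still inside the minReadySeconds window
}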
@@ -1066,7 +1066,7 @@ func (dc *DeploymentController) reconcileOldReplicaSets(allRSs []*extensions.Rep
 }
 
 // cleanupUnhealthyReplicas will scale down old replica sets with unhealthy replicas, so that all unhealthy replicas will be deleted.
-func (dc *DeploymentController) cleanupUnhealthyReplicas(oldRSs []*extensions.ReplicaSet, deployment *extensions.Deployment, maxCleanupCount int32) ([]*extensions.ReplicaSet, int32, error) {
+func (dc *DeploymentController) cleanupUnhealthyReplicas(oldRSs []*extensions.ReplicaSet, deployment *extensions.Deployment, minReadySeconds, maxCleanupCount int32) ([]*extensions.ReplicaSet, int32, error) {
 	sort.Sort(controller.ReplicaSetsByCreationTimestamp(oldRSs))
 	// Safely scale down all old replica sets with unhealthy replicas. Replica set will sort the pods in the order
 	// such that not-ready < ready, unscheduled < scheduled, and pending < running. This ensures that unhealthy replicas will
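Note on the new signature: minReadySeconds, maxCleanupCount int32 declares both parameters as int32, so every caller must now supply the extra argument; both call sites in this commit (the controller call above and the test below) are updated accordingly.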
@@ -1081,16 +1081,16 @@ func (dc *DeploymentController) cleanupUnhealthyReplicas(oldRSs []*extensions.Re
 			continue
 		}
 		// TODO: use dc.getAvailablePodsForReplicaSets instead
-		readyPodCount, err := deploymentutil.GetAvailablePodsForReplicaSets(dc.client, deployment, []*extensions.ReplicaSet{targetRS}, 0)
+		availablePodCount, err := deploymentutil.GetAvailablePodsForReplicaSets(dc.client, deployment, []*extensions.ReplicaSet{targetRS}, minReadySeconds)
 		if err != nil {
 			return nil, totalScaledDown, fmt.Errorf("could not find available pods: %v", err)
 		}
-		if targetRS.Spec.Replicas == readyPodCount {
+		if targetRS.Spec.Replicas == availablePodCount {
 			// no unhealthy replicas found, no scaling required.
 			continue
 		}
 
-		scaledDownCount := int32(integer.IntMin(int(maxCleanupCount-totalScaledDown), int(targetRS.Spec.Replicas-readyPodCount)))
+		scaledDownCount := int32(integer.IntMin(int(maxCleanupCount-totalScaledDown), int(targetRS.Spec.Replicas-availablePodCount)))
 		newReplicasCount := targetRS.Spec.Replicas - scaledDownCount
 		if newReplicasCount > targetRS.Spec.Replicas {
 			return nil, 0, fmt.Errorf("when cleaning up unhealthy replicas, got invalid request to scale down %s/%s %d -> %d", targetRS.Namespace, targetRS.Name, targetRS.Spec.Replicas, newReplicasCount)
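The scale-down clamp above takes the smaller of the remaining cleanup budget and the number of unhealthy replicas in the target replica set. A runnable worked example of that arithmetic, with illustrative values (intMin mirrors integer.IntMin):

package main

import "fmt"

// intMin mirrors integer.IntMin from the diff above.
func intMin(a, b int) int {
	if a < b {
		return a
	}
	return b
}

func main() {
	// Illustrative values: a replica set with 5 replicas, only 2 available,
	// while the cleanup budget has 2 left (maxCleanupCount=4, totalScaledDown=2).
	var replicas, availablePodCount int32 = 5, 2
	var maxCleanupCount, totalScaledDown int32 = 4, 2

	scaledDownCount := int32(intMin(int(maxCleanupCount-totalScaledDown), int(replicas-availablePodCount)))
	newReplicasCount := replicas - scaledDownCount
	fmt.Println(scaledDownCount, newReplicasCount) // 2 3: the budget allows removing only 2 of the 3 unhealthy replicas
}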
@@ -476,7 +476,7 @@ func TestDeploymentController_cleanupUnhealthyReplicas(t *testing.T) {
 			client:        &fakeClientset,
 			eventRecorder: &record.FakeRecorder{},
 		}
-		_, cleanupCount, err := controller.cleanupUnhealthyReplicas(oldRSs, &deployment, int32(test.maxCleanupCount))
+		_, cleanupCount, err := controller.cleanupUnhealthyReplicas(oldRSs, &deployment, 0, int32(test.maxCleanupCount))
 		if err != nil {
 			t.Errorf("unexpected error: %v", err)
 			continue
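The test passes 0 for the new minReadySeconds parameter, which preserves the previous semantics (every ready pod counts as available immediately), so the existing maxCleanupCount expectations continue to hold unchanged.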