Fix rolling-update rollback from an unavailable rc
Rolling back from a broken update with only one replica fails with a timeout in the existing code. The problem is that the scale-down logic does not consider unavailable replicas in the old replication controller when calculating how much to scale down by; with a single replica and a minimum availability of 1, the updater never counts anything as safe to remove, so it loops until it times out. The fix is to allow scaling down all unavailable replicas in the old controller while still maintaining the minimum availability invariant.
commit e5a558c35f
parent 8535f13a2b
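The change itself is a single expression in RollingUpdater.scaleDown. As a quick orientation before the diff, here is a standalone sketch (an illustration only, not code from this commit) contrasting the two decrement calculations on the scenario the message describes:

package main

import "fmt"

func main() {
	oldReplicas := 1  // oldRc.Spec.Replicas: one old replica exists
	oldAvailable := 0 // ...but it is unavailable
	newAvailable := 1 // the replacement pod is ready
	minAvailable := 1 // desired (1) minus maxUnavailable (0)

	// Old calculation: only available old replicas count, so nothing is
	// ever deemed safe to remove.
	before := (oldAvailable + newAvailable) - minAvailable

	// New calculation: every old replica counts, so the unavailable one
	// can be drained while the ready new pod preserves the floor.
	after := oldReplicas + newAvailable - minAvailable

	fmt.Println(before, after) // prints: 0 1
}

With the old formula the scale-down step makes no progress on this input, which is the timeout the message describes.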
@@ -300,14 +300,14 @@ func (r *RollingUpdater) scaleDown(newRc, oldRc *api.ReplicationController, desi
 		return oldRc, nil
 	}
 	// Block until there are any pods ready.
-	oldAvailable, newAvailable, err := r.waitForReadyPods(config.Interval, config.Timeout, oldRc, newRc)
+	_, newAvailable, err := r.waitForReadyPods(config.Interval, config.Timeout, oldRc, newRc)
 	if err != nil {
 		return nil, err
 	}
-	// The old controller is considered as part of the total because we want to
-	// maintain minimum availability even with a volatile old controller.
-	// Scale down as much as possible while maintaining minimum availability.
-	decrement := (oldAvailable + newAvailable) - minAvailable
+	// Scale down as much as possible while maintaining minimum availability
+	decrement := oldRc.Spec.Replicas + newAvailable - minAvailable
 	// The decrement normally shouldn't drop below 0 because the available count
 	// always starts below the old replica count, but the old replica count can
 	// decrement due to externalities like pods death in the replica set. This
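A note on the replacement expression: after an unclamped scale-down step the old controller retains oldRc.Spec.Replicas - decrement = minAvailable - newAvailable replicas, independent of both the old replica count and old readiness. A minimal sketch (not from the commit) of that identity:

package main

import "fmt"

// remainingOld computes how many old replicas survive one scale-down step
// under the new formula, ignoring the clamps in the real updater. The old
// replica count cancels out: exactly enough old replicas are kept to top
// the ready new pods up to the availability floor.
func remainingOld(oldReplicas, newAvailable, minAvailable int) int {
	decrement := oldReplicas + newAvailable - minAvailable
	return oldReplicas - decrement // == minAvailable - newAvailable
}

func main() {
	fmt.Println(remainingOld(1, 1, 1))  // 0: the one-replica rollback can finish
	fmt.Println(remainingOld(10, 4, 9)) // 5: mid-rollout, a buffer is kept
}

Because old readiness no longer appears in the expression, an old controller whose pods are all unavailable can be drained completely, which is exactly the rollback case this commit fixes.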
@@ -308,7 +308,7 @@ Scaling foo-v1 down to 0
 			up{8},
 			down{oldReady: 4, newReady: 8, to: 1},
 			up{10},
-			down{oldReady: 10, newReady: 1, to: 0},
+			down{oldReady: 1, newReady: 10, to: 0},
 		},
 		output: `Created foo-v2
 Scaling up foo-v2 from 0 to 10, scaling down foo-v1 from 10 to 0 (keep 9 pods available, don't exceed 12 pods)
@@ -516,7 +516,7 @@ Scaling foo-v1 down to 0
 			up{8},
 			down{oldReady: 4, newReady: 8, to: 2},
 			up{10},
-			down{oldReady: 10, newReady: 2, to: 0},
+			down{oldReady: 1, newReady: 10, to: 0},
 		},
 		output: `Created foo-v2
 Scaling up foo-v2 from 5 to 10, scaling down foo-v1 from 6 to 0 (keep 10 pods available, don't exceed 12 pods)
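The fixture changes in the two hunks above re-express the same scaling trajectory for the new formula: at the final step it is the ten ready foo-v2 pods, not foo-v1's own readiness, that justify removing the last old replica. A small sketch of that step, assuming a simplified clamped version of the updater's arithmetic and assuming foo-v1 is down to a single replica at that point (the diff does not show the intermediate state):

package main

import "fmt"

// to computes the scale-down target under the new formula, clamped so a
// step never removes more replicas than exist and never scales up.
func to(oldReplicas, newAvailable, minAvailable int) int {
	decrement := oldReplicas + newAvailable - minAvailable
	if decrement < 0 {
		decrement = 0
	}
	if decrement > oldReplicas {
		decrement = oldReplicas
	}
	return oldReplicas - decrement
}

func main() {
	// down{oldReady: 1, newReady: 10, to: 0} with an availability floor
	// of 9 (first test) and of 10 (second test):
	fmt.Println(to(1, 10, 9))  // 0
	fmt.Println(to(1, 10, 10)) // 0
}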
@@ -542,6 +542,21 @@ Scaling foo-v1 down to 0
 Scaling up foo-v2 from 0 to 20, scaling down foo-v1 from 10 to 0 (keep 10 pods available, don't exceed 70 pods)
 Scaling foo-v2 up to 20
 Scaling foo-v1 down to 0
 `,
+	}, {
+		name:        "1->1 0/1 scale down unavailable rc to a ready rc (rollback)",
+		oldRc:       oldRc(1, 1),
+		newRc:       newRc(1, 1),
+		newRcExists: true,
+		maxUnavail:  intstr.FromInt(0),
+		maxSurge:    intstr.FromInt(1),
+		expected: []interface{}{
+			up{1},
+			down{oldReady: 0, newReady: 1, to: 0},
+		},
+		output: `Continuing update with existing controller foo-v2.
+Scaling up foo-v2 from 1 to 1, scaling down foo-v1 from 1 to 0 (keep 1 pods available, don't exceed 2 pods)
+Scaling foo-v1 down to 0
+`,
 	},
 	}
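The added case is a direct regression test for the scenario in the commit message: desired size 1 with maxUnavail 0 and maxSurge 1 gives a floor of 1 available pod and a ceiling of 2 total. With the old pod unready (oldReady: 0) and the new pod ready (newReady: 1), the fixed formula gives decrement = 1 + 1 - 1 = 1, so foo-v1 scales from 1 to 0 exactly as the expected down step records, while the previous formula gave (0 + 1) - 1 = 0 and the rollback hung until timeout.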