Merge pull request #22526 from kargakis/another-rolling-updater-fix

Auto commit by PR queue bot
k8s-merge-robot 2016-03-05 03:07:15 -08:00
commit 953e21c8f9
2 changed files with 31 additions and 17 deletions

Changed file 1 of 2

@@ -191,11 +191,6 @@ func (r *RollingUpdater) Update(config *RollingUpdaterConfig) error {
 		}
 		oldRc = updated
 	}
-	original, err := strconv.Atoi(oldRc.Annotations[originalReplicasAnnotation])
-	if err != nil {
-		return fmt.Errorf("Unable to parse annotation for %s: %s=%s\n",
-			oldRc.Name, originalReplicasAnnotation, oldRc.Annotations[originalReplicasAnnotation])
-	}
 	// The maximum pods which can go unavailable during the update.
 	maxUnavailable, err := intstr.GetValueFromIntOrPercent(&config.MaxUnavailable, desired, false)
 	if err != nil {
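
For context, maxUnavailable and maxSurge are int-or-percent settings resolved against the desired replica count; the `false` argument above means a percentage is rounded down, while maxSurge (resolved just below this hunk) is presumably rounded up, which matches the test expectations later in the diff. A minimal standalone sketch of that rounding, not the intstr implementation itself:

package main

import (
	"fmt"
	"math"
)

// resolve mimics how an int-or-percent setting becomes a pod count: a
// percentage is taken of the desired replica count and rounded down
// (maxUnavailable) or up (maxSurge). Plain integers would pass through as-is.
func resolve(percent float64, desired int, roundUp bool) int {
	v := percent / 100 * float64(desired)
	if roundUp {
		return int(math.Ceil(v))
	}
	return int(math.Floor(v))
}

func main() {
	desired := 3
	fmt.Println(resolve(25, desired, false)) // maxUnavailable "25%" of 3 -> 0
	fmt.Println(resolve(25, desired, true))  // maxSurge "25%" of 3 -> 1
}
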
@@ -217,12 +212,12 @@ func (r *RollingUpdater) Update(config *RollingUpdaterConfig) error {
 	// the effective scale of the old RC regardless of the configuration
 	// (equivalent to 100% maxUnavailable).
 	if desired == 0 {
-		maxUnavailable = original
+		maxUnavailable = oldRc.Spec.Replicas
 		minAvailable = 0
 	}
 	fmt.Fprintf(out, "Scaling up %s from %d to %d, scaling down %s from %d to 0 (keep %d pods available, don't exceed %d pods)\n",
-		newRc.Name, newRc.Spec.Replicas, desired, oldRc.Name, oldRc.Spec.Replicas, minAvailable, original+maxSurge)
+		newRc.Name, newRc.Spec.Replicas, desired, oldRc.Name, oldRc.Spec.Replicas, minAvailable, desired+maxSurge)
 	// Scale newRc and oldRc until newRc has the desired number of replicas and
 	// oldRc has 0 replicas.
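
This hunk is the heart of the fix: the desired == 0 special case now uses the old controller's live replica count, and the ceiling printed in the progress message is desired+maxSurge, rather than values derived from the originalReplicasAnnotation that the deleted block parsed. A rough sketch of the resulting bounds, assuming minAvailable is max(0, desired - maxUnavailable) as the surrounding updater code and the test outputs below imply (names are illustrative):

package main

import "fmt"

// effectiveLimits sketches the two bounds the updater enforces: at least
// minAvail pods must stay ready, and old+new pods together never exceed ceiling.
func effectiveLimits(desired, maxUnavailable, maxSurge int) (minAvail, ceiling int) {
	minAvail = desired - maxUnavailable
	if minAvail < 0 {
		minAvail = 0
	}
	if desired == 0 {
		// Scaling to zero: effectively 100% maxUnavailable, nothing has to stay up.
		minAvail = 0
	}
	ceiling = desired + maxSurge // previously original+maxSurge
	return minAvail, ceiling
}

func main() {
	// 30 -> 2 with maxUnavailable=1, maxSurge=0 (the revised test case below):
	minAvail, ceiling := effectiveLimits(2, 1, 0)
	fmt.Printf("keep %d pods available, don't exceed %d pods\n", minAvail, ceiling) // keep 1, don't exceed 2
}
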
@@ -233,7 +228,7 @@ func (r *RollingUpdater) Update(config *RollingUpdaterConfig) error {
 		oldReplicas := oldRc.Spec.Replicas
 		// Scale up as much as possible.
-		scaledRc, err := r.scaleUp(newRc, oldRc, original, desired, maxSurge, maxUnavailable, scaleRetryParams, config)
+		scaledRc, err := r.scaleUp(newRc, oldRc, desired, maxSurge, maxUnavailable, scaleRetryParams, config)
 		if err != nil {
 			return err
 		}
@@ -266,14 +261,14 @@ func (r *RollingUpdater) Update(config *RollingUpdaterConfig) error {
 // scaleUp scales up newRc to desired by whatever increment is possible given
 // the configured surge threshold. scaleUp will safely no-op as necessary when
 // it detects redundancy or other relevant conditions.
-func (r *RollingUpdater) scaleUp(newRc, oldRc *api.ReplicationController, original, desired, maxSurge, maxUnavailable int, scaleRetryParams *RetryParams, config *RollingUpdaterConfig) (*api.ReplicationController, error) {
+func (r *RollingUpdater) scaleUp(newRc, oldRc *api.ReplicationController, desired, maxSurge, maxUnavailable int, scaleRetryParams *RetryParams, config *RollingUpdaterConfig) (*api.ReplicationController, error) {
 	// If we're already at the desired, do nothing.
 	if newRc.Spec.Replicas == desired {
 		return newRc, nil
 	}
 	// Scale up as far as we can based on the surge limit.
-	increment := (original + maxSurge) - (oldRc.Spec.Replicas + newRc.Spec.Replicas)
+	increment := (desired + maxSurge) - (oldRc.Spec.Replicas + newRc.Spec.Replicas)
 	// If the old is already scaled down, go ahead and scale all the way up.
 	if oldRc.Spec.Replicas == 0 {
 		increment = desired - newRc.Spec.Replicas
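
With the signature change, the surge calculation no longer needs the original replica count at all: one scale-up step is bounded by desired+maxSurge minus whatever is already running. A simplified standalone sketch of that step (the real scaleUp also waits on readiness and retries through the scaler):

package main

import "fmt"

// nextNewReplicas returns how many replicas the new controller may run after
// one scale-up step: the old+new total must stay within desired+maxSurge, and
// once the old controller is gone the new one may jump straight to desired.
func nextNewReplicas(oldReplicas, newReplicas, desired, maxSurge int) int {
	increment := (desired + maxSurge) - (oldReplicas + newReplicas)
	if oldReplicas == 0 {
		increment = desired - newReplicas
	}
	if increment <= 0 {
		return newReplicas // surge budget exhausted; nothing to do this round
	}
	next := newReplicas + increment
	if next > desired {
		next = desired
	}
	return next
}

func main() {
	// 1 -> 2 with maxSurge=1 (25% of 2 rounded up): both new pods fit under
	// the 3-pod ceiling, so foo-v2 can go straight to 2.
	fmt.Println(nextNewReplicas(1, 0, 2, 1)) // 2
	// 30 -> 2 with maxSurge=0: no room until foo-v1 is scaled down first.
	fmt.Println(nextNewReplicas(30, 0, 2, 0)) // 0
}
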

Changed file 2 of 2

@@ -541,7 +541,7 @@ Scaling foo-v1 down to 0
 				down{oldReady: 10, newReady: 20, to: 0},
 			},
 			output: `Created foo-v2
-Scaling up foo-v2 from 0 to 20, scaling down foo-v1 from 10 to 0 (keep 20 pods available, don't exceed 70 pods)
+Scaling up foo-v2 from 0 to 20, scaling down foo-v1 from 10 to 0 (keep 20 pods available, don't exceed 80 pods)
 Scaling foo-v2 up to 20
 Scaling foo-v1 down to 0
 `,
@@ -572,7 +572,7 @@ Scaling foo-v1 down to 0
 				down{oldReady: 3, newReady: 0, to: 0},
 			},
 			output: `Continuing update with existing controller foo-v2.
-Scaling up foo-v2 from 0 to 0, scaling down foo-v1 from 3 to 0 (keep 0 pods available, don't exceed 4 pods)
+Scaling up foo-v2 from 0 to 0, scaling down foo-v1 from 3 to 0 (keep 0 pods available, don't exceed 1 pods)
 Scaling foo-v1 down to 0
 `,
 		},
@@ -587,7 +587,7 @@ Scaling foo-v1 down to 0
 				down{oldReady: 3, newReady: 0, to: 0},
 			},
 			output: `Continuing update with existing controller foo-v2.
-Scaling up foo-v2 from 0 to 0, scaling down foo-v1 from 3 to 0 (keep 0 pods available, don't exceed 3 pods)
+Scaling up foo-v2 from 0 to 0, scaling down foo-v1 from 3 to 0 (keep 0 pods available, don't exceed 0 pods)
 Scaling foo-v1 down to 0
 `,
 		},
@@ -602,7 +602,7 @@ Scaling foo-v1 down to 0
 				down{oldReady: 3, newReady: 0, to: 0},
 			},
 			output: `Created foo-v2
-Scaling up foo-v2 from 0 to 0, scaling down foo-v1 from 3 to 0 (keep 0 pods available, don't exceed 3 pods)
+Scaling up foo-v2 from 0 to 0, scaling down foo-v1 from 3 to 0 (keep 0 pods available, don't exceed 0 pods)
 Scaling foo-v1 down to 0
 `,
 		},
@@ -628,14 +628,16 @@ Scaling up foo-v2 from 0 to 0, scaling down foo-v1 from 0 to 0 (keep 0 pods avai
 			maxSurge:    intstr.FromInt(0),
 			expected: []interface{}{
 				down{oldReady: 30, newReady: 0, to: 1},
-				up{2},
+				up{1},
 				down{oldReady: 1, newReady: 2, to: 0},
+				up{2},
 			},
 			output: `Created foo-v2
-Scaling up foo-v2 from 0 to 2, scaling down foo-v1 from 30 to 0 (keep 1 pods available, don't exceed 30 pods)
+Scaling up foo-v2 from 0 to 2, scaling down foo-v1 from 30 to 0 (keep 1 pods available, don't exceed 2 pods)
 Scaling foo-v1 down to 1
-Scaling foo-v2 up to 2
+Scaling foo-v2 up to 1
 Scaling foo-v1 down to 0
+Scaling foo-v2 up to 2
 `,
 		},
 		{
@@ -674,6 +676,23 @@ Scaling foo-v2 up to 2
 Scaling up foo-v2 from 0 to 1, scaling down foo-v1 from 1 to 0 (keep 0 pods available, don't exceed 1 pods)
 Scaling foo-v1 down to 0
 Scaling foo-v2 up to 1
 `,
 		},
+		{
+			name:        "1->2 25/25 complex asymetric deployment",
+			oldRc:       oldRc(1, 1),
+			newRc:       newRc(0, 2),
+			newRcExists: false,
+			maxUnavail:  intstr.FromString("25%"),
+			maxSurge:    intstr.FromString("25%"),
+			expected: []interface{}{
+				up{2},
+				down{oldReady: 1, newReady: 2, to: 0},
+			},
+			output: `Created foo-v2
+Scaling up foo-v2 from 0 to 2, scaling down foo-v1 from 1 to 0 (keep 2 pods available, don't exceed 3 pods)
+Scaling foo-v2 up to 2
+Scaling foo-v1 down to 0
+`,
+		},
 	}
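
The expected message in the new test case follows directly from the revised formula. Assuming the percent rounding described above (25% of 2 rounds down to 0 for maxUnavailable and up to 1 for maxSurge), the numbers work out as in this small recomputation, which is illustrative arithmetic rather than the updater code:

package main

import (
	"fmt"
	"math"
)

func main() {
	desired := 2
	maxUnavailable := int(math.Floor(0.25 * float64(desired))) // "25%" of 2 -> 0
	maxSurge := int(math.Ceil(0.25 * float64(desired)))        // "25%" of 2 -> 1
	minAvailable := desired - maxUnavailable                   // 2
	ceiling := desired + maxSurge                               // 3
	fmt.Printf("keep %d pods available, don't exceed %d pods\n", minAvailable, ceiling)
}
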