diff --git a/pkg/kubectl/rolling_updater.go b/pkg/kubectl/rolling_updater.go index 4b8299afe4b..e69d889a73f 100644 --- a/pkg/kubectl/rolling_updater.go +++ b/pkg/kubectl/rolling_updater.go @@ -29,6 +29,7 @@ import ( client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/runtime" + "k8s.io/kubernetes/pkg/util/deployment" "k8s.io/kubernetes/pkg/util/integer" "k8s.io/kubernetes/pkg/util/intstr" "k8s.io/kubernetes/pkg/util/wait" @@ -191,13 +192,9 @@ func (r *RollingUpdater) Update(config *RollingUpdaterConfig) error { } oldRc = updated } - // The maximum pods which can go unavailable during the update. - maxUnavailable, err := intstr.GetValueFromIntOrPercent(&config.MaxUnavailable, desired, false) - if err != nil { - return err - } - // The maximum scaling increment. - maxSurge, err := intstr.GetValueFromIntOrPercent(&config.MaxSurge, desired, true) + // maxSurge is the maximum scaling increment and maxUnavailable is the maximum number + // of pods that can be unavailable during a rollout. 
+ maxSurge, maxUnavailable, err := deployment.ResolveFenceposts(&config.MaxSurge, &config.MaxUnavailable, desired) if err != nil { return err } diff --git a/pkg/kubectl/rolling_updater_test.go b/pkg/kubectl/rolling_updater_test.go index 8c88de0074a..727e66abcad 100644 --- a/pkg/kubectl/rolling_updater_test.go +++ b/pkg/kubectl/rolling_updater_test.go @@ -662,11 +662,11 @@ Scaling foo-v2 up to 2 `, }, { - name: "1->1 100%/0 allow maxUnavailability", + name: "1->1 1/0 allow maxUnavailability", oldRc: oldRc(1, 1), newRc: newRc(0, 1), newRcExists: false, - maxUnavail: intstr.FromString("100%"), + maxUnavail: intstr.FromString("1%"), maxSurge: intstr.FromInt(0), expected: []interface{}{ down{oldReady: 1, newReady: 0, to: 0}, @@ -693,6 +693,48 @@ Scaling foo-v2 up to 1 Scaling up foo-v2 from 0 to 2, scaling down foo-v1 from 1 to 0 (keep 2 pods available, don't exceed 3 pods) Scaling foo-v2 up to 2 Scaling foo-v1 down to 0 +`, + }, + { + name: "2->2 25/1 maxSurge trumps maxUnavailable", + oldRc: oldRc(2, 2), + newRc: newRc(0, 2), + newRcExists: false, + maxUnavail: intstr.FromString("25%"), + maxSurge: intstr.FromString("1%"), + expected: []interface{}{ + up{1}, + down{oldReady: 2, newReady: 1, to: 1}, + up{2}, + down{oldReady: 1, newReady: 2, to: 0}, + }, + output: `Created foo-v2 +Scaling up foo-v2 from 0 to 2, scaling down foo-v1 from 2 to 0 (keep 2 pods available, don't exceed 3 pods) +Scaling foo-v2 up to 1 +Scaling foo-v1 down to 1 +Scaling foo-v2 up to 2 +Scaling foo-v1 down to 0 +`, + }, + { + name: "2->2 25/0 maxUnavailable resolves to zero, then one", + oldRc: oldRc(2, 2), + newRc: newRc(0, 2), + newRcExists: false, + maxUnavail: intstr.FromString("25%"), + maxSurge: intstr.FromString("0%"), + expected: []interface{}{ + down{oldReady: 2, newReady: 0, to: 1}, + up{1}, + down{oldReady: 1, newReady: 1, to: 0}, + up{2}, + }, + output: `Created foo-v2 +Scaling up foo-v2 from 0 to 2, scaling down foo-v1 from 2 to 0 (keep 1 pods available, don't exceed 2 pods) +Scaling 
foo-v1 down to 1 +Scaling foo-v2 up to 1 +Scaling foo-v1 down to 0 +Scaling foo-v2 up to 2 `, }, } diff --git a/pkg/util/deployment/deployment.go b/pkg/util/deployment/deployment.go index 6ad5e6bf6d9..869936109cf 100644 --- a/pkg/util/deployment/deployment.go +++ b/pkg/util/deployment/deployment.go @@ -466,3 +466,33 @@ func WaitForObservedDeployment(getDeploymentFunc func() (*extensions.Deployment, return deployment.Status.ObservedGeneration >= desiredGeneration, nil }) } + +// ResolveFenceposts resolves both maxSurge and maxUnavailable. This needs to happen in one +// step. For example: +// +// 2 desired, max unavailable 1%, surge 0% - should scale old(-1), then new(+1), then old(-1), then new(+1) +// 1 desired, max unavailable 1%, surge 0% - should scale old(-1), then new(+1) +// 2 desired, max unavailable 25%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1) +// 1 desired, max unavailable 25%, surge 1% - should scale new(+1), then old(-1) +// 2 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1) +// 1 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1) +func ResolveFenceposts(maxSurge, maxUnavailable *intstrutil.IntOrString, desired int) (int, int, error) { + surge, err := intstrutil.GetValueFromIntOrPercent(maxSurge, desired, true) + if err != nil { + return 0, 0, err + } + unavailable, err := intstrutil.GetValueFromIntOrPercent(maxUnavailable, desired, false) + if err != nil { + return 0, 0, err + } + + if surge == 0 && unavailable == 0 { + // Validation should never allow the user to explicitly use zero values for both maxSurge + // and maxUnavailable. Due to rounding down maxUnavailable though, it may resolve to zero. + // If both fenceposts resolve to zero, then we should set maxUnavailable to 1 on the + // theory that surge might not work due to quota. + unavailable = 1 + } + + return surge, unavailable, nil +}