From 77097d9a20d9aaa680508250d2bb66e4ce2599e0 Mon Sep 17 00:00:00 2001
From: Michail Kargakis
Date: Fri, 4 Mar 2016 11:29:55 +0100
Subject: [PATCH 1/2] kubectl: set maxUnavailable to 1 if both fenceposts resolve to zero

Due to rounding down for maxUnavailable, we may end up with rolling
updates that have zero surge and zero unavailable pods, something that
1) is not allowed per validation and 2) blocks updates. If we end up in
such a situation, set maxUnavailable to 1 on the theory that surge might
not work due to quota.
---
 pkg/kubectl/rolling_updater.go      | 11 +++----
 pkg/kubectl/rolling_updater_test.go | 46 +++++++++++++++++++++++++++--
 pkg/util/deployment/deployment.go   | 30 +++++++++++++++++++
 3 files changed, 78 insertions(+), 9 deletions(-)

diff --git a/pkg/kubectl/rolling_updater.go b/pkg/kubectl/rolling_updater.go
index 4b8299afe4b..e69d889a73f 100644
--- a/pkg/kubectl/rolling_updater.go
+++ b/pkg/kubectl/rolling_updater.go
@@ -29,6 +29,7 @@ import (
 	client "k8s.io/kubernetes/pkg/client/unversioned"
 	"k8s.io/kubernetes/pkg/labels"
 	"k8s.io/kubernetes/pkg/runtime"
+	"k8s.io/kubernetes/pkg/util/deployment"
 	"k8s.io/kubernetes/pkg/util/integer"
 	"k8s.io/kubernetes/pkg/util/intstr"
 	"k8s.io/kubernetes/pkg/util/wait"
@@ -191,13 +192,9 @@ func (r *RollingUpdater) Update(config *RollingUpdaterConfig) error {
 		}
 		oldRc = updated
 	}
-	// The maximum pods which can go unavailable during the update.
-	maxUnavailable, err := intstr.GetValueFromIntOrPercent(&config.MaxUnavailable, desired, false)
-	if err != nil {
-		return err
-	}
-	// The maximum scaling increment.
-	maxSurge, err := intstr.GetValueFromIntOrPercent(&config.MaxSurge, desired, true)
+	// maxSurge is the maximum scaling increment and maxUnavailable is the
+	// maximum number of pods that can be unavailable during a rollout.
+	maxSurge, maxUnavailable, err := deployment.ResolveFenceposts(&config.MaxSurge, &config.MaxUnavailable, desired)
 	if err != nil {
 		return err
 	}
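Why the old call pair could deadlock: the int-or-percent resolution rounds
maxSurge up and maxUnavailable down, so small percentages can resolve both
fenceposts to zero. A minimal, self-contained Go sketch of that arithmetic;
resolveFencepost is a hypothetical stand-in for intstr.GetValueFromIntOrPercent,
not code from this patch:

    package main

    import (
    	"fmt"
    	"math"
    )

    // resolveFencepost scales a percentage against the desired replica count,
    // rounding up for maxSurge and down for maxUnavailable.
    func resolveFencepost(percent, desired int, roundUp bool) int {
    	scaled := float64(desired) * float64(percent) / 100.0
    	if roundUp {
    		return int(math.Ceil(scaled))
    	}
    	return int(math.Floor(scaled))
    }

    func main() {
    	desired := 2

    	// 1% surge with 25% unavailable: surge resolves to 1, so the update
    	// can always add a pod first and make progress.
    	fmt.Println(resolveFencepost(1, desired, true), resolveFencepost(25, desired, false)) // 1 0

    	// 0% surge with 25% unavailable: both fenceposts resolve to zero, so
    	// no pod may be added or removed and the rollout deadlocks. This is
    	// the case the patch bumps to maxUnavailable=1.
    	fmt.Println(resolveFencepost(0, desired, true), resolveFencepost(25, desired, false)) // 0 0
    }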
diff --git a/pkg/kubectl/rolling_updater_test.go b/pkg/kubectl/rolling_updater_test.go
index 8c88de0074a..727e66abcad 100644
--- a/pkg/kubectl/rolling_updater_test.go
+++ b/pkg/kubectl/rolling_updater_test.go
@@ -662,11 +662,11 @@ Scaling foo-v2 up to 2
 `,
 		},
 		{
-			name:        "1->1 100%/0 allow maxUnavailability",
+			name:        "1->1 1/0 allow maxUnavailability",
 			oldRc:       oldRc(1, 1),
 			newRc:       newRc(0, 1),
 			newRcExists: false,
-			maxUnavail:  intstr.FromString("100%"),
+			maxUnavail:  intstr.FromString("1%"),
 			maxSurge:    intstr.FromInt(0),
 			expected: []interface{}{
 				down{oldReady: 1, newReady: 0, to: 0},
@@ -693,6 +693,48 @@ Scaling foo-v2 up to 1
 Scaling up foo-v2 from 0 to 2, scaling down foo-v1 from 1 to 0 (keep 2 pods available, don't exceed 3 pods)
 Scaling foo-v2 up to 2
 Scaling foo-v1 down to 0
+`,
+		},
+		{
+			name:        "2->2 25/1 maxSurge trumps maxUnavailable",
+			oldRc:       oldRc(2, 2),
+			newRc:       newRc(0, 2),
+			newRcExists: false,
+			maxUnavail:  intstr.FromString("25%"),
+			maxSurge:    intstr.FromString("1%"),
+			expected: []interface{}{
+				up{1},
+				down{oldReady: 2, newReady: 1, to: 1},
+				up{2},
+				down{oldReady: 1, newReady: 2, to: 0},
+			},
+			output: `Created foo-v2
+Scaling up foo-v2 from 0 to 2, scaling down foo-v1 from 2 to 0 (keep 2 pods available, don't exceed 3 pods)
+Scaling foo-v2 up to 1
+Scaling foo-v1 down to 1
+Scaling foo-v2 up to 2
+Scaling foo-v1 down to 0
+`,
+		},
+		{
+			name:        "2->2 25/0 maxUnavailable resolves to zero, then one",
+			oldRc:       oldRc(2, 2),
+			newRc:       newRc(0, 2),
+			newRcExists: false,
+			maxUnavail:  intstr.FromString("25%"),
+			maxSurge:    intstr.FromString("0%"),
+			expected: []interface{}{
+				down{oldReady: 2, newReady: 0, to: 1},
+				up{1},
+				down{oldReady: 1, newReady: 1, to: 0},
+				up{2},
+			},
+			output: `Created foo-v2
+Scaling up foo-v2 from 0 to 2, scaling down foo-v1 from 2 to 0 (keep 1 pods available, don't exceed 2 pods)
+Scaling foo-v1 down to 1
+Scaling foo-v2 up to 1
+Scaling foo-v1 down to 0
+Scaling foo-v2 up to 2
 `,
 		},
 	}
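Working the fenceposts for the two new fixtures by hand (ceil for maxSurge,
floor for maxUnavailable, then the zero/zero bump from ResolveFenceposts
below):

    2->2 25/1: surge = ceil(2 * 1/100)  = 1, unavailable = floor(2 * 25/100) = 0
               -> surge wins: up(+1), down(-1), up(+1), down(-1)
    2->2 25/0: surge = ceil(2 * 0/100)  = 0, unavailable = floor(2 * 25/100) = 0, bumped to 1
               -> scale down first: down(-1), up(+1), down(-1), up(+1)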
diff --git a/pkg/util/deployment/deployment.go b/pkg/util/deployment/deployment.go
index 6ad5e6bf6d9..869936109cf 100644
--- a/pkg/util/deployment/deployment.go
+++ b/pkg/util/deployment/deployment.go
@@ -466,3 +466,33 @@ func WaitForObservedDeployment(getDeploymentFunc func() (*extensions.Deployment,
 		return deployment.Status.ObservedGeneration >= desiredGeneration, nil
 	})
 }
+
+// ResolveFenceposts resolves both maxSurge and maxUnavailable. This needs to happen in one
+// step. For example:
+//
+// 2 desired, max unavailable 1%, surge 0% - should scale old(-1), then new(+1), then old(-1), then new(+1)
+// 1 desired, max unavailable 1%, surge 0% - should scale old(-1), then new(+1)
+// 2 desired, max unavailable 25%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1)
+// 1 desired, max unavailable 25%, surge 1% - should scale new(+1), then old(-1)
+// 2 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1)
+// 1 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1)
+func ResolveFenceposts(maxSurge, maxUnavailable *intstrutil.IntOrString, desired int) (int, int, error) {
+	surge, err := intstrutil.GetValueFromIntOrPercent(maxSurge, desired, true)
+	if err != nil {
+		return 0, 0, err
+	}
+	unavailable, err := intstrutil.GetValueFromIntOrPercent(maxUnavailable, desired, false)
+	if err != nil {
+		return 0, 0, err
+	}
+
+	if surge == 0 && unavailable == 0 {
+		// Validation should never allow the user to explicitly use zero values for both maxSurge
+		// and maxUnavailable. Due to rounding down maxUnavailable though, it may resolve to zero.
+		// If both fenceposts resolve to zero, then we should set maxUnavailable to 1 on the
+		// theory that surge might not work due to quota.
+		unavailable = 1
+	}
+
+	return surge, unavailable, nil
+}
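A short usage sketch for the new helper, assuming this tree's import paths
(k8s.io/kubernetes/pkg/util/deployment and k8s.io/kubernetes/pkg/util/intstr)
and made-up replica counts:

    package main

    import (
    	"fmt"

    	"k8s.io/kubernetes/pkg/util/deployment"
    	"k8s.io/kubernetes/pkg/util/intstr"
    )

    func main() {
    	maxSurge := intstr.FromString("0%")
    	maxUnavailable := intstr.FromString("25%")
    	// With 2 desired replicas both fenceposts round to zero, so the
    	// helper bumps unavailable to 1 and the rollout can make progress.
    	surge, unavailable, err := deployment.ResolveFenceposts(&maxSurge, &maxUnavailable, 2)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Printf("surge=%d unavailable=%d\n", surge, unavailable) // surge=0 unavailable=1
    }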
From 13889199762e28b39c67b08bebdad191d39c91b5 Mon Sep 17 00:00:00 2001
From: Michail Kargakis
Date: Mon, 7 Mar 2016 11:18:58 +0100
Subject: [PATCH 2/2] controller: resolve unavailable in conjunction with surge for deployments

Due to rounding down for maxUnavailable, we may end up with deployments
that have zero surge and zero unavailable pods, something that 1) is not
allowed per validation and 2) blocks deployments. If we end up in such a
situation, set maxUnavailable to 1 on the theory that surge might not
work due to quota.
---
 .../deployment/deployment_controller.go      |  5 ++--
 .../deployment/deployment_controller_test.go | 26 ++++++++++---------
 2 files changed, 16 insertions(+), 15 deletions(-)

diff --git a/pkg/controller/deployment/deployment_controller.go b/pkg/controller/deployment/deployment_controller.go
index ce39936056d..108c254249b 100644
--- a/pkg/controller/deployment/deployment_controller.go
+++ b/pkg/controller/deployment/deployment_controller.go
@@ -38,7 +38,6 @@ import (
 	deploymentutil "k8s.io/kubernetes/pkg/util/deployment"
 	utilerrors "k8s.io/kubernetes/pkg/util/errors"
 	"k8s.io/kubernetes/pkg/util/integer"
-	intstrutil "k8s.io/kubernetes/pkg/util/intstr"
 	labelsutil "k8s.io/kubernetes/pkg/util/labels"
 	podutil "k8s.io/kubernetes/pkg/util/pod"
 	utilruntime "k8s.io/kubernetes/pkg/util/runtime"
@@ -843,7 +842,7 @@ func (dc *DeploymentController) reconcileOldReplicaSets(allRSs []*extensions.Rep
 		return false, fmt.Errorf("could not find available pods: %v", err)
 	}
 
-	maxUnavailable, err := intstrutil.GetValueFromIntOrPercent(&deployment.Spec.Strategy.RollingUpdate.MaxUnavailable, deployment.Spec.Replicas, false)
+	_, maxUnavailable, err := deploymentutil.ResolveFenceposts(&deployment.Spec.Strategy.RollingUpdate.MaxSurge, &deployment.Spec.Strategy.RollingUpdate.MaxUnavailable, deployment.Spec.Replicas)
 	if err != nil {
 		return false, err
 	}
@@ -940,7 +939,7 @@ func (dc *DeploymentController) cleanupUnhealthyReplicas(oldRSs []*extensions.Re
 // scaleDownOldReplicaSetsForRollingUpdate scales down old replica sets when deployment strategy is "RollingUpdate".
 // Need to check maxUnavailable to ensure availability
 func (dc *DeploymentController) scaleDownOldReplicaSetsForRollingUpdate(allRSs []*extensions.ReplicaSet, oldRSs []*extensions.ReplicaSet, deployment *extensions.Deployment) (int, error) {
-	maxUnavailable, err := intstrutil.GetValueFromIntOrPercent(&deployment.Spec.Strategy.RollingUpdate.MaxUnavailable, deployment.Spec.Replicas, false)
+	_, maxUnavailable, err := deploymentutil.ResolveFenceposts(&deployment.Spec.Strategy.RollingUpdate.MaxSurge, &deployment.Spec.Strategy.RollingUpdate.MaxUnavailable, deployment.Spec.Replicas)
 	if err != nil {
 		return 0, err
 	}
diff --git a/pkg/controller/deployment/deployment_controller_test.go b/pkg/controller/deployment/deployment_controller_test.go
index 2c010f0875f..6a8d7115298 100644
--- a/pkg/controller/deployment/deployment_controller_test.go
+++ b/pkg/controller/deployment/deployment_controller_test.go
@@ -230,13 +230,14 @@ func TestDeploymentController_reconcileOldReplicaSets(t *testing.T) {
 		expectedOldReplicas int
 	}{
 		{
-			deploymentReplicas: 10,
-			maxUnavailable:     intstr.FromInt(0),
-			oldReplicas:        10,
-			newReplicas:        0,
-			readyPodsFromOldRS: 10,
-			readyPodsFromNewRS: 0,
-			scaleExpected:      false,
+			deploymentReplicas:  10,
+			maxUnavailable:      intstr.FromInt(0),
+			oldReplicas:         10,
+			newReplicas:         0,
+			readyPodsFromOldRS:  10,
+			readyPodsFromNewRS:  0,
+			scaleExpected:       true,
+			expectedOldReplicas: 9,
 		},
 		{
 			deploymentReplicas: 10,
@@ -492,11 +493,12 @@ func TestDeploymentController_scaleDownOldReplicaSetsForRollingUpdate(t *testing
 		expectedOldReplicas int
 	}{
 		{
-			deploymentReplicas: 10,
-			maxUnavailable:     intstr.FromInt(0),
-			readyPods:          10,
-			oldReplicas:        10,
-			scaleExpected:      false,
+			deploymentReplicas:  10,
+			maxUnavailable:      intstr.FromInt(0),
+			readyPods:           10,
+			oldReplicas:         10,
+			scaleExpected:       true,
+			expectedOldReplicas: 9,
 		},
 		{
 			deploymentReplicas: 10,