Merge pull request #22516 from kargakis/resolve-fenceposts-together

Auto commit by PR queue bot

commit ec9c67d65d
@@ -38,7 +38,6 @@ import (
 	deploymentutil "k8s.io/kubernetes/pkg/util/deployment"
 	utilerrors "k8s.io/kubernetes/pkg/util/errors"
 	"k8s.io/kubernetes/pkg/util/integer"
-	intstrutil "k8s.io/kubernetes/pkg/util/intstr"
 	labelsutil "k8s.io/kubernetes/pkg/util/labels"
 	podutil "k8s.io/kubernetes/pkg/util/pod"
 	utilruntime "k8s.io/kubernetes/pkg/util/runtime"
@@ -843,7 +842,7 @@ func (dc *DeploymentController) reconcileOldReplicaSets(allRSs []*extensions.Rep
 		return false, fmt.Errorf("could not find available pods: %v", err)
 	}
 
-	maxUnavailable, err := intstrutil.GetValueFromIntOrPercent(&deployment.Spec.Strategy.RollingUpdate.MaxUnavailable, deployment.Spec.Replicas, false)
+	_, maxUnavailable, err := deploymentutil.ResolveFenceposts(&deployment.Spec.Strategy.RollingUpdate.MaxSurge, &deployment.Spec.Strategy.RollingUpdate.MaxUnavailable, deployment.Spec.Replicas)
 	if err != nil {
 		return false, err
 	}
@@ -940,7 +939,7 @@ func (dc *DeploymentController) cleanupUnhealthyReplicas(oldRSs []*extensions.Re
 // scaleDownOldReplicaSetsForRollingUpdate scales down old replica sets when deployment strategy is "RollingUpdate".
 // Need check maxUnavailable to ensure availability
 func (dc *DeploymentController) scaleDownOldReplicaSetsForRollingUpdate(allRSs []*extensions.ReplicaSet, oldRSs []*extensions.ReplicaSet, deployment *extensions.Deployment) (int, error) {
-	maxUnavailable, err := intstrutil.GetValueFromIntOrPercent(&deployment.Spec.Strategy.RollingUpdate.MaxUnavailable, deployment.Spec.Replicas, false)
+	_, maxUnavailable, err := deploymentutil.ResolveFenceposts(&deployment.Spec.Strategy.RollingUpdate.MaxSurge, &deployment.Spec.Strategy.RollingUpdate.MaxUnavailable, deployment.Spec.Replicas)
 	if err != nil {
 		return 0, err
 	}
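Both controller call sites above now resolve the two fenceposts together, discarding the surge value through the blank identifier. They must be resolved in one step because maxSurge rounds up while maxUnavailable rounds down, so each can legally be zero on its own, and only the combined view can detect and break the 0/0 deadlock. A self-contained sketch of that rounding (toy arithmetic, not the real intstr package):

package main

import (
	"fmt"
	"math"
)

// resolvePercent mimics the rounding rules used for the two fenceposts:
// maxSurge rounds up, maxUnavailable rounds down.
func resolvePercent(pct float64, desired int, roundUp bool) int {
	v := pct / 100 * float64(desired)
	if roundUp {
		return int(math.Ceil(v))
	}
	return int(math.Floor(v))
}

func main() {
	desired := 2
	surge := resolvePercent(0, desired, true)         // "0%" of 2 -> 0
	unavailable := resolvePercent(25, desired, false) // "25%" of 2 -> floor(0.5) = 0
	if surge == 0 && unavailable == 0 {
		unavailable = 1 // the combined resolver breaks the deadlock
	}
	fmt.Println(surge, unavailable) // 0 1: the rollout can proceed by scaling old down first
}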
@@ -236,7 +236,8 @@ func TestDeploymentController_reconcileOldReplicaSets(t *testing.T) {
 			newReplicas:         0,
 			readyPodsFromOldRS:  10,
 			readyPodsFromNewRS:  0,
-			scaleExpected:       false,
+			scaleExpected:       true,
+			expectedOldReplicas: 9,
 		},
 		{
 			deploymentReplicas:  10,
@@ -496,7 +497,8 @@ func TestDeploymentController_scaleDownOldReplicaSetsForRollingUpdate(t *testing
 			maxUnavailable:      intstr.FromInt(0),
 			readyPods:           10,
 			oldReplicas:         10,
-			scaleExpected:       false,
+			scaleExpected:       true,
+			expectedOldReplicas: 9,
 		},
 		{
 			deploymentReplicas:  10,
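The flipped expectations in both test tables follow directly from that bump: a maxUnavailable of 0 with no surge used to resolve to 0 and forbid any scale-down, whereas the combined resolver now forces unavailable up to 1, so one of the ten old replicas may be removed. A standalone check of the arithmetic behind expectedOldReplicas: 9 (plain Go, not the controller code):

package main

import "fmt"

func main() {
	desired, surge, unavailable := 10, 0, 0 // maxUnavailable intstr.FromInt(0), no surge configured
	if surge == 0 && unavailable == 0 {
		unavailable = 1 // fencepost bump
	}
	fmt.Println(desired - unavailable) // 9, i.e. expectedOldReplicas: 9
}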
@@ -29,6 +29,7 @@ import (
 	client "k8s.io/kubernetes/pkg/client/unversioned"
 	"k8s.io/kubernetes/pkg/labels"
 	"k8s.io/kubernetes/pkg/runtime"
+	"k8s.io/kubernetes/pkg/util/deployment"
 	"k8s.io/kubernetes/pkg/util/integer"
 	"k8s.io/kubernetes/pkg/util/intstr"
 	"k8s.io/kubernetes/pkg/util/wait"
@@ -191,13 +192,9 @@ func (r *RollingUpdater) Update(config *RollingUpdaterConfig) error {
 		}
 		oldRc = updated
 	}
-	// The maximum pods which can go unavailable during the update.
-	maxUnavailable, err := intstr.GetValueFromIntOrPercent(&config.MaxUnavailable, desired, false)
-	if err != nil {
-		return err
-	}
-	// The maximum scaling increment.
-	maxSurge, err := intstr.GetValueFromIntOrPercent(&config.MaxSurge, desired, true)
+	// maxSurge is the maximum scaling increment and maxUnavailable are the maximum pods
+	// that can be unavailable during a rollout.
+	maxSurge, maxUnavailable, err := deployment.ResolveFenceposts(&config.MaxSurge, &config.MaxUnavailable, desired)
 	if err != nil {
 		return err
 	}
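The updater collapses two GetValueFromIntOrPercent calls and their error checks into the single combined call. A minimal sketch of the resulting caller-side wiring, assuming the exported RollingUpdaterConfig from this tree's pkg/kubectl and with illustrative fencepost values:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/kubectl"
	"k8s.io/kubernetes/pkg/util/deployment"
	"k8s.io/kubernetes/pkg/util/intstr"
)

func main() {
	// Hypothetical config values; only the two fencepost fields matter here.
	config := kubectl.RollingUpdaterConfig{
		MaxSurge:       intstr.FromString("25%"),
		MaxUnavailable: intstr.FromInt(0),
	}
	desired := 4

	// One call now resolves both values and applies the 0/0 correction,
	// where the old code resolved them with two separate calls.
	maxSurge, maxUnavailable, err := deployment.ResolveFenceposts(&config.MaxSurge, &config.MaxUnavailable, desired)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(maxSurge, maxUnavailable) // 1 0
}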
@@ -662,11 +662,11 @@ Scaling foo-v2 up to 2
 `,
 		},
 		{
-			name:        "1->1 100%/0 allow maxUnavailability",
+			name:        "1->1 1/0 allow maxUnavailability",
 			oldRc:       oldRc(1, 1),
 			newRc:       newRc(0, 1),
 			newRcExists: false,
-			maxUnavail:  intstr.FromString("100%"),
+			maxUnavail:  intstr.FromString("1%"),
 			maxSurge:    intstr.FromInt(0),
 			expected: []interface{}{
 				down{oldReady: 1, newReady: 0, to: 0},
@@ -693,6 +693,48 @@ Scaling foo-v2 up to 1
 Scaling up foo-v2 from 0 to 2, scaling down foo-v1 from 1 to 0 (keep 2 pods available, don't exceed 3 pods)
 Scaling foo-v2 up to 2
 Scaling foo-v1 down to 0
+`,
+		},
+		{
+			name:        "2->2 25/1 maxSurge trumps maxUnavailable",
+			oldRc:       oldRc(2, 2),
+			newRc:       newRc(0, 2),
+			newRcExists: false,
+			maxUnavail:  intstr.FromString("25%"),
+			maxSurge:    intstr.FromString("1%"),
+			expected: []interface{}{
+				up{1},
+				down{oldReady: 2, newReady: 1, to: 1},
+				up{2},
+				down{oldReady: 1, newReady: 2, to: 0},
+			},
+			output: `Created foo-v2
+Scaling up foo-v2 from 0 to 2, scaling down foo-v1 from 2 to 0 (keep 2 pods available, don't exceed 3 pods)
+Scaling foo-v2 up to 1
+Scaling foo-v1 down to 1
+Scaling foo-v2 up to 2
+Scaling foo-v1 down to 0
+`,
+		},
+		{
+			name:        "2->2 25/0 maxUnavailable resolves to zero, then one",
+			oldRc:       oldRc(2, 2),
+			newRc:       newRc(0, 2),
+			newRcExists: false,
+			maxUnavail:  intstr.FromString("25%"),
+			maxSurge:    intstr.FromString("0%"),
+			expected: []interface{}{
+				down{oldReady: 2, newReady: 0, to: 1},
+				up{1},
+				down{oldReady: 1, newReady: 1, to: 0},
+				up{2},
+			},
+			output: `Created foo-v2
+Scaling up foo-v2 from 0 to 2, scaling down foo-v1 from 2 to 0 (keep 1 pods available, don't exceed 2 pods)
+Scaling foo-v1 down to 1
+Scaling foo-v2 up to 1
+Scaling foo-v1 down to 0
+Scaling foo-v2 up to 2
 `,
 		},
 	}
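The two new cases exercise both branches of the combined resolution at desired = 2. In "25/1", surge resolves to ceil(2 * 1%) = 1 while unavailable resolves to floor(2 * 25%) = 0, so the rollout must surge first, hence "maxSurge trumps maxUnavailable" and the expected sequence leads with up{1}. In "25/0", both resolve to 0 and unavailable is bumped to 1, so the expected sequence leads with a scale-down. A standalone check of those numbers (toy arithmetic, not the updater):

package main

import (
	"fmt"
	"math"
)

func resolve(surgePct, unavailPct float64, desired int) (int, int) {
	surge := int(math.Ceil(surgePct / 100 * float64(desired)))
	unavailable := int(math.Floor(unavailPct / 100 * float64(desired)))
	if surge == 0 && unavailable == 0 {
		unavailable = 1
	}
	return surge, unavailable
}

func main() {
	s, u := resolve(1, 25, 2)
	fmt.Println(s, u) // 1 0: surge first, up{1} leads the expected sequence
	s, u = resolve(0, 25, 2)
	fmt.Println(s, u) // 0 1: scale down first, down{...} leads the expected sequence
}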
@@ -466,3 +466,33 @@ func WaitForObservedDeployment(getDeploymentFunc func() (*extensions.Deployment,
 		return deployment.Status.ObservedGeneration >= desiredGeneration, nil
 	})
 }
+
+// ResolveFenceposts resolves both maxSurge and maxUnavailable. This needs to happen in one
+// step. For example:
+//
+// 2 desired, max unavailable 1%, surge 0% - should scale old(-1), then new(+1), then old(-1), then new(+1)
+// 1 desired, max unavailable 1%, surge 0% - should scale old(-1), then new(+1)
+// 2 desired, max unavailable 25%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1)
+// 1 desired, max unavailable 25%, surge 1% - should scale new(+1), then old(-1)
+// 2 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1)
+// 1 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1)
+func ResolveFenceposts(maxSurge, maxUnavailable *intstrutil.IntOrString, desired int) (int, int, error) {
+	surge, err := intstrutil.GetValueFromIntOrPercent(maxSurge, desired, true)
+	if err != nil {
+		return 0, 0, err
+	}
+	unavailable, err := intstrutil.GetValueFromIntOrPercent(maxUnavailable, desired, false)
+	if err != nil {
+		return 0, 0, err
+	}
+
+	if surge == 0 && unavailable == 0 {
+		// Validation should never allow the user to explicitly use zero values for both maxSurge
+		// and maxUnavailable. Due to rounding down maxUnavailable though, it may resolve to zero.
+		// If both fenceposts resolve to zero, then we should set maxUnavailable to 1 on the
+		// theory that surge might not work due to quota.
+		unavailable = 1
+	}
+
+	return surge, unavailable, nil
+}
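A hedged usage sketch of the new helper against the second example in its comment (1 desired, max unavailable 1%, surge 0%), assuming this tree's import paths as shown in the hunks above:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util/deployment"
	intstrutil "k8s.io/kubernetes/pkg/util/intstr"
)

func main() {
	maxSurge := intstrutil.FromString("0%")
	maxUnavailable := intstrutil.FromString("1%")

	// 1 desired: surge ceils to 0 and unavailable floors to 0, so the helper
	// bumps unavailable to 1 and the rollout scales old(-1), then new(+1).
	surge, unavailable, err := deployment.ResolveFenceposts(&maxSurge, &maxUnavailable, 1)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(surge, unavailable) // 0 1
}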