Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-23 19:56:01 +00:00
Merge pull request #44672 from kargakis/update-deployment-completeness

Automatic merge from submit-queue (batch tested with PRs 43575, 44672)

Update deployment and daemonset completeness checks

Taking maxUnavailable into account for deployment completeness has caused a lot of confusion (https://github.com/kubernetes/kubernetes/issues/44395, https://github.com/kubernetes/kubernetes/issues/44657, https://github.com/kubernetes/kubernetes/issues/40496, and others, I am sure), so I am willing to simply stop using it and require all of the new Pods of a Deployment to be available before the Deployment is considered complete. As a result, neither `rollout status` nor ProgressDeadlineSeconds will report success for a 1-pod Deployment whose Pod never transitions to ready.

@kubernetes/sig-apps-api-reviews thoughts?

```release-note
Deployments and DaemonSets are now considered complete once all of the new pods are up and running - affects `kubectl rollout status` (and ProgressDeadlineSeconds for Deployments)
```

Fixes https://github.com/kubernetes/kubernetes/issues/44395
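For context on where the confusion came from, here is a standalone sketch (not code from this PR) of the arithmetic behind the old completeness rule, assuming a hypothetical 1-replica Deployment with `maxUnavailable: 1` — the situation from #44395. It uses only the `intstr` helpers from k8s.io/apimachinery:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// Hypothetical 1-replica Deployment with maxUnavailable: 1.
	replicas := 1
	maxUnavailable := intstr.FromInt(1)

	// maxUnavailable is resolved against the replica count
	// (percentages are rounded down).
	unavailable, _ := intstr.GetValueFromIntOrPercent(&maxUnavailable, replicas, false)

	available := 0 // the single Pod never becomes ready

	// Old completeness rule: available >= desired - maxUnavailable.
	fmt.Println("old rule complete:", available >= replicas-unavailable) // true
	// New completeness rule: every desired replica must be available.
	fmt.Println("new rule complete:", available == replicas) // false
}
```

Under the old rule the subtraction makes zero available Pods acceptable, so the Deployment is reported complete even though its only Pod never became ready; the new rule keeps waiting.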
Commit f0ce5bd8d8
```diff
@@ -418,17 +418,6 @@ func MaxUnavailable(deployment extensions.Deployment) int32 {
     return maxUnavailable
 }
 
-// MaxUnavailableInternal returns the maximum unavailable pods a rolling deployment can take.
-// TODO: remove the duplicate
-func MaxUnavailableInternal(deployment internalextensions.Deployment) int32 {
-    if !(deployment.Spec.Strategy.Type == internalextensions.RollingUpdateDeploymentStrategyType) || deployment.Spec.Replicas == 0 {
-        return int32(0)
-    }
-    // Error caught by validation
-    _, maxUnavailable, _ := ResolveFenceposts(&deployment.Spec.Strategy.RollingUpdate.MaxSurge, &deployment.Spec.Strategy.RollingUpdate.MaxUnavailable, deployment.Spec.Replicas)
-    return maxUnavailable
-}
-
 // MinAvailable returns the minimum available pods of a given deployment
 func MinAvailable(deployment *extensions.Deployment) int32 {
     if !IsRollingUpdate(deployment) {
```
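Note that the exported `MaxUnavailable` helper for the versioned API is untouched by this change; only the `MaxUnavailableInternal` duplicate is deleted, and its caller in `DeploymentStatusViewer.Status` is removed in the rollout status changes further down.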
```diff
@@ -839,12 +828,12 @@ func IsRollingUpdate(deployment *extensions.Deployment) bool {
     return deployment.Spec.Strategy.Type == extensions.RollingUpdateDeploymentStrategyType
 }
 
-// DeploymentComplete considers a deployment to be complete once its desired replicas equals its
-// updatedReplicas, no old pods are running, and it doesn't violate minimum availability.
+// DeploymentComplete considers a deployment to be complete once all of its desired replicas
+// are updated and available, and no old pods are running.
 func DeploymentComplete(deployment *extensions.Deployment, newStatus *extensions.DeploymentStatus) bool {
     return newStatus.UpdatedReplicas == *(deployment.Spec.Replicas) &&
         newStatus.Replicas == *(deployment.Spec.Replicas) &&
-        newStatus.AvailableReplicas >= *(deployment.Spec.Replicas)-MaxUnavailable(*deployment) &&
+        newStatus.AvailableReplicas == *(deployment.Spec.Replicas) &&
         newStatus.ObservedGeneration >= deployment.Generation
 }
 
```
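The other conditions of the predicate are unchanged: `status.Replicas` and `status.UpdatedReplicas` must still equal `spec.Replicas` (no leftover old pods, everything updated) and the observed generation must be current. The only behavioral change is that available replicas must now equal the desired count instead of merely reaching `desired - maxUnavailable`.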
```diff
@@ -960,37 +960,43 @@ func TestDeploymentComplete(t *testing.T) {
         expected bool
     }{
         {
-            name: "complete",
+            name: "not complete: min but not all pods become available",
 
             d:        deployment(5, 5, 5, 4, 1, 0),
-            expected: true,
+            expected: false,
         },
         {
-            name: "not complete",
+            name: "not complete: min availability is not honored",
 
             d:        deployment(5, 5, 5, 3, 1, 0),
             expected: false,
         },
         {
-            name: "complete #2",
+            name: "complete",
 
             d:        deployment(5, 5, 5, 5, 0, 0),
             expected: true,
         },
         {
-            name: "not complete #2",
+            name: "not complete: all pods are available but not updated",
 
             d:        deployment(5, 5, 4, 5, 0, 0),
             expected: false,
         },
         {
-            name: "not complete #3",
+            name: "not complete: still running old pods",
 
             // old replica set: spec.replicas=1, status.replicas=1, status.availableReplicas=1
             // new replica set: spec.replicas=1, status.replicas=1, status.availableReplicas=0
             d:        deployment(1, 2, 1, 1, 0, 1),
             expected: false,
         },
+        {
+            name: "not complete: one replica deployment never comes up",
+
+            d:        deployment(1, 1, 1, 0, 1, 1),
+            expected: false,
+        },
     }
 
     for _, test := range tests {
```
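A reading note on the scenarios above: judging from the inline replica-set comments in the "still running old pods" case, the `deployment(...)` helper appears to take its arguments as (desired, current, updated, available, maxUnavailable, maxSurge). Its definition is outside this diff, so treat that mapping as an inference; under it, the first scenario is a 5-replica Deployment with only 4 pods available and maxUnavailable=1, which the old check accepted as complete and the new check does not.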
```diff
@@ -21,7 +21,6 @@ import (
 
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/runtime/schema"
-    intstrutil "k8s.io/apimachinery/pkg/util/intstr"
     "k8s.io/kubernetes/pkg/apis/apps"
     "k8s.io/kubernetes/pkg/apis/extensions"
     "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
```
```diff
@@ -78,9 +77,8 @@ func (s *DeploymentStatusViewer) Status(namespace, name string, revision int64)
         if deployment.Status.Replicas > deployment.Status.UpdatedReplicas {
             return fmt.Sprintf("Waiting for rollout to finish: %d old replicas are pending termination...\n", deployment.Status.Replicas-deployment.Status.UpdatedReplicas), false, nil
         }
-        minRequired := deployment.Spec.Replicas - util.MaxUnavailableInternal(*deployment)
-        if deployment.Status.AvailableReplicas < minRequired {
-            return fmt.Sprintf("Waiting for rollout to finish: %d of %d updated replicas are available (minimum required: %d)...\n", deployment.Status.AvailableReplicas, deployment.Status.UpdatedReplicas, minRequired), false, nil
+        if deployment.Status.AvailableReplicas < deployment.Status.UpdatedReplicas {
+            return fmt.Sprintf("Waiting for rollout to finish: %d of %d updated replicas are available...\n", deployment.Status.AvailableReplicas, deployment.Status.UpdatedReplicas), false, nil
         }
         return fmt.Sprintf("deployment %q successfully rolled out\n", name), true, nil
     }
```
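The user-visible effect on the deployment side is the waiting message: `kubectl rollout status` now prints `Waiting for rollout to finish: 1 of 2 updated replicas are available...` (without the old "minimum required" figure) and keeps waiting until every updated replica is available, which is exactly what the updated test expectations below assert.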
```diff
@@ -102,11 +100,8 @@ func (s *DaemonSetStatusViewer) Status(namespace, name string, revision int64) (
         if daemon.Status.UpdatedNumberScheduled < daemon.Status.DesiredNumberScheduled {
             return fmt.Sprintf("Waiting for rollout to finish: %d out of %d new pods have been updated...\n", daemon.Status.UpdatedNumberScheduled, daemon.Status.DesiredNumberScheduled), false, nil
         }
-
-        maxUnavailable, _ := intstrutil.GetValueFromIntOrPercent(&daemon.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable, int(daemon.Status.DesiredNumberScheduled), true)
-        minRequired := daemon.Status.DesiredNumberScheduled - int32(maxUnavailable)
-        if daemon.Status.NumberAvailable < minRequired {
-            return fmt.Sprintf("Waiting for rollout to finish: %d of %d updated pods are available (minimum required: %d)...\n", daemon.Status.NumberAvailable, daemon.Status.DesiredNumberScheduled, minRequired), false, nil
+        if daemon.Status.NumberAvailable < daemon.Status.DesiredNumberScheduled {
+            return fmt.Sprintf("Waiting for rollout to finish: %d of %d updated pods are available...\n", daemon.Status.NumberAvailable, daemon.Status.DesiredNumberScheduled), false, nil
         }
         return fmt.Sprintf("daemon set %q successfully rolled out\n", name), true, nil
     }
```
```diff
@@ -20,24 +20,17 @@ import (
     "testing"
 
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    intstrutil "k8s.io/apimachinery/pkg/util/intstr"
     "k8s.io/kubernetes/pkg/apis/extensions"
     "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
 )
 
-func intOrStringP(i int) *intstrutil.IntOrString {
-    intstr := intstrutil.FromInt(i)
-    return &intstr
-}
-
 func TestDeploymentStatusViewerStatus(t *testing.T) {
     tests := []struct {
-        generation     int64
-        specReplicas   int32
-        maxUnavailable *intstrutil.IntOrString
-        status         extensions.DeploymentStatus
-        msg            string
-        done           bool
+        generation   int64
+        specReplicas int32
+        status       extensions.DeploymentStatus
+        msg          string
+        done         bool
     }{
         {
             generation: 0,
```
```diff
@@ -68,9 +61,8 @@ func TestDeploymentStatusViewerStatus(t *testing.T) {
             done: false,
         },
         {
-            generation:     1,
-            specReplicas:   2,
-            maxUnavailable: intOrStringP(0),
+            generation:   1,
+            specReplicas: 2,
             status: extensions.DeploymentStatus{
                 ObservedGeneration: 1,
                 Replicas:           2,
```
```diff
@@ -79,7 +71,7 @@ func TestDeploymentStatusViewerStatus(t *testing.T) {
                 UnavailableReplicas: 1,
             },
 
-            msg:  "Waiting for rollout to finish: 1 of 2 updated replicas are available (minimum required: 2)...\n",
+            msg:  "Waiting for rollout to finish: 1 of 2 updated replicas are available...\n",
             done: false,
         },
         {
```
```diff
@@ -110,26 +102,9 @@ func TestDeploymentStatusViewerStatus(t *testing.T) {
             msg:  "Waiting for deployment spec update to be observed...\n",
             done: false,
         },
-        {
-            generation:     1,
-            specReplicas:   2,
-            maxUnavailable: intOrStringP(1),
-            status: extensions.DeploymentStatus{
-                ObservedGeneration:  1,
-                Replicas:            2,
-                UpdatedReplicas:     2,
-                AvailableReplicas:   1,
-                UnavailableReplicas: 0,
-            },
-
-            msg:  "deployment \"foo\" successfully rolled out\n",
-            done: true,
-        },
     }
 
-    for i := range tests {
-        test := tests[i]
-        t.Logf("testing scenario %d", i)
+    for _, test := range tests {
         d := &extensions.Deployment{
             ObjectMeta: metav1.ObjectMeta{
                 Namespace: "bar",
```
```diff
@@ -138,27 +113,18 @@ func TestDeploymentStatusViewerStatus(t *testing.T) {
                 Generation: test.generation,
             },
             Spec: extensions.DeploymentSpec{
-                Strategy: extensions.DeploymentStrategy{
-                    Type: extensions.RollingUpdateDeploymentStrategyType,
-                    RollingUpdate: &extensions.RollingUpdateDeployment{
-                        MaxSurge: *intOrStringP(1),
-                    },
-                },
                 Replicas: test.specReplicas,
             },
             Status: test.status,
         }
-        if test.maxUnavailable != nil {
-            d.Spec.Strategy.RollingUpdate.MaxUnavailable = *test.maxUnavailable
-        }
         client := fake.NewSimpleClientset(d).Extensions()
         dsv := &DeploymentStatusViewer{c: client}
         msg, done, err := dsv.Status("bar", "foo", 0)
         if err != nil {
-            t.Fatalf("unexpected error: %v", err)
+            t.Fatalf("DeploymentStatusViewer.Status(): %v", err)
         }
         if done != test.done || msg != test.msg {
-            t.Errorf("deployment with generation %d, %d replicas specified, and status:\n%+v\nreturned:\n%q, %t\nwant:\n%q, %t",
+            t.Errorf("DeploymentStatusViewer.Status() for deployment with generation %d, %d replicas specified, and status %+v returned %q, %t, want %q, %t",
                 test.generation,
                 test.specReplicas,
                 test.status,
```
```diff
@@ -173,11 +139,10 @@ func TestDeploymentStatusViewerStatus(t *testing.T) {
 
 func TestDaemonSetStatusViewerStatus(t *testing.T) {
     tests := []struct {
-        generation     int64
-        maxUnavailable *intstrutil.IntOrString
-        status         extensions.DaemonSetStatus
-        msg            string
-        done           bool
+        generation int64
+        status     extensions.DaemonSetStatus
+        msg        string
+        done       bool
     }{
         {
             generation: 0,
```
```diff
@@ -192,8 +157,7 @@ func TestDaemonSetStatusViewerStatus(t *testing.T) {
             done: false,
         },
         {
-            generation:     1,
-            maxUnavailable: intOrStringP(0),
+            generation: 1,
             status: extensions.DaemonSetStatus{
                 ObservedGeneration:     1,
                 UpdatedNumberScheduled: 2,
```
```diff
@@ -201,7 +165,7 @@ func TestDaemonSetStatusViewerStatus(t *testing.T) {
                 NumberAvailable:        1,
             },
 
-            msg:  "Waiting for rollout to finish: 1 of 2 updated pods are available (minimum required: 2)...\n",
+            msg:  "Waiting for rollout to finish: 1 of 2 updated pods are available...\n",
             done: false,
         },
         {
```
```diff
@@ -228,19 +192,6 @@ func TestDaemonSetStatusViewerStatus(t *testing.T) {
             msg:  "Waiting for daemon set spec update to be observed...\n",
             done: false,
         },
-        {
-            generation:     1,
-            maxUnavailable: intOrStringP(1),
-            status: extensions.DaemonSetStatus{
-                ObservedGeneration:     1,
-                UpdatedNumberScheduled: 2,
-                DesiredNumberScheduled: 2,
-                NumberAvailable:        1,
-            },
-
-            msg:  "daemon set \"foo\" successfully rolled out\n",
-            done: true,
-        },
     }
 
     for i := range tests {
```
```diff
@@ -255,15 +206,11 @@ func TestDaemonSetStatusViewerStatus(t *testing.T) {
             },
             Spec: extensions.DaemonSetSpec{
                 UpdateStrategy: extensions.DaemonSetUpdateStrategy{
-                    Type:          extensions.RollingUpdateDaemonSetStrategyType,
-                    RollingUpdate: &extensions.RollingUpdateDaemonSet{},
+                    Type: extensions.RollingUpdateDaemonSetStrategyType,
                 },
             },
             Status: test.status,
         }
-        if test.maxUnavailable != nil {
-            d.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable = *test.maxUnavailable
-        }
         client := fake.NewSimpleClientset(d).Extensions()
         dsv := &DaemonSetStatusViewer{c: client}
         msg, done, err := dsv.Status("bar", "foo", 0)
```