diff --git a/pkg/controller/daemon/daemoncontroller.go b/pkg/controller/daemon/daemoncontroller.go
index 8d86376593c..a3a0ae761f9 100644
--- a/pkg/controller/daemon/daemoncontroller.go
+++ b/pkg/controller/daemon/daemoncontroller.go
@@ -578,7 +578,7 @@ func (dsc *DaemonSetsController) manage(ds *extensions.DaemonSet) error {
 	return utilerrors.NewAggregate(errors)
 }
 
-// syncNodes deletes given pods and creates new daemon set pods on the given node
-// returns slice with erros if any
+// syncNodes deletes given pods and creates new daemon set pods on the given nodes
+// returns a slice of errors if any
 func (dsc *DaemonSetsController) syncNodes(ds *extensions.DaemonSet, podsToDelete, nodesNeedingDaemonPods []string) []error {
 	// We need to set expectations before creating/deleting pods to avoid race conditions.
diff --git a/pkg/controller/deployment/util/pod_util.go b/pkg/controller/deployment/util/pod_util.go
index 5101e6fa05b..b2d4e8464eb 100644
--- a/pkg/controller/deployment/util/pod_util.go
+++ b/pkg/controller/deployment/util/pod_util.go
@@ -44,8 +44,6 @@ func GetInternalPodTemplateSpecHash(template api.PodTemplateSpec) uint32 {
 	return podTemplateSpecHasher.Sum32()
 }
 
-// TODO: move it to a better place. It's also used by DaemonSets
-// Maybe to pkg/api/v1/resource_helpers.go
 func GetPodTemplateSpecHashFnv(template v1.PodTemplateSpec) uint32 {
 	podTemplateSpecHasher := fnv.New32a()
 	hashutil.DeepHashObject(podTemplateSpecHasher, template)
diff --git a/pkg/kubectl/rollout_status.go b/pkg/kubectl/rollout_status.go
index 91855f7ed2d..67f4f7f84cb 100644
--- a/pkg/kubectl/rollout_status.go
+++ b/pkg/kubectl/rollout_status.go
@@ -21,6 +21,7 @@ import (
 
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime/schema"
+	intstrutil "k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/kubernetes/pkg/apis/apps"
 	"k8s.io/kubernetes/pkg/apis/extensions"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
@@ -94,12 +95,18 @@ func (s *DaemonSetStatusViewer) Status(namespace, name string, revision int64) (
 	if err != nil {
 		return "", false, err
 	}
+	if daemon.Spec.UpdateStrategy.Type != extensions.RollingUpdateDaemonSetStrategyType {
+		return "", true, fmt.Errorf("Status is available only for RollingUpdate strategy type")
+	}
 	if daemon.Generation <= daemon.Status.ObservedGeneration {
 		if daemon.Status.UpdatedNumberScheduled < daemon.Status.DesiredNumberScheduled {
-			return fmt.Sprintf("Waiting for rollout to finish: %d out of %d new pod have been updated...\n", daemon.Status.UpdatedNumberScheduled, daemon.Status.DesiredNumberScheduled), false, nil
+			return fmt.Sprintf("Waiting for rollout to finish: %d out of %d new pods have been updated...\n", daemon.Status.UpdatedNumberScheduled, daemon.Status.DesiredNumberScheduled), false, nil
 		}
-		if daemon.Status.NumberAvailable < daemon.Status.DesiredNumberScheduled {
-			return fmt.Sprintf("Waiting for rollout to finish: %d of %d updated pods are available...\n", daemon.Status.NumberAvailable, daemon.Status.DesiredNumberScheduled), false, nil
+
+		maxUnavailable, _ := intstrutil.GetValueFromIntOrPercent(&daemon.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable, int(daemon.Status.DesiredNumberScheduled), true)
+		minRequired := daemon.Status.DesiredNumberScheduled - int32(maxUnavailable)
+		if daemon.Status.NumberAvailable < minRequired {
+			return fmt.Sprintf("Waiting for rollout to finish: %d of %d updated pods are available (minimum required: %d)...\n", daemon.Status.NumberAvailable, daemon.Status.DesiredNumberScheduled, minRequired), false, nil
 		}
 		return fmt.Sprintf("daemon set %q successfully rolled out\n", name), true, nil
 	}
diff --git a/pkg/kubectl/rollout_status_test.go b/pkg/kubectl/rollout_status_test.go
index 2f659f5ea86..acf308156cf 100644
--- a/pkg/kubectl/rollout_status_test.go
+++ b/pkg/kubectl/rollout_status_test.go
@@ -170,3 +170,138 @@ func TestDeploymentStatusViewerStatus(t *testing.T) {
 		}
 	}
 }
+
+func TestDaemonSetStatusViewerStatus(t *testing.T) {
+	tests := []struct {
+		generation     int64
+		maxUnavailable *intstrutil.IntOrString
+		status         extensions.DaemonSetStatus
+		msg            string
+		done           bool
+	}{
+		{
+			generation: 0,
+			status: extensions.DaemonSetStatus{
+				ObservedGeneration:     1,
+				UpdatedNumberScheduled: 0,
+				DesiredNumberScheduled: 1,
+				NumberAvailable:        0,
+			},
+
+			msg:  "Waiting for rollout to finish: 0 out of 1 new pods have been updated...\n",
+			done: false,
+		},
+		{
+			generation:     1,
+			maxUnavailable: intOrStringP(0),
+			status: extensions.DaemonSetStatus{
+				ObservedGeneration:     1,
+				UpdatedNumberScheduled: 2,
+				DesiredNumberScheduled: 2,
+				NumberAvailable:        1,
+			},
+
+			msg:  "Waiting for rollout to finish: 1 of 2 updated pods are available (minimum required: 2)...\n",
+			done: false,
+		},
+		{
+			generation: 1,
+			status: extensions.DaemonSetStatus{
+				ObservedGeneration:     1,
+				UpdatedNumberScheduled: 2,
+				DesiredNumberScheduled: 2,
+				NumberAvailable:        2,
+			},
+
+			msg:  "daemon set \"foo\" successfully rolled out\n",
+			done: true,
+		},
+		{
+			generation: 2,
+			status: extensions.DaemonSetStatus{
+				ObservedGeneration:     1,
+				UpdatedNumberScheduled: 2,
+				DesiredNumberScheduled: 2,
+				NumberAvailable:        2,
+			},
+
+			msg:  "Waiting for daemon set spec update to be observed...\n",
+			done: false,
+		},
+		{
+			generation:     1,
+			maxUnavailable: intOrStringP(1),
+			status: extensions.DaemonSetStatus{
+				ObservedGeneration:     1,
+				UpdatedNumberScheduled: 2,
+				DesiredNumberScheduled: 2,
+				NumberAvailable:        1,
+			},
+
+			msg:  "daemon set \"foo\" successfully rolled out\n",
+			done: true,
+		},
+	}
+
+	for i := range tests {
+		test := tests[i]
+		t.Logf("testing scenario %d", i)
+		d := &extensions.DaemonSet{
+			ObjectMeta: metav1.ObjectMeta{
+				Namespace:  "bar",
+				Name:       "foo",
+				UID:        "8764ae47-9092-11e4-8393-42010af018ff",
+				Generation: test.generation,
+			},
+			Spec: extensions.DaemonSetSpec{
+				UpdateStrategy: extensions.DaemonSetUpdateStrategy{
+					Type:          extensions.RollingUpdateDaemonSetStrategyType,
+					RollingUpdate: &extensions.RollingUpdateDaemonSet{},
+				},
+			},
+			Status: test.status,
+		}
+		if test.maxUnavailable != nil {
+			d.Spec.UpdateStrategy.RollingUpdate.MaxUnavailable = *test.maxUnavailable
+		}
+		client := fake.NewSimpleClientset(d).Extensions()
+		dsv := &DaemonSetStatusViewer{c: client}
+		msg, done, err := dsv.Status("bar", "foo", 0)
+		if err != nil {
+			t.Fatalf("unexpected error: %v", err)
+		}
+		if done != test.done || msg != test.msg {
+			t.Errorf("daemon set with generation %d, %d pods specified, and status:\n%+v\nreturned:\n%q, %t\nwant:\n%q, %t",
+				test.generation,
+				d.Status.DesiredNumberScheduled,
+				test.status,
+				msg,
+				done,
+				test.msg,
+				test.done,
+			)
+		}
+	}
+}
+
+func TestDaemonSetStatusViewerStatusWithWrongUpdateStrategyType(t *testing.T) {
+	d := &extensions.DaemonSet{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: "bar",
+			Name:      "foo",
+			UID:       "8764ae47-9092-11e4-8393-42010af018ff",
+		},
+		Spec: extensions.DaemonSetSpec{
+			UpdateStrategy: extensions.DaemonSetUpdateStrategy{
+				Type: extensions.OnDeleteDaemonSetStrategyType,
+			},
+		},
+	}
+	client := fake.NewSimpleClientset(d).Extensions()
+	dsv := &DaemonSetStatusViewer{c: client}
+	msg, done, err := dsv.Status("bar", "foo", 0)
+	errMsg := "Status is available only for RollingUpdate strategy type"
+	if err == nil || err.Error() != errMsg {
+		t.Errorf("Status for daemon sets with an UpdateStrategy other than RollingUpdate should return an error. Instead got: msg: %s\ndone: %t\nerr: %v", msg, done, err)
+	}
+}