From c8ea0c27e96e60dafa171b58df85b91ae65a493f Mon Sep 17 00:00:00 2001
From: Prashanth Balasubramanian
Date: Wed, 29 Apr 2015 15:06:42 -0700
Subject: [PATCH] Fix rollingupdate --image

---
 docs/kubectl.md                       |  2 +-
 hack/test-cmd.sh                      | 14 ++++++++++-
 pkg/kubectl/cmd/rollingupdate.go      | 34 +++++++++++++++++----------
 pkg/kubectl/cmd/rollingupdate_test.go |  2 +-
 4 files changed, 37 insertions(+), 15 deletions(-)

diff --git a/docs/kubectl.md b/docs/kubectl.md
index 5edf57e182e..0c0c2f56a85 100644
--- a/docs/kubectl.md
+++ b/docs/kubectl.md
@@ -66,4 +66,4 @@ kubectl
 * [kubectl update](kubectl_update.md) - Update a resource by filename or stdin.
 * [kubectl version](kubectl_version.md) - Print the client and server version information.
 
-###### Auto generated by spf13/cobra at 2015-04-30 06:01:21.516683017 +0000 UTC
+###### Auto generated by spf13/cobra at 2015-04-30 16:43:53.288271038 +0000 UTC
diff --git a/hack/test-cmd.sh b/hack/test-cmd.sh
index 2d425e044f1..a56e8527a01 100755
--- a/hack/test-cmd.sh
+++ b/hack/test-cmd.sh
@@ -139,12 +139,16 @@ for version in "${kube_api_versions[@]}"; do
   labels_field=".metadata.labels"
   service_selector_field=".spec.selector"
   rc_replicas_field=".spec.replicas"
+  rc_status_replicas_field=".status.replicas"
+  rc_container_image_field=".spec.template.spec.containers"
   port_field="(index .spec.ports 0).port"
   if [ "${version}" = "v1beta1" ] || [ "${version}" = "v1beta2" ]; then
     id_field=".id"
     labels_field=".labels"
     service_selector_field=".selector"
     rc_replicas_field=".desiredState.replicas"
+    rc_status_replicas_field=".currentState.replicas"
+    rc_container_image_field=".desiredState.podTemplate.desiredState.manifest.containers"
     port_field=".port"
   fi
 
@@ -541,6 +545,14 @@ __EOF__
   kubectl delete pod valid-pod "${kube_flags[@]}"
   kubectl delete service frontend{,-2,-3} "${kube_flags[@]}"
 
+  ### Perform a rolling update with --image
+  # Pre-condition: status.replicas is 3; otherwise the rc manager could update it and interfere with the rolling update
+  kube::test::get_object_assert 'rc frontend' "{{$rc_status_replicas_field}}" '3'
+  # Command
+  kubectl rolling-update frontend --image=kubernetes/pause --update-period=10ns --poll-interval=10ms "${kube_flags[@]}"
+  # Post-condition: current image IS kubernetes/pause
+  kube::test::get_object_assert 'rc frontend' '{{range \$c:=$rc_container_image_field}} {{\$c.image}} {{end}}' ' +kubernetes/pause +'
+
   ### Delete replication controller with id
   # Pre-condition: frontend replication controller is running
   kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
@@ -647,7 +659,7 @@ __EOF__
 
   #####################
   # Resource aliasing #
-  #####################
+  #####################
 
   kube::log::status "Testing resource aliasing"
   kubectl create -f examples/cassandra/cassandra.yaml "${kube_flags[@]}"
diff --git a/pkg/kubectl/cmd/rollingupdate.go b/pkg/kubectl/cmd/rollingupdate.go
index f5397cd73be..ed5439975e0 100644
--- a/pkg/kubectl/cmd/rollingupdate.go
+++ b/pkg/kubectl/cmd/rollingupdate.go
@@ -155,6 +155,9 @@ func RunRollingUpdate(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, arg
 			return cmdutil.UsageError(cmd, "%s does not specify a valid ReplicationController", filename)
 		}
 	}
+	// If the --image option is specified, we need to create a new rc with at least one different selector
+	// than the old rc. This selector is the hash of the rc, which will differ because the new rc has a
+	// different image.
 	if len(image) != 0 {
 		var newName string
 		var err error
@@ -205,7 +208,7 @@ func RunRollingUpdate(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, arg
 		kubectl.SetNextControllerAnnotation(oldRc, newName)
 
 		if _, found := oldRc.Spec.Selector[deploymentKey]; !found {
-			if err := addDeploymentKeyToReplicationController(oldRc, client, deploymentKey, cmdNamespace, out); err != nil {
+			if oldRc, err = addDeploymentKeyToReplicationController(oldRc, client, deploymentKey, cmdNamespace, out); err != nil {
 				return err
 			}
 		}
@@ -219,6 +222,9 @@ func RunRollingUpdate(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, arg
 
 	updater := kubectl.NewRollingUpdater(newRc.Namespace, kubectl.NewRollingUpdaterClient(client))
 
+	// To successfully pull off a rolling update, the new and old rc have to differ
+	// by at least one selector. Every new pod should have the selector and every
+	// old pod should not have the selector.
 	var hasLabel bool
 	for key, oldValue := range oldRc.Spec.Selector {
 		if newValue, ok := newRc.Spec.Selector[key]; ok && newValue != oldValue {
@@ -281,25 +287,25 @@ func hashObject(obj runtime.Object, codec runtime.Codec) (string, error) {
 
 const MaxRetries = 3
 
-func addDeploymentKeyToReplicationController(oldRc *api.ReplicationController, client *client.Client, deploymentKey, namespace string, out io.Writer) error {
+func addDeploymentKeyToReplicationController(oldRc *api.ReplicationController, client *client.Client, deploymentKey, namespace string, out io.Writer) (*api.ReplicationController, error) {
 	oldHash, err := hashObject(oldRc, client.Codec)
 	if err != nil {
-		return err
+		return nil, err
 	}
 	// First, update the template label. This ensures that any newly created pods will have the new label
 	if oldRc.Spec.Template.Labels == nil {
 		oldRc.Spec.Template.Labels = map[string]string{}
 	}
 	oldRc.Spec.Template.Labels[deploymentKey] = oldHash
-	if _, err := client.ReplicationControllers(namespace).Update(oldRc); err != nil {
-		return err
+	if oldRc, err = client.ReplicationControllers(namespace).Update(oldRc); err != nil {
+		return nil, err
 	}
 
-	// Update all labels to include the new hash, so they are correctly adopted
+	// Update all pods managed by the rc to have the new hash label, so they are correctly adopted
 	// TODO: extract the code from the label command and re-use it here.
 	podList, err := client.Pods(namespace).List(labels.SelectorFromSet(oldRc.Spec.Selector), fields.Everything())
 	if err != nil {
-		return err
+		return nil, err
 	}
 	for ix := range podList.Items {
 		pod := &podList.Items[ix]
@@ -323,7 +329,7 @@ func addDeploymentKeyToReplicationController(oldRc *api.ReplicationController, c
 			}
 		}
 		if err != nil {
-			return err
+			return nil, err
 		}
 	}
 
@@ -337,19 +343,23 @@ func addDeploymentKeyToReplicationController(oldRc *api.ReplicationController, c
 	}
 	oldRc.Spec.Selector[deploymentKey] = oldHash
 
-	if _, err := client.ReplicationControllers(namespace).Update(oldRc); err != nil {
-		return err
+	// Update the selector of the rc so it manages all the pods we updated above
+	if oldRc, err = client.ReplicationControllers(namespace).Update(oldRc); err != nil {
+		return nil, err
 	}
 
+	// Clean up any orphaned pods that don't have the new label; this can happen if the rc manager
+	// doesn't see the update to its pod template and creates a new pod with the old labels after
+	// we've finished re-adopting existing pods to the rc.
 	podList, err = client.Pods(namespace).List(labels.SelectorFromSet(selectorCopy), fields.Everything())
 	for ix := range podList.Items {
 		pod := &podList.Items[ix]
 		if value, found := pod.Labels[deploymentKey]; !found || value != oldHash {
 			if err := client.Pods(namespace).Delete(pod.Name, nil); err != nil {
-				return err
+				return nil, err
 			}
 		}
 	}
 
-	return nil
+	return oldRc, nil
 }
diff --git a/pkg/kubectl/cmd/rollingupdate_test.go b/pkg/kubectl/cmd/rollingupdate_test.go
index 440d40db241..4a5c881697c 100644
--- a/pkg/kubectl/cmd/rollingupdate_test.go
+++ b/pkg/kubectl/cmd/rollingupdate_test.go
@@ -170,7 +170,7 @@ func TestAddDeploymentHash(t *testing.T) {
 		return
 	}
 
-	if err := addDeploymentKeyToReplicationController(rc, client, "hash", api.NamespaceDefault, buf); err != nil {
+	if _, err := addDeploymentKeyToReplicationController(rc, client, "hash", api.NamespaceDefault, buf); err != nil {
 		t.Errorf("unexpected error: %v", err)
 	}
 	for _, pod := range podList.Items {
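Note on the fix (not part of the patch): the core change is that addDeploymentKeyToReplicationController now returns the ReplicationController handed back by each Update call, and its callers re-assign oldRc from that return value, so the subsequent rolling update is driven from the refreshed object with the deployment-key selector rather than a stale local copy. The short Go program below is a minimal sketch of that pattern only; rc, fakeServer, and addKey are illustrative stand-ins invented here, not Kubernetes APIs.

package main

import "fmt"

// rc stands in for api.ReplicationController: the server refreshes the object
// on every successful update, so callers must keep the returned copy.
type rc struct {
	ResourceVersion int
	Selector        map[string]string
}

// fakeServer stands in for client.ReplicationControllers(namespace).
type fakeServer struct {
	stored rc
}

// Update mimics the client call used in the patch: it returns the stored,
// refreshed object instead of mutating the caller's copy in place.
func (s *fakeServer) Update(in *rc) (*rc, error) {
	s.stored = *in
	s.stored.ResourceVersion++
	out := s.stored
	return &out, nil
}

// addKey mirrors the shape of addDeploymentKeyToReplicationController after
// the fix: a successful Update re-assigns oldRc, and the refreshed object is
// returned to the caller rather than a bare error.
func addKey(oldRc *rc, s *fakeServer, key, hash string) (*rc, error) {
	var err error
	oldRc.Selector[key] = hash
	if oldRc, err = s.Update(oldRc); err != nil {
		return nil, err
	}
	return oldRc, nil
}

func main() {
	server := &fakeServer{stored: rc{ResourceVersion: 1, Selector: map[string]string{"name": "frontend"}}}
	oldRc := &rc{ResourceVersion: 1, Selector: map[string]string{"name": "frontend"}}

	oldRc, err := addKey(oldRc, server, "deployment", "1234abcd")
	if err != nil {
		panic(err)
	}
	// The caller now holds the server's view of the rc (updated selector and
	// version), so the rolling update starts from a non-stale object.
	fmt.Println(oldRc.ResourceVersion, oldRc.Selector)
}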