Merge pull request #10474 from kargakis/scale-multiple-controllers

Enable scaling multiple controllers
Mike Danese 2015-07-31 14:51:43 -07:00
commit cf4cb1a6a3
6 changed files with 52 additions and 13 deletions


@@ -499,6 +499,7 @@ _kubectl_scale()
     two_word_flags+=("-o")
     flags+=("--replicas=")
     flags+=("--resource-version=")
+    flags+=("--timeout=")
     must_have_one_flag=()
     must_have_one_flag+=("--replicas=")


@@ -43,6 +43,10 @@ scale is sent to the server.
 \fB\-\-resource\-version\fP=""
 	Precondition for resource version. Requires that the current resource version match this value in order to scale.
+.PP
+\fB\-\-timeout\fP=0
+	The length of time to wait before giving up on a scale operation, zero means don't wait.
 .SH OPTIONS INHERITED FROM PARENT COMMANDS
 .PP
@@ -153,6 +157,9 @@ $ kubectl scale \-\-replicas=3 replicationcontrollers foo
 // If the replication controller named foo's current size is 2, scale foo to 3.
 $ kubectl scale \-\-current\-replicas=2 \-\-replicas=3 replicationcontrollers foo
+// Scale multiple replication controllers.
+$ kubectl scale \-\-replicas=5 rc/foo rc/bar
 .fi
 .RE


@@ -57,6 +57,9 @@ $ kubectl scale --replicas=3 replicationcontrollers foo
 // If the replication controller named foo's current size is 2, scale foo to 3.
 $ kubectl scale --current-replicas=2 --replicas=3 replicationcontrollers foo
+// Scale multiple replication controllers.
+$ kubectl scale --replicas=5 rc/foo rc/bar
 ```
 ### Options
@@ -67,6 +70,7 @@ $ kubectl scale --current-replicas=2 --replicas=3 replicationcontrollers foo
   -o, --output="": Output mode. Use "-o name" for shorter output (resource/name).
       --replicas=-1: The new desired number of replicas. Required.
       --resource-version="": Precondition for resource version. Requires that the current resource version match this value in order to scale.
+      --timeout=0: The length of time to wait before giving up on a scale operation, zero means don't wait.
 ```
 ### Options inherited from parent commands
@@ -102,7 +106,7 @@ $ kubectl scale --current-replicas=2 --replicas=3 replicationcontrollers foo
 * [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager
-###### Auto generated by spf13/cobra at 2015-07-14 00:11:42.956739933 +0000 UTC
+###### Auto generated by spf13/cobra at 2015-07-30 08:50:55.94117889 +0000 UTC
 <!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
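The regenerated docs above pick up the two user-visible behaviors of this change: `kubectl scale` now accepts several controllers in one invocation, and a non-zero `--timeout` makes it wait for the resize before returning. For instance (a hypothetical invocation, not part of this diff), `kubectl scale --replicas=3 --timeout=1m rc/foo` gives foo up to one minute to be scaled to 3 replicas, while the default `--timeout=0` keeps the old return-immediately behavior.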


@@ -617,6 +617,17 @@ __EOF__
   # Post-condition: 3 replicas
   kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '3'
+  ### Scale multiple replication controllers
+  kubectl create -f examples/guestbook/redis-master-controller.yaml "${kube_flags[@]}"
+  kubectl create -f examples/guestbook/redis-slave-controller.yaml "${kube_flags[@]}"
+  # Command
+  kubectl scale rc/redis-master rc/redis-slave --replicas=4
+  # Post-condition: 4 replicas each
+  kube::test::get_object_assert 'rc redis-master' "{{$rc_replicas_field}}" '4'
+  kube::test::get_object_assert 'rc redis-slave' "{{$rc_replicas_field}}" '4'
+  # Clean-up
+  kubectl delete rc redis-{master,slave} "${kube_flags[@]}"
   ### Expose replication controller as service
   # Pre-condition: 3 replicas
   kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '3'


@@ -26,6 +26,7 @@ import (
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl"
 	cmdutil "github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/util"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/resource"
+	"github.com/GoogleCloudPlatform/kubernetes/pkg/util/errors"
 )
 const (
@@ -39,7 +40,10 @@ scale is sent to the server.`
 $ kubectl scale --replicas=3 replicationcontrollers foo
 // If the replication controller named foo's current size is 2, scale foo to 3.
-$ kubectl scale --current-replicas=2 --replicas=3 replicationcontrollers foo`
+$ kubectl scale --current-replicas=2 --replicas=3 replicationcontrollers foo
+// Scale multiple replication controllers.
+$ kubectl scale --replicas=5 rc/foo rc/bar`
 )
 // NewCmdScale returns a cobra command with the appropriate configuration and flags to run scale
@@ -61,6 +65,7 @@ func NewCmdScale(f *cmdutil.Factory, out io.Writer) *cobra.Command {
 	cmd.Flags().String("resource-version", "", "Precondition for resource version. Requires that the current resource version match this value in order to scale.")
 	cmd.Flags().Int("current-replicas", -1, "Precondition for current size. Requires that the current size of the replication controller match this value in order to scale.")
 	cmd.Flags().Int("replicas", -1, "The new desired number of replicas. Required.")
+	cmd.Flags().Duration("timeout", 0, "The length of time to wait before giving up on a scale operation, zero means don't wait.")
 	cmd.MarkFlagRequired("replicas")
 	cmdutil.AddOutputFlagsForMutation(cmd)
 	return cmd
@@ -102,10 +107,6 @@ func RunScale(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string) error {
 	if err != nil {
 		return err
 	}
-	if len(infos) > 1 {
-		return fmt.Errorf("multiple resources provided: %v", args)
-	}
-	info := infos[0]
 	scaler, err := f.Scaler(mapping)
 	if err != nil {
@@ -113,13 +114,28 @@ func RunScale(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string) error {
 	}
 	resourceVersion := cmdutil.GetFlagString(cmd, "resource-version")
+	if len(resourceVersion) != 0 && len(infos) > 1 {
+		return fmt.Errorf("cannot use --resource-version with multiple controllers")
+	}
 	currentSize := cmdutil.GetFlagInt(cmd, "current-replicas")
+	if currentSize != -1 && len(infos) > 1 {
+		return fmt.Errorf("cannot use --current-replicas with multiple controllers")
+	}
 	precondition := &kubectl.ScalePrecondition{currentSize, resourceVersion}
 	retry := kubectl.NewRetryParams(kubectl.Interval, kubectl.Timeout)
-	waitForReplicas := kubectl.NewRetryParams(kubectl.Interval, kubectl.Timeout)
-	if err := scaler.Scale(info.Namespace, info.Name, uint(count), precondition, retry, waitForReplicas); err != nil {
-		return err
+	var waitForReplicas *kubectl.RetryParams
+	if timeout := cmdutil.GetFlagDuration(cmd, "timeout"); timeout != 0 {
+		waitForReplicas = kubectl.NewRetryParams(kubectl.Interval, timeout)
 	}
-	cmdutil.PrintSuccess(mapper, shortOutput, out, info.Mapping.Resource, info.Name, "scaled")
-	return nil
+	errs := []error{}
+	for _, info := range infos {
+		if err := scaler.Scale(info.Namespace, info.Name, uint(count), precondition, retry, waitForReplicas); err != nil {
+			errs = append(errs, err)
+			continue
+		}
+		cmdutil.PrintSuccess(mapper, shortOutput, out, info.Mapping.Resource, info.Name, "scaled")
+	}
+	return errors.NewAggregate(errs)
 }
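Taken together, the scale.go changes drop the single-controller fast path: preconditions that only make sense for one controller are rejected up front, the wait is armed only when a non-zero --timeout is given, and scale failures are collected rather than aborting the whole run. Below is a minimal, self-contained sketch of that control flow; Info, RetryParams, Scaler, and newAggregate are simplified stand-ins for the real resource.Info, kubectl.RetryParams, kubectl.Scaler, and pkg/util/errors.NewAggregate, so the identifiers are illustrative, not the PR's code.

```go
package main

import (
	"fmt"
	"strings"
	"time"
)

// Info stands in for resource.Info: just enough to name a controller.
type Info struct{ Namespace, Name string }

// RetryParams stands in for kubectl.RetryParams.
type RetryParams struct{ Interval, Timeout time.Duration }

// Scaler stands in for the kubectl.Scaler interface; the precondition
// and retry arguments are elided to keep the sketch short.
type Scaler interface {
	Scale(namespace, name string, count uint, waitForReplicas *RetryParams) error
}

// newAggregate mirrors the behavior of errors.NewAggregate: nil for an
// empty slice, otherwise a single error wrapping all of them.
func newAggregate(errs []error) error {
	if len(errs) == 0 {
		return nil
	}
	msgs := make([]string, 0, len(errs))
	for _, err := range errs {
		msgs = append(msgs, err.Error())
	}
	return fmt.Errorf("[%s]", strings.Join(msgs, ", "))
}

// scaleAll has the shape RunScale takes after this PR: wait only when a
// non-zero timeout was given, scale each controller in turn, and keep
// going past individual failures so every error is reported at the end.
func scaleAll(scaler Scaler, infos []Info, count uint, timeout time.Duration) error {
	var waitForReplicas *RetryParams
	if timeout != 0 {
		waitForReplicas = &RetryParams{Interval: time.Second, Timeout: timeout}
	}
	errs := []error{}
	for _, info := range infos {
		if err := scaler.Scale(info.Namespace, info.Name, count, waitForReplicas); err != nil {
			errs = append(errs, err) // record and continue with the next controller
			continue
		}
		fmt.Printf("%s scaled\n", info.Name) // cmdutil.PrintSuccess in the real code
	}
	return newAggregate(errs)
}

// fakeScaler fails for one controller to show the aggregate in action.
type fakeScaler struct{}

func (fakeScaler) Scale(namespace, name string, count uint, wait *RetryParams) error {
	if name == "bar" {
		return fmt.Errorf("replicationcontrollers %q not found", name)
	}
	return nil
}

func main() {
	infos := []Info{{"default", "foo"}, {"default", "bar"}}
	if err := scaleAll(fakeScaler{}, infos, 5, 0); err != nil {
		fmt.Println(err) // prints: [replicationcontrollers "bar" not found]
	}
}
```

Aggregating instead of returning on the first error means `kubectl scale --replicas=5 rc/foo rc/bar` still scales bar when foo fails, and the non-nil aggregate keeps the command's exit status non-zero so scripts can detect the partial failure.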


@@ -104,10 +104,10 @@ var _ = Describe("Kubectl client", func() {
 			runKubectl("create", "-f", nautilusPath, fmt.Sprintf("--namespace=%v", ns))
 			validateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
 			By("scaling down the replication controller")
-			runKubectl("scale", "rc", "update-demo-nautilus", "--replicas=1", fmt.Sprintf("--namespace=%v", ns))
+			runKubectl("scale", "rc", "update-demo-nautilus", "--replicas=1", "--timeout=5m", fmt.Sprintf("--namespace=%v", ns))
 			validateController(c, nautilusImage, 1, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
 			By("scaling up the replication controller")
-			runKubectl("scale", "rc", "update-demo-nautilus", "--replicas=2", fmt.Sprintf("--namespace=%v", ns))
+			runKubectl("scale", "rc", "update-demo-nautilus", "--replicas=2", "--timeout=5m", fmt.Sprintf("--namespace=%v", ns))
 			validateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
 		})
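Passing --timeout=5m in the e2e run exercises the new wait path: since the timeout is non-zero, waitForReplicas is armed and kubectl gives each resize up to five minutes to take effect instead of returning immediately, so the subsequent validateController call observes a settled replica count.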