rename run-container to run in kubectl

Anastasis Andronidis 2015-05-21 22:53:10 +02:00
parent b8a808b1ae
commit d4a47bdb9e
23 changed files with 89 additions and 82 deletions

View File

@@ -537,9 +537,9 @@ _kubectl_proxy()
must_have_one_noun=()
}
_kubectl_run-container()
_kubectl_run()
{
last_command="kubectl_run-container"
last_command="kubectl_run"
commands=()
flags=()
@@ -901,7 +901,7 @@ _kubectl()
commands+=("exec")
commands+=("port-forward")
commands+=("proxy")
commands+=("run-container")
commands+=("run")
commands+=("stop")
commands+=("expose")
commands+=("label")

View File

@@ -22,7 +22,7 @@ kubectl_port-forward.md
kubectl_proxy.md
kubectl_resize.md
kubectl_rolling-update.md
kubectl_run-container.md
kubectl_run.md
kubectl_stop.md
kubectl_update.md
kubectl_version.md

View File

@@ -47,7 +47,7 @@ See also issues with the following labels:
1. A fairly general-purpose way to specify fields on the command line during creation and update, not just from a config file
1. Extensible API-based generator framework (i.e., invoke generators via an API/URL rather than building them into kubectl), so that complex client libraries don't need to be rewritten in multiple languages, and so that the abstractions are available through all interfaces: API, CLI, UI, logs, ... [#5280](https://github.com/GoogleCloudPlatform/kubernetes/issues/5280)
1. Need schema registry, and some way to invoke generator (e.g., using a container)
1. Convert run-container to API-based generator
1. Convert run command to API-based generator
1. Transformation framework
1. More intelligent defaulting of fields (e.g., [#2643](https://github.com/GoogleCloudPlatform/kubernetes/issues/2643))
1. Update preconditions based on the values of arbitrary object fields.

View File

@@ -191,7 +191,7 @@ NAME IMAGE(S) SELECTOR REPLICAS
Start a container running nginx with a replication controller and three replicas
```
$ cluster/kubectl.sh run-container my-nginx --image=nginx --replicas=3 --port=80
$ cluster/kubectl.sh run my-nginx --image=nginx --replicas=3 --port=80
```
When listing the pods, you will see that three containers have been started and are in the Waiting state:

View File

@@ -46,7 +46,7 @@ The `kubectl.sh` line below spins up two containers running
[Nginx](http://nginx.org/en/) on port 80:
```bash
cluster/kubectl.sh run-container my-nginx --image=nginx --replicas=2 --port=80
cluster/kubectl.sh run my-nginx --image=nginx --replicas=2 --port=80
```
To stop the containers:

View File

@@ -18,7 +18,7 @@ If the status of any node is ```Unknown``` or ```NotReady``` your cluster is bro
### Run an application
```sh
kubectl -s http://localhost:8080 run-container nginx --image=nginx --port=80
kubectl -s http://localhost:8080 run nginx --image=nginx --port=80
```
Now run ```docker ps```; you should see nginx running. You may need to wait a few minutes for the image to be pulled.
@@ -31,7 +31,7 @@ kubectl expose rc nginx --port=80
This should print:
```
NAME LABELS SELECTOR IP PORT(S)
nginx <none> run-container=nginx <ip-addr> 80/TCP
nginx <none> run=nginx <ip-addr> 80/TCP
```
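The SELECTOR column above is the user-visible side of this rename: pods started by `kubectl run nginx` now carry the label `run=nginx` instead of `run-container=nginx`, and the service created by `kubectl expose rc nginx` selects pods by exactly that label. Below is a minimal, illustrative-only Go sketch of the equality matching involved (not the Kubernetes label-selector implementation):
```go
package main

import "fmt"

// matches reports whether every key/value pair of the selector is present,
// with the same value, in the pod's labels.
func matches(selector, podLabels map[string]string) bool {
	for k, v := range selector {
		if podLabels[k] != v {
			return false
		}
	}
	return true
}

func main() {
	selector := map[string]string{"run": "nginx"}         // selector of the exposed service
	newPod := map[string]string{"run": "nginx"}           // label applied by `kubectl run nginx`
	oldPod := map[string]string{"run-container": "nginx"} // label scheme before the rename

	fmt.Println(matches(selector, newPod)) // true
	fmt.Println(matches(selector, oldPod)) // false: pods labelled the old way are not selected
}
```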
Hit the webserver:

View File

@@ -51,7 +51,7 @@ If you are running different kubernetes clusters, you may need to specify ```-s
### Run an application
```sh
kubectl -s http://localhost:8080 run-container nginx --image=nginx --port=80
kubectl -s http://localhost:8080 run nginx --image=nginx --port=80
```
Now run ```docker ps```; you should see nginx running. You may need to wait a few minutes for the image to be pulled.
@@ -64,7 +64,7 @@ kubectl expose rc nginx --port=80
This should print:
```
NAME LABELS SELECTOR IP PORT(S)
nginx <none> run-container=nginx <ip-addr> 80/TCP
nginx <none> run=nginx <ip-addr> 80/TCP
```
Hit the webserver:

View File

@@ -47,7 +47,7 @@ You can now use any of the cluster/kubectl.sh commands to interact with your loc
cluster/kubectl.sh get pods
cluster/kubectl.sh get services
cluster/kubectl.sh get replicationcontrollers
cluster/kubectl.sh run-container my-nginx --image=nginx --replicas=2 --port=80
cluster/kubectl.sh run my-nginx --image=nginx --replicas=2 --port=80
## begin wait for provision to complete, you can monitor the docker pull by opening a new terminal

View File

@@ -164,7 +164,7 @@ NAME IMAGE(S) SELECTOR REPLICAS
Start a container running nginx with a replication controller and three replicas
```sh
$ ./cluster/kubectl.sh run-container my-nginx --image=nginx --replicas=3 --port=80
$ ./cluster/kubectl.sh run my-nginx --image=nginx --replicas=3 --port=80
```
When listing the pods, you will see that three containers have been started and are in the Waiting state:

View File

@@ -60,7 +60,7 @@ kubectl
* [kubectl proxy](kubectl_proxy.md) - Run a proxy to the Kubernetes API server
* [kubectl resize](kubectl_resize.md) - Set a new size for a Replication Controller.
* [kubectl rolling-update](kubectl_rolling-update.md) - Perform a rolling update of the given ReplicationController.
* [kubectl run-container](kubectl_run-container.md) - Run a particular image on the cluster.
* [kubectl run](kubectl_run.md) - Run a particular image on the cluster.
* [kubectl stop](kubectl_stop.md) - Gracefully shut down a resource by id or filename.
* [kubectl update](kubectl_update.md) - Update a resource by filename or stdin.
* [kubectl version](kubectl_version.md) - Print the client and server version information.

View File

@@ -1,4 +1,4 @@
## kubectl run-container
## kubectl run
Run a particular image on the cluster.
@@ -9,34 +9,34 @@ Create and run a particular image, possibly replicated.
Creates a replication controller to manage the created container(s).
```
kubectl run-container NAME --image=image [--port=port] [--replicas=replicas] [--dry-run=bool] [--overrides=inline-json]
kubectl run NAME --image=image [--port=port] [--replicas=replicas] [--dry-run=bool] [--overrides=inline-json]
```
### Examples
```
// Starts a single instance of nginx.
$ kubectl run-container nginx --image=nginx
$ kubectl run nginx --image=nginx
// Starts a replicated instance of nginx.
$ kubectl run-container nginx --image=nginx --replicas=5
$ kubectl run nginx --image=nginx --replicas=5
// Dry run. Print the corresponding API objects without creating them.
$ kubectl run-container nginx --image=nginx --dry-run
$ kubectl run nginx --image=nginx --dry-run
// Start a single instance of nginx, but overload the spec of the replication controller with a partial set of values parsed from JSON.
$ kubectl run-container nginx --image=nginx --overrides='{ "apiVersion": "v1beta3", "spec": { ... } }'
$ kubectl run nginx --image=nginx --overrides='{ "apiVersion": "v1beta3", "spec": { ... } }'
```
### Options
```
--dry-run=false: If true, only print the object that would be sent, without sending it.
--generator="run-container/v1": The name of the API generator to use. Default is 'run-container-controller/v1'.
-h, --help=false: help for run-container
--generator="run/v1": The name of the API generator to use. Default is 'run-controller/v1'.
-h, --help=false: help for run
--hostport=-1: The host port mapping for the container port. To demonstrate a single-machine container.
--image="": The image for the container to run.
-l, --labels="": Labels to apply to the pod(s) created by this call to run-container.
-l, --labels="": Labels to apply to the pod(s).
--no-headers=false: When using the default output, don't print headers.
-o, --output="": Output format. One of: json|yaml|template|templatefile.
--output-version="": Output the formatted object with the given version (default api-version).
@@ -80,4 +80,4 @@ $ kubectl run-container nginx --image=nginx --overrides='{ "apiVersion": "v1beta
###### Auto generated by spf13/cobra at 2015-05-21 10:33:11.189857293 +0000 UTC
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/kubectl_run-container.md?pixel)]()
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/kubectl_run.md?pixel)]()
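The `--overrides` example above passes a partial JSON object that is layered over the generated replication controller; only the overridden fields plus a valid `apiVersion` need to be supplied. A hedged Go sketch of building such a payload (the merge itself is performed by kubectl and is not reproduced here):
```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// A partial v1beta3 spec: everything not listed here keeps the value the
	// generator produced.
	override := map[string]interface{}{
		"apiVersion": "v1beta3",
		"spec": map[string]interface{}{
			"replicas": 2,
		},
	}

	payload, err := json.Marshal(override)
	if err != nil {
		panic(err)
	}
	// Would be passed as: kubectl run nginx --image=nginx --overrides='<payload>'
	fmt.Println(string(payload))
}
```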

View File

@@ -21,7 +21,7 @@ kubectl-port-forward.1
kubectl-proxy.1
kubectl-resize.1
kubectl-rolling-update.1
kubectl-run-container.1
kubectl-run.1
kubectl-stop.1
kubectl-update.1
kubectl-version.1

View File

@@ -3,12 +3,12 @@
.SH NAME
.PP
kubectl run\-container \- Run a particular image on the cluster.
kubectl run \- Run a particular image on the cluster.
.SH SYNOPSIS
.PP
\fBkubectl run\-container\fP [OPTIONS]
\fBkubectl run\fP [OPTIONS]
.SH DESCRIPTION
@@ -23,12 +23,12 @@ Creates a replication controller to manage the created container(s).
If true, only print the object that would be sent, without sending it.
.PP
\fB\-\-generator\fP="run\-container/v1"
The name of the API generator to use. Default is 'run\-container\-controller/v1'.
\fB\-\-generator\fP="run/v1"
The name of the API generator to use. Default is 'run\-controller/v1'.
.PP
\fB\-h\fP, \fB\-\-help\fP=false
help for run\-container
help for run
.PP
\fB\-\-hostport\fP=\-1
@@ -40,7 +40,7 @@ Creates a replication controller to manage the created container(s).
.PP
\fB\-l\fP, \fB\-\-labels\fP=""
Labels to apply to the pod(s) created by this call to run\-container.
Labels to apply to the pod(s).
.PP
\fB\-\-no\-headers\fP=false
@@ -176,16 +176,16 @@ Creates a replication controller to manage the created container(s).
.nf
// Starts a single instance of nginx.
$ kubectl run\-container nginx \-\-image=nginx
$ kubectl run nginx \-\-image=nginx
// Starts a replicated instance of nginx.
$ kubectl run\-container nginx \-\-image=nginx \-\-replicas=5
$ kubectl run nginx \-\-image=nginx \-\-replicas=5
// Dry run. Print the corresponding API objects without creating them.
$ kubectl run\-container nginx \-\-image=nginx \-\-dry\-run
$ kubectl run nginx \-\-image=nginx \-\-dry\-run
// Start a single instance of nginx, but overload the spec of the replication controller with a partial set of values parsed from JSON.
$ kubectl run\-container nginx \-\-image=nginx \-\-overrides='\{ "apiVersion": "v1beta3", "spec": \{ ... \} \}'
$ kubectl run nginx \-\-image=nginx \-\-overrides='\{ "apiVersion": "v1beta3", "spec": \{ ... \} \}'
.fi
.RE

View File

@@ -124,7 +124,7 @@ Find more information at
.SH SEE ALSO
.PP
\fBkubectl\-get(1)\fP, \fBkubectl\-describe(1)\fP, \fBkubectl\-create(1)\fP, \fBkubectl\-update(1)\fP, \fBkubectl\-delete(1)\fP, \fBkubectl\-namespace(1)\fP, \fBkubectl\-logs(1)\fP, \fBkubectl\-rolling\-update(1)\fP, \fBkubectl\-resize(1)\fP, \fBkubectl\-exec(1)\fP, \fBkubectl\-port\-forward(1)\fP, \fBkubectl\-proxy(1)\fP, \fBkubectl\-run\-container(1)\fP, \fBkubectl\-stop(1)\fP, \fBkubectl\-expose(1)\fP, \fBkubectl\-label(1)\fP, \fBkubectl\-config(1)\fP, \fBkubectl\-cluster\-info(1)\fP, \fBkubectl\-api\-versions(1)\fP, \fBkubectl\-version(1)\fP,
\fBkubectl\-get(1)\fP, \fBkubectl\-describe(1)\fP, \fBkubectl\-create(1)\fP, \fBkubectl\-update(1)\fP, \fBkubectl\-delete(1)\fP, \fBkubectl\-namespace(1)\fP, \fBkubectl\-logs(1)\fP, \fBkubectl\-rolling\-update(1)\fP, \fBkubectl\-resize(1)\fP, \fBkubectl\-exec(1)\fP, \fBkubectl\-port\-forward(1)\fP, \fBkubectl\-proxy(1)\fP, \fBkubectl\-run(1)\fP, \fBkubectl\-stop(1)\fP, \fBkubectl\-expose(1)\fP, \fBkubectl\-label(1)\fP, \fBkubectl\-config(1)\fP, \fBkubectl\-cluster\-info(1)\fP, \fBkubectl\-api\-versions(1)\fP, \fBkubectl\-version(1)\fP,
.SH HISTORY

View File

@@ -36,7 +36,7 @@ The replication controller simply ensures that the desired number of pods matche
The replication controller is forever constrained to this narrow responsibility. It itself will not perform readiness nor liveness probes. Rather than performing auto-scaling, it is intended to be controlled by an external auto-scaler (as discussed in [#492](https://github.com/GoogleCloudPlatform/kubernetes/issues/492)), which would change its `replicas` field. We will not add scheduling policies (e.g., [spreading](https://github.com/GoogleCloudPlatform/kubernetes/issues/367#issuecomment-48428019)) to replication controller. Nor should it verify that the pods controlled match the currently specified template, as that would obstruct auto-sizing and other automated processes. Similarly, completion deadlines, ordering dependencies, configuration expansion, and other features belong elsewhere. We even plan to factor out the mechanism for bulk pod creation ([#170](https://github.com/GoogleCloudPlatform/kubernetes/issues/170)).
The replication controller is intended to be a composable building-block primitive. We expect higher-level APIs and/or tools to be built on top of it and other complementary primitives for user convenience in the future. The "macro" operations currently supported by kubectl (run-container, stop, resize, rolling-update) are proof-of-concept examples of this. For instance, we could imagine something like [Asgard](http://techblog.netflix.com/2012/06/asgard-web-based-cloud-management-and.html) managing replication controllers, auto-scalers, services, scheduling policies, canaries, etc.
The replication controller is intended to be a composable building-block primitive. We expect higher-level APIs and/or tools to be built on top of it and other complementary primitives for user convenience in the future. The "macro" operations currently supported by kubectl (run, stop, resize, rolling-update) are proof-of-concept examples of this. For instance, we could imagine something like [Asgard](http://techblog.netflix.com/2012/06/asgard-web-based-cloud-management-and.html) managing replication controllers, auto-scalers, services, scheduling policies, canaries, etc.
## Common usage patterns

View File

@@ -184,7 +184,7 @@ At this point, all requests we make to the Kubernetes cluster from the command l
Let's create some content.
```shell
$ cluster/kubectl.sh run-container snowflake --image=kubernetes/serve_hostname --replicas=2
$ cluster/kubectl.sh run snowflake --image=kubernetes/serve_hostname --replicas=2
```
We have just created a replication controller whose replica size is 2 that is running the pod called snowflake with a basic container that just serves the hostname.
@@ -192,14 +192,14 @@ We have just created a replication controller whose replica size is 2 that is ru
```shell
cluster/kubectl.sh get rc
CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS
snowflake snowflake kubernetes/serve_hostname run-container=snowflake 2
snowflake snowflake kubernetes/serve_hostname run=snowflake 2
$ cluster/kubectl.sh get pods
POD IP CONTAINER(S) IMAGE(S) HOST LABELS STATUS CREATED MESSAGE
snowflake-mbrfi 10.244.2.4 kubernetes-minion-ilqx/104.197.8.214 run-container=snowflake Running About an hour
snowflake kubernetes/serve_hostname Running About an hour
snowflake-p78ev 10.244.2.5 kubernetes-minion-ilqx/104.197.8.214 run-container=snowflake Running About an hour
snowflake kubernetes/serve_hostname Running About an hour
snowflake-mbrfi 10.244.2.4 kubernetes-minion-ilqx/104.197.8.214 run=snowflake Running About an hour
snowflake kubernetes/serve_hostname Running About an hour
snowflake-p78ev 10.244.2.5 kubernetes-minion-ilqx/104.197.8.214 run=snowflake Running About an hour
snowflake kubernetes/serve_hostname Running About an hour
```
And this is great: developers are able to do what they want without having to worry about affecting content in the production namespace.
@@ -223,23 +223,23 @@ POD IP CONTAINER(S) IMAGE(S)
Production likes to run cattle, so let's create some cattle pods.
```shell
$ cluster/kubectl.sh run-container cattle --image=kubernetes/serve_hostname --replicas=5
$ cluster/kubectl.sh run cattle --image=kubernetes/serve_hostname --replicas=5
$ cluster/kubectl.sh get rc
CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS
cattle cattle kubernetes/serve_hostname run-container=cattle 5
cattle cattle kubernetes/serve_hostname run=cattle 5
$ cluster/kubectl.sh get pods
POD IP CONTAINER(S) IMAGE(S) HOST LABELS STATUS CREATED MESSAGE
cattle-1kyvj 10.244.0.4 kubernetes-minion-7s1y/23.236.54.97 run-container=cattle Running About an hour
cattle kubernetes/serve_hostname Running About an hour
cattle-kobrk 10.244.1.4 kubernetes-minion-cfs6/104.154.61.231 run-container=cattle Running About an hour
cattle kubernetes/serve_hostname Running About an hour
cattle-l1v9t 10.244.0.5 kubernetes-minion-7s1y/23.236.54.97 run-container=cattle Running About an hour
cattle kubernetes/serve_hostname Running About an hour
cattle-ne2sj 10.244.3.7 kubernetes-minion-x8gx/104.154.47.83 run-container=cattle Running About an hour
cattle kubernetes/serve_hostname Running About an hour
cattle-qrk4x 10.244.0.6 kubernetes-minion-7s1y/23.236.54.97 run-container=cattle Running About an hour
cattle-1kyvj 10.244.0.4 kubernetes-minion-7s1y/23.236.54.97 run=cattle Running About an hour
cattle kubernetes/serve_hostname Running About an hour
cattle-kobrk 10.244.1.4 kubernetes-minion-cfs6/104.154.61.231 run=cattle Running About an hour
cattle kubernetes/serve_hostname Running About an hour
cattle-l1v9t 10.244.0.5 kubernetes-minion-7s1y/23.236.54.97 run=cattle Running About an hour
cattle kubernetes/serve_hostname Running About an hour
cattle-ne2sj 10.244.3.7 kubernetes-minion-x8gx/104.154.47.83 run=cattle Running About an hour
cattle kubernetes/serve_hostname Running About an hour
cattle-qrk4x 10.244.0.6 kubernetes-minion-7s1y/23.236.54.97 run=cattle Running About an hour
cattle kubernetes/serve_hostname
```

View File

@@ -12,7 +12,7 @@ The `kubectl` line below spins up two containers running
[Nginx](http://nginx.org/en/) on port 80:
```bash
kubectl run-container my-nginx --image=nginx --replicas=2 --port=80
kubectl run my-nginx --image=nginx --replicas=2 --port=80
```
Once the pods are created, you can list them to see what is up and running:

View File

@@ -126,7 +126,7 @@ Find more information at https://github.com/GoogleCloudPlatform/kubernetes.`,
cmds.AddCommand(NewCmdPortForward(f))
cmds.AddCommand(NewCmdProxy(f, out))
cmds.AddCommand(NewCmdRunContainer(f, out))
cmds.AddCommand(NewCmdRun(f, out))
cmds.AddCommand(NewCmdStop(f, out))
cmds.AddCommand(NewCmdExposeService(f, out))

View File

@@ -173,8 +173,8 @@ func NewAPIFactory() (*cmdutil.Factory, *testFactory, runtime.Codec) {
Validator: validation.NullSchema{},
}
generators := map[string]kubectl.Generator{
"run-container/v1": kubectl.BasicReplicationController{},
"service/v1": kubectl.ServiceGenerator{},
"run/v1": kubectl.BasicReplicationController{},
"service/v1": kubectl.ServiceGenerator{},
}
return &cmdutil.Factory{
Object: func() (meta.RESTMapper, runtime.ObjectTyper) {
@@ -245,7 +245,7 @@ func ExamplePrintReplicationController() {
Codec: codec,
Client: nil,
}
cmd := NewCmdRunContainer(f, os.Stdout)
cmd := NewCmdRun(f, os.Stdout)
ctrl := &api.ReplicationController{
ObjectMeta: api.ObjectMeta{
Name: "foo",

View File

@@ -19,6 +19,7 @@ package cmd
import (
"fmt"
"io"
"os"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl"
@@ -30,31 +31,33 @@ const (
run_long = `Create and run a particular image, possibly replicated.
Creates a replication controller to manage the created container(s).`
run_example = `// Starts a single instance of nginx.
$ kubectl run-container nginx --image=nginx
$ kubectl run nginx --image=nginx
// Starts a replicated instance of nginx.
$ kubectl run-container nginx --image=nginx --replicas=5
$ kubectl run nginx --image=nginx --replicas=5
// Dry run. Print the corresponding API objects without creating them.
$ kubectl run-container nginx --image=nginx --dry-run
$ kubectl run nginx --image=nginx --dry-run
// Start a single instance of nginx, but overload the spec of the replication controller with a partial set of values parsed from JSON.
$ kubectl run-container nginx --image=nginx --overrides='{ "apiVersion": "v1beta3", "spec": { ... } }'`
$ kubectl run nginx --image=nginx --overrides='{ "apiVersion": "v1beta3", "spec": { ... } }'`
)
func NewCmdRunContainer(f *cmdutil.Factory, out io.Writer) *cobra.Command {
func NewCmdRun(f *cmdutil.Factory, out io.Writer) *cobra.Command {
cmd := &cobra.Command{
Use: "run-container NAME --image=image [--port=port] [--replicas=replicas] [--dry-run=bool] [--overrides=inline-json]",
Use: "run NAME --image=image [--port=port] [--replicas=replicas] [--dry-run=bool] [--overrides=inline-json]",
// run-container is deprecated
Aliases: []string{"run-container"},
Short: "Run a particular image on the cluster.",
Long: run_long,
Example: run_example,
Run: func(cmd *cobra.Command, args []string) {
err := RunRunContainer(f, out, cmd, args)
err := Run(f, out, cmd, args)
cmdutil.CheckErr(err)
},
}
cmdutil.AddPrinterFlags(cmd)
cmd.Flags().String("generator", "run-container/v1", "The name of the API generator to use. Default is 'run-container-controller/v1'.")
cmd.Flags().String("generator", "run/v1", "The name of the API generator to use. Default is 'run-controller/v1'.")
cmd.Flags().String("image", "", "The image for the container to run.")
cmd.MarkFlagRequired("image")
cmd.Flags().IntP("replicas", "r", 1, "Number of replicas to create for this container. Default is 1.")
@@ -62,13 +65,17 @@ func NewCmdRunContainer(f *cmdutil.Factory, out io.Writer) *cobra.Command {
cmd.Flags().String("overrides", "", "An inline JSON override for the generated object. If this is non-empty, it is used to override the generated object. Requires that the object supply a valid apiVersion field.")
cmd.Flags().Int("port", -1, "The port that this container exposes.")
cmd.Flags().Int("hostport", -1, "The host port mapping for the container port. To demonstrate a single-machine container.")
cmd.Flags().StringP("labels", "l", "", "Labels to apply to the pod(s) created by this call to run-container.")
cmd.Flags().StringP("labels", "l", "", "Labels to apply to the pod(s).")
return cmd
}
func RunRunContainer(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string) error {
func Run(f *cmdutil.Factory, out io.Writer, cmd *cobra.Command, args []string) error {
if os.Args[1] == "run-container" {
printDeprecationWarning("run", "run-container")
}
if len(args) != 1 {
return cmdutil.UsageError(cmd, "NAME is required for run-container")
return cmdutil.UsageError(cmd, "NAME is required for run")
}
namespace, err := f.DefaultNamespace()
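The rename stays backwards compatible: `run-container` is kept as a cobra alias of `run`, and a deprecation warning is printed when the command is invoked through its old name (the `os.Args[1]` check above). A minimal, self-contained sketch of that pattern, using placeholder output rather than kubectl's own helpers:
```go
package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

func main() {
	run := &cobra.Command{
		Use:     "run NAME --image=image",
		Aliases: []string{"run-container"}, // deprecated spelling keeps working
		Short:   "Run a particular image on the cluster.",
		Run: func(cmd *cobra.Command, args []string) {
			// Warn when the user typed the old command name.
			if len(os.Args) > 1 && os.Args[1] == "run-container" {
				fmt.Fprintln(os.Stderr, "run-container is DEPRECATED, please use run instead")
			}
			fmt.Printf("would generate a replication controller for %v\n", args)
		},
	}

	root := &cobra.Command{Use: "kubectl-sketch"}
	root.AddCommand(run)
	root.Execute()
}
```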

View File

@@ -93,8 +93,8 @@ func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory {
flags.SetNormalizeFunc(util.WarnWordSepNormalizeFunc) // Warn for "_" flags
generators := map[string]kubectl.Generator{
"run-container/v1": kubectl.BasicReplicationController{},
"service/v1": kubectl.ServiceGenerator{},
"run/v1": kubectl.BasicReplicationController{},
"service/v1": kubectl.ServiceGenerator{},
}
clientConfig := optionalClientConfig
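The map key is what the `--generator` flag selects: `--generator=run/v1` (formerly `run-container/v1`) resolves to `kubectl.BasicReplicationController{}`, whose `Generate` method turns flat string parameters into an API object. A toy sketch of that lookup-and-generate flow, with assumed names standing in for the real kubectl and runtime types:
```go
package main

import "fmt"

// Generator mirrors the shape visible in this diff: flat string parameters in,
// an object out. The real interface returns a runtime.Object.
type Generator interface {
	Generate(params map[string]string) (interface{}, error)
}

type basicReplicationController struct{}

func (basicReplicationController) Generate(params map[string]string) (interface{}, error) {
	if params["name"] == "" {
		return nil, fmt.Errorf("name is required")
	}
	// The generated controller carries the default label run=<name>.
	return fmt.Sprintf("ReplicationController %q (replicas=%s, label run=%s)",
		params["name"], params["replicas"], params["name"]), nil
}

func main() {
	generators := map[string]Generator{
		"run/v1": basicReplicationController{}, // previously registered as "run-container/v1"
	}

	gen, ok := generators["run/v1"] // value of the --generator flag
	if !ok {
		panic("unknown generator")
	}
	obj, err := gen.Generate(map[string]string{"name": "nginx", "replicas": "3"})
	if err != nil {
		panic(err)
	}
	fmt.Println(obj)
}
```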

View File

@@ -49,7 +49,7 @@ func (BasicReplicationController) Generate(params map[string]string) (runtime.Ob
}
} else {
labels = map[string]string{
"run-container": params["name"],
"run": params["name"],
}
}
count, err := strconv.Atoi(params["replicas"])
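This hunk is where the default label scheme changes: when `-l/--labels` is not supplied, the generator now falls back to `run=<name>` rather than `run-container=<name>`, and (as the test file below confirms) the same map ends up as the controller's labels, its selector, and the pod template labels. A stand-alone sketch of that fallback, with a hypothetical helper name:
```go
package main

import "fmt"

// defaultRunLabels is a hypothetical helper illustrating the fallback: use the
// caller-supplied labels when present, otherwise label everything run=<name>.
func defaultRunLabels(name string, supplied map[string]string) map[string]string {
	if len(supplied) > 0 {
		return supplied
	}
	return map[string]string{"run": name} // was "run-container" before this change
}

func main() {
	labels := defaultRunLabels("nginx", nil)
	fmt.Println(labels) // prints map[run:nginx]; reused as selector and pod template labels
}
```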

View File

@@ -39,14 +39,14 @@ func TestGenerate(t *testing.T) {
expected: &api.ReplicationController{
ObjectMeta: api.ObjectMeta{
Name: "foo",
Labels: map[string]string{"run-container": "foo"},
Labels: map[string]string{"run": "foo"},
},
Spec: api.ReplicationControllerSpec{
Replicas: 1,
Selector: map[string]string{"run-container": "foo"},
Selector: map[string]string{"run": "foo"},
Template: &api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
Labels: map[string]string{"run-container": "foo"},
Labels: map[string]string{"run": "foo"},
},
Spec: api.PodSpec{
Containers: []api.Container{
@@ -70,14 +70,14 @@ func TestGenerate(t *testing.T) {
expected: &api.ReplicationController{
ObjectMeta: api.ObjectMeta{
Name: "foo",
Labels: map[string]string{"run-container": "foo"},
Labels: map[string]string{"run": "foo"},
},
Spec: api.ReplicationControllerSpec{
Replicas: 1,
Selector: map[string]string{"run-container": "foo"},
Selector: map[string]string{"run": "foo"},
Template: &api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
Labels: map[string]string{"run-container": "foo"},
Labels: map[string]string{"run": "foo"},
},
Spec: api.PodSpec{
Containers: []api.Container{
@@ -107,14 +107,14 @@ func TestGenerate(t *testing.T) {
expected: &api.ReplicationController{
ObjectMeta: api.ObjectMeta{
Name: "foo",
Labels: map[string]string{"run-container": "foo"},
Labels: map[string]string{"run": "foo"},
},
Spec: api.ReplicationControllerSpec{
Replicas: 1,
Selector: map[string]string{"run-container": "foo"},
Selector: map[string]string{"run": "foo"},
Template: &api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
Labels: map[string]string{"run-container": "foo"},
Labels: map[string]string{"run": "foo"},
},
Spec: api.PodSpec{
Containers: []api.Container{