Allow replica set to be exposed as a service and add kubectl command tests for replica sets.

Madhusudan.C.S 2016-02-09 13:18:45 -08:00
parent 9d3633a16d
commit 293793cf0d
7 changed files with 218 additions and 21 deletions

View File

@ -13,13 +13,15 @@ kubectl expose \- Take a replication controller, service or pod and expose it as
.SH DESCRIPTION
.PP
Take a replication controller, service, or pod and expose it as a new Kubernetes service.
Take a replication controller, service, replica set or pod and expose it as a new Kubernetes service.
.PP
Looks up a replication controller, service, or pod by name and uses the selector for that resource as the
selector for a new service on the specified port. Note that if no port is specified via \-\-port and the
exposed resource has multiple ports, all will be re\-used by the new service. Also if no labels are specified,
the new service will re\-use the labels from the resource it exposes.
Looks up a replication controller, service, replica set or pod by name and uses the selector for that
resource as the selector for a new service on the specified port. A replica set will be exposed as a
service only if its selector is convertible to a selector that a service supports, i.e. when the
replica set selector contains only the matchLabels component. Note that if no port is specified
via \-\-port and the exposed resource has multiple ports, all will be re\-used by the new service. Also
if no labels are specified, the new service will re\-use the labels from the resource it exposes.
.SH OPTIONS
@ -95,7 +97,7 @@ the new service will re\-use the labels from the resource it exposes.
.PP
\fB\-\-selector\fP=""
A label selector to use for this service. If empty (the default) infer the selector from the replication controller.
A label selector to use for this service. Only equality\-based selector requirements are supported. If empty (the default) infer the selector from the replication controller or replica set.
.PP
\fB\-\-session\-affinity\fP=""
@ -241,6 +243,9 @@ $ kubectl expose service nginx \-\-port=443 \-\-target\-port=8443 \-\-name=nginx
# Create a service for a replicated streaming application on port 4100 balancing UDP traffic and named 'video\-stream'.
$ kubectl expose rc streamer \-\-port=4100 \-\-protocol=udp \-\-name=video\-stream
# Create a service for a replicated nginx using a replica set, which serves on port 80 and connects to the containers on port 8000.
$ kubectl expose rs nginx \-\-port=80 \-\-target\-port=8000
.fi
.RE

View File

@ -39,12 +39,14 @@ Take a replication controller, service or pod and expose it as a new Kubernetes
### Synopsis
Take a replication controller, service, or pod and expose it as a new Kubernetes service.
Take a replication controller, service, replica set or pod and expose it as a new Kubernetes service.
Looks up a replication controller, service, or pod by name and uses the selector for that resource as the
selector for a new service on the specified port. Note that if no port is specified via --port and the
exposed resource has multiple ports, all will be re-used by the new service. Also if no labels are specified,
the new service will re-use the labels from the resource it exposes.
Looks up a replication controller, service, replica set or pod by name and uses the selector for that
resource as the selector for a new service on the specified port. A replica set will be exposed as a
service only if its selector is convertible to a selector that a service supports, i.e. when the
replica set selector contains only the matchLabels component. Note that if no port is specified
via --port and the exposed resource has multiple ports, all will be re-used by the new service. Also
if no labels are specified, the new service will re-use the labels from the resource it exposes.
```
kubectl expose (-f FILENAME | TYPE NAME) [--port=port] [--protocol=TCP|UDP] [--target-port=number-or-name] [--name=name] [--external-ip=external-ip-of-service] [--type=type]
@ -67,6 +69,9 @@ $ kubectl expose service nginx --port=443 --target-port=8443 --name=nginx-https
# Create a service for a replicated streaming application on port 4100 balancing UDP traffic and named 'video-stream'.
$ kubectl expose rc streamer --port=4100 --protocol=udp --name=video-stream
# Create a service for a replicated nginx using a replica set, which serves on port 80 and connects to the containers on port 8000.
$ kubectl expose rs nginx --port=80 --target-port=8000
```
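To make the matchLabels-only restriction concrete, here is a minimal, self-contained sketch of the conversion rule. The types below are simplified stand-ins for the real extensions API objects, and `serviceSelectorFor` is an illustrative helper, not the kubectl implementation:

```
package main

import (
	"fmt"
	"sort"
	"strings"
)

// Simplified stand-ins for the extensions API types; they are NOT the real
// Kubernetes definitions, just enough structure to show the rule.
type LabelSelectorRequirement struct {
	Key      string
	Operator string
	Values   []string
}

type LabelSelector struct {
	MatchLabels      map[string]string
	MatchExpressions []LabelSelectorRequirement
}

// serviceSelectorFor mirrors the rule in the text above: a replica set
// selector can back a service only when it consists solely of matchLabels.
func serviceSelectorFor(sel LabelSelector) (string, error) {
	if len(sel.MatchExpressions) > 0 {
		return "", fmt.Errorf("selector with matchExpressions cannot be converted to a service selector")
	}
	parts := make([]string, 0, len(sel.MatchLabels))
	for k, v := range sel.MatchLabels {
		parts = append(parts, k+"="+v)
	}
	sort.Strings(parts) // deterministic output for the example
	return strings.Join(parts, ","), nil
}

func main() {
	convertible := LabelSelector{
		MatchLabels: map[string]string{"app": "guestbook", "tier": "frontend"},
	}
	notConvertible := LabelSelector{
		MatchLabels: map[string]string{"app": "guestbook"},
		MatchExpressions: []LabelSelectorRequirement{
			{Key: "tier", Operator: "In", Values: []string{"frontend", "backend"}},
		},
	}

	if s, err := serviceSelectorFor(convertible); err == nil {
		fmt.Println("service selector:", s) // app=guestbook,tier=frontend
	}
	if _, err := serviceSelectorFor(notConvertible); err != nil {
		fmt.Println("rejected:", err)
	}
}
```

Only the first selector can back a service; the second is rejected because a service selector has no way to express set-based requirements.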
### Options
@ -88,7 +93,7 @@ $ kubectl expose rc streamer --port=4100 --protocol=udp --name=video-stream
--protocol="TCP": The network protocol for the service to be created. Default is 'tcp'.
--record[=false]: Record current kubectl command in the resource annotation.
--save-config[=false]: If true, the configuration of current object will be saved in its annotation. This is useful when you want to perform kubectl apply on this object in the future.
--selector="": A label selector to use for this service. If empty (the default) infer the selector from the replication controller.
--selector="": A label selector to use for this service. Only equality-based selector requirements are supported. If empty (the default) infer the selector from the replication controller or replica set.
--session-affinity="": If non-empty, set the session affinity for the service to this; legal values: 'None', 'ClientIP'
-a, --show-all[=false]: When printing, show all resources (default hide terminated pods.)
--show-labels[=false]: When printing, show all labels as the last column (default hide labels column)
@ -130,7 +135,7 @@ $ kubectl expose rc streamer --port=4100 --protocol=udp --name=video-stream
* [kubectl](kubectl.md) - kubectl controls the Kubernetes cluster manager
###### Auto generated by spf13/cobra on 22-Jan-2016
###### Auto generated by spf13/cobra on 10-Feb-2016
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/user-guide/kubectl/kubectl_expose.md?pixel)]()

View File

@ -0,0 +1,42 @@
apiVersion: extensions/v1beta1
kind: ReplicaSet
metadata:
  name: frontend
  # these labels can be applied automatically
  # from the labels in the pod template if not set
  # labels:
  #   app: guestbook
  #   tier: frontend
spec:
  # this replicas value is default
  # modify it according to your case
  replicas: 3
  # selector can be applied automatically
  # from the labels in the pod template if not set
  # selector:
  #   matchLabels:
  #     app: guestbook
  #     tier: frontend
  template:
    metadata:
      labels:
        app: guestbook
        tier: frontend
    spec:
      containers:
      - name: php-redis
        image: gcr.io/google_samples/gb-frontend:v3
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
        env:
        - name: GET_HOSTS_FROM
          value: dns
          # If your cluster config does not include a dns service, then to
          # instead access environment variables to find service host
          # info, comment out the 'value: dns' line above, and uncomment the
          # line below.
          # value: env
        ports:
        - containerPort: 80

View File

@ -0,0 +1,44 @@
apiVersion: extensions/v1beta1
kind: ReplicaSet
metadata:
  name: redis-slave
  # these labels can be applied automatically
  # from the labels in the pod template if not set
  # labels:
  #   app: redis
  #   role: slave
  #   tier: backend
spec:
  # this replicas value is default
  # modify it according to your case
  replicas: 2
  # selector can be applied automatically
  # from the labels in the pod template if not set
  # selector:
  #   matchLabels:
  #     app: redis
  #     role: slave
  #     tier: backend
  template:
    metadata:
      labels:
        app: redis
        role: slave
        tier: backend
    spec:
      containers:
      - name: slave
        image: gcr.io/google_samples/gb-redisslave:v1
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
        env:
        - name: GET_HOSTS_FROM
          value: dns
          # If your cluster config does not include a dns service, then to
          # instead access an environment variable to find the master
          # service's host, comment out the 'value: dns' line above, and
          # uncomment the line below.
          # value: env
        ports:
        - containerPort: 6379

View File

@ -234,6 +234,7 @@ runTests() {
rc_replicas_field=".spec.replicas"
rc_status_replicas_field=".status.replicas"
rc_container_image_field=".spec.template.spec.containers"
rs_replicas_field=".spec.replicas"
port_field="(index .spec.ports 0).port"
port_name="(index .spec.ports 0).name"
second_port_field="(index .spec.ports 1).port"
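The `*_field` variables above are Go template expressions that `kube::test::get_object_assert` evaluates against the object kubectl returns. The sketch below runs the same kind of template against a hand-built JSON object; the JSON document and the program are illustrative, not part of the test framework:

```
package main

import (
	"encoding/json"
	"os"
	"text/template"
)

// A hand-built stand-in for the JSON an object lookup would return; just
// enough fields to evaluate the template expressions defined above.
const objectJSON = `{
  "spec": {
    "replicas": 3,
    "ports": [
      {"name": "default", "port": 80}
    ]
  }
}`

func main() {
	var obj map[string]interface{}
	if err := json.Unmarshal([]byte(objectJSON), &obj); err != nil {
		panic(err)
	}

	// Same shapes as rs_replicas_field, port_name and port_field above.
	tmpl := template.Must(template.New("assert").Parse(
		"replicas={{.spec.replicas}} port={{(index .spec.ports 0).name}} {{(index .spec.ports 0).port}}\n"))

	if err := tmpl.Execute(os.Stdout, obj); err != nil {
		panic(err)
	}
	// Output: replicas=3 port=default 80
}
```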
@ -943,6 +944,7 @@ __EOF__
kubectl scale --replicas=2 -f examples/guestbook/frontend-controller.yaml "${kube_flags[@]}"
# Post-condition: 2 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
# Clean-up
kubectl delete rc frontend "${kube_flags[@]}"
### Scale multiple replication controllers
@ -1135,6 +1137,85 @@ __EOF__
kubectl delete deployment nginx-deployment "${kube_flags[@]}"
kubectl delete rs -l pod-template-hash "${kube_flags[@]}"
######################
# Replica Sets #
######################
kube::log::status "Testing kubectl(${version}:replicasets)"
### Create and stop a replica set, make sure it doesn't leak pods
# Pre-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f docs/user-guide/replicaset/frontend.yaml "${kube_flags[@]}"
kubectl delete rs frontend "${kube_flags[@]}"
# Post-condition: no pods from frontend replica set
kube::test::get_object_assert 'pods -l "tier=frontend"' "{{range.items}}{{$id_field}}:{{end}}" ''
### Create replica set frontend from YAML
# Pre-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f docs/user-guide/replicaset/frontend.yaml "${kube_flags[@]}"
# Post-condition: frontend replica set is created
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
# TODO(madhusudancs): Add describe tests once PR #20886 that implements describe for ReplicaSet is merged.
### Scale replica set frontend with current-replicas and replicas
# Pre-condition: 3 replicas
kube::test::get_object_assert 'rs frontend' "{{$rs_replicas_field}}" '3'
# Command
kubectl scale --current-replicas=3 --replicas=2 replicasets frontend "${kube_flags[@]}"
# Post-condition: 2 replicas
kube::test::get_object_assert 'rs frontend' "{{$rs_replicas_field}}" '2'
# Clean-up
kubectl delete rs frontend "${kube_flags[@]}"
# TODO(madhusudancs): Fix this when Scale group issues are resolved (see issue #18528).
### Expose replica set as service
kubectl create -f docs/user-guide/replicaset/frontend.yaml "${kube_flags[@]}"
# Pre-condition: 3 replicas
kube::test::get_object_assert 'rs frontend' "{{$rs_replicas_field}}" '3'
# Command
kubectl expose rs frontend --port=80 "${kube_flags[@]}"
# Post-condition: service exists and the port is unnamed
kube::test::get_object_assert 'service frontend' "{{$port_name}} {{$port_field}}" '<no value> 80'
# Create a service using service/v1 generator
kubectl expose rs frontend --port=80 --name=frontend-2 --generator=service/v1 "${kube_flags[@]}"
# Post-condition: service exists and the port is named default.
kube::test::get_object_assert 'service frontend-2' "{{$port_name}} {{$port_field}}" 'default 80'
# Cleanup services
kubectl delete service frontend{,-2} "${kube_flags[@]}"
### Delete replica set with id
# Pre-condition: frontend replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
# Command
kubectl delete rs frontend "${kube_flags[@]}"
# Post-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
### Create two replica sets
# Pre-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
kubectl create -f docs/user-guide/replicaset/frontend.yaml "${kube_flags[@]}"
kubectl create -f docs/user-guide/replicaset/redis-slave.yaml "${kube_flags[@]}"
# Post-condition: frontend and redis-slave
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:'
### Delete multiple replica sets at once
# Pre-condition: frontend and redis-slave
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:'
# Command
kubectl delete rs frontend redis-slave "${kube_flags[@]}" # delete multiple replica sets at once
# Post-condition: no replica set exists
kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
######################
# ConfigMap #
######################

View File

@ -37,12 +37,14 @@ type ExposeOptions struct {
}
const (
expose_long = `Take a replication controller, service, or pod and expose it as a new Kubernetes service.
expose_long = `Take a replication controller, service, replica set or pod and expose it as a new Kubernetes service.
Looks up a replication controller, service, or pod by name and uses the selector for that resource as the
selector for a new service on the specified port. Note that if no port is specified via --port and the
exposed resource has multiple ports, all will be re-used by the new service. Also if no labels are specified,
the new service will re-use the labels from the resource it exposes.`
Looks up a replication controller, service, replica set or pod by name and uses the selector for that
resource as the selector for a new service on the specified port. A replica set will be exposed as a
service only if its selector is convertible to a selector that a service supports, i.e. when the
replica set selector contains only the matchLabels component. Note that if no port is specified
via --port and the exposed resource has multiple ports, all will be re-used by the new service. Also
if no labels are specified, the new service will re-use the labels from the resource it exposes.`
expose_example = `# Create a service for a replicated nginx, which serves on port 80 and connects to the containers on port 8000.
$ kubectl expose rc nginx --port=80 --target-port=8000
@ -57,7 +59,10 @@ $ kubectl expose pod valid-pod --port=444 --name=frontend
$ kubectl expose service nginx --port=443 --target-port=8443 --name=nginx-https
# Create a service for a replicated streaming application on port 4100 balancing UDP traffic and named 'video-stream'.
$ kubectl expose rc streamer --port=4100 --protocol=udp --name=video-stream`
$ kubectl expose rc streamer --port=4100 --protocol=udp --name=video-stream
# Create a service for a replicated nginx using a replica set, which serves on port 80 and connects to the containers on port 8000.
$ kubectl expose rs nginx --port=80 --target-port=8000`
)
func NewCmdExposeService(f *cmdutil.Factory, out io.Writer) *cobra.Command {
@ -82,7 +87,7 @@ func NewCmdExposeService(f *cmdutil.Factory, out io.Writer) *cobra.Command {
cmd.Flags().Bool("create-external-load-balancer", false, "If true, create an external load balancer for this service (trumped by --type). Implementation is cloud provider dependent. Default is 'false'.")
cmd.Flags().MarkDeprecated("create-external-load-balancer", "use --type=\"LoadBalancer\" instead")
cmd.Flags().String("load-balancer-ip", "", "IP to assign to to the Load Balancer. If empty, an ephemeral IP will be created and used (cloud-provider specific).")
cmd.Flags().String("selector", "", "A label selector to use for this service. If empty (the default) infer the selector from the replication controller.")
cmd.Flags().String("selector", "", "A label selector to use for this service. Only equality-based selector requirements are supported. If empty (the default) infer the selector from the replication controller or replica set.")
cmd.Flags().StringP("labels", "l", "", "Labels to apply to the service created by this call.")
cmd.Flags().Bool("dry-run", false, "If true, only print the object that would be sent, without creating it.")
cmd.Flags().String("container-port", "", "Synonym for --target-port")

View File

@ -266,6 +266,12 @@ func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory {
return "", fmt.Errorf("failed to convert label selector to selector: %v", err)
}
return selector.String(), nil
case *extensions.ReplicaSet:
selector, err := unversioned.LabelSelectorAsSelector(t.Spec.Selector)
if err != nil {
return "", fmt.Errorf("failed to convert label selector to selector: %v", err)
}
return selector.String(), nil
default:
gvk, err := api.Scheme.ObjectKind(object)
if err != nil {
@ -296,6 +302,13 @@ func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory {
return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format")
}
return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil
case *extensions.ReplicaSet:
// TODO(madhusudancs): Make this smarter by admitting MatchExpressions with Equals
// operator, DoubleEquals operator and In operator with only one element in the set.
if len(t.Spec.Selector.MatchExpressions) > 0 {
return "", fmt.Errorf("couldn't convert expressions - \"%+v\" to map-based selector format")
}
return kubectl.MakeLabels(t.Spec.Selector.MatchLabels), nil
default:
gvk, err := api.Scheme.ObjectKind(object)
if err != nil {
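The TODO in the ReplicaSet case sketches a possible relaxation: matchExpressions that pin a key to exactly one value could also be admitted. A rough, purely illustrative sketch of that idea over simplified stand-in types (not part of this commit, and not the real API types; operator names follow the TODO's wording):

```
package main

import "fmt"

// Simplified stand-in types, not the real API definitions.
type Requirement struct {
	Key      string
	Operator string // e.g. "In", "Equals", "DoubleEquals"
	Values   []string
}

type LabelSelector struct {
	MatchLabels      map[string]string
	MatchExpressions []Requirement
}

// mapBasedSelector accepts matchLabels plus any matchExpression that is
// equivalent to plain equality, rejecting everything else.
func mapBasedSelector(sel LabelSelector) (map[string]string, error) {
	out := map[string]string{}
	for k, v := range sel.MatchLabels {
		out[k] = v
	}
	for _, r := range sel.MatchExpressions {
		singleValue := len(r.Values) == 1
		switch {
		case singleValue && (r.Operator == "Equals" || r.Operator == "DoubleEquals" || r.Operator == "In"):
			out[r.Key] = r.Values[0]
		default:
			return nil, fmt.Errorf("requirement %s %s %v has no map-based equivalent", r.Key, r.Operator, r.Values)
		}
	}
	return out, nil
}

func main() {
	sel := LabelSelector{
		MatchLabels:      map[string]string{"app": "guestbook"},
		MatchExpressions: []Requirement{{Key: "tier", Operator: "In", Values: []string{"frontend"}}},
	}
	fmt.Println(mapBasedSelector(sel)) // map[app:guestbook tier:frontend] <nil>
}
```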
@ -315,6 +328,8 @@ func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory {
return getServicePorts(t.Spec), nil
case *extensions.Deployment:
return getPorts(t.Spec.Template.Spec), nil
case *extensions.ReplicaSet:
return getPorts(t.Spec.Template.Spec), nil
default:
gvk, err := api.Scheme.ObjectKind(object)
if err != nil {
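For ReplicaSet (as for Deployment), the ports offered to the new service come from the pod template, which is why all container ports are reused when --port is omitted. getPorts itself is not shown in this diff; the helper below is an assumed, simplified equivalent over stand-in types:

```
package main

import "fmt"

// Simplified stand-ins for a pod template spec; the real PodSpec/Container
// types live in the core API package.
type ContainerPort struct {
	Name          string
	ContainerPort int
}

type Container struct {
	Name  string
	Ports []ContainerPort
}

type PodSpec struct {
	Containers []Container
}

// getPorts here is an assumed equivalent of the helper referenced in the
// diff: collect every container port declared in the pod template.
func getPorts(spec PodSpec) []string {
	var ports []string
	for _, c := range spec.Containers {
		for _, p := range c.Ports {
			ports = append(ports, fmt.Sprintf("%d", p.ContainerPort))
		}
	}
	return ports
}

func main() {
	spec := PodSpec{Containers: []Container{
		{Name: "php-redis", Ports: []ContainerPort{{ContainerPort: 80}}},
		{Name: "sidecar", Ports: []ContainerPort{{Name: "metrics", ContainerPort: 9090}}},
	}}
	fmt.Println(getPorts(spec)) // [80 9090]
}
```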
@ -462,7 +477,7 @@ func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory {
},
CanBeExposed: func(kind unversioned.GroupKind) error {
switch kind {
case api.Kind("ReplicationController"), api.Kind("Service"), api.Kind("Pod"), extensions.Kind("Deployment"):
case api.Kind("ReplicationController"), api.Kind("Service"), api.Kind("Pod"), extensions.Kind("Deployment"), extensions.Kind("ReplicaSet"):
// nothing to do here
default:
return fmt.Errorf("cannot expose a %s", kind)