Fix the service printer to be a single line per service

This commit is contained in:
parent 68bc931c65
commit 79fb674679
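In short, the printer now emits one tab-separated row per service: all ports are folded into a single comma-separated `PORT(S)` cell and the `EXTERNAL_IP` cell is derived from the service type, instead of spilling extra IPs and ports onto continuation lines. The sketch below mirrors that idea with simplified stand-in types; the `service`/`port` structs and field names are illustrative only, not the repository's `api.Service`.

```go
package main

import (
	"fmt"
	"strings"
)

// Simplified stand-ins for the real API types; illustrative only.
type port struct {
	Port     int
	Protocol string
}

type service struct {
	Name       string
	Type       string // "ClusterIP", "NodePort" or "LoadBalancer"
	ClusterIP  string
	IngressIPs []string // load-balancer ingress IPs, if any
	Selector   string
	Age        string
	Ports      []port
}

// externalIP mirrors the idea behind getServiceExternalIP: the EXTERNAL_IP
// column is driven by the service type rather than by extra output lines.
func externalIP(s service) string {
	switch s.Type {
	case "ClusterIP":
		return "<none>"
	case "NodePort":
		return "nodes"
	case "LoadBalancer":
		return strings.Join(s.IngressIPs, ",")
	}
	return "unknown"
}

// portString folds every port into one PORT(S) cell, as makePortString does.
func portString(ports []port) string {
	pieces := make([]string, len(ports))
	for i, p := range ports {
		pieces[i] = fmt.Sprintf("%d/%s", p.Port, p.Protocol)
	}
	return strings.Join(pieces, ",")
}

func main() {
	svc := service{
		Name:       "nginxsvc",
		Type:       "LoadBalancer",
		ClusterIP:  "10.179.252.126",
		IngressIPs: []string{"162.222.184.144"},
		Selector:   "run=nginx2",
		Age:        "13m",
		Ports:      []port{{80, "TCP"}, {81, "TCP"}, {82, "TCP"}},
	}
	// One tab-separated row per service, no continuation lines.
	fmt.Printf("%s\t%s\t%s\t%s\t%s\t%s\n",
		svc.Name, svc.ClusterIP, externalIP(svc), portString(svc.Ports), svc.Selector, svc.Age)
}
```

Running it prints one tab-separated row (`nginxsvc  10.179.252.126  162.222.184.144  80/TCP,81/TCP,82/TCP  run=nginx2  13m`), the same shape as the updated tables in the documentation hunks below.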
@@ -67,8 +67,8 @@ kubectl expose rc nginx --port=80
 This should print:

 ```console
-NAME      LABELS    SELECTOR    IP           PORT(S)
-nginx     <none>    run=nginx   <ip-addr>    80/TCP
+NAME      CLUSTER_IP     EXTERNAL_IP   PORT(S)   SELECTOR    AGE
+nginx     10.179.240.1   <none>        80/TCP    run=nginx   8d
 ```

 Hit the webserver:
@@ -151,11 +151,11 @@ kubectl expose rc nginx --port=80
 This should print:

 ```console
-NAME      LABELS      SELECTOR    IP           PORT(S)
-nginx     run=nginx   run=nginx   <ip-addr>    80/TCP
+NAME      CLUSTER_IP    EXTERNAL_IP   PORT(S)   SELECTOR    AGE
+nginx     10.0.93.211   <none>        80/TCP    run=nginx   1h
 ```

-If ip-addr is blank run the following command to obtain it. Know issue #10836
+If `CLUSTER_IP` is blank, run the following command to obtain it. Known issue #10836

 ```sh
 kubectl get svc nginx
@@ -145,15 +145,11 @@ $ kubectl get --all-namespaces services
 should show a set of [services](../user-guide/services.md) that look something like this:

 ```console
-NAMESPACE     NAME                  LABELS                                                                            SELECTOR                IP(S)          PORT(S)
-default       kubernetes            component=apiserver,provider=kubernetes                                           <none>                  10.0.0.1       443/TCP
-kube-system   kube-dns              k8s-app=kube-dns,kubernetes.io/cluster-service=true,kubernetes.io/name=KubeDNS   k8s-app=kube-dns        10.0.0.10      53/UDP
-                                                                                                                                                             53/TCP
-kube-system   kube-ui               k8s-app=kube-ui,kubernetes.io/cluster-service=true,kubernetes.io/name=KubeUI     k8s-app=kube-ui         10.0.59.25     80/TCP
-kube-system   monitoring-grafana    kubernetes.io/cluster-service=true,kubernetes.io/name=Grafana                    k8s-app=influxGrafana   10.0.41.246    80/TCP
-kube-system   monitoring-heapster   kubernetes.io/cluster-service=true,kubernetes.io/name=Heapster                   k8s-app=heapster        10.0.59.48     80/TCP
-kube-system   monitoring-influxdb   kubernetes.io/cluster-service=true,kubernetes.io/name=InfluxDB                   k8s-app=influxGrafana   10.0.210.156   8083/TCP
-                                                                                                                                                            8086/TCP
+NAMESPACE     NAME         CLUSTER_IP   EXTERNAL_IP   PORT(S)         SELECTOR           AGE
+default       kubernetes   10.0.0.1     <none>        443/TCP         <none>             1d
+kube-system   kube-dns     10.0.0.2     <none>        53/TCP,53/UDP   k8s-app=kube-dns   1d
+kube-system   kube-ui      10.0.0.3     <none>        80/TCP          k8s-app=kube-ui    1d
+...                                                   8086/TCP
 ```

 Similarly, you can take a look at the set of [pods](../user-guide/pods.md) that were created during cluster startup.
@@ -227,7 +227,7 @@ $ ./cluster/kubectl.sh get pods
 NAME         READY     STATUS    RESTARTS   AGE

 $ ./cluster/kubectl.sh get services
-NAME         LABELS    SELECTOR   IP(S)     PORT(S)
+NAME         CLUSTER_IP   EXTERNAL_IP   PORT(S)   SELECTOR   AGE

 $ ./cluster/kubectl.sh get replicationcontrollers
 CONTROLLER   CONTAINER(S)   IMAGE(S)   SELECTOR   REPLICAS
@@ -282,11 +282,8 @@ my-nginx-gr3hh 1/1 Running 0 1m
 my-nginx-xql4j   1/1       Running   0          1m

 $ ./cluster/kubectl.sh get services
-NAME         LABELS    SELECTOR   IP(S)     PORT(S)
-
-$ ./cluster/kubectl.sh get replicationcontrollers
-CONTROLLER   CONTAINER(S)   IMAGE(S)   SELECTOR       REPLICAS
-my-nginx     my-nginx       nginx      run=my-nginx   3
+NAME       CLUSTER_IP   EXTERNAL_IP   PORT(S)   SELECTOR       AGE
+my-nginx   10.0.0.1     <none>        80/TCP    run=my-nginx   1h
 ```

 We did not start any services, hence there are none listed. But we see three replicas displayed properly.
@@ -133,7 +133,7 @@ Check your Service:

 ```console
 $ kubectl get svc
-NAME         CLUSTER IP       EXTERNAL IP       PORT(S)                SELECTOR     AGE
+NAME         CLUSTER_IP       EXTERNAL_IP       PORT(S)                SELECTOR     AGE
 kubernetes   10.179.240.1     <none>            443/TCP                <none>       8d
 nginxsvc     10.179.252.126   122.222.183.144   80/TCP,81/TCP,82/TCP   run=nginx2   11m
 ```
@@ -196,7 +196,7 @@ Kubernetes offers a DNS cluster addon Service that uses skydns to automatically

 ```console
 $ kubectl get services kube-dns --namespace=kube-system
-NAME       CLUSTER IP      EXTERNAL IP   PORT(S)         SELECTOR           AGE
+NAME       CLUSTER_IP      EXTERNAL_IP   PORT(S)         SELECTOR           AGE
 kube-dns   10.179.240.10   <none>        53/UDP,53/TCP   k8s-app=kube-dns   8d
 ```

@@ -413,7 +413,8 @@ Lets now recreate the Service to use a cloud load balancer, just change the `Typ
 $ kubectl delete rc, svc -l app=nginx
 $ kubectl create -f ./nginx-app.yaml
 $ kubectl get svc nginxsvc
-NAME       CLUSTER IP       EXTERNAL IP       PORT(S)                SELECTOR     AGE
+NAME       CLUSTER_IP       EXTERNAL_IP       PORT(S)                SELECTOR     AGE
 nginxsvc   10.179.252.126   162.222.184.144   80/TCP,81/TCP,82/TCP   run=nginx2   13m

 $ curl https://162.22.184.144 -k
@@ -421,7 +421,8 @@ $ curl https://162.22.184.144 -k
 <title>Welcome to nginx!</title>
 ```

-You can generally tell the external IP of the service, since it will be the one that doesn't start with a `10.*`
+The IP address in the `EXTERNAL_IP` column is the one that is available on the public internet. The `CLUSTER_IP` is only available inside your
+cluster/private cloud network.

 ## What's next?

@@ -209,16 +209,16 @@ walk-through - you can use your own `Service`'s details here.

 ```console
 $ kubectl expose rc hostnames --port=80 --target-port=9376
-NAME        LABELS          SELECTOR        IP(S)        PORT(S)
-hostnames   app=hostnames   app=hostnames                80/TCP
+NAME        CLUSTER_IP   EXTERNAL_IP   PORT(S)   SELECTOR        AGE
+hostnames   10.0.0.1     <none>        80/TCP    run=hostnames   1h
 ```

 And read it back, just to be sure:

 ```console
 $ kubectl get svc hostnames
-NAME        LABELS          SELECTOR        IP(S)        PORT(S)
-hostnames   app=hostnames   app=hostnames   10.0.1.175   80/TCP
+NAME        CLUSTER_IP   EXTERNAL_IP   PORT(S)   SELECTOR        AGE
+hostnames   10.0.0.1     <none>        80/TCP    run=hostnames   1h
 ```

 As before, this is the same as if you had started the `Service` with YAML:
@@ -74,8 +74,6 @@ CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS
 nginx-app    nginx-app      nginx      run=nginx-app   1
 # expose a port through with a service
 $ kubectl expose rc nginx-app --port=80 --name=nginx-http
-NAME         LABELS          SELECTOR        IP(S)     PORT(S)
-nginx-http   run=nginx-app   run=nginx-app             80/TCP
 ```

 With kubectl, we create a [replication controller](replication-controller.md) which will make sure that N pods are running nginx (where N is the number of replicas stated in the spec, which defaults to 1). We also create a [service](services.md) with a selector that matches the replication controller's selector. See the [Quick start](quick-start.md) for more information.
@@ -107,9 +107,9 @@ mypod 1/1 Running 0 1h

 $ kubectl create -f docs/user-guide/persistent-volumes/simpletest/service.json
 $ kubectl get services
-NAME              LABELS                                    SELECTOR            IP(S)        PORT(S)
-frontendservice   <none>                                    name=frontendhttp   10.0.0.241   3000/TCP
-kubernetes        component=apiserver,provider=kubernetes   <none>              10.0.0.2     443/TCP
+NAME              CLUSTER_IP   EXTERNAL_IP   PORT(S)    SELECTOR            AGE
+frontendservice   10.0.0.241   <none>        3000/TCP   name=frontendhttp   1d
+kubernetes        10.0.0.2     <none>        443/TCP    <none>              2d
 ```

 ## Next steps
@@ -76,17 +76,20 @@ Through integration with some cloud providers (for example Google Compute Engine

 ```console
 $ kubectl expose rc my-nginx --port=80 --type=LoadBalancer
-NAME       LABELS         SELECTOR       IP(S)     PORT(S)
-my-nginx   run=my-nginx   run=my-nginx             80/TCP
+NAME       CLUSTER_IP     EXTERNAL_IP   PORT(S)   SELECTOR    AGE
+my-nginx   10.179.240.1   <none>        80/TCP    run=nginx   8d
 ```

 To find the public IP address assigned to your application, execute:

 ```console
-$ kubectl get svc my-nginx -o json | grep \"ip\"
-    "ip": "130.111.122.213"
+$ kubectl get svc my-nginx
+NAME       CLUSTER_IP     EXTERNAL_IP   PORT(S)   SELECTOR    AGE
+my-nginx   10.179.240.1   25.1.2.3      80/TCP    run=nginx   8d
 ```

+You may need to wait for a minute or two for the external IP address to be provisioned.
+
 In order to access your nginx landing page, you also have to make sure that traffic from external IPs is allowed. Do this by opening a [firewall to allow traffic on port 80](services-firewalls.md).

 ## Killing the application
@@ -113,8 +113,8 @@ Once that's up you can list the service in the cluster:

 ```sh
 $ kubectl get service dns-backend
-NAME          LABELS    SELECTOR           IP(S)          PORT(S)
-dns-backend   <none>    name=dns-backend   10.0.236.129   8000/TCP
+NAME          CLUSTER_IP   EXTERNAL_IP   PORT(S)    SELECTOR           AGE
+dns-backend   10.0.2.3     <none>        8000/TCP   name=dns-backend   1d
 ```

 Again, repeat the same process for prod namespace:
@@ -123,8 +123,8 @@ Again, repeat the same process for prod namespace:
 $ kubectl config use-context prod
 $ kubectl create -f examples/cluster-dns/dns-backend-service.yaml
 $ kubectl get service dns-backend
-NAME          LABELS    SELECTOR           IP(S)         PORT(S)
-dns-backend   <none>    name=dns-backend   10.0.35.246   8000/TCP
+NAME          CLUSTER_IP   EXTERNAL_IP   PORT(S)    SELECTOR           AGE
+dns-backend   10.0.2.4     <none>        8000/TCP   name=dns-backend   1d
 ```

 ### Step Four: Create client pod in one namespace
@@ -117,8 +117,8 @@ Services find the containers to load balance based on pod labels. The pod that y

 ```console
 $ kubectl get services
-NAME           LABELS                  SELECTOR                IP(S)        PORT(S)
-redis-master   app=redis,role=master   app=redis,role=master   10.0.136.3   6379/TCP
+NAME           CLUSTER_IP   EXTERNAL_IP   PORT(S)    SELECTOR                AGE
+redis-master   10.0.136.3   <none>        6379/TCP   app=redis,role=master   1h
 ...
 ```

@@ -183,9 +183,9 @@ Just like the master, we want to have a service to proxy connections to the read

 ```console
 $ kubectl get services
-NAME           LABELS                  SELECTOR                IP(S)        PORT(S)
-redis-master   app=redis,role=master   app=redis,role=master   10.0.136.3   6379/TCP
-redis-slave    app=redis,role=slave    app=redis,role=slave    10.0.21.92   6379/TCP
+NAME           CLUSTER_IP   EXTERNAL_IP   PORT(S)    SELECTOR                AGE
+redis-master   10.0.136.3   <none>        6379/TCP   app=redis,role=master   1h
+redis-slave    10.0.21.92   <none>        6379/TCP   app=redis,role=slave    1h
 ...
 ```

@@ -246,11 +246,10 @@ Just like the others, we create a service to group the guestbook pods but this t

 ```
 $ kubectl get services
-NAME           LABELS                  SELECTOR                IP(S)          PORT(S)
-guestbook      app=guestbook           app=guestbook           10.0.217.218   3000/TCP
-                                                               146.148.81.8
-redis-master   app=redis,role=master   app=redis,role=master   10.0.136.3     6379/TCP
-redis-slave    app=redis,role=slave    app=redis,role=slave    10.0.21.92     6379/TCP
+NAME           CLUSTER_IP     EXTERNAL_IP    PORT(S)    SELECTOR                AGE
+guestbook      10.0.217.218   146.148.81.8   3000/TCP   app=guestbook           1h
+redis-master   10.0.136.3     <none>         6379/TCP   app=redis,role=master   1h
+redis-slave    10.0.21.92     <none>         6379/TCP   app=redis,role=slave    1h
 ...
 ```

@@ -235,8 +235,9 @@ Then check the list of services, which should include the redis-master:

 ```console
 $ kubectl get services
-NAME           LABELS              SELECTOR            IP             PORT
-redis-master   name=redis-master   name=redis-master   10.0.246.242   6379
+NAME           CLUSTER_IP   EXTERNAL_IP   PORT(S)    SELECTOR                AGE
+redis-master   10.0.136.3   <none>        6379/TCP   app=redis,role=master   1h
+...
 ```

 This will cause all pods to see the redis master apparently running on <ip>:6379. A service can map an incoming port to any `targetPort` in the backend pod. Once created, the service proxy on each node is configured to set up a proxy on the specified port (in this case port 6379).
@@ -358,9 +359,9 @@ $ kubectl create -f examples/guestbook/redis-slave-service.yaml
 services/redis-slave

 $ kubectl get services
-NAME           LABELS              SELECTOR            IP             PORT
-redis-master   name=redis-master   name=redis-master   10.0.246.242   6379
-redis-slave    name=redis-slave    name=redis-slave    10.0.72.62     6379
+NAME           CLUSTER_IP   EXTERNAL_IP   PORT(S)    SELECTOR                AGE
+redis-master   10.0.136.3   <none>        6379/TCP   app=redis,role=master   1h
+redis-slave    10.0.21.92   <none>        6379/TCP   app=redis,role=slave    1h
 ```

 ### Step Five: Create the frontend replicated pods
@@ -525,10 +526,10 @@ Then, list all your services again:

 ```console
 $ kubectl get services
-NAME           LABELS              SELECTOR            IP             PORT(S)
-frontend       name=frontend       name=frontend       10.0.93.211    80/TCP
-redis-master   name=redis-master   name=redis-master   10.0.246.242   6379/TCP
-redis-slave    name=redis-slave    name=redis-slave    10.0.72.62     6379/TCP
+NAME           CLUSTER_IP    EXTERNAL_IP   PORT(S)    SELECTOR                AGE
+frontend       10.0.93.211   <none>        80/TCP     name=frontend           1h
+redis-master   10.0.136.3    <none>        6379/TCP   app=redis,role=master   1h
+redis-slave    10.0.21.92    <none>        6379/TCP   app=redis,role=slave    1h
 ```


@@ -544,11 +545,10 @@ If the `LoadBalancer` specification is used, it can take a short period for an e

 ```console
 $ kubectl get services
-NAME           LABELS              SELECTOR            IP               PORT(S)
-frontend       name=frontend       name=frontend       10.0.93.211      80/TCP
-                                                       130.211.135.84
-redis-master   name=redis-master   name=redis-master   10.0.246.242     6379/TCP
-redis-slave    name=redis-slave    name=redis-slave    10.0.72.62       6379/TCP
+NAME           CLUSTER_IP    EXTERNAL_IP      PORT(S)    SELECTOR                AGE
+frontend       10.0.93.211   130.211.188.51   80/TCP     name=frontend           1h
+redis-master   10.0.136.3    <none>           6379/TCP   app=redis,role=master   1h
+redis-slave    10.0.21.92    <none>           6379/TCP   app=redis,role=slave    1h
 ```

 Once you've exposed the service to an external IP, visit the IP to see your guestbook in action. E.g., `http://130.211.188.51:80` in the example above.
@@ -302,8 +302,10 @@ $ kubectl get services

 Then, find the external IP for your WordPress service by running:

-```
-$ kubectl get services/wpfrontend --template="{{range .status.loadBalancer.ingress}} {{.ip}} {{end}}"
+```console
+$ kubectl get services/wpfrontend
+NAME         CLUSTER_IP   EXTERNAL_IP   PORT(S)   SELECTOR   AGE
+wpfrontend   10.0.0.2     1.2.3.4       80/TCP    ...        ...
 ```

 or by listing the forwarding rules for your project:
@@ -247,10 +247,12 @@ $ kubectl create -f examples/phabricator/phabricator-service.json
 phabricator
 ```

-To play with the service itself, find the external IP of the load balancer:
+To play with the service itself, find the `EXTERNAL_IP` of the load balancer:

-```sh
-$ kubectl get services phabricator -o template --template='{{(index .status.loadBalancer.ingress 0).ip}}{{"\n"}}'
+```console
+$ kubectl get services phabricator
+NAME          CLUSTER_IP   EXTERNAL_IP   PORT(S)    SELECTOR   AGE
+phabricator   10.0.0.2     1.2.3.4       8080/TCP   ...        ...
 ```

 and then visit port 80 of that IP address.
@@ -56,9 +56,9 @@ check out:

 ```sh
 $kubectl get services
-NAME               LABELS        SELECTOR       IP(S)         PORT(S)
+NAME               CLUSTER_IP    EXTERNAL_IP   PORT(S)     SELECTOR       AGE
+rethinkdb-driver   10.0.27.114   <none>        28015/TCP   db=rethinkdb   10m
 [...]
-rethinkdb-driver   db=influxdb   db=rethinkdb   10.0.27.114   28015/TCP
 ```

 **Step 2**
@@ -115,13 +115,12 @@ kubectl create -f examples/rethinkdb/admin-service.yaml

 find the service

-```sh
+```console
 $kubectl get services
-NAME               LABELS        SELECTOR                  IP(S)         PORT(S)
+NAME               CLUSTER_IP    EXTERNAL_IP      PORT(S)     SELECTOR                  AGE
 [...]
-rethinkdb-admin    db=influxdb   db=rethinkdb,role=admin   10.0.131.19   8080/TCP
-                                                           104.197.19.120
-rethinkdb-driver   db=influxdb   db=rethinkdb              10.0.27.114   28015/TCP
+rethinkdb-admin    10.0.131.19   104.197.19.120   8080/TCP    db=rethinkdb,role=admin   10m
+rethinkdb-driver   10.0.27.114   <none>           28015/TCP   db=rethinkdb              20m
 ```

 We request an external load balancer in the [admin-service.yaml](admin-service.yaml) file:
@@ -91,11 +91,11 @@ zookeeper 1/1 Running 0 43s

 ### Check to see if ZooKeeper is accessible

-```sh
+```console
 $ kubectl get services
-NAME         LABELS                                    SELECTOR         IP(S)            PORT(S)
-kubernetes   component=apiserver,provider=kubernetes   <none>           10.254.0.2       443
-zookeeper    name=zookeeper                            name=zookeeper   10.254.139.141   2181
+NAME         CLUSTER_IP       EXTERNAL_IP   PORT(S)    SELECTOR         AGE
+zookeeper    10.254.139.141   <none>        2181/TCP   name=zookeeper   10m
+kubernetes   10.0.0.2         <none>        443/TCP    <none>           1d

 $ echo ruok | nc 10.254.139.141 2181; echo
 imok
@@ -391,11 +391,9 @@ func ExamplePrintServiceWithNamespacesAndLabels() {
 		fmt.Printf("Unexpected error: %v", err)
 	}
 	// Output:
-	// |NAMESPACE   NAME   LABELS               SELECTOR   IP(S)      PORT(S)           AGE   L1|
-	// |ns1         svc1   l1=value             s=magic    10.1.1.1   53/UDP            10y   value|
-	// |                                                              53/TCP            |
-	// |ns2         svc2   l1=dolla-bill-yall   s=kazam    10.1.1.2   80/TCP            10y   dolla-bill-yall|
-	// |                                                              8080/TCP          |
+	// |NAMESPACE   NAME   CLUSTER_IP   EXTERNAL_IP   PORT(S)           SELECTOR   AGE   L1|
+	// |ns1         svc1   10.1.1.1     unknown       53/UDP,53/TCP     s=magic    10y   value|
+	// |ns2         svc2   10.1.1.2     unknown       80/TCP,8080/TCP   s=kazam    10y   dolla-bill-yall|
 	// ||
 }

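The expected table lives in the `// Output:` comment because `go test` runs `Example*` functions and compares their standard output against that comment, so the fixture has to change in lockstep with the printer. A tiny illustration of the mechanism (not code from this repository; the package and example names are made up):

```go
package printers_test

import "fmt"

// `go test` executes this example and diffs its stdout against the
// "Output:" comment below; a mismatch fails the test run.
func Example_serviceRow() {
	fmt.Println("svc1   10.1.1.1   unknown   53/UDP,53/TCP   s=magic   10y")
	// Output:
	// svc1   10.1.1.1   unknown   53/UDP,53/TCP   s=magic   10y
}
```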
@@ -259,7 +259,7 @@ func (h *HumanReadablePrinter) HandledResources() []string {
 var podColumns = []string{"NAME", "READY", "STATUS", "RESTARTS", "AGE"}
 var podTemplateColumns = []string{"TEMPLATE", "CONTAINER(S)", "IMAGE(S)", "PODLABELS"}
 var replicationControllerColumns = []string{"CONTROLLER", "CONTAINER(S)", "IMAGE(S)", "SELECTOR", "REPLICAS", "AGE"}
-var serviceColumns = []string{"NAME", "LABELS", "SELECTOR", "IP(S)", "PORT(S)", "AGE"}
+var serviceColumns = []string{"NAME", "CLUSTER_IP", "EXTERNAL_IP", "PORT(S)", "SELECTOR", "AGE"}
 var endpointColumns = []string{"NAME", "ENDPOINTS", "AGE"}
 var nodeColumns = []string{"NAME", "LABELS", "STATUS", "AGE"}
 var eventColumns = []string{"FIRSTSEEN", "LASTSEEN", "COUNT", "NAME", "KIND", "SUBOBJECT", "REASON", "SOURCE", "MESSAGE"}
@@ -557,29 +557,52 @@ func printReplicationControllerList(list *api.ReplicationControllerList, w io.Wr
 	return nil
 }

+func getServiceExternalIP(svc *api.Service) string {
+	switch svc.Spec.Type {
+	case api.ServiceTypeClusterIP:
+		return "<none>"
+	case api.ServiceTypeNodePort:
+		return "nodes"
+	case api.ServiceTypeLoadBalancer:
+		ingress := svc.Status.LoadBalancer.Ingress
+		result := []string{}
+		for i := range ingress {
+			if ingress[i].IP != "" {
+				result = append(result, ingress[i].IP)
+			}
+		}
+		return strings.Join(result, ",")
+	}
+	return "unknown"
+}
+
+func makePortString(ports []api.ServicePort) string {
+	pieces := make([]string, len(ports))
+	for ix := range ports {
+		port := &ports[ix]
+		pieces[ix] = fmt.Sprintf("%d/%s", port.Port, port.Protocol)
+	}
+	return strings.Join(pieces, ",")
+}
+
 func printService(svc *api.Service, w io.Writer, withNamespace bool, wide bool, columnLabels []string) error {
 	name := svc.Name
 	namespace := svc.Namespace

-	ips := []string{svc.Spec.ClusterIP}
-	ingress := svc.Status.LoadBalancer.Ingress
-	for i := range ingress {
-		if ingress[i].IP != "" {
-			ips = append(ips, ingress[i].IP)
-		}
-	}
+	internalIP := svc.Spec.ClusterIP
+	externalIP := getServiceExternalIP(svc)

 	if withNamespace {
 		if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil {
 			return err
 		}
 	}
-	if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%d/%s\t%s",
+	if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s",
 		name,
-		formatLabels(svc.Labels),
+		internalIP,
+		externalIP,
+		makePortString(svc.Spec.Ports),
 		formatLabels(svc.Spec.Selector),
-		ips[0], svc.Spec.Ports[0].Port, svc.Spec.Ports[0].Protocol,
 		translateTimestamp(svc.CreationTimestamp),
 	); err != nil {
 		return err
@@ -587,33 +610,6 @@ func printService(svc *api.Service, w io.Writer, withNamespace bool, wide bool,
 	if _, err := fmt.Fprint(w, appendLabels(svc.Labels, columnLabels)); err != nil {
 		return err
 	}
-
-	extraLinePrefix := "\t\t\t"
-	if withNamespace {
-		extraLinePrefix = "\t\t\t\t"
-	}
-	count := len(svc.Spec.Ports)
-	if len(ips) > count {
-		count = len(ips)
-	}
-	for i := 1; i < count; i++ {
-		ip := ""
-		if len(ips) > i {
-			ip = ips[i]
-		}
-		port := ""
-		if len(svc.Spec.Ports) > i {
-			port = fmt.Sprintf("%d/%s", svc.Spec.Ports[i].Port, svc.Spec.Ports[i].Protocol)
-		}
-		// Lay out additional ports.
-		if _, err := fmt.Fprintf(w, "%s%s\t%s", extraLinePrefix, ip, port); err != nil {
-			return err
-		}
-		if _, err := fmt.Fprint(w, appendLabelTabs(columnLabels)); err != nil {
-			return err
-		}
-	}
-
 	return nil
 }

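`printService` now writes nothing but tab-separated cells and, with the removal above, exactly one row per service. The aligned columns seen in the documentation hunks come from the table-aligning writer that the human-readable printer wraps around `w`; a `text/tabwriter` with roughly these settings is my assumption here, so treat the numbers as illustrative:

```go
package main

import (
	"fmt"
	"os"
	"text/tabwriter"
)

func main() {
	// Assumption: the printer's io.Writer is a tab-aligning writer, so one
	// Fprintf of tab-separated cells per service renders as one aligned row.
	w := tabwriter.NewWriter(os.Stdout, 10, 4, 3, ' ', 0)
	fmt.Fprintln(w, "NAME\tCLUSTER_IP\tEXTERNAL_IP\tPORT(S)\tSELECTOR\tAGE")
	fmt.Fprintln(w, "redis-master\t10.0.136.3\t<none>\t6379/TCP\tapp=redis,role=master\t1h")
	fmt.Fprintln(w, "frontend\t10.0.93.211\t130.211.188.51\t80/TCP\tname=frontend\t1h")
	w.Flush()
}
```

Because each service ends up as a single line, the updated test further down can simply assert that the printed output contains exactly one newline per service.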
@@ -984,7 +980,7 @@ func appendLabels(itemLabels map[string]string, columnLabels []string) string {
 		if il, ok := itemLabels[cl]; ok {
 			buffer.WriteString(fmt.Sprint(il))
 		} else {
-			buffer.WriteString("<n/a>")
+			buffer.WriteString("<none>")
 		}
 	}
 	buffer.WriteString("\n")
@@ -632,6 +632,7 @@ func TestPrintHumanReadableService(t *testing.T) {
 		{
 			Spec: api.ServiceSpec{
 				ClusterIP: "1.2.3.4",
+				Type:      "LoadBalancer",
 				Ports: []api.ServicePort{
 					{
 						Port: 80,
@@ -674,6 +675,7 @@ func TestPrintHumanReadableService(t *testing.T) {
 		{
 			Spec: api.ServiceSpec{
 				ClusterIP: "1.2.3.4",
+				Type:      "LoadBalancer",
 				Ports: []api.ServicePort{
 					{
 						Port: 80,
@@ -702,6 +704,7 @@ func TestPrintHumanReadableService(t *testing.T) {
 		{
 			Spec: api.ServiceSpec{
 				ClusterIP: "1.2.3.4",
+				Type:      "LoadBalancer",
 				Ports: []api.ServicePort{
 					{
 						Port: 80,
@@ -758,13 +761,9 @@ func TestPrintHumanReadableService(t *testing.T) {
 				t.Errorf("expected to contain port: %s, but doesn't: %s", portSpec, output)
 			}
 		}
-		// Max of # ports and (# public ip + cluster ip)
-		count := len(svc.Spec.Ports)
-		if len(svc.Status.LoadBalancer.Ingress)+1 > count {
-			count = len(svc.Status.LoadBalancer.Ingress) + 1
-		}
-		if count != strings.Count(output, "\n") {
-			t.Errorf("expected %d newlines, found %d", count, strings.Count(output, "\n"))
+		// Each service should print on one line
+		if 1 != strings.Count(output, "\n") {
+			t.Errorf("expected a single newline, found %d", strings.Count(output, "\n"))
 		}
 	}
 }