Merge pull request #12621 from satnam6502/elasticsearch-example

Update Elasticsearch example to remove use of secrets
Robert Bailey 2015-08-18 11:49:29 -07:00
commit a2caee0d33
7 changed files with 86 additions and 120 deletions

View File

@@ -1,8 +1,8 @@
 .PHONY: elasticsearch_discovery build push all
-TAG = 1.0
+TAG = 1.1
-build:
+build: elasticsearch_discovery
 	docker build -t kubernetes/elasticsearch:$(TAG) .
 push:

View File

@@ -42,9 +42,7 @@ image detects other Elasticsearch [pods](../../docs/user-guide/pods.md) running
 label selector. The detected instances are used to form a list of peer hosts which
 are used as part of the unicast discovery mechanism for Elasticsearch. The detection
 of the peer nodes is done by a program which communicates with the Kubernetes API
-server to get a list of matching Elasticsearch pods. To enable authenticated
-communication this image needs a [secret](../../docs/user-guide/secrets.md) to be mounted at `/etc/apiserver-secret`
-with the basic authentication username and password.
+server to get a list of matching Elasticsearch pods.
 
 Here is an example replication controller specification that creates 4 instances of Elasticsearch.
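An aside on the mechanism described in the hunk above: the peer pods found through the label selector ultimately have to be handed to Elasticsearch as its unicast host list. The sketch below is illustrative only; it assumes the stock Elasticsearch 1.x setting `discovery.zen.ping.unicast.hosts` and a made-up helper `unicastHostsSetting`, since the exact wiring inside the kubernetes/elasticsearch image is not part of this diff.

```go
// Illustrative sketch (not code from this PR): render the peer pod IPs
// discovered via the Kubernetes API into Elasticsearch's zen unicast
// host list. The setting name is the standard Elasticsearch 1.x one; how
// the image actually injects it is not shown in this diff.
package main

import (
	"fmt"
	"strings"
)

// unicastHostsSetting builds a discovery.zen.ping.unicast.hosts line for
// elasticsearch.yml from peer IPs and the transport port (9300 here).
func unicastHostsSetting(peerIPs []string, transportPort int) string {
	hosts := make([]string, 0, len(peerIPs))
	for _, ip := range peerIPs {
		hosts = append(hosts, fmt.Sprintf("%s:%d", ip, transportPort))
	}
	return fmt.Sprintf("discovery.zen.ping.unicast.hosts: [%s]", strings.Join(hosts, ", "))
}

func main() {
	// Example: two peers reported by the discovery program.
	fmt.Println(unicastHostsSetting([]string{"10.244.1.5", "10.244.2.7"}, 9300))
}
```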
@@ -69,7 +67,7 @@ spec:
     spec:
       containers:
       - name: es
-        image: kubernetes/elasticsearch:1.0
+        image: kubernetes/elasticsearch:1.1
         env:
         - name: "CLUSTER_NAME"
           value: "mytunes-db"
@@ -82,14 +80,6 @@ spec:
           containerPort: 9200
         - name: es-transport
           containerPort: 9300
-        volumeMounts:
-        - name: apiserver-secret
-          mountPath: /etc/apiserver-secret
-          readOnly: true
-      volumes:
-      - name: apiserver-secret
-        secret:
-          secretName: apiserver-secret
 ```
 
 [Download example](music-rc.yaml)
@@ -104,55 +94,24 @@ The `NAMESPACE` variable identifies the namespace
 to be used to search for Elasticsearch pods and this should be the same as the namespace specified
 for the replication controller (in this case `mytunes`).
 
-Before creating pods with the replication controller a secret containing the bearer authentication token
-should be set up.
-
-<!-- BEGIN MUNGE: EXAMPLE apiserver-secret.yaml -->
+Replace `NAMESPACE` with the actual namespace to be used. In this example we shall use
+the namespace `mytunes`.
 
 ```yaml
+kind: Namespace
 apiVersion: v1
-kind: Secret
 metadata:
-  name: apiserver-secret
-  namespace: NAMESPACE
-data:
-  token: "TOKEN"
+  name: mytunes
+  labels:
+    name: mytunes
 ```
 
-[Download example](apiserver-secret.yaml)
-<!-- END MUNGE: EXAMPLE apiserver-secret.yaml -->
-
-Replace `NAMESPACE` with the actual namespace to be used and `TOKEN` with the basic64 encoded
-versions of the bearer token reported by `kubectl config view` e.g.
+First, let's create the namespace:
 
 ```console
-$ kubectl config view
-...
-- name: kubernetes-logging_kubernetes-basic-auth
-...
-    token: yGlDcMvSZPX4PyP0Q5bHgAYgi1iyEHv2
-...
-$ echo yGlDcMvSZPX4PyP0Q5bHgAYgi1iyEHv2 | base64
-eUdsRGNNdlNaUFg0UHlQMFE1YkhnQVlnaTFpeUVIdjIK=
-```
-
-resulting in the file:
-
-```yaml
-apiVersion: v1
-kind: Secret
-metadata:
-  name: apiserver-secret
-  namespace: mytunes
-data:
-  token: "eUdsRGNNdlNaUFg0UHlQMFE1YkhnQVlnaTFpeUVIdjIK="
-```
-
-which can be used to create the secret in your namespace:
-
-```console
-kubectl create -f examples/elasticsearch/apiserver-secret.yaml --namespace=mytunes
-secrets/apiserver-secret
+$ kubectl create -f examples/elasticsearch/mytunes-namespace.yaml
+namespaces/mytunes
 ```
 
 Now you are ready to create the replication controller which will then create the pods:
@@ -162,6 +121,19 @@ $ kubectl create -f examples/elasticsearch/music-rc.yaml --namespace=mytunes
 replicationcontrollers/music-db
 ```
 
+Let's check to see if the replication controller and pods are running:
+
+```console
+$ kubectl get rc,pods --namespace=mytunes
+CONTROLLER   CONTAINER(S)   IMAGE(S)                       SELECTOR        REPLICAS
+music-db     es             kubernetes/elasticsearch:1.1   name=music-db   4
+NAME             READY     STATUS    RESTARTS   AGE
+music-db-5p46b   1/1       Running   0          34s
+music-db-8re0f   1/1       Running   0          34s
+music-db-eq8j0   1/1       Running   0          34s
+music-db-uq5px   1/1       Running   0          34s
+```
+
 It's also useful to have a [service](../../docs/user-guide/services.md) with a load balancer for accessing the Elasticsearch
 cluster.
@@ -195,29 +167,50 @@ $ kubectl create -f examples/elasticsearch/music-service.yaml --namespace=mytune
 services/music-server
 ```
 
+Let's check the status of the service:
+
+```console
+$ kubectl get service --namespace=mytunes
+NAME           LABELS          SELECTOR        IP(S)          PORT(S)
+music-server   name=music-db   name=music-db   10.0.185.179   9200/TCP
+```
+
+Although this service has an IP address `10.0.185.179` internal to the cluster, we don't yet have
+an external IP address provisioned. Let's wait a bit and try again...
+
+```console
+$ kubectl get service --namespace=mytunes
+NAME           LABELS          SELECTOR        IP(S)             PORT(S)
+music-server   name=music-db   name=music-db   10.0.185.179      9200/TCP
+                                               104.197.114.130
+```
+
+Now we have an external IP address `104.197.114.130` available for accessing the service
+from outside the cluster.
+
 Let's see what we've got:
 
 ```console
-$ kubectl get pods,rc,services,secrets --namespace=mytunes
+$ kubectl get pods,rc,services --namespace=mytunes
 NAME             READY     STATUS    RESTARTS   AGE
-music-db-cl4hw   1/1       Running   0          27m
-music-db-x8dbq   1/1       Running   0          27m
-music-db-xkebl   1/1       Running   0          27m
-music-db-ycjim   1/1       Running   0          27m
+music-db-5p46b   1/1       Running   0          7m
+music-db-8re0f   1/1       Running   0          7m
+music-db-eq8j0   1/1       Running   0          7m
+music-db-uq5px   1/1       Running   0          7m
 CONTROLLER   CONTAINER(S)   IMAGE(S)                       SELECTOR        REPLICAS
-music-db     es             kubernetes/elasticsearch:1.0   name=music-db   4
+music-db     es             kubernetes/elasticsearch:1.1   name=music-db   4
 NAME           LABELS          SELECTOR        IP(S)          PORT(S)
-music-server   name=music-db   name=music-db   10.0.45.177    9200/TCP
-                                               104.197.12.157
+music-server   name=music-db   name=music-db   10.0.185.179   9200/TCP
+                                               104.197.114.130
 NAME                  TYPE                                  DATA
-apiserver-secret      Opaque                                1
+default-token-gcilu   kubernetes.io/service-account-token   2
 ```
 
 This shows 4 instances of Elasticsearch running. After making sure that port 9200 is accessible for this cluster (e.g. using a firewall rule for Google Compute Engine) we can make queries via the service which will be fielded by the matching Elasticsearch pods.
 
 ```console
-$ curl 104.197.12.157:9200
+$ curl 104.197.114.130:9200
 {
   "status" : 200,
   "name" : "Warpath",
@@ -231,7 +224,7 @@ $ curl 104.197.12.157:9200
   },
   "tagline" : "You Know, for Search"
 }
-$ curl 104.197.12.157:9200
+$ curl 104.197.114.130:9200
 {
   "status" : 200,
   "name" : "Callisto",
@@ -250,7 +243,7 @@ $ curl 104.197.12.157:9200
 We can query the nodes to confirm that an Elasticsearch cluster has been formed.
 
 ```console
-$ curl 104.197.12.157:9200/_nodes?pretty=true
+$ curl 104.197.114.130:9200/_nodes?pretty=true
 {
   "cluster_name" : "mytunes-db",
   "nodes" : {
@@ -299,22 +292,22 @@ $ kubectl scale --replicas=10 replicationcontrollers music-db --namespace=mytune
 scaled
 $ kubectl get pods --namespace=mytunes
 NAME             READY     STATUS    RESTARTS   AGE
-music-db-063vy   1/1       Running   0          38s
-music-db-5ej4e   1/1       Running   0          38s
-music-db-dl43y   1/1       Running   0          38s
-music-db-lw1lo   1/1       Running   0          1m
-music-db-s8hq2   1/1       Running   0          38s
-music-db-t98iw   1/1       Running   0          38s
-music-db-u1ru3   1/1       Running   0          38s
-music-db-wnss2   1/1       Running   0          1m
-music-db-x7j2w   1/1       Running   0          1m
-music-db-zjqyv   1/1       Running   0          1m
+music-db-0n8rm   0/1       Running   0          9s
+music-db-4izba   1/1       Running   0          9s
+music-db-5dqes   0/1       Running   0          9s
+music-db-5p46b   1/1       Running   0          10m
+music-db-8re0f   1/1       Running   0          10m
+music-db-eq8j0   1/1       Running   0          10m
+music-db-p9ajw   0/1       Running   0          9s
+music-db-p9u1k   1/1       Running   0          9s
+music-db-rav1q   0/1       Running   0          9s
+music-db-uq5px   1/1       Running   0          10m
 ```
 
 Let's check to make sure that these 10 nodes are part of the same Elasticsearch cluster:
 
 ```console
-$ curl 104.197.12.157:9200/_nodes?pretty=true | grep name
+$ curl 104.197.114.130:9200/_nodes?pretty=true | grep name
 "cluster_name" : "mytunes-db",
   "name" : "Killraven",
   "name" : "Killraven",
@@ -371,4 +364,4 @@ $ curl 104.197.12.157:9200/_nodes?pretty=true | grep name
 <!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
 [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/examples/elasticsearch/README.md?pixel)]()
 <!-- END MUNGE: GENERATED_ANALYTICS -->

View File

@@ -1,8 +0,0 @@
-apiVersion: v1
-kind: Secret
-metadata:
-  name: apiserver-secret
-  namespace: NAMESPACE
-data:
-  token: "TOKEN"

View File

@@ -19,7 +19,6 @@ package main
 import (
 	"flag"
 	"fmt"
-	"os"
 	"strings"
 	"time"
@@ -31,8 +30,6 @@ import (
 )
 
 var (
-	token     = flag.String("token", "", "Bearer token for authentication to the API server.")
-	server    = flag.String("server", "", "The address and port of the Kubernetes API server")
 	namespace = flag.String("namespace", api.NamespaceDefault, "The namespace containing Elasticsearch pods")
 	selector  = flag.String("selector", "", "Selector (label query) for selecting Elasticsearch pods")
 )
@@ -40,26 +37,11 @@ var (
 func main() {
 	flag.Parse()
 	glog.Info("Elasticsearch discovery")
-	apiServer := *server
-	if apiServer == "" {
-		kubernetesService := os.Getenv("KUBERNETES_SERVICE_HOST")
-		if kubernetesService == "" {
-			glog.Fatalf("Please specify the Kubernetes server with --server")
-		}
-		apiServer = fmt.Sprintf("https://%s:%s", kubernetesService, os.Getenv("KUBERNETES_SERVICE_PORT"))
-	}
-	glog.Infof("Server: %s", apiServer)
 	glog.Infof("Namespace: %q", *namespace)
 	glog.Infof("selector: %q", *selector)
-	config := client.Config{
-		Host:        apiServer,
-		BearerToken: *token,
-		Insecure:    true,
-	}
-	c, err := client.New(&config)
+	c, err := client.NewInCluster()
 	if err != nil {
 		glog.Fatalf("Failed to make client: %v", err)
 	}
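Putting the README description and this change together, the discovery flow is now: run inside a pod, build a client from the service-account credentials that the kubelet mounts automatically, then list the Elasticsearch pods matching the label selector. Below is a rough, self-contained sketch of that flow using the present-day client-go API (`rest.InClusterConfig`, `kubernetes.NewForConfig`) rather than the 2015-era unversioned client whose `client.NewInCluster()` appears above; treat the import paths and calls as assumptions about the modern library, not as code from this PR.

```go
// Rough sketch of in-cluster pod discovery by label selector, using the
// modern client-go API as an assumption (the PR itself uses the older
// unversioned client and client.NewInCluster()).
package main

import (
	"context"
	"flag"
	"fmt"
	"log"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	namespace := flag.String("namespace", "default", "Namespace containing Elasticsearch pods")
	selector := flag.String("selector", "", "Label selector for Elasticsearch pods")
	flag.Parse()

	// In-cluster config reads the service-account token and CA certificate
	// mounted into the pod, so no separate secret has to be created.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		log.Fatalf("Failed to load in-cluster config: %v", err)
	}
	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		log.Fatalf("Failed to make client: %v", err)
	}

	// List the matching pods and print their IPs; these become the peer
	// host list for Elasticsearch unicast discovery.
	pods, err := clientset.CoreV1().Pods(*namespace).List(context.TODO(),
		metav1.ListOptions{LabelSelector: *selector})
	if err != nil {
		log.Fatalf("Failed to list pods: %v", err)
	}
	for _, pod := range pods.Items {
		if pod.Status.PodIP != "" {
			fmt.Println(pod.Status.PodIP)
		}
	}
}
```

Either way, the effect of the PR is the same: the automatically mounted service-account token takes the place of the hand-created `apiserver-secret`.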

View File

@@ -16,7 +16,7 @@ spec:
     spec:
       containers:
       - name: es
-        image: kubernetes/elasticsearch:1.0
+        image: kubernetes/elasticsearch:1.1
         env:
         - name: "CLUSTER_NAME"
           value: "mytunes-db"
@@ -29,11 +29,4 @@ spec:
           containerPort: 9200
         - name: es-transport
           containerPort: 9300
-        volumeMounts:
-        - name: apiserver-secret
-          mountPath: /etc/apiserver-secret
-          readOnly: true
-      volumes:
-      - name: apiserver-secret
-        secret:
-          secretName: apiserver-secret

View File

@@ -0,0 +1,6 @@
+kind: Namespace
+apiVersion: v1
+metadata:
+  name: mytunes
+  labels:
+    name: mytunes

View File

@@ -237,9 +237,9 @@ func TestExampleObjectSchemas(t *testing.T) {
 			"dapi-pod": &api.Pod{},
 		},
 		"../examples/elasticsearch": {
-			"apiserver-secret": nil,
+			"mytunes-namespace": &api.Namespace{},
 			"music-rc":          &api.ReplicationController{},
 			"music-service":     &api.Service{},
 		},
 		"../examples/explorer": {
 			"pod": &api.Pod{},