From 180798cfa4363e024d178e112bd2b6ddcfd61e54 Mon Sep 17 00:00:00 2001 From: Janet Kuo Date: Mon, 20 Jul 2015 15:46:20 -0700 Subject: [PATCH] Use example syncer tags instead of hard-coded examples in doc --- .../salt/fluentd-gcp/fluentd-gcp.yaml | 4 +- cmd/mungedocs/example_syncer.go | 4 +- docs/admin/namespaces/README.md | 5 ++ docs/getting-started-guides/logging.md | 44 ++++++--- docs/user-guide/downward-api.md | 5 ++ docs/user-guide/downward-api/dapi-pod.yaml | 4 +- docs/user-guide/logging.md | 25 +++--- docs/user-guide/pod.yaml | 2 +- docs/user-guide/simple-yaml.md | 10 +++ docs/user-guide/walkthrough/README.md | 5 ++ docs/user-guide/walkthrough/k8s201.md | 20 +++++ docs/user-guide/walkthrough/pod-redis.yaml | 4 +- .../pod-with-http-healthcheck.yaml | 28 +++--- .../walkthrough/replication-controller.yaml | 16 ++-- docs/user-guide/walkthrough/service.yaml | 12 +-- examples/cassandra/README.md | 89 ++++++++++++------- examples/celery-rabbitmq/README.md | 25 +++++- examples/elasticsearch/README.md | 22 ++++- examples/guestbook/README.md | 32 ++++++- examples/hazelcast/README.md | 16 +++- examples/mysql-wordpress-pd/README.md | 21 ++++- examples/phabricator/README.md | 17 +++- 22 files changed, 306 insertions(+), 104 deletions(-) diff --git a/cluster/saltbase/salt/fluentd-gcp/fluentd-gcp.yaml b/cluster/saltbase/salt/fluentd-gcp/fluentd-gcp.yaml index 6ea7adb2047..2e8087f8e47 100644 --- a/cluster/saltbase/salt/fluentd-gcp/fluentd-gcp.yaml +++ b/cluster/saltbase/salt/fluentd-gcp/fluentd-gcp.yaml @@ -12,8 +12,8 @@ spec: cpu: 100m memory: 200Mi env: - - name: "FLUENTD_ARGS" - value: "-qq" + - name: FLUENTD_ARGS + value: -qq volumeMounts: - name: varlog mountPath: /varlog diff --git a/cmd/mungedocs/example_syncer.go b/cmd/mungedocs/example_syncer.go index e0a48a249d9..b00c385c35f 100644 --- a/cmd/mungedocs/example_syncer.go +++ b/cmd/mungedocs/example_syncer.go @@ -63,7 +63,9 @@ func exampleContent(filePath, linkPath, fileType string) (content string, err er if err != nil { return content, err } - content = fmt.Sprintf("\n```%s\n%s\n```\n\n[Download example](%s)", fileType, string(dat), linkPath) + // remove leading and trailing spaces and newlines + trimmedFileContent := strings.TrimSpace(string(dat)) + content = fmt.Sprintf("\n```%s\n%s\n```\n\n[Download example](%s)", fileType, trimmedFileContent, linkPath) return } diff --git a/docs/admin/namespaces/README.md b/docs/admin/namespaces/README.md index 06819bdcf4b..806b888eba2 100644 --- a/docs/admin/namespaces/README.md +++ b/docs/admin/namespaces/README.md @@ -83,6 +83,8 @@ Let's create two new namespaces to hold our work. Use the file [`namespace-dev.json`](namespace-dev.json) which describes a development namespace: + + ```json { "kind": "Namespace", @@ -96,6 +98,9 @@ Use the file [`namespace-dev.json`](namespace-dev.json) which describes a develo } ``` +[Download example](namespace-dev.json) + + Create the development namespace using kubectl. 
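The walkthrough creates two namespaces, and the second one follows the same shape as `namespace-dev.json` above. A minimal sketch of that counterpart, written here as YAML rather than JSON for brevity — the `production` name is illustrative and assumes a production manifest analogous to the development file:

```yaml
# Illustrative counterpart to namespace-dev.json above; the "production"
# name is an assumption, not taken from this patch.
# Apply with: kubectl create -f <file>
kind: Namespace
apiVersion: v1
metadata:
  name: production
  labels:
    name: production
```

Either format works with `kubectl create -f`, since the API server accepts both JSON and YAML.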
```console diff --git a/docs/getting-started-guides/logging.md b/docs/getting-started-guides/logging.md index 63715f408a6..fabcbf93df8 100644 --- a/docs/getting-started-guides/logging.md +++ b/docs/getting-started-guides/logging.md @@ -58,20 +58,24 @@ This diagram shows four nodes created on a Google Compute Engine cluster with th To help explain how cluster level logging works let’s start off with a synthetic log generator pod specification [counter-pod.yaml](../../examples/blog-logging/counter-pod.yaml): + + ```yaml - apiVersion: v1 - kind: Pod - metadata: - name: counter - namespace: default - spec: - containers: - - name: count - image: ubuntu:14.04 - args: [bash, -c, - 'for ((i = 0; ; i++)); do echo "$i: $(date)"; sleep 1; done'] +apiVersion: v1 +kind: Pod +metadata: + name: counter +spec: + containers: + - name: count + image: ubuntu:14.04 + args: [bash, -c, + 'for ((i = 0; ; i++)); do echo "$i: $(date)"; sleep 1; done'] ``` +[Download example](../../examples/blog-logging/counter-pod.yaml) + + This pod specification has one container which runs a bash script when the container is born. This script simply writes out the value of a counter and the date once per second and runs indefinitely. Let’s create the pod in the default namespace. @@ -152,7 +156,9 @@ We’ve lost the log lines from the first invocation of the container in this po When a Kubernetes cluster is created with logging to Google Cloud Logging enabled, the system creates a pod called `fluentd-cloud-logging` on each node of the cluster to collect Docker container logs. These pods were shown at the start of this blog article in the response to the first get pods command. -This log collection pod has a specification which looks something like this [fluentd-gcp.yaml](http://releases.k8s.io/HEAD/cluster/saltbase/salt/fluentd-gcp/fluentd-gcp.yaml): +This log collection pod has a specification which looks something like this: + + ```yaml apiVersion: v1 @@ -163,19 +169,31 @@ metadata: spec: containers: - name: fluentd-cloud-logging - image: gcr.io/google_containers/fluentd-gcp:1.6 + image: gcr.io/google_containers/fluentd-gcp:1.9 + resources: + limits: + cpu: 100m + memory: 200Mi env: - name: FLUENTD_ARGS value: -qq volumeMounts: + - name: varlog + mountPath: /varlog - name: containers mountPath: /var/lib/docker/containers volumes: + - name: varlog + hostPath: + path: /var/log - name: containers hostPath: path: /var/lib/docker/containers ``` +[Download example](../../cluster/saltbase/salt/fluentd-gcp/fluentd-gcp.yaml) + + This pod specification maps the directory on the host containing the Docker log files, `/var/lib/docker/containers`, to a directory inside the container which has the same path. The pod runs one image, `gcr.io/google_containers/fluentd-gcp:1.6`, which is configured to collect the Docker log files from the logs directory and ingest them into Google Cloud Logging. One instance of this pod runs on each node of the cluster. Kubernetes will notice if this pod fails and automatically restart it. We can click on the Logs item under the Monitoring section of the Google Developer Console and select the logs for the counter container, which will be called kubernetes.counter_default_count. This identifies the name of the pod (counter), the namespace (default) and the name of the container (count) for which the log collection occurred. 
Using this name we can select just the logs for our counter container from the drop down menu: diff --git a/docs/user-guide/downward-api.md b/docs/user-guide/downward-api.md index a0f976b0011..5da6fd68846 100644 --- a/docs/user-guide/downward-api.md +++ b/docs/user-guide/downward-api.md @@ -80,6 +80,8 @@ environment variable they want. This is an example of a pod that consumes its name and namespace via the downward API: + + ```yaml apiVersion: v1 kind: Pod @@ -102,6 +104,9 @@ spec: restartPolicy: Never ``` +[Download example](downward-api/dapi-pod.yaml) + + Some more thorough examples: * [environment variables](environment-guide/) * [downward API](downward-api/) diff --git a/docs/user-guide/downward-api/dapi-pod.yaml b/docs/user-guide/downward-api/dapi-pod.yaml index b16f6e5f5a6..e02ce6e78fd 100644 --- a/docs/user-guide/downward-api/dapi-pod.yaml +++ b/docs/user-guide/downward-api/dapi-pod.yaml @@ -8,11 +8,11 @@ spec: image: gcr.io/google_containers/busybox command: [ "/bin/sh", "-c", "env" ] env: - - name: POD_NAME + - name: MY_POD_NAME valueFrom: fieldRef: fieldPath: metadata.name - - name: POD_NAMESPACE + - name: MY_POD_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace diff --git a/docs/user-guide/logging.md b/docs/user-guide/logging.md index 9f62c599041..484ae2b0af7 100644 --- a/docs/user-guide/logging.md +++ b/docs/user-guide/logging.md @@ -43,19 +43,24 @@ The logs of a running container may be fetched using the command `kubectl logs`. this pod specification [counter-pod.yaml](../../examples/blog-logging/counter-pod.yaml), which has a container which writes out some text to standard output every second. (You can find different pod specifications [here](logging-demo/).) + + ```yaml - apiVersion: v1 - kind: Pod - metadata: - name: counter - spec: - containers: - - name: count - image: ubuntu:14.04 - args: [bash, -c, - 'for ((i = 0; ; i++)); do echo "$i: $(date)"; sleep 1; done'] +apiVersion: v1 +kind: Pod +metadata: + name: counter +spec: + containers: + - name: count + image: ubuntu:14.04 + args: [bash, -c, + 'for ((i = 0; ; i++)); do echo "$i: $(date)"; sleep 1; done'] ``` +[Download example](../../examples/blog-logging/counter-pod.yaml) + + we can run the pod: ```console diff --git a/docs/user-guide/pod.yaml b/docs/user-guide/pod.yaml index 5b7b1efdcbf..7053af0be4b 100644 --- a/docs/user-guide/pod.yaml +++ b/docs/user-guide/pod.yaml @@ -3,7 +3,7 @@ kind: Pod metadata: name: nginx labels: - name: nginx + app: nginx spec: containers: - name: nginx diff --git a/docs/user-guide/simple-yaml.md b/docs/user-guide/simple-yaml.md index be48d3c49c3..1c9795724e1 100644 --- a/docs/user-guide/simple-yaml.md +++ b/docs/user-guide/simple-yaml.md @@ -47,6 +47,8 @@ $ kubectl create -f ./pod.yaml Where pod.yaml contains something like: + + ```yaml apiVersion: v1 kind: Pod @@ -62,6 +64,9 @@ spec: - containerPort: 80 ``` +[Download example](pod.yaml) + + You can see your cluster's pods: ```console @@ -87,6 +92,8 @@ $ kubectl create -f ./replication.yaml Where `replication.yaml` contains: + + ```yaml apiVersion: v1 kind: ReplicationController @@ -109,6 +116,9 @@ spec: - containerPort: 80 ``` +[Download example](replication.yaml) + + To delete the replication controller (and the pods it created): ```console diff --git a/docs/user-guide/walkthrough/README.md b/docs/user-guide/walkthrough/README.md index f7e2a5fe045..62f298f4976 100644 --- a/docs/user-guide/walkthrough/README.md +++ b/docs/user-guide/walkthrough/README.md @@ -146,6 +146,8 @@ For this example we'll be creating a Redis pod with a 
named volume and volume mo Example Redis pod definition with a persistent storage volume ([pod-redis.yaml](pod-redis.yaml)): + + ```yaml apiVersion: v1 kind: Pod @@ -163,6 +165,9 @@ spec: emptyDir: {} ``` +[Download example](pod-redis.yaml) + + Notes: - The volume mount name is a reference to a specific empty dir volume. - The volume mount path is the path to mount the empty dir volume within the container. diff --git a/docs/user-guide/walkthrough/k8s201.md b/docs/user-guide/walkthrough/k8s201.md index 108297358ca..297a4f69a70 100644 --- a/docs/user-guide/walkthrough/k8s201.md +++ b/docs/user-guide/walkthrough/k8s201.md @@ -69,6 +69,8 @@ To add a label, add a labels section under metadata in the pod definition: For example, here is the nginx pod definition with labels ([pod-nginx-with-label.yaml](pod-nginx-with-label.yaml)): + + ```yaml apiVersion: v1 kind: Pod @@ -84,6 +86,9 @@ spec: - containerPort: 80 ``` +[Download example](pod-nginx-with-label.yaml) + + Create the labeled pod ([pod-nginx-with-label.yaml](pod-nginx-with-label.yaml)): ```console @@ -108,6 +113,8 @@ Replication controllers are the objects to answer these questions. A replicatio For example, here is a replication controller that instantiates two nginx pods ([replication-controller.yaml](replication-controller.yaml)): + + ```yaml apiVersion: v1 kind: ReplicationController @@ -135,6 +142,9 @@ spec: - containerPort: 80 ``` +[Download example](replication-controller.yaml) + + #### Replication Controller Management Create an nginx replication controller ([replication-controller.yaml](replication-controller.yaml)): @@ -164,6 +174,8 @@ Once you have a replicated set of pods, you need an abstraction that enables con For example, here is a service that balances across the pods created in the previous nginx replication controller example ([service.yaml](service.yaml)): + + ```yaml apiVersion: v1 kind: Service @@ -183,6 +195,9 @@ spec: app: nginx ``` +[Download example](service.yaml) + + #### Service Management Create an nginx service ([service.yaml](service.yaml)): @@ -271,6 +286,8 @@ The container health checks are configured in the `livenessProbe` section of you Here is an example config for a pod with an HTTP health check ([pod-with-http-healthcheck.yaml](pod-with-http-healthcheck.yaml)): + + ```yaml apiVersion: v1 kind: Pod @@ -294,6 +311,9 @@ spec: - containerPort: 80 ``` +[Download example](pod-with-http-healthcheck.yaml) + + For more information about health checking, see [Container Probes](../pod-states.md#container-probes). 
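HTTP is not the only probe type the `livenessProbe` block supports; the kubelet can also run a command inside the container and treat a non-zero exit code as a failure. A minimal sketch of that variant, assuming a hypothetical `/tmp/healthy` marker file that the application is expected to maintain:

```yaml
# Exec-based variation on pod-with-http-healthcheck.yaml above.
# The /tmp/healthy marker file is an assumption for illustration only.
apiVersion: v1
kind: Pod
metadata:
  name: pod-with-exec-healthcheck
spec:
  containers:
    - name: nginx
      image: nginx
      livenessProbe:
        exec:
          command:
            - cat
            - /tmp/healthy
        # give the container time to start before probing
        initialDelaySeconds: 30
        timeoutSeconds: 1
      ports:
        - containerPort: 80
```

As with the HTTP probe, a failing check causes the kubelet to restart the container.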
diff --git a/docs/user-guide/walkthrough/pod-redis.yaml b/docs/user-guide/walkthrough/pod-redis.yaml index 4b8613347ba..f000658079b 100644 --- a/docs/user-guide/walkthrough/pod-redis.yaml +++ b/docs/user-guide/walkthrough/pod-redis.yaml @@ -10,5 +10,5 @@ spec: - name: redis-persistent-storage mountPath: /data/redis volumes: - - name: redis-persistent-storage - emptyDir: {} + - name: redis-persistent-storage + emptyDir: {} diff --git a/docs/user-guide/walkthrough/pod-with-http-healthcheck.yaml b/docs/user-guide/walkthrough/pod-with-http-healthcheck.yaml index b11a5471e03..c697eba1dfe 100644 --- a/docs/user-guide/walkthrough/pod-with-http-healthcheck.yaml +++ b/docs/user-guide/walkthrough/pod-with-http-healthcheck.yaml @@ -4,17 +4,17 @@ metadata: name: pod-with-healthcheck spec: containers: - - name: nginx - image: nginx - # defines the health checking - livenessProbe: - # an http probe - httpGet: - path: /_status/healthz - port: 80 - # length of time to wait for a pod to initialize - # after pod startup, before applying health checking - initialDelaySeconds: 30 - timeoutSeconds: 1 - ports: - - containerPort: 80 + - name: nginx + image: nginx + # defines the health checking + livenessProbe: + # an http probe + httpGet: + path: /_status/healthz + port: 80 + # length of time to wait for a pod to initialize + # after pod startup, before applying health checking + initialDelaySeconds: 30 + timeoutSeconds: 1 + ports: + - containerPort: 80 diff --git a/docs/user-guide/walkthrough/replication-controller.yaml b/docs/user-guide/walkthrough/replication-controller.yaml index 013a7a041f9..e0dd6f73dde 100644 --- a/docs/user-guide/walkthrough/replication-controller.yaml +++ b/docs/user-guide/walkthrough/replication-controller.yaml @@ -4,21 +4,21 @@ metadata: name: nginx-controller spec: replicas: 2 - # selector identifies the set of pods that this + # selector identifies the set of Pods that this # replication controller is responsible for managing selector: - name: nginx - # template defines the 'cookie cutter' used for creating + app: nginx + # podTemplate defines the 'cookie cutter' used for creating # new pods when necessary template: metadata: labels: # Important: these labels need to match the selector above # The api server enforces this constraint. - name: nginx + app: nginx spec: containers: - - name: nginx - image: nginx - ports: - - containerPort: 80 + - name: nginx + image: nginx + ports: + - containerPort: 80 diff --git a/docs/user-guide/walkthrough/service.yaml b/docs/user-guide/walkthrough/service.yaml index e3335367afa..f2304d3d7f9 100644 --- a/docs/user-guide/walkthrough/service.yaml +++ b/docs/user-guide/walkthrough/service.yaml @@ -4,13 +4,13 @@ metadata: name: nginx-service spec: ports: - - port: 8000 # the port that this service should serve on - # the container on each pod to connect to, can be a name - # (e.g. 'www') or a number (e.g. 80) - targetPort: 80 - protocol: TCP + - port: 8000 # the port that this service should serve on + # the container on each pod to connect to, can be a name + # (e.g. 'www') or a number (e.g. 80) + targetPort: 80 + protocol: TCP # just like the selector in the replication controller, # but this time it identifies the set of pods to load balance # traffic to. selector: - name: nginx + app: nginx diff --git a/examples/cassandra/README.md b/examples/cassandra/README.md index b11577fe813..aa3d73f8df9 100644 --- a/examples/cassandra/README.md +++ b/examples/cassandra/README.md @@ -52,44 +52,57 @@ This is a somewhat long tutorial. 
If you want to jump straight to the "do it no In Kubernetes, the atomic unit of an application is a [_Pod_](../../docs/user-guide/pods.md). A Pod is one or more containers that _must_ be scheduled onto the same host. All containers in a pod share a network namespace, and may optionally share mounted volumes. In this simple case, we define a single container running Cassandra for our pod: + + ```yaml apiVersion: v1 -kind: Pod +kind: ReplicationController metadata: labels: name: cassandra name: cassandra spec: - containers: - - name: cassandra - image: gcr.io/google_containers/cassandra:v5 - args: - - /run.sh - resources: - limits: - cpu: "0.5" - ports: - - name: cql - containerPort: 9042 - - name: thrift - containerPort: 9160 - volumeMounts: - - name: data - mountPath: /cassandra_data - env: - - name: MAX_HEAP_SIZE - value: 512M - - name: HEAP_NEWSIZE - value: 100M - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - volumes: - - name: data - emptyDir: {} + replicas: 1 + selector: + name: cassandra + template: + metadata: + labels: + name: cassandra + spec: + containers: + - command: + - /run.sh + resources: + limits: + cpu: 0.1 + env: + - name: MAX_HEAP_SIZE + value: 512M + - name: HEAP_NEWSIZE + value: 100M + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + image: gcr.io/google_containers/cassandra:v6 + name: cassandra + ports: + - containerPort: 9042 + name: cql + - containerPort: 9160 + name: thrift + volumeMounts: + - mountPath: /cassandra_data + name: data + volumes: + - name: data + emptyDir: {} ``` +[Download example](cassandra-controller.yaml) + + There are a few things to note in this description. First is that we are running the ```kubernetes/cassandra``` image. This is a standard Cassandra installation on top of Debian. However it also adds a custom [```SeedProvider```](https://svn.apache.org/repos/asf/cassandra/trunk/src/java/org/apache/cassandra/locator/SeedProvider.java) to Cassandra. In Cassandra, a ```SeedProvider``` bootstraps the gossip protocol that Cassandra uses to find other nodes. The ```KubernetesSeedProvider``` discovers the Kubernetes API Server using the built in Kubernetes discovery service, and then uses the Kubernetes API to find new nodes (more on this later) You may also note that we are setting some Cassandra parameters (```MAX_HEAP_SIZE``` and ```HEAP_NEWSIZE```) and adding information about the [namespace](../../docs/user-guide/namespaces.md). We also tell Kubernetes that the container exposes both the ```CQL``` and ```Thrift``` API ports. Finally, we tell the cluster manager that we need 0.5 cpu (0.5 core). @@ -102,6 +115,8 @@ In Kubernetes a _[Service](../../docs/user-guide/services.md)_ describes a set o Here is the service description: + + ```yaml apiVersion: v1 kind: Service @@ -116,6 +131,9 @@ spec: name: cassandra ``` +[Download example](cassandra-service.yaml) + + The important thing to note here is the ```selector```. It is a query over labels, that identifies the set of _Pods_ contained by the _Service_. In this case the selector is ```name=cassandra```. If you look back at the Pod specification above, you'll see that the pod has the corresponding label, so it will be selected for membership in this Service. 
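Condensed side by side, the relationship looks like this — these are fragments of the two specs above, not a standalone manifest:

```yaml
# Fragment of cassandra-service.yaml above: the Service's label query.
spec:
  selector:
    name: cassandra
---
# Fragment of the Cassandra pod spec above: the matching label that makes
# the Pod a member of the Service.
metadata:
  labels:
    name: cassandra
```
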
Create this service as follows: @@ -175,6 +193,8 @@ In Kubernetes a _[Replication Controller](../../docs/user-guide/replication-cont Replication controllers will "adopt" existing pods that match their selector query, so let's create a replication controller with a single replica to adopt our existing Cassandra pod. + + ```yaml apiVersion: v1 kind: ReplicationController @@ -192,13 +212,11 @@ spec: name: cassandra spec: containers: - - name: cassandra - image: gcr.io/google_containers/cassandra:v5 - command: + - command: - /run.sh resources: limits: - cpu: 0.5 + cpu: 0.1 env: - name: MAX_HEAP_SIZE value: 512M @@ -208,6 +226,8 @@ spec: valueFrom: fieldRef: fieldPath: metadata.namespace + image: gcr.io/google_containers/cassandra:v6 + name: cassandra ports: - containerPort: 9042 name: cql @@ -221,6 +241,9 @@ spec: emptyDir: {} ``` +[Download example](cassandra-controller.yaml) + + Most of this replication controller definition is identical to the Cassandra pod definition above, it simply gives the resplication controller a recipe to use when it creates new Cassandra pods. The other differentiating parts are the ```selector``` attribute which contains the controller's selector query, and the ```replicas``` attribute which specifies the desired number of replicas, in this case 1. Create this controller: diff --git a/examples/celery-rabbitmq/README.md b/examples/celery-rabbitmq/README.md index f1e31719574..2c36469df0f 100644 --- a/examples/celery-rabbitmq/README.md +++ b/examples/celery-rabbitmq/README.md @@ -64,7 +64,7 @@ You should already have turned up a Kubernetes cluster. To get the most of this The Celery task queue will need to communicate with the RabbitMQ broker. RabbitMQ will eventually appear on a separate pod, but since pods are ephemeral we need a service that can transparently route requests to RabbitMQ. -Use the file [`examples/celery-rabbitmq/rabbitmq-service.yaml`](rabbitmq-service.yaml): + ```yaml apiVersion: v1 @@ -81,6 +81,9 @@ spec: component: rabbitmq ``` +[Download example](rabbitmq-service.yaml) + + To start the service, run: ```sh @@ -94,6 +97,8 @@ This service allows other pods to connect to the rabbitmq. To them, it will be s A RabbitMQ broker can be turned up using the file [`examples/celery-rabbitmq/rabbitmq-controller.yaml`](rabbitmq-controller.yaml): + + ```yaml apiVersion: v1 kind: ReplicationController @@ -121,6 +126,9 @@ spec: cpu: 100m ``` +[Download example](rabbitmq-controller.yaml) + + Running `$ kubectl create -f examples/celery-rabbitmq/rabbitmq-controller.yaml` brings up a replication controller that ensures one pod exists which is running a RabbitMQ instance. Note that bringing up the pod includes pulling down a docker image, which may take a few moments. This applies to all other pods in this example. @@ -130,6 +138,8 @@ Note that bringing up the pod includes pulling down a docker image, which may ta Bringing up the celery worker is done by running `$ kubectl create -f examples/celery-rabbitmq/celery-controller.yaml`, which contains this: + + ```yaml apiVersion: v1 kind: ReplicationController @@ -157,6 +167,9 @@ spec: cpu: 100m ``` +[Download example](celery-controller.yaml) + + There are several things to point out here... Like the RabbitMQ controller, this controller ensures that there is always a pod is running a Celery worker instance. The celery-app-add Docker image is an extension of the standard Celery image. This is the Dockerfile: @@ -207,6 +220,8 @@ Flower is a web-based tool for monitoring and administrating Celery clusters. 
By First, start the flower service with `$ kubectl create -f examples/celery-rabbitmq/flower-service.yaml`. The service is defined as below: + + ```yaml apiVersion: v1 kind: Service @@ -223,6 +238,9 @@ spec: type: LoadBalancer ``` +[Download example](flower-service.yaml) + + It is marked as external (LoadBalanced). However on many platforms you will have to add an explicit firewall rule to open port 5555. On GCE this can be done with: @@ -234,6 +252,8 @@ Please remember to delete the rule after you are done with the example (on GCE: To bring up the pods, run this command `$ kubectl create -f examples/celery-rabbitmq/flower-controller.yaml`. This controller is defined as so: + + ```yaml apiVersion: v1 kind: ReplicationController @@ -259,6 +279,9 @@ spec: cpu: 100m ``` +[Download example](flower-controller.yaml) + + This will bring up a new pod with Flower installed and port 5555 (Flower's default port) exposed through the service endpoint. This image uses the following command to start Flower: ```sh diff --git a/examples/elasticsearch/README.md b/examples/elasticsearch/README.md index 4accb3a1b9c..adbab558b93 100644 --- a/examples/elasticsearch/README.md +++ b/examples/elasticsearch/README.md @@ -46,8 +46,9 @@ server to get a list of matching Elasticsearch pods. To enable authenticated communication this image needs a [secret](../../docs/user-guide/secrets.md) to be mounted at `/etc/apiserver-secret` with the basic authentication username and password. -Here is an example replication controller specification that creates 4 instances of Elasticsearch which is in the file -[music-rc.yaml](music-rc.yaml). +Here is an example replication controller specification that creates 4 instances of Elasticsearch. + + ```yaml apiVersion: v1 @@ -91,6 +92,9 @@ spec: secretName: apiserver-secret ``` +[Download example](music-rc.yaml) + + The `CLUSTER_NAME` variable gives a name to the cluster and allows multiple separate clusters to exist in the same namespace. The `SELECTOR` variable should be set to a label query that identifies the Elasticsearch @@ -101,7 +105,9 @@ to be used to search for Elasticsearch pods and this should be the same as the n for the replication controller (in this case `mytunes`). Before creating pods with the replication controller a secret containing the bearer authentication token -should be set up. A template is provided in the file [apiserver-secret.yaml](apiserver-secret.yaml): +should be set up. + + ```yaml apiVersion: v1 @@ -113,6 +119,9 @@ data: token: "TOKEN" ``` +[Download example](apiserver-secret.yaml) + + Replace `NAMESPACE` with the actual namespace to be used and `TOKEN` with the basic64 encoded versions of the bearer token reported by `kubectl config view` e.g. @@ -154,7 +163,9 @@ replicationcontrollers/music-db ``` It's also useful to have a [service](../../docs/user-guide/services.md) with an load balancer for accessing the Elasticsearch -cluster which can be found in the file [music-service.yaml](music-service.yaml). +cluster. 
+ + ```yaml apiVersion: v1 @@ -174,6 +185,9 @@ spec: type: LoadBalancer ``` +[Download example](music-service.yaml) + + Let's create the service with an external load balancer: ```console diff --git a/examples/guestbook/README.md b/examples/guestbook/README.md index 27f987b8edd..0bceeed02fc 100644 --- a/examples/guestbook/README.md +++ b/examples/guestbook/README.md @@ -75,7 +75,7 @@ To start the redis master, use the file `examples/guestbook/redis-master-control Although we have a single instance of our redis master, we are using a [replication controller](../../docs/user-guide/replication-controller.md) to enforce that exactly one pod keeps running. E.g., if the node were to go down, the replication controller will ensure that the redis master gets restarted on a healthy node. (In our simplified example, this could result in data loss.) -Here is `redis-master-controller.yaml`: + ```yaml apiVersion: v1 @@ -100,6 +100,9 @@ spec: - containerPort: 6379 ``` +[Download example](redis-master-controller.yaml) + + Change to the `/examples/guestbook` directory if you're not already there. Create the redis master pod in your Kubernetes cluster by running: ```console @@ -200,6 +203,8 @@ The selector field of the service description determines which pods will receive The file `examples/guestbook/redis-master-service.yaml` defines the redis master service: + + ```yaml apiVersion: v1 kind: Service @@ -216,6 +221,9 @@ spec: name: redis-master ``` +[Download example](redis-master-service.yaml) + + Create the service by running: ```console @@ -262,6 +270,8 @@ In Kubernetes, a replication controller is responsible for managing multiple ins To create the replicated pod, use the file `examples/guestbook/redis-slave-controller.yaml`, which looks like this: + + ```yaml apiVersion: v1 kind: ReplicationController @@ -285,6 +295,9 @@ spec: - containerPort: 6379 ``` +[Download example](redis-slave-controller.yaml) + + and create the replication controller by running: ```console @@ -316,6 +329,8 @@ Just like the master, we want to have a service to proxy connections to the redi The service specification for the slaves is in `examples/guestbook/redis-slave-service.yaml`: + + ```yaml apiVersion: v1 kind: Service @@ -331,6 +346,9 @@ spec: name: redis-slave ``` +[Download example](redis-slave-service.yaml) + + This time the selector for the service is `name=redis-slave`, because that identifies the pods running redis slaves. It may also be helpful to set labels on your service itself as we've done here to make it easy to locate them with the `kubectl get services -l "label=value"` command. Now that you have created the service specification, create it in your cluster by running: @@ -354,6 +372,8 @@ Again we'll create a set of replicated frontend pods instantiated by a replicati The pod is described in the file `examples/guestbook/frontend-controller.yaml`: + + ```yaml apiVersion: v1 kind: ReplicationController @@ -377,6 +397,9 @@ spec: - containerPort: 80 ``` +[Download example](frontend-controller.yaml) + + Using this file, you can turn up your frontend with: ```console @@ -457,6 +480,8 @@ Note the use of the `redis-master` and `redis-slave` host names-- we're finding As with the other pods, we now want to create a service to group your frontend pods. 
The service is described in the file `frontend-service.yaml`: + + ```yaml apiVersion: v1 kind: Service @@ -470,11 +495,14 @@ spec: # type: LoadBalancer ports: # the port that this service should serve on - - port: 80 + - port: 80 selector: name: frontend ``` +[Download example](frontend-service.yaml) + + #### Using 'type: LoadBalancer' for the frontend service (cloud-provider-specific) For supported cloud providers, such as Google Compute Engine or Google Container Engine, you can specify to use an external load balancer diff --git a/examples/hazelcast/README.md b/examples/hazelcast/README.md index 59a2b6043ed..2e2a2f43e9d 100644 --- a/examples/hazelcast/README.md +++ b/examples/hazelcast/README.md @@ -67,20 +67,25 @@ In Kubernetes a _[Service](../../docs/user-guide/services.md)_ describes a set o Here is the service description: + + ```yaml apiVersion: v1 kind: Service -metadata: - labels: +metadata: + labels: name: hazelcast name: hazelcast spec: ports: - port: 5701 - selector: + selector: name: hazelcast ``` +[Download example](hazelcast-service.yaml) + + The important thing to note here is the `selector`. It is a query over labels, that identifies the set of _Pods_ contained by the _Service_. In this case the selector is `name: hazelcast`. If you look at the Replication Controller specification below, you'll see that the pod has the corresponding label, so it will be selected for membership in this Service. Create this service as follows: @@ -97,6 +102,8 @@ In Kubernetes a _[Replication Controller](../../docs/user-guide/replication-cont Replication Controllers will "adopt" existing pods that match their selector query, so let's create a Replication Controller with a single replica to adopt our existing Hazelcast Pod. + + ```yaml apiVersion: v1 kind: ReplicationController @@ -131,6 +138,9 @@ spec: name: hazelcast ``` +[Download example](hazelcast-controller.yaml) + + There are a few things to note in this description. First is that we are running the `quay.io/pires/hazelcast-kubernetes` image, tag `0.5`. This is a `busybox` installation with JRE 8 Update 45. However it also adds a custom [`application`](https://github.com/pires/hazelcast-kubernetes-bootstrapper) that finds any Hazelcast nodes in the cluster and bootstraps an Hazelcast instance accordingle. The `HazelcastDiscoveryController` discovers the Kubernetes API Server using the built in Kubernetes discovery service, and then uses the Kubernetes API to find new nodes (more on this later). You may also note that we tell Kubernetes that the container exposes the `hazelcast` port. Finally, we tell the cluster manager that we need 1 cpu core. diff --git a/examples/mysql-wordpress-pd/README.md b/examples/mysql-wordpress-pd/README.md index 6a6352cd5a8..46d6f29b29a 100644 --- a/examples/mysql-wordpress-pd/README.md +++ b/examples/mysql-wordpress-pd/README.md @@ -95,6 +95,8 @@ Now that the persistent disks are defined, the Kubernetes pods can be launched. First, **edit [`mysql.yaml`](mysql.yaml)**, the mysql pod definition, to use a database password that you specify. `mysql.yaml` looks like this: + + ```yaml apiVersion: v1 kind: Pod @@ -127,9 +129,11 @@ spec: # This GCE PD must already exist. pdName: mysql-disk fsType: ext4 - ``` +[Download example](mysql.yaml) + + Note that we've defined a volume mount for `/var/lib/mysql`, and specified a volume that uses the persistent disk (`mysql-disk`) that you created. 
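Reduced to just the storage-related fields, the pairing in `mysql.yaml` works like this — the volume name shown here is illustrative; what matters is that the entry under `volumeMounts` refers by name to an entry under `volumes`:

```yaml
# Storage-related fields only, condensed from mysql.yaml above.
# The volume name "mysql-persistent-storage" is illustrative; the mount
# name and the volume name simply have to match.
spec:
  containers:
    - name: mysql
      image: mysql
      volumeMounts:
        - name: mysql-persistent-storage
          mountPath: /var/lib/mysql      # where MySQL keeps its data
  volumes:
    - name: mysql-persistent-storage
      gcePersistentDisk:
        pdName: mysql-disk               # the GCE PD created earlier; must already exist
        fsType: ext4
```
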
Once you've edited the file to set your database password, create the pod as follows, where `` is the path to your Kubernetes installation: @@ -164,6 +168,8 @@ So if we label our Kubernetes mysql service `mysql`, the wordpress pod will be a The [`mysql-service.yaml`](mysql-service.yaml) file looks like this: + + ```yaml apiVersion: v1 kind: Service @@ -180,6 +186,9 @@ spec: name: mysql ``` +[Download example](mysql-service.yaml) + + Start the service like this: ```sh @@ -199,6 +208,8 @@ Once the mysql service is up, start the wordpress pod, specified in [`wordpress.yaml`](wordpress.yaml). Before you start it, **edit `wordpress.yaml`** and **set the database password to be the same as you used in `mysql.yaml`**. Note that this config file also defines a volume, this one using the `wordpress-disk` persistent disk that you created. + + ```yaml apiVersion: v1 kind: Pod @@ -230,6 +241,9 @@ spec: fsType: ext4 ``` +[Download example](wordpress.yaml) + + Create the pod: ```sh @@ -249,6 +263,8 @@ Once the wordpress pod is running, start its service, specified by [`wordpress-s The service config file looks like this: + + ```yaml apiVersion: v1 kind: Service @@ -266,6 +282,9 @@ spec: type: LoadBalancer ``` +[Download example](wordpress-service.yaml) + + Note the `type: LoadBalancer` setting. This will set up the wordpress service behind an external IP. Note also that we've set the service port to 80. We'll return to that shortly. diff --git a/examples/phabricator/README.md b/examples/phabricator/README.md index a63a4c69a04..ffab31d04df 100644 --- a/examples/phabricator/README.md +++ b/examples/phabricator/README.md @@ -56,6 +56,8 @@ In the remaining part of this example we will assume that your instance is named To start Phabricator server use the file [`examples/phabricator/phabricator-controller.json`](phabricator-controller.json) which describes a [replication controller](../../docs/user-guide/replication-controller.md) with a single [pod](../../docs/user-guide/pods.md) running an Apache server with Phabricator PHP source: + + ```json { "kind": "ReplicationController", @@ -96,6 +98,9 @@ To start Phabricator server use the file [`examples/phabricator/phabricator-cont } ``` +[Download example](phabricator-controller.json) + + Create the phabricator pod in your Kubernetes cluster by running: ```sh @@ -147,6 +152,8 @@ gcloud sql instances patch phabricator-db --authorized-networks 130.211.141.151 To automate this process and make sure that a proper host is authorized even if pod is rescheduled to a new machine we need a separate pod that periodically lists pods and authorizes hosts. 
Use the file [`examples/phabricator/authenticator-controller.json`](authenticator-controller.json): + + ```json { "kind": "ReplicationController", @@ -172,7 +179,7 @@ To automate this process and make sure that a proper host is authorized even if "containers": [ { "name": "authenticator", - "image": "gcr.io.google_containers/cloudsql-authenticator:v1" + "image": "gcr.io/google_containers/cloudsql-authenticator:v1" } ] } @@ -181,6 +188,9 @@ To automate this process and make sure that a proper host is authorized even if } ``` +[Download example](authenticator-controller.json) + + To create the pod run: ```sh @@ -203,6 +213,8 @@ phabricator us-central1 107.178.210.6 RESERVED Use the file [`examples/phabricator/phabricator-service.json`](phabricator-service.json): + + ```json { "kind": "Service", @@ -225,6 +237,9 @@ Use the file [`examples/phabricator/phabricator-service.json`](phabricator-servi } ``` +[Download example](phabricator-service.json) + + To create the service run: ```sh