diff --git a/cluster/saltbase/salt/base.sls b/cluster/saltbase/salt/base.sls index ac7924d7b4d..196f1cf6db1 100644 --- a/cluster/saltbase/salt/base.sls +++ b/cluster/saltbase/salt/base.sls @@ -10,6 +10,7 @@ pkg-core: - apt-transport-https - python-apt - glusterfs-client + - nfs-common - socat {% endif %} # Ubuntu installs netcat-openbsd by default, but on GCE/Debian netcat-traditional is installed. diff --git a/docs/user-guide/volumes.md b/docs/user-guide/volumes.md index b89591070be..5af76ad0254 100644 --- a/docs/user-guide/volumes.md +++ b/docs/user-guide/volumes.md @@ -291,15 +291,6 @@ before you can use it__ See the [NFS example](../../examples/nfs/) for more details. -For example, [this file](../../examples/nfs/nfs-web-pod.yaml) demonstrates how to -specify the usage of an NFS volume within a pod. - -In this example one can see that a `volumeMount` called `nfs` is being mounted -onto `/usr/share/nginx/html` in the container `web`. The volume "nfs" is defined as -type `nfs`, with the NFS server serving from `nfs-server.default.kube.local` -and exporting directory `/` as the share. The mount being created in this -example is writeable. 
- ### iscsi An `iscsi` volume allows an existing iSCSI (SCSI over IP) volume to be mounted diff --git a/examples/examples_test.go b/examples/examples_test.go index 7a25c57e578..e57eb13aadc 100644 --- a/examples/examples_test.go +++ b/examples/examples_test.go @@ -306,9 +306,13 @@ func TestExampleObjectSchemas(t *testing.T) { "wordpress": &api.Pod{}, }, "../examples/nfs": { - "nfs-server-pod": &api.Pod{}, + "nfs-busybox-rc": &api.ReplicationController{}, + "nfs-server-rc": &api.ReplicationController{}, "nfs-server-service": &api.Service{}, - "nfs-web-pod": &api.Pod{}, + "nfs-pv": &api.PersistentVolume{}, + "nfs-pvc": &api.PersistentVolumeClaim{}, + "nfs-web-rc": &api.ReplicationController{}, + "nfs-web-service": &api.Service{}, }, "../docs/user-guide/node-selection": { "pod": &api.Pod{}, diff --git a/examples/nfs/README.md b/examples/nfs/README.md index bc4529118c8..430ef3fa7ba 100644 --- a/examples/nfs/README.md +++ b/examples/nfs/README.md @@ -33,57 +33,115 @@ Documentation for other releases can be found at # Example of NFS volume -See [nfs-web-pod.yaml](nfs-web-pod.yaml) for a quick example, how to use NFS volume -in a pod. +See [nfs-web-rc.yaml](nfs-web-rc.yaml) for a quick example of how to use an NFS +volume claim in a replication controller. It relies on the +[NFS persistent volume](nfs-pv.yaml) and +[NFS persistent volume claim](nfs-pvc.yaml) in this example as well. ## Complete setup -The example below shows how to export a NFS share from a pod and imports it -into another one. - -### Prerequisites - -The nfs server pod creates a privileged container, so if you are using a Salt based KUBERNETES_PROVIDER (**gce**, **vagrant**, **aws**), you have to enable the ability to create privileged containers by API. 
- -```sh -# At the root of Kubernetes source code -$ vi cluster/saltbase/pillar/privilege.sls - -# If true, allow privileged containers to be created by API -allow_privileged: true -``` - -For other non-salt based provider, you can set `--allow-privileged=true` for both api-server and kubelet, and then restart these components. - -Rebuild the Kubernetes and spin up a cluster using your preferred KUBERNETES_PROVIDER. +The example below shows how to export a NFS share from a single pod replication +controller and import it into two replication controllers. ### NFS server part -Define [NFS server pod](nfs-server-pod.yaml) and +Define [NFS server controller](nfs-server-rc.yaml) and [NFS service](nfs-server-service.yaml): - $ kubectl create -f nfs-server-pod.yaml - $ kubectl create -f nfs-server-service.yaml +```console +$ kubectl create -f examples/nfs/nfs-server-rc.yaml +$ kubectl create -f examples/nfs/nfs-server-service.yaml +``` -The server exports `/mnt/data` directory as `/` (fsid=0). The directory contains -dummy `index.html`. Wait until the pod is running! +The server exports `/mnt/data` directory as `/` (fsid=0). The +directory contains dummy `index.html`. Wait until the pod is running +by checking `kubectl get pods -lrole=nfs-server`. -### NFS client +### Create the NFS claim -[WEB server pod](nfs-web-pod.yaml) uses the NFS share exported above as a NFS -volume and runs simple web server on it. The pod assumes your DNS is configured -and the NFS service is reachable as `nfs-server.default.kube.local`. Edit the -yaml file to supply another name or directly its IP address (use -`kubectl get services` to get it). +The [NFS busybox controller](nfs-busybox-rc.yaml) uses a simple script to +generate data written to the NFS server we just started. First, you'll need to +find the cluster IP of the server: + +```console +$ kubectl describe services nfs-server +``` + +Replace the invalid IP in the [nfs PV](nfs-pv.yaml). 
(In the future, +we'll be able to tie these together using the service names, but for +now, you have to hardcode the IP.) + +Create the [persistent volume](../../docs/user-guide/persistent-volumes.md) +and the persistent volume claim for your NFS server. The persistent volume and +claim give us an indirection that allows multiple pods to refer to the NFS +server using a symbolic name rather than the hardcoded server address. + +```console +$ kubectl create -f examples/nfs/nfs-pv.yaml +$ kubectl create -f examples/nfs/nfs-pvc.yaml +``` + +## Setup the fake backend + +The [NFS busybox controller](nfs-busybox-rc.yaml) updates `index.html` on the +NFS server every 10 seconds. Let's start that now: + +```console +$ kubectl create -f examples/nfs/nfs-busybox-rc.yaml +``` + +Conveniently, it's also a `busybox` pod, so we can get an early check +that our mounts are working now. Find a busybox pod and exec: + +```console +$ kubectl get pod -lname=nfs-busybox +NAME READY STATUS RESTARTS AGE +nfs-busybox-jdhf3 1/1 Running 0 25m +nfs-busybox-w3s4t 1/1 Running 0 25m +$ kubectl exec nfs-busybox-jdhf3 -- cat /mnt/index.html +Thu Oct 22 19:20:18 UTC 2015 +nfs-busybox-w3s4t +``` + +You should see output similar to the above if everything is working well. If +it's not, make sure you changed the invalid IP in the [NFS PV](nfs-pv.yaml) file +and make sure the `describe services` command above had endpoints listed +(indicating the service was associated with a running pod). + +### Setup the web server + +The [web server controller](nfs-web-rc.yaml) is another simple replication +controller that demonstrates reading from the NFS share exported above as an NFS +volume and runs a simple web server on it. Define the pod: - $ kubectl create -f nfs-web-pod.yaml +```console +$ kubectl create -f examples/nfs/nfs-web-rc.yaml +``` -Now the pod serves `index.html` from the NFS server: +This creates two pods, each of which serves the `index.html` from above. 
We can +then use a simple service to front it: - $ curl http:/// - Hello World! +```console +kubectl create -f examples/nfs/nfs-web-service.yaml +``` + +We can then use the busybox container we launched before to check that `nginx` +is serving the data appropriately: + +```console +$ kubectl get pod -lname=nfs-busybox +NAME READY STATUS RESTARTS AGE +nfs-busybox-jdhf3 1/1 Running 0 1h +nfs-busybox-w3s4t 1/1 Running 0 1h +$ kubectl get services nfs-web +NAME LABELS SELECTOR IP(S) PORT(S) +nfs-web role=web-frontend 10.0.68.37 80/TCP +$ kubectl exec nfs-busybox-jdhf3 -- wget -qO- http://10.0.68.37 +Thu Oct 22 19:28:55 UTC 2015 +nfs-busybox-w3s4t +``` diff --git a/examples/nfs/nfs-busybox-rc.yaml b/examples/nfs/nfs-busybox-rc.yaml new file mode 100644 index 00000000000..617d0275585 --- /dev/null +++ b/examples/nfs/nfs-busybox-rc.yaml @@ -0,0 +1,32 @@ +# This mounts the nfs volume claim into /mnt and continuously +# overwrites /mnt/index.html with the time and hostname of the pod. + +apiVersion: v1 +kind: ReplicationController +metadata: + name: nfs-busybox +spec: + replicas: 2 + selector: + name: nfs-busybox + template: + metadata: + labels: + name: nfs-busybox + spec: + containers: + - image: busybox + command: + - sh + - -c + - 'while true; do date > /mnt/index.html; hostname >> /mnt/index.html; sleep $(($RANDOM % 5 + 5)); done' + imagePullPolicy: IfNotPresent + name: busybox + volumeMounts: + # name must match the volume name below + - name: nfs + mountPath: "/mnt" + volumes: + - name: nfs + persistentVolumeClaim: + claimName: nfs diff --git a/examples/nfs/nfs-pv.yaml b/examples/nfs/nfs-pv.yaml new file mode 100644 index 00000000000..1bc043f905f --- /dev/null +++ b/examples/nfs/nfs-pv.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: nfs +spec: + capacity: + storage: 1Mi + accessModes: + - ReadWriteMany + nfs: + # FIXME: use the right IP + server: 10.999.999.999 + path: "/" diff --git a/examples/nfs/nfs-pvc.yaml 
b/examples/nfs/nfs-pvc.yaml new file mode 100644 index 00000000000..9c1821f7c43 --- /dev/null +++ b/examples/nfs/nfs-pvc.yaml @@ -0,0 +1,10 @@ +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: nfs +spec: + accessModes: + - ReadWriteMany + resources: + requests: + storage: 1Mi diff --git a/examples/nfs/nfs-server-pod.yaml b/examples/nfs/nfs-server-pod.yaml deleted file mode 100644 index baf5641fcb6..00000000000 --- a/examples/nfs/nfs-server-pod.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - name: nfs-server - labels: - role: nfs-server -spec: - containers: - - name: nfs-server - image: jsafrane/nfs-data - ports: - - name: nfs - containerPort: 2049 - securityContext: - privileged: true diff --git a/examples/nfs/nfs-server-rc.yaml b/examples/nfs/nfs-server-rc.yaml new file mode 100644 index 00000000000..e6f7f073773 --- /dev/null +++ b/examples/nfs/nfs-server-rc.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: ReplicationController +metadata: + name: nfs-server +spec: + replicas: 1 + selector: + role: nfs-server + template: + metadata: + labels: + role: nfs-server + spec: + containers: + - name: nfs-server + # TODO(zmerlynn): change to gcr.io/google_containers/volume-nfs + image: jsafrane/nfs-data + ports: + - name: nfs + containerPort: 2049 + securityContext: + privileged: true diff --git a/examples/nfs/nfs-web-pod.yaml b/examples/nfs/nfs-web-pod.yaml deleted file mode 100644 index 12d15d96e4e..00000000000 --- a/examples/nfs/nfs-web-pod.yaml +++ /dev/null @@ -1,25 +0,0 @@ -# -# This pod imports nfs-server.default.kube.local:/ into /usr/share/nginx/html -# - -apiVersion: v1 -kind: Pod -metadata: - name: nfs-web -spec: - containers: - - name: web - image: nginx - ports: - - name: web - containerPort: 80 - volumeMounts: - # name must match the volume name below - - name: nfs - mountPath: "/usr/share/nginx/html" - volumes: - - name: nfs - nfs: - # FIXME: use the right hostname - server: nfs-server.default.kube.local - path: "/" diff 
--git a/examples/nfs/nfs-web-rc.yaml b/examples/nfs/nfs-web-rc.yaml new file mode 100644 index 00000000000..6c96682cb18 --- /dev/null +++ b/examples/nfs/nfs-web-rc.yaml @@ -0,0 +1,30 @@ +# This pod mounts the nfs volume claim into /usr/share/nginx/html and +# serves a simple web page. + +apiVersion: v1 +kind: ReplicationController +metadata: + name: nfs-web +spec: + replicas: 2 + selector: + role: web-frontend + template: + metadata: + labels: + role: web-frontend + spec: + containers: + - name: web + image: nginx + ports: + - name: web + containerPort: 80 + volumeMounts: + # name must match the volume name below + - name: nfs + mountPath: "/usr/share/nginx/html" + volumes: + - name: nfs + persistentVolumeClaim: + claimName: nfs diff --git a/examples/nfs/nfs-web-service.yaml b/examples/nfs/nfs-web-service.yaml new file mode 100644 index 00000000000..b73cac2bc94 --- /dev/null +++ b/examples/nfs/nfs-web-service.yaml @@ -0,0 +1,9 @@ +kind: Service +apiVersion: v1 +metadata: + name: nfs-web +spec: + ports: + - port: 80 + selector: + role: web-frontend