From 326e30772a62f077bdb4bf7e3413b92904705c90 Mon Sep 17 00:00:00 2001 From: Klaus Ma Date: Tue, 19 Apr 2016 00:34:47 +0800 Subject: [PATCH] Added namespace to Spark example. --- examples/examples_test.go | 1 + examples/spark/README.md | 41 +++++++++++++++---- examples/spark/namespace-spark-cluster.yaml | 6 +++ .../spark-gluster/glusterfs-endpoints.yaml | 1 + .../spark-master-controller.yaml | 1 + .../spark-gluster/spark-master-service.yaml | 1 + .../spark-worker-controller.yaml | 1 + examples/spark/spark-master-controller.yaml | 1 + examples/spark/spark-master-service.yaml | 1 + examples/spark/spark-webui.yaml | 1 + examples/spark/spark-worker-controller.yaml | 1 + examples/spark/zeppelin-controller.yaml | 1 + examples/spark/zeppelin-service.yaml | 1 + 13 files changed, 50 insertions(+), 8 deletions(-) create mode 100644 examples/spark/namespace-spark-cluster.yaml diff --git a/examples/examples_test.go b/examples/examples_test.go index 5432e95c60d..342e2f6993b 100644 --- a/examples/examples_test.go +++ b/examples/examples_test.go @@ -372,6 +372,7 @@ func TestExampleObjectSchemas(t *testing.T) { "secret-env-pod": &api.Pod{}, }, "../examples/spark": { + "namespace-spark-cluster": &api.Namespace{}, "spark-master-controller": &api.ReplicationController{}, "spark-master-service": &api.Service{}, "spark-webui": &api.Service{}, diff --git a/examples/spark/README.md b/examples/spark/README.md index c9845b3f0bf..391ac94a55f 100644 --- a/examples/spark/README.md +++ b/examples/spark/README.md @@ -58,7 +58,31 @@ This example assumes For details, you can look at the Dockerfiles in the Sources section. 
-## Step One: Start your Master service +## Step One: Create a namespace + +```sh +$ kubectl create -f examples/spark/namespace-spark-cluster.yaml +``` + +Now list all namespaces: + +```sh +$ kubectl get namespaces +NAME LABELS STATUS +default Active +spark-cluster name=spark-cluster Active +``` + +For the kubectl client to work with the namespace, we define a context and use it: + +```sh +$ kubectl config set-context spark --namespace=spark-cluster --cluster=${CLUSTER_NAME} --user=${USER_NAME} +$ kubectl config use-context spark +``` + +You can view your cluster name and user name in the Kubernetes config at `~/.kube/config`. + +## Step Two: Start your Master service The Master [service](../../docs/user-guide/services.md) is the master service for a Spark cluster. @@ -71,7 +95,7 @@ running the Spark Master service. ```console $ kubectl create -f examples/spark/spark-master-controller.yaml -replicationcontrollers/spark-master-controller +replicationcontroller "spark-master-controller" created ``` Then, use the @@ -81,14 +105,14 @@ Master pod. ```console $ kubectl create -f examples/spark/spark-master-service.yaml -services/spark-master +service "spark-master" created ``` You can then create a service for the Spark Master WebUI: ```console $ kubectl create -f examples/spark/spark-webui.yaml -services/spark-webui +service "spark-webui" created ``` ### Check to see if Master is running and accessible @@ -134,7 +158,7 @@ kubectl proxy --port=8001 At which point the UI will be available at [http://localhost:8001/api/v1/proxy/namespaces/default/services/spark-webui/](http://localhost:8001/api/v1/proxy/namespaces/default/services/spark-webui/). -## Step Two: Start your Spark workers +## Step Three: Start your Spark workers The Spark workers do the heavy lifting in a Spark cluster. They provide execution resources and data cache capabilities for your @@ -147,6 +171,7 @@ Use the [`examples/spark/spark-worker-controller.yaml`](spark-worker-controller.
```console $ kubectl create -f examples/spark/spark-worker-controller.yaml +replicationcontroller "spark-worker-controller" created ``` ### Check to see if the workers are running @@ -175,7 +200,7 @@ you should now see the workers in the UI as well. *Note:* The UI will have links to worker Web UIs. The worker UI links do not work (the links will attempt to connect to cluster IPs, which Kubernetes won't proxy automatically). -## Step Three: Start the Zeppelin UI to launch jobs on your Spark cluster +## Step Four: Start the Zeppelin UI to launch jobs on your Spark cluster The Zeppelin UI pod can be used to launch jobs into the Spark cluster either via a web notebook frontend or the traditional Spark command line. See @@ -185,7 +210,7 @@ for more details. ```console $ kubectl create -f examples/spark/zeppelin-controller.yaml -replicationcontrollers/zeppelin-controller +replicationcontroller "zeppelin-controller" created ``` Zeppelin needs the Master service to be running. @@ -198,7 +223,7 @@ NAME READY STATUS RESTARTS AGE zeppelin-controller-ja09s 1/1 Running 0 53s ``` -## Step Four: Do something with the cluster +## Step Five: Do something with the cluster Now you have two choices, depending on your predilections. You can do something graphical with the Spark cluster, or you can stay in the CLI. 
diff --git a/examples/spark/namespace-spark-cluster.yaml b/examples/spark/namespace-spark-cluster.yaml new file mode 100644 index 00000000000..1f3dce83cf9 --- /dev/null +++ b/examples/spark/namespace-spark-cluster.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: "spark-cluster" + labels: + name: "spark-cluster" diff --git a/examples/spark/spark-gluster/glusterfs-endpoints.yaml b/examples/spark/spark-gluster/glusterfs-endpoints.yaml index fea6e1cd0b9..357fdb76648 100644 --- a/examples/spark/spark-gluster/glusterfs-endpoints.yaml +++ b/examples/spark/spark-gluster/glusterfs-endpoints.yaml @@ -2,6 +2,7 @@ kind: Endpoints apiVersion: v1 metadata: name: glusterfs-cluster + namespace: spark-cluster subsets: - addresses: - ip: 192.168.30.104 diff --git a/examples/spark/spark-gluster/spark-master-controller.yaml b/examples/spark/spark-gluster/spark-master-controller.yaml index 304ee0f2bad..d0b365b7135 100644 --- a/examples/spark/spark-gluster/spark-master-controller.yaml +++ b/examples/spark/spark-gluster/spark-master-controller.yaml @@ -2,6 +2,7 @@ kind: ReplicationController apiVersion: v1 metadata: name: spark-master-controller + namespace: spark-cluster labels: component: spark-master spec: diff --git a/examples/spark/spark-gluster/spark-master-service.yaml b/examples/spark/spark-gluster/spark-master-service.yaml index ec51365b39d..2f5bdb15dc8 100644 --- a/examples/spark/spark-gluster/spark-master-service.yaml +++ b/examples/spark/spark-gluster/spark-master-service.yaml @@ -2,6 +2,7 @@ kind: Service apiVersion: v1 metadata: name: spark-master + namespace: spark-cluster labels: component: spark-master-service spec: diff --git a/examples/spark/spark-gluster/spark-worker-controller.yaml b/examples/spark/spark-gluster/spark-worker-controller.yaml index 89fa908d382..69cc3cec95e 100644 --- a/examples/spark/spark-gluster/spark-worker-controller.yaml +++ b/examples/spark/spark-gluster/spark-worker-controller.yaml @@ -2,6 +2,7 @@ kind: 
ReplicationController apiVersion: v1 metadata: name: spark-gluster-worker-controller + namespace: spark-cluster labels: component: spark-worker spec: diff --git a/examples/spark/spark-master-controller.yaml b/examples/spark/spark-master-controller.yaml index 60fb7ba8a15..094f66d240b 100644 --- a/examples/spark/spark-master-controller.yaml +++ b/examples/spark/spark-master-controller.yaml @@ -2,6 +2,7 @@ kind: ReplicationController apiVersion: v1 metadata: name: spark-master-controller + namespace: spark-cluster spec: replicas: 1 selector: diff --git a/examples/spark/spark-master-service.yaml b/examples/spark/spark-master-service.yaml index 32d20a71eb2..b3488e94962 100644 --- a/examples/spark/spark-master-service.yaml +++ b/examples/spark/spark-master-service.yaml @@ -2,6 +2,7 @@ kind: Service apiVersion: v1 metadata: name: spark-master + namespace: spark-cluster spec: ports: - port: 7077 diff --git a/examples/spark/spark-webui.yaml b/examples/spark/spark-webui.yaml index 421a3e47136..5abecbc64ac 100644 --- a/examples/spark/spark-webui.yaml +++ b/examples/spark/spark-webui.yaml @@ -2,6 +2,7 @@ kind: Service apiVersion: v1 metadata: name: spark-webui + namespace: spark-cluster spec: ports: - port: 8080 diff --git a/examples/spark/spark-worker-controller.yaml b/examples/spark/spark-worker-controller.yaml index 9c748b3e048..375cc376c69 100644 --- a/examples/spark/spark-worker-controller.yaml +++ b/examples/spark/spark-worker-controller.yaml @@ -2,6 +2,7 @@ kind: ReplicationController apiVersion: v1 metadata: name: spark-worker-controller + namespace: spark-cluster spec: replicas: 2 selector: diff --git a/examples/spark/zeppelin-controller.yaml b/examples/spark/zeppelin-controller.yaml index 56bb90d421d..85361687e20 100644 --- a/examples/spark/zeppelin-controller.yaml +++ b/examples/spark/zeppelin-controller.yaml @@ -2,6 +2,7 @@ kind: ReplicationController apiVersion: v1 metadata: name: zeppelin-controller + namespace: spark-cluster spec: replicas: 1 selector: diff 
--git a/examples/spark/zeppelin-service.yaml b/examples/spark/zeppelin-service.yaml index 9296297f168..8107c05169f 100644 --- a/examples/spark/zeppelin-service.yaml +++ b/examples/spark/zeppelin-service.yaml @@ -2,6 +2,7 @@ kind: Service apiVersion: v1 metadata: name: zeppelin + namespace: spark-cluster spec: ports: - port: 8080