From 8dee2b42930ce2b02d3b25885710f1338f7a0282 Mon Sep 17 00:00:00 2001
From: Janet Kuo
Date: Thu, 16 Jun 2016 17:00:00 -0700
Subject: [PATCH] Fix broken spark example e2e test

---
 examples/spark/spark-master-controller.yaml |  1 -
 examples/spark/spark-master-service.yaml    |  1 -
 examples/spark/spark-worker-controller.yaml |  1 -
 test/e2e/examples.go                        | 33 ++++++++++-----------
 4 files changed, 16 insertions(+), 20 deletions(-)

diff --git a/examples/spark/spark-master-controller.yaml b/examples/spark/spark-master-controller.yaml
index 094f66d240b..60fb7ba8a15 100644
--- a/examples/spark/spark-master-controller.yaml
+++ b/examples/spark/spark-master-controller.yaml
@@ -2,7 +2,6 @@ kind: ReplicationController
 apiVersion: v1
 metadata:
   name: spark-master-controller
-  namespace: spark-cluster
 spec:
   replicas: 1
   selector:
diff --git a/examples/spark/spark-master-service.yaml b/examples/spark/spark-master-service.yaml
index b3488e94962..32d20a71eb2 100644
--- a/examples/spark/spark-master-service.yaml
+++ b/examples/spark/spark-master-service.yaml
@@ -2,7 +2,6 @@ kind: Service
 apiVersion: v1
 metadata:
   name: spark-master
-  namespace: spark-cluster
 spec:
   ports:
     - port: 7077
diff --git a/examples/spark/spark-worker-controller.yaml b/examples/spark/spark-worker-controller.yaml
index 375cc376c69..9c748b3e048 100644
--- a/examples/spark/spark-worker-controller.yaml
+++ b/examples/spark/spark-worker-controller.yaml
@@ -2,7 +2,6 @@ kind: ReplicationController
 apiVersion: v1
 metadata:
   name: spark-worker-controller
-  namespace: spark-cluster
 spec:
   replicas: 2
   selector:
diff --git a/test/e2e/examples.go b/test/e2e/examples.go
index fc150a6082a..03010caa942 100644
--- a/test/e2e/examples.go
+++ b/test/e2e/examples.go
@@ -40,13 +40,18 @@ const (
 
 var _ = framework.KubeDescribe("[Feature:Example]", func() {
 	f := framework.NewDefaultFramework("examples")
-	// Customized ForEach wrapper for this test.
-	forEachPod := func(selectorKey string, selectorValue string, fn func(api.Pod)) {
-		f.NewClusterVerification(
+
+	// Reusable cluster state function. This won't be adversly affected by lazy initialization of framework.
+	clusterState := func(selectorKey string, selectorValue string) *framework.ClusterVerification {
+		return f.NewClusterVerification(
 			framework.PodStateVerification{
 				Selectors:   map[string]string{selectorKey: selectorValue},
 				ValidPhases: []api.PodPhase{api.PodRunning},
-			}).ForEach(fn)
+			})
+	}
+	// Customized ForEach wrapper for this test.
+	forEachPod := func(selectorKey string, selectorValue string, fn func(api.Pod)) {
+		clusterState(selectorKey, selectorValue).ForEach(fn)
 	}
 	var c *client.Client
 	var ns string
@@ -182,11 +187,6 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
 				return filepath.Join(framework.TestContext.RepoRoot, "examples", "spark", file)
 			}
 
-			// Override test-generated namespace to be as specified in Spark example
-			ns = "spark-cluster"
-			namespaceYaml := mkpath("namespace-spark-cluster.yaml")
-			framework.RunKubectlOrDie("create", "-f", namespaceYaml)
-
 			// TODO: Add Zepplin and Web UI to this example.
 			serviceYaml := mkpath("spark-master-service.yaml")
 			masterYaml := mkpath("spark-master-controller.yaml")
@@ -197,14 +197,14 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
 				By("starting master")
 				framework.RunKubectlOrDie("create", "-f", serviceYaml, nsFlag)
 				framework.RunKubectlOrDie("create", "-f", masterYaml, nsFlag)
-				label := labels.SelectorFromSet(labels.Set(map[string]string{"component": "spark-master"}))
+				selectorKey, selectorValue := "component", "spark-master"
+				label := labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue}))
 				err := framework.WaitForPodsWithLabelRunning(c, ns, label)
 				Expect(err).NotTo(HaveOccurred())
 
 				framework.Logf("Now polling for Master startup...")
 
-				// Only one master pod: But its a natural way to look up pod names.
-				forEachPod("component", "spark-master", func(pod api.Pod) {
+				forEachPod(selectorKey, selectorValue, func(pod api.Pod) {
 					framework.Logf("Now waiting for master to startup in %v", pod.Name)
 					_, err := framework.LookForStringInLog(ns, pod.Name, "spark-master", "Starting Spark master at", serverStartTimeout)
 					Expect(err).NotTo(HaveOccurred())
@@ -213,7 +213,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
 				By("waiting for master endpoint")
 				err = framework.WaitForEndpoint(c, ns, "spark-master")
 				Expect(err).NotTo(HaveOccurred())
-				forEachPod("component", "spark-master", func(pod api.Pod) {
+				forEachPod(selectorKey, selectorValue, func(pod api.Pod) {
 					_, maErr := framework.LookForStringInLog(f.Namespace.Name, pod.Name, "spark-master", "Starting Spark master at", serverStartTimeout)
 					if maErr != nil {
 						framework.Failf("Didn't find target string. error:", maErr)
@@ -224,7 +224,8 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
 				By("starting workers")
 				framework.Logf("Now starting Workers")
 				framework.RunKubectlOrDie("create", "-f", workerControllerYaml, nsFlag)
-				label := labels.SelectorFromSet(labels.Set(map[string]string{"component": "spark-worker"}))
+				selectorKey, selectorValue := "component", "spark-worker"
+				label := labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue}))
 				err := framework.WaitForPodsWithLabelRunning(c, ns, label)
 				Expect(err).NotTo(HaveOccurred())
 
@@ -232,9 +233,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
 				// framework.ScaleRC(c, ns, "spark-worker-controller", 2, true)
 
 				framework.Logf("Now polling for worker startup...")
-				// ScaleRC(c, ns, "spark-worker-controller", 2, true)
-				framework.Logf("Now polling for worker startup...")
-				forEachPod("component", "spark-worker",
+				forEachPod(selectorKey, selectorValue,
 					func(pod api.Pod) {
 						_, slaveErr := framework.LookForStringInLog(ns, pod.Name, "spark-worker", "Successfully registered with master", serverStartTimeout)
 						Expect(slaveErr).NotTo(HaveOccurred())