Fix broken spark example e2e test

This commit is contained in:
Janet Kuo 2016-06-16 17:00:00 -07:00
parent 9f06e0f1a6
commit 8dee2b4293
4 changed files with 16 additions and 20 deletions

View File

@ -2,7 +2,6 @@ kind: ReplicationController
apiVersion: v1 apiVersion: v1
metadata: metadata:
name: spark-master-controller name: spark-master-controller
namespace: spark-cluster
spec: spec:
replicas: 1 replicas: 1
selector: selector:

View File

@ -2,7 +2,6 @@ kind: Service
apiVersion: v1 apiVersion: v1
metadata: metadata:
name: spark-master name: spark-master
namespace: spark-cluster
spec: spec:
ports: ports:
- port: 7077 - port: 7077

View File

@ -2,7 +2,6 @@ kind: ReplicationController
apiVersion: v1 apiVersion: v1
metadata: metadata:
name: spark-worker-controller name: spark-worker-controller
namespace: spark-cluster
spec: spec:
replicas: 2 replicas: 2
selector: selector:

View File

@ -40,13 +40,18 @@ const (
var _ = framework.KubeDescribe("[Feature:Example]", func() { var _ = framework.KubeDescribe("[Feature:Example]", func() {
f := framework.NewDefaultFramework("examples") f := framework.NewDefaultFramework("examples")
// Customized ForEach wrapper for this test.
forEachPod := func(selectorKey string, selectorValue string, fn func(api.Pod)) { // Reusable cluster state function. This won't be adversely affected by lazy initialization of framework.
f.NewClusterVerification( clusterState := func(selectorKey string, selectorValue string) *framework.ClusterVerification {
return f.NewClusterVerification(
framework.PodStateVerification{ framework.PodStateVerification{
Selectors: map[string]string{selectorKey: selectorValue}, Selectors: map[string]string{selectorKey: selectorValue},
ValidPhases: []api.PodPhase{api.PodRunning}, ValidPhases: []api.PodPhase{api.PodRunning},
}).ForEach(fn) })
}
// Customized ForEach wrapper for this test.
forEachPod := func(selectorKey string, selectorValue string, fn func(api.Pod)) {
clusterState(selectorKey, selectorValue).ForEach(fn)
} }
var c *client.Client var c *client.Client
var ns string var ns string
@ -182,11 +187,6 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
return filepath.Join(framework.TestContext.RepoRoot, "examples", "spark", file) return filepath.Join(framework.TestContext.RepoRoot, "examples", "spark", file)
} }
// Override test-generated namespace to be as specified in Spark example
ns = "spark-cluster"
namespaceYaml := mkpath("namespace-spark-cluster.yaml")
framework.RunKubectlOrDie("create", "-f", namespaceYaml)
// TODO: Add Zeppelin and Web UI to this example. // TODO: Add Zeppelin and Web UI to this example.
serviceYaml := mkpath("spark-master-service.yaml") serviceYaml := mkpath("spark-master-service.yaml")
masterYaml := mkpath("spark-master-controller.yaml") masterYaml := mkpath("spark-master-controller.yaml")
@ -197,14 +197,14 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
By("starting master") By("starting master")
framework.RunKubectlOrDie("create", "-f", serviceYaml, nsFlag) framework.RunKubectlOrDie("create", "-f", serviceYaml, nsFlag)
framework.RunKubectlOrDie("create", "-f", masterYaml, nsFlag) framework.RunKubectlOrDie("create", "-f", masterYaml, nsFlag)
label := labels.SelectorFromSet(labels.Set(map[string]string{"component": "spark-master"})) selectorKey, selectorValue := "component", "spark-master"
label := labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue}))
err := framework.WaitForPodsWithLabelRunning(c, ns, label) err := framework.WaitForPodsWithLabelRunning(c, ns, label)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
framework.Logf("Now polling for Master startup...") framework.Logf("Now polling for Master startup...")
// Only one master pod: But it's a natural way to look up pod names. // Only one master pod: But it's a natural way to look up pod names.
forEachPod("component", "spark-master", func(pod api.Pod) { forEachPod(selectorKey, selectorValue, func(pod api.Pod) {
framework.Logf("Now waiting for master to startup in %v", pod.Name) framework.Logf("Now waiting for master to startup in %v", pod.Name)
_, err := framework.LookForStringInLog(ns, pod.Name, "spark-master", "Starting Spark master at", serverStartTimeout) _, err := framework.LookForStringInLog(ns, pod.Name, "spark-master", "Starting Spark master at", serverStartTimeout)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
@ -213,7 +213,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
By("waiting for master endpoint") By("waiting for master endpoint")
err = framework.WaitForEndpoint(c, ns, "spark-master") err = framework.WaitForEndpoint(c, ns, "spark-master")
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
forEachPod("component", "spark-master", func(pod api.Pod) { forEachPod(selectorKey, selectorValue, func(pod api.Pod) {
_, maErr := framework.LookForStringInLog(f.Namespace.Name, pod.Name, "spark-master", "Starting Spark master at", serverStartTimeout) _, maErr := framework.LookForStringInLog(f.Namespace.Name, pod.Name, "spark-master", "Starting Spark master at", serverStartTimeout)
if maErr != nil { if maErr != nil {
framework.Failf("Didn't find target string. error:", maErr) framework.Failf("Didn't find target string. error:", maErr)
@ -224,7 +224,8 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
By("starting workers") By("starting workers")
framework.Logf("Now starting Workers") framework.Logf("Now starting Workers")
framework.RunKubectlOrDie("create", "-f", workerControllerYaml, nsFlag) framework.RunKubectlOrDie("create", "-f", workerControllerYaml, nsFlag)
label := labels.SelectorFromSet(labels.Set(map[string]string{"component": "spark-worker"})) selectorKey, selectorValue := "component", "spark-worker"
label := labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue}))
err := framework.WaitForPodsWithLabelRunning(c, ns, label) err := framework.WaitForPodsWithLabelRunning(c, ns, label)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
@ -232,9 +233,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
// framework.ScaleRC(c, ns, "spark-worker-controller", 2, true) // framework.ScaleRC(c, ns, "spark-worker-controller", 2, true)
framework.Logf("Now polling for worker startup...") framework.Logf("Now polling for worker startup...")
// ScaleRC(c, ns, "spark-worker-controller", 2, true) forEachPod(selectorKey, selectorValue,
framework.Logf("Now polling for worker startup...")
forEachPod("component", "spark-worker",
func(pod api.Pod) { func(pod api.Pod) {
_, slaveErr := framework.LookForStringInLog(ns, pod.Name, "spark-worker", "Successfully registered with master", serverStartTimeout) _, slaveErr := framework.LookForStringInLog(ns, pod.Name, "spark-worker", "Successfully registered with master", serverStartTimeout)
Expect(slaveErr).NotTo(HaveOccurred()) Expect(slaveErr).NotTo(HaveOccurred())