diff --git a/test/e2e/density.go b/test/e2e/density.go
index a89e07194c4..ea2c780cfde 100644
--- a/test/e2e/density.go
+++ b/test/e2e/density.go
@@ -93,7 +93,7 @@ var _ = Describe("Density", func() {
 		// TODO: We should reset metrics before the test. Currently previous tests influence latency metrics.
 		highLatencyRequests, err := HighLatencyRequests(c, 3*time.Second, util.NewStringSet("events"))
 		expectNoError(err)
-		Expect(highLatencyRequests).NotTo(BeNumerically(">", 0))
+		Expect(highLatencyRequests).NotTo(BeNumerically(">", 0), "There should be no high-latency requests")
 	})
 
 	// Tests with "Skipped" substring in their name will be skipped when running
diff --git a/test/e2e/pre_stop.go b/test/e2e/pre_stop.go
index 43af2355f00..5d49573513d 100644
--- a/test/e2e/pre_stop.go
+++ b/test/e2e/pre_stop.go
@@ -125,6 +125,7 @@ func testPreStop(c *client.Client, ns string) {
 			DoRaw(); err != nil {
 			By(fmt.Sprintf("Error validating prestop: %v", err))
 		} else {
+			Logf("Saw: %s", string(body))
 			state := State{}
 			err := json.Unmarshal(body, &state)
 			if err != nil {
@@ -134,7 +135,6 @@ func testPreStop(c *client.Client, ns string) {
 			if state.Received["prestop"] != 0 {
 				return true, nil
 			}
-			Logf("Saw: %s", string(body))
 		}
 		return false, nil
 	})
diff --git a/test/e2e/resize_nodes.go b/test/e2e/resize_nodes.go
index 57deeb87c03..0acc26e502f 100644
--- a/test/e2e/resize_nodes.go
+++ b/test/e2e/resize_nodes.go
@@ -274,7 +274,7 @@ var _ = Describe("Nodes", func() {
 			Logf("starting test %s", testName)
 
 			if testContext.CloudConfig.NumNodes < 2 {
-				Failf("Failing test %s as it requires at lease 2 nodes (not %d)", testName, testContext.CloudConfig.NumNodes)
+				Failf("Failing test %s as it requires at least 2 nodes (not %d)", testName, testContext.CloudConfig.NumNodes)
 				return
 			}
 
@@ -301,10 +301,11 @@ var _ = Describe("Nodes", func() {
 
 		testName = "should be able to add nodes."
 		It(testName, func() {
+			// TODO: Bug here - testName is not correct
 			Logf("starting test %s", testName)
 
 			if testContext.CloudConfig.NumNodes < 2 {
-				Failf("Failing test %s as it requires at lease 2 nodes (not %d)", testName, testContext.CloudConfig.NumNodes)
+				Failf("Failing test %s as it requires at least 2 nodes (not %d)", testName, testContext.CloudConfig.NumNodes)
 				return
 			}
 
@@ -344,7 +345,7 @@ var _ = Describe("Nodes", func() {
 		testName = "should survive network partition."
 		It(testName, func() {
 			if testContext.CloudConfig.NumNodes < 2 {
-				By(fmt.Sprintf("skipping %s test, which requires at lease 2 nodes (not %d)",
+				By(fmt.Sprintf("skipping %s test, which requires at least 2 nodes (not %d)",
 					testName, testContext.CloudConfig.NumNodes))
 				return
 			}
@@ -356,7 +357,7 @@ var _ = Describe("Nodes", func() {
 			replicas := testContext.CloudConfig.NumNodes
 			createServeHostnameReplicationController(c, ns, name, replicas)
 			err := waitForPodsCreatedRunningResponding(c, ns, name, replicas)
-			Expect(err).NotTo(HaveOccurred())
+			Expect(err).NotTo(HaveOccurred(), "Each pod should start running and responding")
 
 			By("cause network partition on one node")
 			nodelist, err := c.Nodes().List(labels.Everything(), fields.Everything())
@@ -389,6 +390,7 @@ var _ = Describe("Nodes", func() {
 			err = waitForPodsCreatedRunningResponding(c, ns, name, replicas)
 			Expect(err).NotTo(HaveOccurred())
 
+			// TODO: We should do this cleanup even if one of the above fails
 			By("remove network partition")
 			undropCmd := "sudo iptables --delete OUTPUT 1"
 			if _, _, code, err := SSH(undropCmd, host, testContext.Provider); code != 0 || err != nil {
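
Note on the "// TODO: Bug here - testName is not correct" comment added in resize_nodes.go: Ginkgo builds the spec tree first and runs the It bodies later, so the shared testName variable, which is reassigned between It registrations, already holds its last value by the time any closure executes. A minimal self-contained sketch of that capture behavior (plain Go, no Ginkgo; all names here are illustrative):

	package main

	import "fmt"

	func main() {
		// Mimics the Describe/It pattern: closures are collected first
		// and executed later, like Ginkgo spec bodies.
		var specs []func()

		testName := "should be able to delete nodes."
		specs = append(specs, func() { fmt.Println("starting test", testName) })

		testName = "should be able to add nodes."
		specs = append(specs, func() { fmt.Println("starting test", testName) })

		// Both closures capture the same variable, so both print
		// "starting test should be able to add nodes."
		for _, spec := range specs {
			spec()
		}

		// One possible fix: shadow the variable so each closure
		// captures its own copy.
		var fixed []func()
		for _, n := range []string{"delete nodes", "add nodes"} {
			n := n // fresh variable per iteration
			fixed = append(fixed, func() { fmt.Println("starting test", n) })
		}
		for _, spec := range fixed {
			spec()
		}
	}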
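
Note on the final TODO: if any Expect between the iptables drop and the undrop fails, the "sudo iptables --delete OUTPUT 1" cleanup never runs and the node stays partitioned. One way to make the cleanup unconditional is to register it with defer right after the partition is created, since Ginkgo reports failures by panicking out of the It body and deferred calls still run during the unwind. A sketch only, not part of this diff: dropCmd and masterAddr are illustrative, while SSH, By, Logf, and testContext are the e2e helpers already used above.

	By("cause network partition on one node")
	// dropCmd is hypothetical; the actual rule used by this test may differ.
	dropCmd := fmt.Sprintf("sudo iptables -I OUTPUT 1 -d %s -j DROP", masterAddr)
	if _, _, code, err := SSH(dropCmd, host, testContext.Provider); code != 0 || err != nil {
		Failf("Expected 0 exit code and nil error when running %s: got code %d, err %v", dropCmd, code, err)
	}
	defer func() {
		// Runs even if a later Expect fails, because the failure unwinds
		// the It body as a panic and deferred calls still execute.
		By("remove network partition")
		undropCmd := "sudo iptables --delete OUTPUT 1"
		if _, _, code, err := SSH(undropCmd, host, testContext.Provider); code != 0 || err != nil {
			// Logf rather than Failf here: panicking again while the
			// original failure is unwinding would mask that failure.
			Logf("Error removing network partition with %s: code %d, err %v", undropCmd, code, err)
		}
	}()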