e2e test polishing

Some typo fixes; some clearer messages on failed assertions;
some TODOs on deeper problems.
Justin Santa Barbara 2015-06-16 21:15:11 -04:00
parent 9f60f3ce44
commit 3912ab29d8
3 changed files with 8 additions and 6 deletions


@ -93,7 +93,7 @@ var _ = Describe("Density", func() {
// TODO: We should reset metrics before the test. Currently previous tests influence latency metrics.
highLatencyRequests, err := HighLatencyRequests(c, 3*time.Second, util.NewStringSet("events"))
expectNoError(err)
Expect(highLatencyRequests).NotTo(BeNumerically(">", 0))
Expect(highLatencyRequests).NotTo(BeNumerically(">", 0), "There should be no high-latency requests")
})
// Tests with "Skipped" substring in their name will be skipped when running


@@ -125,6 +125,7 @@ func testPreStop(c *client.Client, ns string) {
DoRaw(); err != nil {
By(fmt.Sprintf("Error validating prestop: %v", err))
} else {
Logf("Saw: %s", string(body))
state := State{}
err := json.Unmarshal(body, &state)
if err != nil {
@@ -134,7 +135,6 @@ func testPreStop(c *client.Client, ns string) {
if state.Received["prestop"] != 0 {
return true, nil
}
Logf("Saw: %s", string(body))
}
return false, nil
})
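
The testPreStop change above moves the Logf call ahead of json.Unmarshal, so the raw response body lands in the log even when decoding fails or the prestop count is still zero. A rough sketch of the same log-before-parse pattern with made-up names; the real helper's polling loop and error handling are simplified away:

package e2e

import (
	"encoding/json"
	"log"
)

// State mirrors the struct the test decodes into (only the field used here).
type State struct {
	Received map[string]int
}

// sawPreStop reports whether the prestop hook has been recorded yet.
func sawPreStop(body []byte) (bool, error) {
	// Log the raw payload before decoding, so a malformed or empty response
	// is still visible in the test output even if Unmarshal fails below.
	log.Printf("Saw: %s", string(body))
	state := State{}
	if err := json.Unmarshal(body, &state); err != nil {
		return false, nil // keep polling; the body is already in the log
	}
	return state.Received["prestop"] != 0, nil
}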


@ -274,7 +274,7 @@ var _ = Describe("Nodes", func() {
Logf("starting test %s", testName)
if testContext.CloudConfig.NumNodes < 2 {
Failf("Failing test %s as it requires at lease 2 nodes (not %d)", testName, testContext.CloudConfig.NumNodes)
Failf("Failing test %s as it requires at least 2 nodes (not %d)", testName, testContext.CloudConfig.NumNodes)
return
}
@ -301,10 +301,11 @@ var _ = Describe("Nodes", func() {
testName = "should be able to add nodes."
It(testName, func() {
// TODO: Bug here - testName is not correct
Logf("starting test %s", testName)
if testContext.CloudConfig.NumNodes < 2 {
Failf("Failing test %s as it requires at lease 2 nodes (not %d)", testName, testContext.CloudConfig.NumNodes)
Failf("Failing test %s as it requires at least 2 nodes (not %d)", testName, testContext.CloudConfig.NumNodes)
return
}
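
The new TODO points at a real problem: testName is a single shared variable that later registrations overwrite, so by the time Ginkgo executes a spec body, Logf and Failf report whichever name was assigned last. A small self-contained illustration of the capture bug and one possible fix; the names are invented and Logf is stubbed with log.Printf:

package e2e

import (
	"log"

	. "github.com/onsi/ginkgo"
)

// Logf stands in for the e2e framework's logger.
func Logf(format string, args ...interface{}) { log.Printf(format, args...) }

var _ = Describe("Nodes (buggy illustration)", func() {
	var testName string

	testName = "should be able to delete nodes."
	It(testName, func() {
		// Bug: by the time this body runs, testName has already been
		// overwritten below, so this logs the wrong name.
		Logf("starting test %s", testName)
	})

	testName = "should be able to add nodes."
	It(testName, func() {
		Logf("starting test %s", testName)
	})
})

// One fix: capture a per-spec copy so each closure keeps its own name.
var _ = Describe("Nodes (fixed illustration)", func() {
	for _, name := range []string{
		"should be able to delete nodes.",
		"should be able to add nodes.",
	} {
		name := name // shadow: each iteration gets its own variable
		It(name, func() { Logf("starting test %s", name) })
	}
})
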
@ -344,7 +345,7 @@ var _ = Describe("Nodes", func() {
testName = "should survive network partition."
It(testName, func() {
if testContext.CloudConfig.NumNodes < 2 {
By(fmt.Sprintf("skipping %s test, which requires at lease 2 nodes (not %d)",
By(fmt.Sprintf("skipping %s test, which requires at least 2 nodes (not %d)",
testName, testContext.CloudConfig.NumNodes))
return
}
@ -356,7 +357,7 @@ var _ = Describe("Nodes", func() {
replicas := testContext.CloudConfig.NumNodes
createServeHostnameReplicationController(c, ns, name, replicas)
err := waitForPodsCreatedRunningResponding(c, ns, name, replicas)
Expect(err).NotTo(HaveOccurred())
Expect(err).NotTo(HaveOccurred(), "Each pod should start running and responding")
By("cause network partition on one node")
nodelist, err := c.Nodes().List(labels.Everything(), fields.Everything())
@ -389,6 +390,7 @@ var _ = Describe("Nodes", func() {
err = waitForPodsCreatedRunningResponding(c, ns, name, replicas)
Expect(err).NotTo(HaveOccurred())
// TODO: We should do this cleanup even if one of the above fails
By("remove network partition")
undropCmd := "sudo iptables --delete OUTPUT 1"
if _, _, code, err := SSH(undropCmd, host, testContext.Provider); code != 0 || err != nil {
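
On the cleanup TODO: one option is to register the undrop step with defer right after the partition is created, since Ginkgo failures panic and deferred functions still run. The fragment below is only a sketch meant to sit inside the existing It body; it reuses the SSH, By, Logf, and testContext names already visible in this diff, and guards against deleting the iptables rule twice:

	// Hypothetical sketch for the TODO above: register the cleanup with
	// defer as soon as the partition is created, so the iptables rule is
	// removed even if a later Expect or Failf panics out of this It body.
	undropCmd := "sudo iptables --delete OUTPUT 1"
	partitioned := true
	defer func() {
		if !partitioned {
			return // the normal flow already removed the rule
		}
		By("remove network partition (deferred cleanup)")
		if _, _, code, err := SSH(undropCmd, host, testContext.Provider); code != 0 || err != nil {
			Logf("Failed to remove network partition: error %v, code %d", err, code)
		}
	}()

	// ... existing test steps ...

	// Once the in-line "remove network partition" step has succeeded:
	partitioned = false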