Mirror of https://github.com/k3s-io/kubernetes.git
e2e test polishing
Some typo fixes; some clearer messages on failed assertions; some TODOs on deeper problems.
commit 3912ab29d8
parent 9f60f3ce44
@@ -93,7 +93,7 @@ var _ = Describe("Density", func() {
 		// TODO: We should reset metrics before the test. Currently previous tests influence latency metrics.
 		highLatencyRequests, err := HighLatencyRequests(c, 3*time.Second, util.NewStringSet("events"))
 		expectNoError(err)
-		Expect(highLatencyRequests).NotTo(BeNumerically(">", 0))
+		Expect(highLatencyRequests).NotTo(BeNumerically(">", 0), "There should be no high-latency requests")
 	})

 	// Tests with "Skipped" substring in their name will be skipped when running
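The assertion change above leans on Gomega's optional description argument: a trailing string passed to Expect(...).NotTo(...) is prepended to the failure message, so a failing run explains why the number matters instead of only printing the matcher text. A minimal, self-contained sketch of that behavior; the count value and test name are made up, and it uses the current Gomega module path rather than the copy vendored in this tree:

package example

import (
	"testing"

	. "github.com/onsi/gomega"
)

func TestHighLatencyAnnotation(t *testing.T) {
	g := NewWithT(t)
	highLatencyRequests := 0 // hypothetical count; set it above 0 to see the annotated failure

	// The extra string argument is an annotation that Gomega prepends to the
	// failure output, which is exactly what the diff hunk above adds.
	g.Expect(highLatencyRequests).NotTo(BeNumerically(">", 0),
		"There should be no high-latency requests")
}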
@@ -125,6 +125,7 @@ func testPreStop(c *client.Client, ns string) {
 			DoRaw(); err != nil {
 			By(fmt.Sprintf("Error validating prestop: %v", err))
 		} else {
+			Logf("Saw: %s", string(body))
 			state := State{}
 			err := json.Unmarshal(body, &state)
 			if err != nil {
@@ -134,7 +135,6 @@ func testPreStop(c *client.Client, ns string) {
 			if state.Received["prestop"] != 0 {
 				return true, nil
 			}
-			Logf("Saw: %s", string(body))
 		}
 		return false, nil
 	})
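For context, the testPreStop body shown in these two hunks sits inside a polling closure that returns (bool, error); moving the Logf above the unmarshal means the raw status body is logged on every attempt, not only when "prestop" has not been seen yet. A rough sketch of that polling contract, using the present-day wait.Poll helper and made-up names (checkPrestop, fetchState) rather than the suite's actual plumbing:

package example

import (
	"encoding/json"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// State mirrors the shape unmarshalled in the test: a map of event names to counts.
type State struct {
	Received map[string]int `json:"received"`
}

// checkPrestop polls until the prestop hook has been observed at least once.
// Returning (true, nil) stops the poll as success, (false, nil) retries, and
// a non-nil error aborts immediately.
func checkPrestop(fetchState func() ([]byte, error)) error {
	return wait.Poll(time.Second, 30*time.Second, func() (bool, error) {
		body, err := fetchState()
		if err != nil {
			return false, nil // treat fetch errors as transient and keep polling
		}
		fmt.Printf("Saw: %s\n", string(body)) // log every attempt, as the commit now does
		state := State{}
		if err := json.Unmarshal(body, &state); err != nil {
			return false, nil
		}
		return state.Received["prestop"] != 0, nil
	})
}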
@@ -274,7 +274,7 @@ var _ = Describe("Nodes", func() {
 		Logf("starting test %s", testName)

 		if testContext.CloudConfig.NumNodes < 2 {
-			Failf("Failing test %s as it requires at lease 2 nodes (not %d)", testName, testContext.CloudConfig.NumNodes)
+			Failf("Failing test %s as it requires at least 2 nodes (not %d)", testName, testContext.CloudConfig.NumNodes)
 			return
 		}

@@ -301,10 +301,11 @@ var _ = Describe("Nodes", func() {

 	testName = "should be able to add nodes."
 	It(testName, func() {
+		// TODO: Bug here - testName is not correct
 		Logf("starting test %s", testName)

 		if testContext.CloudConfig.NumNodes < 2 {
-			Failf("Failing test %s as it requires at lease 2 nodes (not %d)", testName, testContext.CloudConfig.NumNodes)
+			Failf("Failing test %s as it requires at least 2 nodes (not %d)", testName, testContext.CloudConfig.NumNodes)
 			return
 		}

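The new TODO above points at a classic Go/Ginkgo pitfall: testName is a single variable shared by several specs, and the It bodies are closures that Ginkgo runs only after the whole container has been built, so by then testName holds whatever was assigned last and the Logf/Failf messages report the wrong spec. A small sketch of the failure mode and one conventional fix (capturing a per-spec copy); the spec names here are illustrative, not the ones in this file:

package example

import (
	"fmt"

	. "github.com/onsi/ginkgo"
)

var _ = Describe("testName capture (sketch)", func() {
	testName := "first spec"
	It(testName, func() {
		// Bug pattern: this body runs long after the variable was reassigned
		// below, so it logs "second spec" instead of "first spec".
		fmt.Printf("starting test %s\n", testName)
	})

	testName = "second spec"
	name := testName // capture the value at registration time
	It(name, func() {
		fmt.Printf("starting test %s\n", name) // always logs the right name
	})
})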
@@ -344,7 +345,7 @@ var _ = Describe("Nodes", func() {
 	testName = "should survive network partition."
 	It(testName, func() {
 		if testContext.CloudConfig.NumNodes < 2 {
-			By(fmt.Sprintf("skipping %s test, which requires at lease 2 nodes (not %d)",
+			By(fmt.Sprintf("skipping %s test, which requires at least 2 nodes (not %d)",
 				testName, testContext.CloudConfig.NumNodes))
 			return
 		}
@@ -356,7 +357,7 @@ var _ = Describe("Nodes", func() {
 		replicas := testContext.CloudConfig.NumNodes
 		createServeHostnameReplicationController(c, ns, name, replicas)
 		err := waitForPodsCreatedRunningResponding(c, ns, name, replicas)
-		Expect(err).NotTo(HaveOccurred())
+		Expect(err).NotTo(HaveOccurred(), "Each pod should start running and responding")

 		By("cause network partition on one node")
 		nodelist, err := c.Nodes().List(labels.Everything(), fields.Everything())
@@ -389,6 +390,7 @@ var _ = Describe("Nodes", func() {
 		err = waitForPodsCreatedRunningResponding(c, ns, name, replicas)
 		Expect(err).NotTo(HaveOccurred())

+		// TODO: We should do this cleanup even if one of the above fails
 		By("remove network partition")
 		undropCmd := "sudo iptables --delete OUTPUT 1"
 		if _, _, code, err := SSH(undropCmd, host, testContext.Provider); code != 0 || err != nil {
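The last TODO above asks for the iptables cleanup to run even when an earlier assertion fails. One common way to get that is to register the undrop command with defer as soon as the drop succeeds, since a failed Ginkgo assertion unwinds the It body and deferred calls still run. A hedged sketch of that shape, not the suite's actual helpers: sshRun stands in for the real SSH helper and the drop rule shown is illustrative.

package example

import "fmt"

// sshRun is a placeholder for the e2e suite's SSH helper; it only prints here.
func sshRun(cmd, host string) error {
	fmt.Printf("ssh %s: %q\n", host, cmd)
	return nil
}

// partitionAndTest drops traffic on host, runs the supplied work, and removes
// the rule again even if that work fails or panics.
func partitionAndTest(host string, work func() error) error {
	dropCmd := "sudo iptables --insert OUTPUT 1 --destination 10.0.0.1 --jump DROP" // illustrative rule
	if err := sshRun(dropCmd, host); err != nil {
		return err
	}
	// Cleanup is registered immediately instead of being left until the end of
	// the test, so it survives a failure in the middle.
	defer func() {
		undropCmd := "sudo iptables --delete OUTPUT 1"
		if cleanupErr := sshRun(undropCmd, host); cleanupErr != nil {
			fmt.Printf("failed to remove network partition on %s: %v\n", host, cleanupErr)
		}
	}()

	// Waits and assertions that may fail go here; the deferred cleanup still runs.
	return work()
}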