Merge pull request #10745 from justinsb/e2e_fix_resize_nodes

e2e fix resize nodes
Brendan Burns 2015-07-28 15:57:10 -07:00
commit 54eb143342


@@ -38,9 +38,12 @@ import (
const serveHostnameImage = "gcr.io/google_containers/serve_hostname:1.1"
+const resizeNodeReadyTimeout = 2 * time.Minute
+const resizeNodeNotReadyTimeout = 2 * time.Minute
func resizeGroup(size int) error {
if testContext.Provider == "gce" || testContext.Provider == "gke" {
-// TODO: make this hit the compute API directly instread of shelling out to gcloud.
+// TODO: make this hit the compute API directly instead of shelling out to gcloud.
// TODO: make gce/gke implement InstanceGroups, so we can eliminate the per-provider logic
output, err := exec.Command("gcloud", "preview", "managed-instance-groups", "--project="+testContext.CloudConfig.ProjectID, "--zone="+testContext.CloudConfig.Zone,
"resize", testContext.CloudConfig.NodeInstanceGroup, fmt.Sprintf("--new-size=%v", size)).CombinedOutput()
@@ -60,7 +63,7 @@ func resizeGroup(size int) error {
func groupSize() (int, error) {
if testContext.Provider == "gce" || testContext.Provider == "gke" {
-// TODO: make this hit the compute API directly instread of shelling out to gcloud.
+// TODO: make this hit the compute API directly instead of shelling out to gcloud.
// TODO: make gce/gke implement InstanceGroups, so we can eliminate the per-provider logic
output, err := exec.Command("gcloud", "preview", "managed-instance-groups", "--project="+testContext.CloudConfig.ProjectID,
"--zone="+testContext.CloudConfig.Zone, "describe", testContext.CloudConfig.NodeInstanceGroup).CombinedOutput()
@@ -101,7 +104,8 @@ func groupSize() (int, error) {
}
func waitForGroupSize(size int) error {
-for start := time.Now(); time.Since(start) < 4*time.Minute; time.Sleep(5 * time.Second) {
+timeout := 4 * time.Minute
+for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
currentSize, err := groupSize()
if err != nil {
Logf("Failed to get node instance group size: %v", err)
@@ -114,7 +118,7 @@ func waitForGroupSize(size int) error {
Logf("Node instance group has reached the desired size %d", size)
return nil
}
return fmt.Errorf("timeout waiting for node instance group size to be %d", size)
return fmt.Errorf("timeout waiting %v for node instance group size to be %d", timeout, size)
}
func waitForClusterSize(c *client.Client, size int) error {
@@ -136,7 +140,7 @@ func waitForClusterSize(c *client.Client, size int) error {
}
Logf("Waiting for cluster size %d, current size %d", size, len(nodes.Items))
}
return fmt.Errorf("timeout waiting for cluster size to be %d", size)
return fmt.Errorf("timeout waiting %v for cluster size to be %d", timeout, size)
}
func svcByName(name string) *api.Service {
@@ -254,9 +258,10 @@ func resizeRC(c *client.Client, ns, name string, replicas int) error {
}
func podsCreated(c *client.Client, ns, name string, replicas int) (*api.PodList, error) {
+timeout := time.Minute
// List the pods, making sure we observe all the replicas.
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
-for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) {
+for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
pods, err := c.Pods(ns).List(label, fields.Everything())
if err != nil {
return nil, err
@@ -267,7 +272,7 @@ func podsCreated(c *client.Client, ns, name string, replicas int) (*api.PodList,
return pods, nil
}
}
return nil, fmt.Errorf("Pod name %s: Gave up waiting for %d pods to come up", name, replicas)
return nil, fmt.Errorf("Pod name %s: Gave up waiting %v for %d pods to come up", name, timeout, replicas)
}
func podsRunning(c *client.Client, pods *api.PodList) []error {
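The recurring change in the hunks above is to hoist each poll loop's deadline into a named timeout variable, so the loop condition and the "timeout waiting ..." error can never report different values. A small self-contained sketch of that pattern, using a stand-in condition rather than the real e2e helpers:

package main

import (
	"fmt"
	"time"
)

// waitFor polls check every interval until it succeeds or timeout elapses.
// Keeping the deadline in one variable means the failure message always
// matches the duration the loop actually waited.
func waitFor(timeout, interval time.Duration, check func() (bool, error)) error {
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(interval) {
		ok, err := check()
		if err != nil {
			fmt.Printf("check failed, retrying: %v\n", err)
			continue
		}
		if ok {
			return nil
		}
	}
	return fmt.Errorf("timeout waiting %v for condition", timeout)
}

func main() {
	deadline := time.Now().Add(2 * time.Second)
	err := waitFor(5*time.Second, 500*time.Millisecond, func() (bool, error) {
		return time.Now().After(deadline), nil // stand-in for groupSize()/cluster checks
	})
	fmt.Println("result:", err)
}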
@@ -375,10 +380,15 @@ func performTemporaryNetworkFailure(c *client.Client, ns, rcName string, replica
}
}()
Logf("Waiting %v to ensure node %s is ready before beginning test...", resizeNodeReadyTimeout, node.Name)
if !waitForNodeToBe(c, node.Name, true, resizeNodeReadyTimeout) {
Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
}
// The command will block all outgoing network traffic from the node to the master
// When multi-master is implemented, this test will have to be improved to block
// network traffic to all masters.
-// We could also block network traffic from the master(s)s to this node,
+// We could also block network traffic from the master(s) to this node,
// but blocking it one way is sufficient for this test.
dropCmd := fmt.Sprintf("sudo iptables --insert %s", iptablesRule)
if _, _, code, err := SSH(dropCmd, host, testContext.Provider); code != 0 || err != nil {
@@ -386,17 +396,20 @@ func performTemporaryNetworkFailure(c *client.Client, ns, rcName string, replica
dropCmd, node.Name, code, err)
}
Logf("Waiting for node %s to be not ready", node.Name)
waitForNodeToBe(c, node.Name, false, 2*time.Minute)
Logf("Waiting %v for node %s to be not ready after simulated network failure", resizeNodeNotReadyTimeout, node.Name)
if !waitForNodeToBe(c, node.Name, false, resizeNodeNotReadyTimeout) {
Failf("Node %s did not become not-ready within %v", node.Name, resizeNodeNotReadyTimeout)
}
Logf("Waiting for pod %s to be removed", podNameToDisappear)
-waitForRCPodToDisappear(c, ns, rcName, podNameToDisappear)
-By("verifying whether the pod from the unreachable node is recreated")
-err := verifyPods(c, ns, rcName, true, replicas)
+err := waitForRCPodToDisappear(c, ns, rcName, podNameToDisappear)
Expect(err).NotTo(HaveOccurred())
-// network traffic is unblocked in a defered function
+By("verifying whether the pod from the unreachable node is recreated")
+err = verifyPods(c, ns, rcName, true, replicas)
+Expect(err).NotTo(HaveOccurred())
+// network traffic is unblocked in a deferred function
}
var _ = Describe("Nodes", func() {
@@ -452,7 +465,7 @@ var _ = Describe("Nodes", func() {
It("should be able to delete nodes", func() {
// Create a replication controller for a service that serves its hostname.
-// The source for the Docker containter kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
+// The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
name := "my-hostname-delete-node"
replicas := testContext.CloudConfig.NumNodes
newRCByName(c, ns, name, replicas)
@@ -475,7 +488,7 @@ var _ = Describe("Nodes", func() {
// TODO: Bug here - testName is not correct
It("should be able to add nodes", func() {
// Create a replication controller for a service that serves its hostname.
-// The source for the Docker containter kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
+// The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
name := "my-hostname-add-node"
newSVCByName(c, ns, name)
replicas := testContext.CloudConfig.NumNodes
@@ -516,7 +529,7 @@ var _ = Describe("Nodes", func() {
"AND allows scheduling of pods on a minion after it rejoins the cluster", func() {
// Create a replication controller for a service that serves its hostname.
-// The source for the Docker containter kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
+// The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
name := "my-hostname-net"
newSVCByName(c, ns, name)
replicas := testContext.CloudConfig.NumNodes
@@ -535,8 +548,10 @@ var _ = Describe("Nodes", func() {
By(fmt.Sprintf("block network traffic from node %s", node.Name))
performTemporaryNetworkFailure(c, ns, name, replicas, pods.Items[0].Name, node)
Logf("Waiting for node %s to be ready", node.Name)
waitForNodeToBe(c, node.Name, true, 2*time.Minute)
Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
if !waitForNodeToBe(c, node.Name, true, resizeNodeReadyTimeout) {
Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
}
By("verify whether new pods can be created on the re-attached node")
// increasing the RC size is not a valid way to test this
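The other recurring fix in this diff: waitForNodeToBe returns a bool, and the old call sites discarded it, so a node that never reached the expected state only slowed the test down instead of failing it. A minimal sketch of the check-and-fail shape the new call sites use, with hypothetical helpers standing in for the e2e framework:

package main

import (
	"fmt"
	"os"
	"time"
)

// waitForCondition stands in for waitForNodeToBe: it reports whether the
// condition was met before the timeout instead of swallowing the outcome.
func waitForCondition(timeout time.Duration, cond func() bool) bool {
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(100 * time.Millisecond) {
		if cond() {
			return true
		}
	}
	return false
}

func main() {
	const readyTimeout = 2 * time.Minute     // mirrors resizeNodeReadyTimeout
	nodeReady := func() bool { return true } // hypothetical readiness probe

	// The fix: act on the boolean instead of ignoring it.
	if !waitForCondition(readyTimeout, nodeReady) {
		fmt.Printf("node did not become ready within %v\n", readyTimeout)
		os.Exit(1) // the e2e test calls Failf here
	}
	fmt.Println("node ready; continuing test")
}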