Merge pull request #5633 from satnam6502/network

Reduce pod usage for network e2e test
Vish Kannan 2015-03-19 08:43:29 -07:00
commit 53c3b16663
2 changed files with 66 additions and 49 deletions
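In short: instead of a ReplicationController with a fixed 8 replicas, the test now lists the cluster's nodes and creates exactly one nettest webserver pod per node, telling each pod how many peers to expect. Below is a condensed sketch of the new flow, stitched together from the hunks that follow. It is illustrative only: the real code is inline in the Ginkgo It block rather than a separate function, the wrapper name and return value are invented for this sketch, and Failf, Logf, Expect and waitForPodRunningInNamespace are the e2e helpers that appear in this commit (so the sketch only compiles inside that e2e test package).

package e2e

import (
    "fmt"

    "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
    "github.com/GoogleCloudPlatform/kubernetes/pkg/client"

    . "github.com/onsi/gomega"
)

// runNettestAcrossNodes (hypothetical name) creates one nettest webserver pod
// per node, pinned via Host, passes each pod the expected peer count, and
// waits until every pod is running before the caller starts polling /status.
func runNettestAcrossNodes(c *client.Client, ns, name string) []string {
    nodes, err := c.Nodes().List()
    if err != nil {
        Failf("Failed to list nodes: %v", err)
    }
    peers := len(nodes.Items)
    if peers == 0 {
        Failf("Failed to find any nodes")
    }

    podNames := []string{}
    for i, node := range nodes.Items {
        podName := fmt.Sprintf("%s-%d", name, i)
        podNames = append(podNames, podName)
        _, err := c.Pods(ns).Create(&api.Pod{
            ObjectMeta: api.ObjectMeta{Name: podName, Labels: map[string]string{"name": name}},
            Spec: api.PodSpec{
                Containers: []api.Container{{
                    Name:    "webserver",
                    Image:   "kubernetes/nettest:1.1",
                    Command: []string{"-service=" + name, fmt.Sprintf("-peers=%d", peers), "-namespace=" + ns},
                    Ports:   []api.ContainerPort{{ContainerPort: 8080}},
                }},
                Host:          node.Name,              // place this pod on this specific node
                RestartPolicy: api.RestartPolicyNever, // one-shot test pod; never restart it
            },
        })
        Expect(err).NotTo(HaveOccurred())
    }

    // Every webserver must be ready before the connectivity check is polled.
    for _, podName := range podNames {
        Expect(waitForPodRunningInNamespace(c, podName, ns)).NotTo(HaveOccurred())
    }
    return podNames
}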


@@ -23,7 +23,6 @@ import (
     "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/client"
-    "github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/util"
     . "github.com/onsi/ginkgo"
@@ -48,6 +47,16 @@ var _ = Describe("Networking", func() {
             return
         }

+        // Obtain a list of nodes so we can place one webserver container on each node.
+        nodes, err := c.Nodes().List()
+        if err != nil {
+            Failf("Failed to list nodes: %v", err)
+        }
+        peers := len(nodes.Items)
+        if peers == 0 {
+            Failf("Failed to find any nodes")
+        }
+
         // Test basic external connectivity.
         resp, err := http.Get("http://google.com/")
         if err != nil {
@@ -87,53 +96,54 @@ var _ = Describe("Networking", func() {
             }
         }()

-        By("Creating a replication controller")
-        rc, err := c.ReplicationControllers(ns).Create(&api.ReplicationController{
-            ObjectMeta: api.ObjectMeta{
-                Name: name,
-                Labels: map[string]string{
-                    "name": name,
-                },
-            },
-            Spec: api.ReplicationControllerSpec{
-                Replicas: 8,
-                Selector: map[string]string{
-                    "name": name,
-                },
-                Template: &api.PodTemplateSpec{
-                    ObjectMeta: api.ObjectMeta{
-                        Labels: map[string]string{"name": name},
-                    },
-                    Spec: api.PodSpec{
-                        Containers: []api.Container{
-                            {
-                                Name: "webserver",
-                                Image: "kubernetes/nettest:1.1",
-                                Command: []string{"-service=" + name, "-namespace=" + ns},
-                                Ports: []api.ContainerPort{{ContainerPort: 8080}},
-                            },
-                        },
-                    },
-                },
-            },
-        })
-        if err != nil {
-            Fail(fmt.Sprintf("unable to create test rc: %v", err))
-        }
-        // Clean up rc
+        By("Creating a webserver pod on each node")
+        podNames := []string{}
+        for i, node := range nodes.Items {
+            podName := fmt.Sprintf("%s-%d", name, i)
+            podNames = append(podNames, podName)
+            Logf("Creating pod %s on node %s", podName, node.Name)
+            _, err := c.Pods(ns).Create(&api.Pod{
+                ObjectMeta: api.ObjectMeta{
+                    Name: podName,
+                    Labels: map[string]string{
+                        "name": name,
+                    },
+                },
+                Spec: api.PodSpec{
+                    Containers: []api.Container{
+                        {
+                            Name: "webserver",
+                            Image: "kubernetes/nettest:1.1",
+                            Command: []string{
+                                "-service=" + name,
+                                fmt.Sprintf("-peers=%d", peers),
+                                "-namespace=" + ns},
+                            Ports: []api.ContainerPort{{ContainerPort: 8080}},
+                        },
+                    },
+                    Host: node.Name,
+                    RestartPolicy: api.RestartPolicyNever,
+                },
+            })
+            Expect(err).NotTo(HaveOccurred())
+        }
+        // Clean up the pods
         defer func() {
             defer GinkgoRecover()
-            By("Cleaning up the replication controller")
-            // Resize the replication controller to zero to get rid of pods.
-            rcReaper, err := kubectl.ReaperFor("ReplicationController", c)
-            if err != nil {
-                Fail(fmt.Sprintf("unable to stop rc %v: %v", rc.Name, err))
-            }
-            if _, err = rcReaper.Stop(ns, rc.Name); err != nil {
-                Fail(fmt.Sprintf("unable to stop rc %v: %v", rc.Name, err))
-            }
+            By("Cleaning up the webserver pods")
+            for _, podName := range podNames {
+                if err = c.Pods(ns).Delete(podName); err != nil {
+                    Logf("Failed to delete pod %s: %v", podName, err)
+                }
+            }
         }()

+        By("Wait for the webserver pods to be ready")
+        for _, podName := range podNames {
+            err = waitForPodRunningInNamespace(c, podName, ns)
+            Expect(err).NotTo(HaveOccurred())
+        }
+
         By("Waiting for connectivity to be verified")
         const maxAttempts = 60
         passed := false
@@ -148,22 +158,26 @@ var _ = Describe("Networking", func() {
                 Suffix("status").
                 Do().Raw()
             if err != nil {
-                fmt.Printf("Attempt %v/%v: service/pod still starting. (error: '%v')\n", i, maxAttempts, err)
+                Logf("Attempt %v/%v: service/pod still starting. (error: '%v')", i, maxAttempts, err)
                 continue
             }
             switch string(body) {
             case "pass":
-                fmt.Printf("Passed on attempt %v. Cleaning up.\n", i)
+                Logf("Passed on attempt %v. Cleaning up.", i)
                 passed = true
                 break
             case "running":
-                fmt.Printf("Attempt %v/%v: test still running\n", i, maxAttempts)
+                Logf("Attempt %v/%v: test still running", i, maxAttempts)
                 break
             case "fail":
-                if body, err = c.Get().Namespace(ns).Prefix("proxy").Resource("services").Name(svc.Name).Suffix("read").Do().Raw(); err != nil {
+                if body, err = c.Get().
+                    Namespace(ns).Prefix("proxy").
+                    Resource("services").
+                    Name(svc.Name).Suffix("read").
+                    Do().Raw(); err != nil {
                     Fail(fmt.Sprintf("Failed on attempt %v. Cleaning up. Error reading details: %v", i, err))
                 } else {
-                    Fail(fmt.Sprintf("Failed on attempt %v. Cleaning up. Details:\n%v", i, string(body)))
+                    Fail(fmt.Sprintf("Failed on attempt %v. Cleaning up. Details:\n%s", i, string(body)))
                 }
                 break
             }
@@ -179,7 +193,7 @@ var _ = Describe("Networking", func() {
                 Do().Raw(); err != nil {
                 Fail(fmt.Sprintf("Timed out. Cleaning up. Error reading details: %v", err))
             } else {
-                Fail(fmt.Sprintf("Timed out. Cleaning up. Details:\n%v", string(body)))
+                Fail(fmt.Sprintf("Timed out. Cleaning up. Details:\n%s", string(body)))
             }
         }
         Expect(string(body)).To(Equal("pass"))
@@ -198,8 +212,7 @@ var _ = Describe("Networking", func() {
             data, err := c.RESTClient.Get().
                 Namespace(ns).
                 AbsPath(test.path).
-                Do().
-                Raw()
+                Do().Raw()
             if err != nil {
                 Fail(fmt.Sprintf("Failed: %v\nBody: %s", err, string(data)))
             }
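The polling loop above relies on the nettest webserver's status contract: each pod serves /status on port 8080 and, once it has heard from the expected number of peers (which appears to be what the -peers flag above controls), reports "running", "pass" or "fail". Below is a minimal, standalone sketch of that contract using only the standard library; the e2e test itself goes through the apiserver's service proxy rather than hitting a pod directly, and the URL and retry timing here are illustrative.

package main

import (
    "fmt"
    "io/ioutil"
    "net/http"
    "time"
)

func main() {
    // Hypothetical address of one nettest webserver pod.
    const statusURL = "http://localhost:8080/status"
    for attempt := 1; attempt <= 60; attempt++ {
        resp, err := http.Get(statusURL)
        if err != nil {
            fmt.Printf("attempt %d: webserver still starting: %v\n", attempt, err)
            time.Sleep(2 * time.Second)
            continue
        }
        body, _ := ioutil.ReadAll(resp.Body)
        resp.Body.Close()
        switch string(body) {
        case "pass":
            fmt.Printf("passed on attempt %d\n", attempt)
            return
        case "fail":
            fmt.Printf("failed on attempt %d\n", attempt)
            return
        default: // "running": not all peers have reported in yet
            fmt.Printf("attempt %d: test still running\n", attempt)
            time.Sleep(2 * time.Second)
        }
    }
    fmt.Println("timed out waiting for nettest to report a result")
}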


@@ -71,17 +71,21 @@ func waitForPodCondition(c *client.Client, ns, podName, desc string, condition p
         if done {
             return err
         }
-        Logf("Waiting for pod %s status to be %q (found %q) (%.2f seconds)", podName, desc, pod.Status.Phase, time.Since(start).Seconds())
+        Logf("Waiting for pod %s in namespace %s status to be %q (found %q) (%v)", podName, ns, desc, pod.Status.Phase, time.Since(start))
     }
     return fmt.Errorf("gave up waiting for pod %s to be %s after %.2f seconds", podName, desc, podStartTimeout.Seconds())
 }

-func waitForPodRunning(c *client.Client, podName string) error {
-    return waitForPodCondition(c, api.NamespaceDefault, podName, "running", func(pod *api.Pod) (bool, error) {
+func waitForPodRunningInNamespace(c *client.Client, podName string, namespace string) error {
+    return waitForPodCondition(c, namespace, podName, "running", func(pod *api.Pod) (bool, error) {
         return (pod.Status.Phase == api.PodRunning), nil
     })
 }

+func waitForPodRunning(c *client.Client, podName string) error {
+    return waitForPodRunningInNamespace(c, podName, api.NamespaceDefault)
+}
+
 // waitForPodNotPending returns an error if it took too long for the pod to go out of pending state.
 func waitForPodNotPending(c *client.Client, ns, podName string) error {
     return waitForPodCondition(c, ns, podName, "!pending", func(pod *api.Pod) (bool, error) {
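The utility change is backward compatible: the namespace becomes an explicit parameter of waitForPodRunningInNamespace, and the old waitForPodRunning keeps its signature by delegating with api.NamespaceDefault, so existing callers are untouched while the networking test above waits on pods in its own per-test namespace.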