Cleanup more extensively in e2e

Clayton Coleman 2015-08-19 11:35:00 -04:00
parent a927791293
commit 611530889f
9 changed files with 74 additions and 38 deletions

View File

@@ -158,7 +158,7 @@ func validateDNSResults(f *Framework, pod *api.Pod, fileNames []string) {
 	defer func() {
 		By("deleting the pod")
 		defer GinkgoRecover()
-		podClient.Delete(pod.Name, nil)
+		podClient.Delete(pod.Name, api.NewDeleteOptions(0))
 	}()
 	if _, err := podClient.Create(pod); err != nil {
 		Failf("Failed to create %s pod: %v", pod.Name, err)

View File

@@ -138,7 +138,7 @@ func checkExistingRCRecovers(f Framework) {
 		pods, err := podClient.List(rcSelector, fields.Everything())
 		Expect(err).NotTo(HaveOccurred())
 		for _, pod := range pods.Items {
-			if api.IsPodReady(&pod) {
+			if pod.DeletionTimestamp == nil && api.IsPodReady(&pod) {
 				return true, nil
 			}
 		}
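Note added for context: several hunks in this commit add the same guard. A pod that has been asked to shut down carries a non-nil DeletionTimestamp while its containers terminate, so readiness checks and replica counts now skip such pods. A small sketch of the guard as a standalone helper (the helper name is illustrative, not part of the commit):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
)

// countsAsReady mirrors the condition above: a pod only counts if it has not
// been marked for deletion and currently reports the Ready condition.
func countsAsReady(pod *api.Pod) bool {
	return pod.DeletionTimestamp == nil && api.IsPodReady(pod)
}

func main() {
	pod := api.Pod{
		Status: api.PodStatus{
			Conditions: []api.PodCondition{{Type: api.PodReady, Status: api.ConditionTrue}},
		},
	}
	fmt.Println(countsAsReady(&pod)) // true: Ready and not marked for deletion
}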

View File

@@ -86,6 +86,8 @@ func (f *Framework) afterEach() {
 		// Note that we don't wait for any cleanup to propagate, which means
 		// that if you delete a bunch of pods right before ending your test,
 		// you may or may not see the killing/deletion/cleanup events.
+
+		dumpAllPodInfo(f.Client)
 	}
 
 	// Check whether all nodes are ready after the test.

View File

@@ -92,6 +92,9 @@ func verifyExpectedRcsExistAndGetExpectedPods(c *client.Client) ([]string, error
 			return nil, err
 		}
 		for _, pod := range podList.Items {
+			if pod.DeletionTimestamp != nil {
+				continue
+			}
 			expectedPods = append(expectedPods, string(pod.UID))
 		}
 	}

View File

@@ -190,7 +190,7 @@ var _ = Describe("Pod Disks", func() {
 			By("cleaning up PD-RW test environment")
 			// Teardown pods, PD. Ignore errors.
 			// Teardown should do nothing unless test failed.
-			podClient.Delete(host0Pod.Name, nil)
+			podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0))
 			detachPD(host0Name, diskName)
 			deletePD(diskName)
 		}()
@@ -221,7 +221,7 @@ var _ = Describe("Pod Disks", func() {
 			verifyPDContentsViaContainer(framework, host0Pod.Name, containerName, fileAndContentToVerify)
 
 			By("deleting host0Pod")
-			expectNoError(podClient.Delete(host0Pod.Name, nil), "Failed to delete host0Pod")
+			expectNoError(podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0)), "Failed to delete host0Pod")
 		}
 
 		By(fmt.Sprintf("deleting PD %q", diskName))

View File

@@ -21,8 +21,6 @@ import (
 	"time"
 
 	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/fields"
-	"k8s.io/kubernetes/pkg/kubectl"
 	"k8s.io/kubernetes/pkg/labels"
 	"k8s.io/kubernetes/pkg/types"
 	"k8s.io/kubernetes/pkg/util"
@@ -88,41 +86,24 @@ func ServeImageOrFail(f *Framework, test string, image string) {
 	// Cleanup the replication controller when we are done.
 	defer func() {
 		// Resize the replication controller to zero to get rid of pods.
-		By("Cleaning up the replication controller")
-		rcReaper, err := kubectl.ReaperFor("ReplicationController", f.Client, nil)
-		if err != nil {
+		if err := DeleteRC(f.Client, f.Namespace.Name, controller.Name); err != nil {
 			Logf("Failed to cleanup replication controller %v: %v.", controller.Name, err)
 		}
-		if _, err = rcReaper.Stop(f.Namespace.Name, controller.Name, 0, nil); err != nil {
-			Logf("Failed to stop replication controller %v: %v.", controller.Name, err)
-		}
 	}()
 
 	// List the pods, making sure we observe all the replicas.
-	listTimeout := time.Minute
 	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
-	pods, err := f.Client.Pods(f.Namespace.Name).List(label, fields.Everything())
-	Expect(err).NotTo(HaveOccurred())
-	t := time.Now()
-	for {
-		Logf("Controller %s: Found %d pods out of %d", name, len(pods.Items), replicas)
-		if len(pods.Items) == replicas {
-			break
-		}
-		if time.Since(t) > listTimeout {
-			Failf("Controller %s: Gave up waiting for %d pods to come up after seeing only %d pods after %v seconds",
-				name, replicas, len(pods.Items), time.Since(t).Seconds())
-		}
-		time.Sleep(5 * time.Second)
-		pods, err = f.Client.Pods(f.Namespace.Name).List(label, fields.Everything())
-		Expect(err).NotTo(HaveOccurred())
-	}
+
+	pods, err := podsCreated(f.Client, f.Namespace.Name, name, replicas)
 
 	By("Ensuring each pod is running")
 
 	// Wait for the pods to enter the running state. Waiting loops until the pods
 	// are running so non-running pods cause a timeout for this test.
 	for _, pod := range pods.Items {
+		if pod.DeletionTimestamp != nil {
+			continue
+		}
 		err = f.WaitForPodRunning(pod.Name)
 		Expect(err).NotTo(HaveOccurred())
 	}

View File

@@ -255,8 +255,17 @@ func podsCreated(c *client.Client, ns, name string, replicas int) (*api.PodList,
 			return nil, err
 		}
 
-		Logf("Pod name %s: Found %d pods out of %d", name, len(pods.Items), replicas)
-		if len(pods.Items) == replicas {
+		created := []api.Pod{}
+		for _, pod := range pods.Items {
+			if pod.DeletionTimestamp != nil {
+				continue
+			}
+			created = append(created, pod)
+		}
+		Logf("Pod name %s: Found %d pods out of %d", name, len(created), replicas)
+		if len(created) == replicas {
+			pods.Items = created
 			return pods, nil
 		}
 	}
@@ -416,6 +425,9 @@ var _ = Describe("Nodes", func() {
 		if err := deleteNS(c, ns); err != nil {
 			Failf("Couldn't delete namespace '%s', %v", ns, err)
 		}
+		if err := deleteTestingNS(c); err != nil {
+			Failf("Couldn't delete testing namespaces '%s', %v", ns, err)
+		}
 	})
 
 	Describe("Resize", func() {

View File

@@ -1372,7 +1372,7 @@ func verifyServeHostnameServiceUp(c *client.Client, host string, expectedPods []
 			passed = true
 			break
 		}
-		Logf("Expected pods: %v, got: %v", expectedPods, pods)
+		Logf("Waiting for expected pods for %s: %v, got: %v", serviceIP, expectedPods, pods)
 	}
 	if !passed {
 		return fmt.Errorf("service verification failed for:\n %s", cmd)

View File

@@ -1235,6 +1235,8 @@ func RunRC(config RCConfig) error {
 	for oldRunning != config.Replicas {
 		time.Sleep(interval)
 
+		terminating := 0
 		running := 0
 		waiting := 0
 		pending := 0
@@ -1244,10 +1246,13 @@ func RunRC(config RCConfig) error {
 		containerRestartNodes := util.NewStringSet()
 
 		pods := podStore.List()
-		if config.CreatedPods != nil {
-			*config.CreatedPods = pods
-		}
+		created := []*api.Pod{}
 		for _, p := range pods {
+			if p.DeletionTimestamp != nil {
+				terminating++
+				continue
+			}
+			created = append(created, p)
 			if p.Status.Phase == api.PodRunning {
 				running++
 				for _, v := range FailedContainers(p) {
@@ -1266,9 +1271,13 @@ func RunRC(config RCConfig) error {
 				unknown++
 			}
 		}
+		pods = created
+		if config.CreatedPods != nil {
+			*config.CreatedPods = pods
+		}
-		Logf("%v %v Pods: %d out of %d created, %d running, %d pending, %d waiting, %d inactive, %d unknown ",
-			time.Now(), rc.Name, len(pods), config.Replicas, running, pending, waiting, inactive, unknown)
+		Logf("%v %v Pods: %d out of %d created, %d running, %d pending, %d waiting, %d inactive, %d terminating, %d unknown ",
+			time.Now(), rc.Name, len(pods), config.Replicas, running, pending, waiting, inactive, terminating, unknown)
 
 		promPushRunningPending(running, pending)
@@ -1332,6 +1341,16 @@ func dumpPodDebugInfo(c *client.Client, pods []*api.Pod) {
 	dumpNodeDebugInfo(c, badNodes.List())
 }
 
+func dumpAllPodInfo(c *client.Client) {
+	pods, err := c.Pods("").List(labels.Everything(), fields.Everything())
+	if err != nil {
+		Logf("unable to fetch pod debug info: %v", err)
+	}
+	for _, pod := range pods.Items {
+		Logf("Pod %s %s node=%s, deletionTimestamp=%s", pod.Namespace, pod.Name, pod.Spec.NodeName, pod.DeletionTimestamp)
+	}
+}
+
 func dumpNodeDebugInfo(c *client.Client, nodeNames []string) {
 	for _, n := range nodeNames {
 		Logf("\nLogging kubelet events for node %v", n)
@@ -1442,9 +1461,29 @@ func DeleteRC(c *client.Client, ns, name string) error {
 	_, err = reaper.Stop(ns, name, 0, api.NewDeleteOptions(0))
 	deleteRCTime := time.Now().Sub(startTime)
 	Logf("Deleting RC took: %v", deleteRCTime)
+	if err == nil {
+		err = waitForRCPodsGone(c, ns, name)
+	}
+	terminatePodTime := time.Now().Sub(startTime) - deleteRCTime
+	Logf("Terminating RC pods took: %v", terminatePodTime)
 	return err
 }
 
+// waitForRCPodsGone waits until there are no pods reported under an RC's selector (because the pods
+// have completed termination).
+func waitForRCPodsGone(c *client.Client, ns, name string) error {
+	rc, err := c.ReplicationControllers(ns).Get(name)
+	if err != nil {
+		return err
+	}
+	return wait.Poll(poll, singleCallTimeout, func() (bool, error) {
+		if pods, err := c.Pods(ns).List(labels.SelectorFromSet(rc.Spec.Selector), fields.Everything()); err == nil && len(pods.Items) == 0 {
+			return true, nil
+		}
+		return false, nil
+	})
+}
+
 // Convenient wrapper around listing nodes supporting retries.
 func listNodes(c *client.Client, label labels.Selector, field fields.Selector) (*api.NodeList, error) {
 	var nodes *api.NodeList
@@ -1606,7 +1645,6 @@ func getSigner(provider string) (ssh.Signer, error) {
 		return nil, fmt.Errorf("getSigner(...) not implemented for %s", provider)
 	}
 	key := filepath.Join(keydir, keyfile)
-	Logf("Using SSH key: %s", key)
 	return util.MakePrivateKeySignerFromFile(key)
 }
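Note added for context: the new waitForRCPodsGone helper above is built on wait.Poll, which calls its condition function once per interval until it reports done (or returns an error) or the timeout expires; the suite's poll and singleCallTimeout constants supply those durations. A standalone sketch of that polling contract, with made-up durations in place of the suite's constants:

package main

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/util/wait"
)

func main() {
	// The condition becomes true after three seconds; Poll checks it once a
	// second and would give up after ten, much as waitForRCPodsGone waits for
	// the pod list under the RC's selector to drain to zero.
	done := time.Now().Add(3 * time.Second)
	err := wait.Poll(1*time.Second, 10*time.Second, func() (bool, error) {
		return time.Now().After(done), nil
	})
	fmt.Println("poll returned:", err) // nil once the condition is satisfied
}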