Delete resources immediately from e2e tests

commit f1eaa8a27b (parent 984692d205)
Author: Clayton Coleman
Date:   2015-05-25 15:37:21 -04:00

4 changed files with 41 additions and 30 deletions
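The change is mechanical but worth spelling out: cleanup paths that previously passed nil DeleteOptions (accepting the API server's default grace period) now pass api.NewDeleteOptions(0), so test resources are reaped immediately instead of lingering through a graceful shutdown. The one watch-based Pods test that exercises graceful deletion keeps nil and instead gains assertions on the terminating state. Below is a minimal sketch of what the two argument forms mean; the DeleteOptions type and constructor here only mirror the shape of the real pkg/api definitions and are not the actual package code.

    package main

    import "fmt"

    // Stand-in for the relevant field of api.DeleteOptions.
    type DeleteOptions struct {
    	GracePeriodSeconds *int64
    }

    // Mirrors the shape of api.NewDeleteOptions: wrap the requested grace period.
    func NewDeleteOptions(grace int64) *DeleteOptions {
    	return &DeleteOptions{GracePeriodSeconds: &grace}
    }

    func main() {
    	immediate := NewDeleteOptions(0)
    	// 0 means terminate now; a nil *DeleteOptions would leave the grace
    	// period at the server-side default instead.
    	fmt.Println(*immediate.GracePeriodSeconds)
    }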

File 1 of 4

@@ -205,7 +205,7 @@ func getUDData(jpgExpected string, ns string) func(*client.Client, string) error
 		if strings.Contains(data.Image, jpgExpected) {
 			return nil
 		} else {
-			return errors.New(fmt.Sprintf("data served up in container is innaccurate, %s didn't contain %s", data, jpgExpected))
+			return errors.New(fmt.Sprintf("data served up in container is inaccurate, %s didn't contain %s", data, jpgExpected))
 		}
 	}
 }

File 2 of 4

@@ -82,8 +82,8 @@ var _ = Describe("PD", func() {
 		By("cleaning up PD-RW test environment")
 		// Teardown pods, PD. Ignore errors.
 		// Teardown should do nothing unless test failed.
-		podClient.Delete(host0Pod.Name, nil)
-		podClient.Delete(host1Pod.Name, nil)
+		podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0))
+		podClient.Delete(host1Pod.Name, api.NewDeleteOptions(0))
 		detachPD(host0Name, diskName)
 		detachPD(host1Name, diskName)
 		deletePD(diskName)
@@ -96,7 +96,7 @@ var _ = Describe("PD", func() {
 		expectNoError(waitForPodRunning(c, host0Pod.Name))

 		By("deleting host0Pod")
-		expectNoError(podClient.Delete(host0Pod.Name, nil), "Failed to delete host0Pod")
+		expectNoError(podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0)), "Failed to delete host0Pod")

 		By("submitting host1Pod to kubernetes")
 		_, err = podClient.Create(host1Pod)
@@ -105,7 +105,7 @@ var _ = Describe("PD", func() {
 		expectNoError(waitForPodRunning(c, host1Pod.Name))

 		By("deleting host1Pod")
-		expectNoError(podClient.Delete(host1Pod.Name, nil), "Failed to delete host1Pod")
+		expectNoError(podClient.Delete(host1Pod.Name, api.NewDeleteOptions(0)), "Failed to delete host1Pod")

 		By(fmt.Sprintf("deleting PD %q", diskName))
 		for start := time.Now(); time.Since(start) < 180*time.Second; time.Sleep(5 * time.Second) {
@@ -142,9 +142,9 @@ var _ = Describe("PD", func() {
 		By("cleaning up PD-RO test environment")
 		// Teardown pods, PD. Ignore errors.
 		// Teardown should do nothing unless test failed.
-		podClient.Delete(rwPod.Name, nil)
-		podClient.Delete(host0ROPod.Name, nil)
-		podClient.Delete(host1ROPod.Name, nil)
+		podClient.Delete(rwPod.Name, api.NewDeleteOptions(0))
+		podClient.Delete(host0ROPod.Name, api.NewDeleteOptions(0))
+		podClient.Delete(host1ROPod.Name, api.NewDeleteOptions(0))
 		detachPD(host0Name, diskName)
 		detachPD(host1Name, diskName)
@@ -155,7 +155,7 @@ var _ = Describe("PD", func() {
 		_, err = podClient.Create(rwPod)
 		expectNoError(err, "Failed to create rwPod")
 		expectNoError(waitForPodRunning(c, rwPod.Name))
-		expectNoError(podClient.Delete(rwPod.Name, nil), "Failed to delete host0Pod")
+		expectNoError(podClient.Delete(rwPod.Name, api.NewDeleteOptions(0)), "Failed to delete host0Pod")

 		By("submitting host0ROPod to kubernetes")
 		_, err = podClient.Create(host0ROPod)
@@ -170,10 +170,10 @@ var _ = Describe("PD", func() {
 		expectNoError(waitForPodRunning(c, host1ROPod.Name))

 		By("deleting host0ROPod")
-		expectNoError(podClient.Delete(host0ROPod.Name, nil), "Failed to delete host0ROPod")
+		expectNoError(podClient.Delete(host0ROPod.Name, api.NewDeleteOptions(0)), "Failed to delete host0ROPod")

 		By("deleting host1ROPod")
-		expectNoError(podClient.Delete(host1ROPod.Name, nil), "Failed to delete host1ROPod")
+		expectNoError(podClient.Delete(host1ROPod.Name, api.NewDeleteOptions(0)), "Failed to delete host1ROPod")

 		By(fmt.Sprintf("deleting PD %q", diskName))
 		for start := time.Now(); time.Since(start) < 180*time.Second; time.Sleep(5 * time.Second) {
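Zero grace matters most in these PD tests: teardown detaches and deletes the disk immediately after the pod deletes, and that cannot succeed while a pod is still holding the volume through a graceful shutdown. The 180-second loops above are a bounded-retry idiom; here is a self-contained sketch of the same shape, where deletePD is a stand-in for the real cloud call rather than the test's helper:

    package main

    import (
    	"errors"
    	"fmt"
    	"time"
    )

    var attached = true // pretend the disk frees up after the first attempt

    func deletePD(diskName string) error {
    	if attached {
    		attached = false
    		return errors.New("disk is still attached to an instance")
    	}
    	return nil
    }

    func main() {
    	// Same shape as the tests: retry every 5s, give up after 180s.
    	for start := time.Now(); time.Since(start) < 180*time.Second; time.Sleep(5 * time.Second) {
    		if err := deletePD("test-disk"); err != nil {
    			fmt.Printf("Couldn't delete PD, sleeping 5s: %v\n", err)
    			continue
    		}
    		fmt.Println("Successfully deleted PD")
    		break
    	}
    }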

File 3 of 4

@@ -55,7 +55,7 @@ func runLivenessTest(c *client.Client, podDescr *api.Pod, expectRestart bool) {
 	// At the end of the test, clean up by removing the pod.
 	defer func() {
 		By("deleting the pod")
-		c.Pods(ns).Delete(podDescr.Name, nil)
+		c.Pods(ns).Delete(podDescr.Name, api.NewDeleteOptions(0))
 	}()

 	// Wait until the pod is not pending. (Here we need to check for something other than
@@ -101,7 +101,7 @@ func testHostIP(c *client.Client, pod *api.Pod) {
 	podClient := c.Pods(ns)

 	By("creating pod")
-	defer podClient.Delete(pod.Name, nil)
+	defer podClient.Delete(pod.Name, api.NewDeleteOptions(0))
 	_, err = podClient.Create(pod)
 	if err != nil {
 		Fail(fmt.Sprintf("Failed to create pod: %v", err))
@@ -205,7 +205,7 @@ var _ = Describe("Pods", func() {
 		// We call defer here in case there is a problem with
 		// the test so we can ensure that we clean up after
 		// ourselves
-		defer podClient.Delete(pod.Name, nil)
+		defer podClient.Delete(pod.Name, api.NewDeleteOptions(0))
 		_, err = podClient.Create(pod)
 		if err != nil {
 			Fail(fmt.Sprintf("Failed to create pod: %v", err))
@@ -218,7 +218,7 @@ var _ = Describe("Pods", func() {
 		}
 		Expect(len(pods.Items)).To(Equal(1))

-		By("veryfying pod creation was observed")
+		By("verifying pod creation was observed")
 		select {
 		case event, _ := <-w.ResultChan():
 			if event.Type != watch.Added {
@@ -228,22 +228,21 @@ var _ = Describe("Pods", func() {
 			Fail("Timeout while waiting for pod creation")
 		}

-		By("deleting the pod")
-		podClient.Delete(pod.Name, nil)
-		pods, err = podClient.List(labels.SelectorFromSet(labels.Set(map[string]string{"time": value})), fields.Everything())
-		if err != nil {
-			Fail(fmt.Sprintf("Failed to delete pod: %v", err))
+		By("deleting the pod gracefully")
+		if err := podClient.Delete(pod.Name, nil); err != nil {
+			Fail(fmt.Sprintf("Failed to observe pod deletion: %v", err))
 		}
-		Expect(len(pods.Items)).To(Equal(0))

-		By("veryfying pod deletion was observed")
+		By("verifying pod deletion was observed")
 		deleted := false
 		timeout := false
+		var lastPod *api.Pod
 		timer := time.After(podStartTimeout)
 		for !deleted && !timeout {
 			select {
 			case event, _ := <-w.ResultChan():
 				if event.Type == watch.Deleted {
+					lastPod = event.Object.(*api.Pod)
 					deleted = true
 				}
 			case <-timer:
@@ -253,6 +252,14 @@ var _ = Describe("Pods", func() {
 		if !deleted {
 			Fail("Failed to observe pod deletion")
 		}
+
+		Expect(lastPod.DeletionTimestamp).ToNot(BeNil())
+		Expect(lastPod.Spec.TerminationGracePeriodSeconds).ToNot(BeZero())
+		pods, err = podClient.List(labels.SelectorFromSet(labels.Set(map[string]string{"time": value})), fields.Everything())
+		if err != nil {
+			Fail(fmt.Sprintf("Failed to delete pod: %v", err))
+		}
+		Expect(len(pods.Items)).To(Equal(0))
 	})

 	It("should be updated", func() {
@@ -292,7 +299,7 @@ var _ = Describe("Pods", func() {
 		By("submitting the pod to kubernetes")
 		defer func() {
 			By("deleting the pod")
-			podClient.Delete(pod.Name, nil)
+			podClient.Delete(pod.Name, api.NewDeleteOptions(0))
 		}()
 		pod, err := podClient.Create(pod)
 		if err != nil {
@@ -356,7 +363,7 @@ var _ = Describe("Pods", func() {
 			},
 		},
 	}
-	defer c.Pods(api.NamespaceDefault).Delete(serverPod.Name, nil)
+	defer c.Pods(api.NamespaceDefault).Delete(serverPod.Name, api.NewDeleteOptions(0))
 	_, err := c.Pods(api.NamespaceDefault).Create(serverPod)
 	if err != nil {
 		Fail(fmt.Sprintf("Failed to create serverPod: %v", err))
@@ -547,7 +554,7 @@ var _ = Describe("Pods", func() {
 			// We call defer here in case there is a problem with
 			// the test so we can ensure that we clean up after
 			// ourselves
-			podClient.Delete(pod.Name)
+			podClient.Delete(pod.Name, api.NewDeleteOptions(0))
 		}()

 		By("waiting for the pod to start running")
@@ -620,7 +627,7 @@ var _ = Describe("Pods", func() {
 			// We call defer here in case there is a problem with
 			// the test so we can ensure that we clean up after
 			// ourselves
-			podClient.Delete(pod.Name)
+			podClient.Delete(pod.Name, api.NewDeleteOptions(0))
 		}()

 		By("waiting for the pod to start running")

File 4 of 4

@@ -409,20 +409,24 @@ func expectNoError(err error, explain ...interface{}) {
 	ExpectWithOffset(1, err).NotTo(HaveOccurred(), explain...)
 }

-// Stops everything from filePath from namespace ns and checks if everything maching selectors from the given namespace is correctly stopped.
+// Stops everything from filePath from namespace ns and checks if everything matching selectors from the given namespace is correctly stopped.
 func cleanup(filePath string, ns string, selectors ...string) {
-	By("using stop to clean up resources")
+	By("using delete to clean up resources")
 	var nsArg string
 	if ns != "" {
 		nsArg = fmt.Sprintf("--namespace=%s", ns)
 	}
-	runKubectl("stop", "-f", filePath, nsArg)
+	runKubectl("stop", "--grace-period=0", "-f", filePath, nsArg)

 	for _, selector := range selectors {
-		resources := runKubectl("get", "pods,rc,se", "-l", selector, "--no-headers", nsArg)
+		resources := runKubectl("get", "rc,se", "-l", selector, "--no-headers", nsArg)
 		if resources != "" {
 			Failf("Resources left running after stop:\n%s", resources)
 		}
+		pods := runKubectl("get", "pods", "-l", selector, nsArg, "-t", "{{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ \"\\n\" }}{{ end }}{{ end }}")
+		if pods != "" {
+			Failf("Pods left unterminated after stop:\n%s", pods)
+		}
 	}
 }
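The new pod check leans on kubectl's template output (-t) to list only pods that are not already marked for deletion, since gracefully terminating pods may legitimately remain visible for a while. The same filter, sketched with text/template over a mocked pod list; field names are capitalized here because the sketch ranges over Go structs rather than kubectl's decoded JSON:

    package main

    import (
    	"os"
    	"text/template"
    )

    type metadata struct {
    	Name              string
    	DeletionTimestamp string
    }

    type item struct{ Metadata metadata }

    // Same logic as the kubectl template: print a name only when no
    // deletionTimestamp is present.
    const tmpl = `{{ range .Items }}{{ if not .Metadata.DeletionTimestamp }}{{ .Metadata.Name }}{{ "\n" }}{{ end }}{{ end }}`

    func main() {
    	list := struct{ Items []item }{Items: []item{
    		{metadata{Name: "pod-a"}},                                            // running: printed
    		{metadata{Name: "pod-b", DeletionTimestamp: "2015-05-25T15:37:21Z"}}, // terminating: filtered out
    	}}
    	template.Must(template.New("pods").Parse(tmpl)).Execute(os.Stdout, list)
    }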