Update tests to prepare for graceful deletion

For cases where we want to immediately clean up the pod, start using
gracePeriod 0 in test cases.
Clayton Coleman 2015-08-19 22:09:57 -04:00
parent 3fce3433d9
commit 9267f829eb
9 changed files with 47 additions and 40 deletions
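
The pattern applied throughout is the same: replace a nil DeleteOptions (which leaves the pod to the server's default, graceful termination) with an explicit zero-second grace period so that test cleanup is immediate. A minimal sketch of the before/after, assuming the client packages of this era; the import paths and the deletePodImmediately helper name are illustrative, not part of the commit:

package example

import (
	"k8s.io/kubernetes/pkg/api"
	client "k8s.io/kubernetes/pkg/client"
)

// deletePodImmediately is a hypothetical helper showing the pattern the
// tests adopt in this commit.
func deletePodImmediately(c *client.Client, ns, name string) error {
	// Before this commit the tests passed nil options, so the server
	// applied its default grace period and the pod lingered in
	// Terminating, still visible to later assertions.
	// api.NewDeleteOptions(0) sets gracePeriodSeconds to 0: delete now.
	return c.Pods(ns).Delete(name, api.NewDeleteOptions(0))
}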

View File

@@ -867,7 +867,7 @@ func runSchedulerNoPhantomPodsTest(client *client.Client) {
 	// Delete a pod to free up room.
 	glog.Infof("Deleting pod %v", bar.Name)
-	err = client.Pods(api.NamespaceDefault).Delete(bar.Name, nil)
+	err = client.Pods(api.NamespaceDefault).Delete(bar.Name, api.NewDeleteOptions(0))
 	if err != nil {
 		glog.Fatalf("FAILED: couldn't delete pod %q: %v", bar.Name, err)
 	}
@@ -878,7 +878,11 @@ func runSchedulerNoPhantomPodsTest(client *client.Client) {
 		glog.Fatalf("Failed to create pod: %v, %v", pod, err)
 	}
 	if err := wait.Poll(time.Second, time.Second*60, podRunning(client, baz.Namespace, baz.Name)); err != nil {
-		glog.Fatalf("FAILED: (Scheduler probably didn't process deletion of 'phantom.bar') Pod never started running: %v", err)
+		if pod, perr := client.Pods(api.NamespaceDefault).Get("phantom.bar"); perr == nil {
+			glog.Fatalf("FAILED: 'phantom.bar' was never deleted: %#v", pod)
+		} else {
+			glog.Fatalf("FAILED: (Scheduler probably didn't process deletion of 'phantom.bar') Pod never started running: %v", err)
+		}
 	}
 	glog.Info("Scheduler doesn't make phantom pods: test passed.")

View File

@@ -23,7 +23,7 @@ readonly red=$(tput setaf 1)
 readonly green=$(tput setaf 2)
 
 kube::test::clear_all() {
-  kubectl delete "${kube_flags[@]}" rc,pods --all
+  kubectl delete "${kube_flags[@]}" rc,pods --all --grace-period=0
 }
 
 kube::test::get_object_assert() {

View File

@@ -246,7 +246,7 @@ runTests() {
   # Pre-condition: valid-pod POD is running
   kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
   # Command
-  kubectl delete pod valid-pod "${kube_flags[@]}"
+  kubectl delete pod valid-pod "${kube_flags[@]}" --grace-period=0
   # Post-condition: no POD is running
   kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
@@ -262,7 +262,7 @@ runTests() {
   # Pre-condition: valid-pod POD is running
   kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
   # Command
-  kubectl delete -f docs/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
+  kubectl delete -f docs/admin/limitrange/valid-pod.yaml "${kube_flags[@]}" --grace-period=0
   # Post-condition: no POD is running
   kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
@@ -278,7 +278,7 @@ runTests() {
   # Pre-condition: valid-pod POD is running
   kube::test::get_object_assert "pods -l'name in (valid-pod)'" '{{range.items}}{{$id_field}}:{{end}}' 'valid-pod:'
   # Command
-  kubectl delete pods -l'name in (valid-pod)' "${kube_flags[@]}"
+  kubectl delete pods -l'name in (valid-pod)' "${kube_flags[@]}" --grace-period=0
   # Post-condition: no POD is running
   kube::test::get_object_assert "pods -l'name in (valid-pod)'" '{{range.items}}{{$id_field}}:{{end}}' ''
@@ -310,7 +310,7 @@ runTests() {
   # Pre-condition: valid-pod POD is running
   kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
   # Command
-  kubectl delete --all pods "${kube_flags[@]}" # --all remove all the pods
+  kubectl delete --all pods "${kube_flags[@]}" --grace-period=0 # --all remove all the pods
   # Post-condition: no POD is running
   kube::test::get_object_assert "pods -l'name in (valid-pod)'" '{{range.items}}{{$id_field}}:{{end}}' ''
@@ -327,7 +327,7 @@ runTests() {
   # Pre-condition: valid-pod and redis-proxy PODs are running
   kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'redis-proxy:valid-pod:'
   # Command
-  kubectl delete pods valid-pod redis-proxy "${kube_flags[@]}" # delete multiple pods at once
+  kubectl delete pods valid-pod redis-proxy "${kube_flags[@]}" --grace-period=0 # delete multiple pods at once
   # Post-condition: no POD is running
   kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
@@ -344,7 +344,7 @@ runTests() {
   # Pre-condition: valid-pod and redis-proxy PODs are running
   kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'redis-proxy:valid-pod:'
   # Command
-  kubectl stop pods valid-pod redis-proxy "${kube_flags[@]}" # stop multiple pods at once
+  kubectl stop pods valid-pod redis-proxy "${kube_flags[@]}" --grace-period=0 # stop multiple pods at once
   # Post-condition: no POD is running
   kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
@@ -368,7 +368,7 @@ runTests() {
   # Pre-condition: valid-pod POD is running
   kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
   # Command
-  kubectl delete pods -lnew-name=new-valid-pod "${kube_flags[@]}"
+  kubectl delete pods -lnew-name=new-valid-pod --grace-period=0 "${kube_flags[@]}"
   # Post-condition: no POD is running
   kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
@@ -419,7 +419,7 @@ runTests() {
   # Pre-condition: valid-pod POD is running
   kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
   # Command
-  kubectl delete pods -l'name in (valid-pod-super-sayan)' "${kube_flags[@]}"
+  kubectl delete pods -l'name in (valid-pod-super-sayan)' --grace-period=0 "${kube_flags[@]}"
   # Post-condition: no POD is running
   kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
@@ -455,7 +455,7 @@ runTests() {
   # Pre-condition: valid-pod POD is running
   kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
   # Command
-  kubectl delete "${kube_flags[@]}" pod --namespace=other valid-pod
+  kubectl delete "${kube_flags[@]}" pod --namespace=other valid-pod --grace-period=0
   # Post-condition: no POD is running
   kube::test::get_object_assert 'pods --namespace=other' "{{range.items}}{{$id_field}}:{{end}}" ''

View File

@@ -791,7 +791,7 @@ func getUDData(jpgExpected string, ns string) func(*client.Client, string) error
 		if strings.Contains(data.Image, jpgExpected) {
 			return nil
 		} else {
-			return errors.New(fmt.Sprintf("data served up in container is innaccurate, %s didn't contain %s", data, jpgExpected))
+			return errors.New(fmt.Sprintf("data served up in container is inaccurate, %s didn't contain %s", data, jpgExpected))
 		}
 	}
 }

View File

@@ -78,8 +78,8 @@ var _ = Describe("Pod Disks", func() {
 			By("cleaning up PD-RW test environment")
 			// Teardown pods, PD. Ignore errors.
 			// Teardown should do nothing unless test failed.
-			podClient.Delete(host0Pod.Name, nil)
-			podClient.Delete(host1Pod.Name, nil)
+			podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0))
+			podClient.Delete(host1Pod.Name, api.NewDeleteOptions(0))
 			detachPD(host0Name, diskName)
 			detachPD(host1Name, diskName)
 			deletePD(diskName)
@@ -98,7 +98,7 @@ var _ = Describe("Pod Disks", func() {
 		Logf("Wrote value: %v", testFileContents)
 		By("deleting host0Pod")
-		expectNoError(podClient.Delete(host0Pod.Name, nil), "Failed to delete host0Pod")
+		expectNoError(podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0)), "Failed to delete host0Pod")
 		By("submitting host1Pod to kubernetes")
 		_, err = podClient.Create(host1Pod)
@@ -113,7 +113,7 @@ var _ = Describe("Pod Disks", func() {
 		Expect(strings.TrimSpace(v)).To(Equal(strings.TrimSpace(testFileContents)))
 		By("deleting host1Pod")
-		expectNoError(podClient.Delete(host1Pod.Name, nil), "Failed to delete host1Pod")
+		expectNoError(podClient.Delete(host1Pod.Name, api.NewDeleteOptions(0)), "Failed to delete host1Pod")
 		By(fmt.Sprintf("deleting PD %q", diskName))
 		deletePDWithRetry(diskName)
@@ -136,9 +136,9 @@ var _ = Describe("Pod Disks", func() {
 			By("cleaning up PD-RO test environment")
 			// Teardown pods, PD. Ignore errors.
 			// Teardown should do nothing unless test failed.
-			podClient.Delete(rwPod.Name, nil)
-			podClient.Delete(host0ROPod.Name, nil)
-			podClient.Delete(host1ROPod.Name, nil)
+			podClient.Delete(rwPod.Name, api.NewDeleteOptions(0))
+			podClient.Delete(host0ROPod.Name, api.NewDeleteOptions(0))
+			podClient.Delete(host1ROPod.Name, api.NewDeleteOptions(0))
 			detachPD(host0Name, diskName)
 			detachPD(host1Name, diskName)
@@ -149,7 +149,7 @@ var _ = Describe("Pod Disks", func() {
 		_, err = podClient.Create(rwPod)
 		expectNoError(err, "Failed to create rwPod")
 		expectNoError(framework.WaitForPodRunning(rwPod.Name))
-		expectNoError(podClient.Delete(rwPod.Name, nil), "Failed to delete host0Pod")
+		expectNoError(podClient.Delete(rwPod.Name, api.NewDeleteOptions(0)), "Failed to delete host0Pod")
 		expectNoError(waitForPDDetach(diskName, host0Name))
 		By("submitting host0ROPod to kubernetes")
@@ -165,10 +165,10 @@ var _ = Describe("Pod Disks", func() {
 		expectNoError(framework.WaitForPodRunning(host1ROPod.Name))
 		By("deleting host0ROPod")
-		expectNoError(podClient.Delete(host0ROPod.Name, nil), "Failed to delete host0ROPod")
+		expectNoError(podClient.Delete(host0ROPod.Name, api.NewDeleteOptions(0)), "Failed to delete host0ROPod")
 		By("deleting host1ROPod")
-		expectNoError(podClient.Delete(host1ROPod.Name, nil), "Failed to delete host1ROPod")
+		expectNoError(podClient.Delete(host1ROPod.Name, api.NewDeleteOptions(0)), "Failed to delete host1ROPod")
 		By(fmt.Sprintf("deleting PD %q", diskName))
 		deletePDWithRetry(diskName)

View File

@@ -43,7 +43,7 @@ func runLivenessTest(c *client.Client, ns string, podDescr *api.Pod, expectResta
 	// At the end of the test, clean up by removing the pod.
 	defer func() {
 		By("deleting the pod")
-		c.Pods(ns).Delete(podDescr.Name, nil)
+		c.Pods(ns).Delete(podDescr.Name, api.NewDeleteOptions(0))
 	}()
 	// Wait until the pod is not pending. (Here we need to check for something other than
@@ -86,15 +86,14 @@ func runLivenessTest(c *client.Client, ns string, podDescr *api.Pod, expectResta
 func testHostIP(c *client.Client, ns string, pod *api.Pod) {
 	podClient := c.Pods(ns)
 	By("creating pod")
-	defer podClient.Delete(pod.Name, nil)
-	_, err := podClient.Create(pod)
-	if err != nil {
+	defer podClient.Delete(pod.Name, api.NewDeleteOptions(0))
+	if _, err := podClient.Create(pod); err != nil {
 		Failf("Failed to create pod: %v", err)
 	}
 	By("ensuring that pod is running and has a hostIP")
 	// Wait for the pods to enter the running state. Waiting loops until the pods
 	// are running so non-running pods cause a timeout for this test.
-	err = waitForPodRunningInNamespace(c, pod.Name, ns)
+	err := waitForPodRunningInNamespace(c, pod.Name, ns)
 	Expect(err).NotTo(HaveOccurred())
 	// Try to make sure we get a hostIP for each pod.
 	hostIPTimeout := 2 * time.Minute
@@ -222,7 +221,7 @@ var _ = Describe("Pods", func() {
 		// We call defer here in case there is a problem with
 		// the test so we can ensure that we clean up after
 		// ourselves
-		defer podClient.Delete(pod.Name, nil)
+		defer podClient.Delete(pod.Name, api.NewDeleteOptions(0))
 		_, err = podClient.Create(pod)
 		if err != nil {
 			Failf("Failed to create pod: %v", err)
@@ -235,7 +234,7 @@ var _ = Describe("Pods", func() {
 		}
 		Expect(len(pods.Items)).To(Equal(1))
-		By("veryfying pod creation was observed")
+		By("verifying pod creation was observed")
 		select {
 		case event, _ := <-w.ResultChan():
 			if event.Type != watch.Added {
@@ -312,7 +311,7 @@ var _ = Describe("Pods", func() {
 		By("submitting the pod to kubernetes")
 		defer func() {
 			By("deleting the pod")
-			podClient.Delete(pod.Name, nil)
+			podClient.Delete(pod.Name, api.NewDeleteOptions(0))
 		}()
 		pod, err := podClient.Create(pod)
 		if err != nil {
@@ -376,7 +375,7 @@ var _ = Describe("Pods", func() {
 				},
 			},
 		}
-		defer framework.Client.Pods(framework.Namespace.Name).Delete(serverPod.Name, nil)
+		defer framework.Client.Pods(framework.Namespace.Name).Delete(serverPod.Name, api.NewDeleteOptions(0))
 		_, err := framework.Client.Pods(framework.Namespace.Name).Create(serverPod)
 		if err != nil {
 			Failf("Failed to create serverPod: %v", err)
@@ -600,7 +599,7 @@ var _ = Describe("Pods", func() {
 			// We call defer here in case there is a problem with
 			// the test so we can ensure that we clean up after
 			// ourselves
-			podClient.Delete(pod.Name)
+			podClient.Delete(pod.Name, api.NewDeleteOptions(0))
 		}()
 		By("waiting for the pod to start running")
@@ -673,7 +672,7 @@ var _ = Describe("Pods", func() {
 			// We call defer here in case there is a problem with
 			// the test so we can ensure that we clean up after
 			// ourselves
-			podClient.Delete(pod.Name)
+			podClient.Delete(pod.Name, api.NewDeleteOptions(0))
 		}()
 		By("waiting for the pod to start running")

View File

@@ -831,20 +831,24 @@ func expectNoError(err error, explain ...interface{}) {
 	ExpectWithOffset(1, err).NotTo(HaveOccurred(), explain...)
 }
 
-// Stops everything from filePath from namespace ns and checks if everything maching selectors from the given namespace is correctly stopped.
+// Stops everything from filePath from namespace ns and checks if everything matching selectors from the given namespace is correctly stopped.
 func cleanup(filePath string, ns string, selectors ...string) {
-	By("using stop to clean up resources")
+	By("using delete to clean up resources")
 	var nsArg string
 	if ns != "" {
 		nsArg = fmt.Sprintf("--namespace=%s", ns)
 	}
-	runKubectl("stop", "-f", filePath, nsArg)
+	runKubectl("stop", "--grace-period=0", "-f", filePath, nsArg)
 	for _, selector := range selectors {
-		resources := runKubectl("get", "pods,rc,svc", "-l", selector, "--no-headers", nsArg)
+		resources := runKubectl("get", "rc,svc", "-l", selector, "--no-headers", nsArg)
 		if resources != "" {
 			Failf("Resources left running after stop:\n%s", resources)
 		}
+		pods := runKubectl("get", "pods", "-l", selector, nsArg, "-t", "{{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ \"\\n\" }}{{ end }}{{ end }}")
+		if pods != "" {
+			Failf("Pods left unterminated after stop:\n%s", pods)
+		}
 	}
 }

View File

@@ -232,7 +232,7 @@ var deleteNow string = `
 {
 	"kind": "DeleteOptions",
 	"apiVersion": "` + testapi.Version() + `",
-	"gracePeriodSeconds": null%s
+	"gracePeriodSeconds": 0%s
 }
 `
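
For context, a sketch of the api.NewDeleteOptions constructor that the Go call sites above rely on, written to be consistent with the fixture's JSON; the real definition lives in the api package of this era:

// NewDeleteOptions wraps a grace period in a DeleteOptions struct; the
// fixture's "gracePeriodSeconds": 0 is the JSON form of what it builds.
func NewDeleteOptions(grace int64) *DeleteOptions {
	// A pointer keeps "unset" (nil: use the server default) distinguishable
	// from an explicit 0 (delete immediately), matching the fixture's move
	// from "gracePeriodSeconds": null to "gracePeriodSeconds": 0.
	return &DeleteOptions{GracePeriodSeconds: &grace}
}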

View File

@@ -277,7 +277,7 @@ func DoTestUnschedulableNodes(t *testing.T, restClient *client.Client, nodeStore
 		t.Logf("Test %d: Pod got scheduled on a schedulable node", i)
 	}
 
-	err = restClient.Pods(api.NamespaceDefault).Delete(myPod.Name, nil)
+	err = restClient.Pods(api.NamespaceDefault).Delete(myPod.Name, api.NewDeleteOptions(0))
 	if err != nil {
 		t.Errorf("Failed to delete pod: %v", err)
 	}