From ad73b325f30b5b72792a94a4ca6bc633a2ae6875 Mon Sep 17 00:00:00 2001
From: David Ashpole
Date: Thu, 2 Feb 2017 08:51:01 -0800
Subject: [PATCH] changed density test to use volumes, and include deletion
 before logging

---
 test/e2e_node/density_test.go        | 18 ++++-----
 test/e2e_node/resource_collector.go  | 56 ++++++++++++++++++++--------
 test/e2e_node/resource_usage_test.go |  2 +-
 test/e2e_node/restart_test.go        |  2 +-
 4 files changed, 52 insertions(+), 26 deletions(-)

diff --git a/test/e2e_node/density_test.go b/test/e2e_node/density_test.go
index e5cb6fc7206..08162f28fb2 100644
--- a/test/e2e_node/density_test.go
+++ b/test/e2e_node/density_test.go
@@ -324,7 +324,7 @@ func runDensityBatchTest(f *framework.Framework, rc *ResourceCollector, testArg
 	)
 
 	// create test pod data structure
-	pods := newTestPods(testArg.podsNr, framework.GetPauseImageNameForHostArch(), podType)
+	pods := newTestPods(testArg.podsNr, true, framework.GetPauseImageNameForHostArch(), podType)
 
 	// the controller watches the change of pod status
 	controller := newInformerWatchPod(f, mutex, watchTimes, podType)
@@ -338,9 +338,6 @@ func runDensityBatchTest(f *framework.Framework, rc *ResourceCollector, testArg
 	time.Sleep(sleepBeforeCreatePods)
 
 	rc.Start()
-	// Explicitly delete pods to prevent namespace controller cleanning up timeout
-	defer deletePodsSync(f, append(pods, getCadvisorPod()))
-	defer rc.Stop()
 
 	By("Creating a batch of pods")
 	// It returns a map['pod name']'creation time' containing the creation timestamps
@@ -387,6 +384,9 @@ func runDensityBatchTest(f *framework.Framework, rc *ResourceCollector, testArg
 	sort.Sort(framework.LatencySlice(e2eLags))
 	batchLag := lastRunning.Time.Sub(firstCreate.Time)
 
+	rc.Stop()
+	deletePodsSync(f, append(pods, getCadvisorPod()))
+
 	// Log time series data.
 	if isLogTimeSeries {
 		logDensityTimeSeries(rc, createTimes, watchTimes, testInfo)
@@ -403,8 +403,8 @@ func runDensitySeqTest(f *framework.Framework, rc *ResourceCollector, testArg de
 		podType               = "density_test_pod"
 		sleepBeforeCreatePods = 30 * time.Second
 	)
-	bgPods := newTestPods(testArg.bgPodsNr, framework.GetPauseImageNameForHostArch(), "background_pod")
-	testPods := newTestPods(testArg.podsNr, framework.GetPauseImageNameForHostArch(), podType)
+	bgPods := newTestPods(testArg.bgPodsNr, true, framework.GetPauseImageNameForHostArch(), "background_pod")
+	testPods := newTestPods(testArg.podsNr, true, framework.GetPauseImageNameForHostArch(), podType)
 
 	By("Creating a batch of background pods")
 
@@ -414,13 +414,13 @@ func runDensitySeqTest(f *framework.Framework, rc *ResourceCollector, testArg de
 	time.Sleep(sleepBeforeCreatePods)
 
 	rc.Start()
-	// Explicitly delete pods to prevent namespace controller cleanning up timeout
-	defer deletePodsSync(f, append(bgPods, append(testPods, getCadvisorPod())...))
-	defer rc.Stop()
 
 	// Create pods sequentially (back-to-back). e2eLags have been sorted.
 	batchlag, e2eLags := createBatchPodSequential(f, testPods)
 
+	rc.Stop()
+	deletePodsSync(f, append(bgPods, append(testPods, getCadvisorPod())...))
+
 	// Log throughput data.
 	logPodCreateThroughput(batchlag, e2eLags, testArg.podsNr, testInfo)
 
diff --git a/test/e2e_node/resource_collector.go b/test/e2e_node/resource_collector.go
index a474cabb5c1..3409df7432a 100644
--- a/test/e2e_node/resource_collector.go
+++ b/test/e2e_node/resource_collector.go
@@ -386,7 +386,7 @@ func deletePodsSync(f *framework.Framework, pods []*v1.Pod) {
 }
 
 // newTestPods creates a list of pods (specification) for test.
-func newTestPods(numPods int, imageName, podType string) []*v1.Pod {
+func newTestPods(numPods int, volume bool, imageName, podType string) []*v1.Pod {
 	var pods []*v1.Pod
 	for i := 0; i < numPods; i++ {
 		podName := "test-" + string(uuid.NewUUID())
@@ -394,22 +394,48 @@ func newTestPods(numPods int, volume bool, imageName, podType string) []*v1.Pod
 			"type": podType,
 			"name": podName,
 		}
-		pods = append(pods,
-			&v1.Pod{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:   podName,
-					Labels: labels,
-				},
-				Spec: v1.PodSpec{
-					// Restart policy is always (default).
-					Containers: []v1.Container{
-						{
-							Image: imageName,
-							Name:  podName,
+		if volume {
+			pods = append(pods,
+				&v1.Pod{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:   podName,
+						Labels: labels,
+					},
+					Spec: v1.PodSpec{
+						// Restart policy is always (default).
+						Containers: []v1.Container{
+							{
+								Image: imageName,
+								Name:  podName,
+								VolumeMounts: []v1.VolumeMount{
+									{MountPath: "/test-volume-mnt", Name: podName + "-volume"},
+								},
+							},
+						},
+						Volumes: []v1.Volume{
+							{Name: podName + "-volume", VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}},
 						},
 					},
-				},
-			})
+				})
+		} else {
+			pods = append(pods,
+				&v1.Pod{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:   podName,
+						Labels: labels,
+					},
+					Spec: v1.PodSpec{
+						// Restart policy is always (default).
+						Containers: []v1.Container{
+							{
+								Image: imageName,
+								Name:  podName,
+							},
+						},
+					},
+				})
+		}
+	}
 	return pods
 }
 
diff --git a/test/e2e_node/resource_usage_test.go b/test/e2e_node/resource_usage_test.go
index 41d478ff6c9..66643f1aade 100644
--- a/test/e2e_node/resource_usage_test.go
+++ b/test/e2e_node/resource_usage_test.go
@@ -142,7 +142,7 @@ func runResourceUsageTest(f *framework.Framework, rc *ResourceCollector, testArg
 		// sleep for an interval here to measure steady data
 		sleepAfterCreatePods = 10 * time.Second
 	)
-	pods := newTestPods(testArg.podsNr, framework.GetPauseImageNameForHostArch(), "test_pod")
+	pods := newTestPods(testArg.podsNr, true, framework.GetPauseImageNameForHostArch(), "test_pod")
 
 	rc.Start()
 	// Explicitly delete pods to prevent namespace controller cleanning up timeout
diff --git a/test/e2e_node/restart_test.go b/test/e2e_node/restart_test.go
index 572634cc7aa..50adc82ec26 100644
--- a/test/e2e_node/restart_test.go
+++ b/test/e2e_node/restart_test.go
@@ -79,7 +79,7 @@ var _ = framework.KubeDescribe("Restart [Serial] [Slow] [Disruptive]", func() {
 
 	Context("Network", func() {
 		It("should recover from ip leak", func() {
-			pods := newTestPods(podCount, framework.GetPauseImageNameForHostArch(), "restart-docker-test")
+			pods := newTestPods(podCount, false, framework.GetPauseImageNameForHostArch(), "restart-docker-test")
 			By(fmt.Sprintf("Trying to create %d pods on node", len(pods)))
 			createBatchPodWithRateControl(f, pods, podCreationInterval)
 			defer deletePodsSync(f, pods)
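
Reviewer note (not part of the patch): the standalone sketch below illustrates the pod shape that newTestPods now produces when its new volume argument is true, namely a single pause container mounting an EmptyDir volume at /test-volume-mnt named "<podName>-volume". The import paths, the pausePodWithVolume helper name, and the hard-coded pause image are illustrative assumptions; in the e2e suite the image comes from framework.GetPauseImageNameForHostArch() and the real helper lives in test/e2e_node/resource_collector.go.

// Minimal sketch, assuming the current "k8s.io/api/core/v1" package; the code
// in this patch used the 2017-era "k8s.io/kubernetes/pkg/api/v1" import instead.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// pausePodWithVolume mirrors the volume=true branch of newTestPods: one
// container that mounts an EmptyDir volume at /test-volume-mnt.
func pausePodWithVolume(podName, imageName string, labels map[string]string) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: podName, Labels: labels},
		Spec: v1.PodSpec{
			// Restart policy is Always (default), matching the test pods.
			Containers: []v1.Container{{
				Image: imageName,
				Name:  podName,
				VolumeMounts: []v1.VolumeMount{
					{MountPath: "/test-volume-mnt", Name: podName + "-volume"},
				},
			}},
			Volumes: []v1.Volume{
				{Name: podName + "-volume", VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}},
			},
		},
	}
}

func main() {
	// Hypothetical image name; the e2e tests resolve the pause image per host architecture.
	pod := pausePodWithVolume("test-example", "gcr.io/google_containers/pause-amd64:3.0",
		map[string]string{"type": "density_test_pod", "name": "test-example"})
	fmt.Println(pod.Spec.Volumes[0].Name) // prints "test-example-volume"
}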