Merge pull request #31716 from coufon/explicitly_delete_pods_in_node_perf_test

Automatic merge from submit-queue

Explicitly delete pods in node performance tests

This PR explicitly deletes all created pods at the end of the node e2e performance-related tests.

A large number of pods can cause the namespace cleanup to time out (see #30878), so we explicitly delete all pods as part of cleanup.
Kubernetes Submit Queue authored 2016-08-30 16:52:03 -07:00, committed by GitHub
Commit 2b755dc480
3 changed files with 17 additions and 28 deletions
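
At its core, the cleanup this PR adds deletes every pod in parallel and blocks until each one is gone, so total cleanup time is bounded by the slowest pod rather than the sum over all pods. Below is a minimal, self-contained sketch of that pattern; the podClient interface is a hypothetical stand-in for the two framework calls the real helper uses (f.PodClient().Delete and framework.WaitForPodToDisappear):

package cleanup

import (
    "fmt"
    "sync"
    "time"
)

// podClient is a hypothetical stand-in for the framework's pod client.
type podClient interface {
    Delete(name string, gracePeriodSeconds int64) error
    WaitDisappear(name string, interval, timeout time.Duration) error
}

// deletePodsSync mirrors the helper added in this PR: delete every pod
// concurrently, then block until all of them have disappeared.
func deletePodsSync(c podClient, names []string) error {
    var wg sync.WaitGroup
    errs := make(chan error, len(names))
    for _, name := range names {
        wg.Add(1)
        go func(name string) {
            defer wg.Done()
            // 30s grace period, matching api.NewDeleteOptions(30) in the diff.
            if err := c.Delete(name, 30); err != nil {
                errs <- fmt.Errorf("delete %s: %v", name, err)
                return
            }
            // Poll every 30s, give up after 10min, matching the diff.
            if err := c.WaitDisappear(name, 30*time.Second, 10*time.Minute); err != nil {
                errs <- fmt.Errorf("wait for %s: %v", name, err)
            }
        }(name)
    }
    wg.Wait()
    close(errs)
    return <-errs // nil if no goroutine reported an error
}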


@@ -19,7 +19,6 @@ limitations under the License.
 package e2e_node

 import (
-    "errors"
     "fmt"
     "sort"
     "strconv"
@@ -27,7 +26,6 @@ import (
     "time"

     "k8s.io/kubernetes/pkg/api"
-    apierrors "k8s.io/kubernetes/pkg/api/errors"
     "k8s.io/kubernetes/pkg/api/unversioned"
     "k8s.io/kubernetes/pkg/client/cache"
     controllerframework "k8s.io/kubernetes/pkg/controller/framework"
@@ -67,15 +65,12 @@ var _ = framework.KubeDescribe("Density [Serial] [Slow]", func() {
         ns = f.Namespace.Name
         nodeName = framework.TestContext.NodeName
         // Start a standalone cadvisor pod using 'createSync', the pod is running when it returns
-        createCadvisorPod(f)
+        f.PodClient().CreateSync(getCadvisorPod())
         // Resource collector monitors fine-grain CPU/memory usage by a standalone Cadvisor with
         // 1s housingkeeping interval
         rc = NewResourceCollector(containerStatsPollingPeriod)
     })

-    AfterEach(func() {
-    })
-
     Context("create a batch of pods", func() {
         // TODO(coufon): the values are generous, set more precise limits with benchmark data
         // and add more tests
@@ -296,6 +291,8 @@ func runDensityBatchTest(f *framework.Framework, rc *ResourceCollector, testArg
     time.Sleep(sleepBeforeCreatePods)

     rc.Start()
+    // Explicitly delete pods to prevent namespace controller cleanning up timeout
+    defer deletePodsSync(f, append(pods, getCadvisorPod()))
     defer rc.Stop()

     By("Creating a batch of pods")
@@ -371,6 +368,8 @@ func runDensitySeqTest(f *framework.Framework, rc *ResourceCollector, testArg de
     time.Sleep(sleepBeforeCreatePods)

     rc.Start()
+    // Explicitly delete pods to prevent namespace controller cleanning up timeout
+    defer deletePodsSync(f, append(bgPods, append(testPods, getCadvisorPod())...))
     defer rc.Stop()

     // Create pods sequentially (back-to-back). e2eLags have been sorted.
@@ -394,16 +393,6 @@ func createBatchPodWithRateControl(f *framework.Framework, pods []*api.Pod, inte
     return createTimes
 }

-// checkPodDeleted checks whether a pod has been successfully deleted
-func checkPodDeleted(f *framework.Framework, podName string) error {
-    ns := f.Namespace.Name
-    _, err := f.Client.Pods(ns).Get(podName)
-    if apierrors.IsNotFound(err) {
-        return nil
-    }
-    return errors.New("Pod Not Deleted")
-}
-
 // getPodStartLatency gets prometheus metric 'pod start latency' from kubelet
 func getPodStartLatency(node string) (framework.KubeletLatencyMetrics, error) {
     latencyMetrics := framework.KubeletLatencyMetrics{}
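
Two details worth noting in the hunks above. First, checkPodDeleted can be dropped (along with the errors and apierrors imports) because the new deletePodsSync already blocks until the pods disappear. Second, defer deletePodsSync(...) is registered before defer rc.Stop(), and Go runs deferred calls in last-in, first-out order, so the resource collector stops before the mass deletion begins and the deletion overhead stays out of the collected CPU/memory samples. A quick illustration of that LIFO rule:

package main

import "fmt"

func main() {
    defer fmt.Println("runs last:  delete pods (registered first)")
    defer fmt.Println("runs first: stop resource collector")
    fmt.Println("test body")
}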


@@ -292,8 +292,8 @@ func formatCPUSummary(summary framework.ContainersCPUSummary) string {
 }

 // createCadvisorPod creates a standalone cadvisor pod for fine-grain resource monitoring.
-func createCadvisorPod(f *framework.Framework) {
-    f.PodClient().CreateSync(&api.Pod{
+func getCadvisorPod() *api.Pod {
+    return &api.Pod{
         ObjectMeta: api.ObjectMeta{
             Name: cadvisorPodName,
         },
@@ -363,24 +363,22 @@ func createCadvisorPod(f *framework.Framework) {
                 },
             },
         },
-    })
+    }
 }

-// deleteBatchPod deletes a batch of pods (synchronous).
-func deleteBatchPod(f *framework.Framework, pods []*api.Pod) {
-    ns := f.Namespace.Name
+// deletePodsSync deletes a list of pods and block until pods disappear.
+func deletePodsSync(f *framework.Framework, pods []*api.Pod) {
     var wg sync.WaitGroup
     for _, pod := range pods {
         wg.Add(1)
         go func(pod *api.Pod) {
             defer wg.Done()
-            err := f.Client.Pods(ns).Delete(pod.ObjectMeta.Name, api.NewDeleteOptions(30))
+            err := f.PodClient().Delete(pod.ObjectMeta.Name, api.NewDeleteOptions(30))
             Expect(err).NotTo(HaveOccurred())
-            Expect(framework.WaitForPodToDisappear(f.Client, ns, pod.ObjectMeta.Name, labels.Everything(),
-                30*time.Second, 10*time.Minute)).
-                NotTo(HaveOccurred())
+            Expect(framework.WaitForPodToDisappear(f.Client, f.Namespace.Name, pod.ObjectMeta.Name, labels.Everything(),
+                30*time.Second, 10*time.Minute)).NotTo(HaveOccurred())
         }(pod)
     }
     wg.Wait()
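
The first hunk in this file is what makes the cadvisor pod deletable alongside the test pods: createCadvisorPod(f) created the pod as a side effect, while getCadvisorPod() only builds and returns the spec, so callers can hand the same value to f.PodClient().CreateSync at setup and to deletePodsSync at teardown. A simplified sketch of the shape of that refactor, using a stand-in Pod type rather than the real api.Pod:

package cleanup

// Pod is a simplified stand-in for k8s.io/kubernetes/pkg/api.Pod.
type Pod struct{ Name string }

// getCadvisorPod returns the spec instead of creating the pod, so one
// value can serve both the create path and the delete path.
func getCadvisorPod() *Pod {
    return &Pod{Name: "cadvisor"} // the real code uses the cadvisorPodName constant
}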


@@ -52,7 +52,7 @@ var _ = framework.KubeDescribe("Resource-usage [Serial] [Slow]", func() {
         // The Cadvsior of Kubelet has a housekeeping interval of 10s, which is too long to
         // show the resource usage spikes. But changing its interval increases the overhead
         // of kubelet. Hence we use a Cadvisor pod.
-        createCadvisorPod(f)
+        f.PodClient().CreateSync(getCadvisorPod())
         rc = NewResourceCollector(containerStatsPollingPeriod)
     })
@@ -135,12 +135,14 @@ func runResourceUsageTest(f *framework.Framework, rc *ResourceCollector, testArg
         // sleep for an interval here to measure steady data
         sleepAfterCreatePods = 10 * time.Second
     )
+    pods := newTestPods(testArg.podsNr, ImageRegistry[pauseImage], "test_pod")

     rc.Start()
+    // Explicitly delete pods to prevent namespace controller cleanning up timeout
+    defer deletePodsSync(f, append(pods, getCadvisorPod()))
     defer rc.Stop()

     By("Creating a batch of Pods")
-    pods := newTestPods(testArg.podsNr, ImageRegistry[pauseImage], "test_pod")
     f.PodClient().CreateBatch(pods)

     // wait for a while to let the node be steady
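
One subtlety in this last hunk: pods := newTestPods(...) moves above rc.Start() because the arguments of a deferred call are evaluated at the defer statement itself, not when the function returns, so the slice must already be built when defer deletePodsSync(f, append(pods, getCadvisorPod())) executes. A quick demonstration of that evaluation rule:

package main

import "fmt"

func main() {
    pods := []string{"pod-a", "pod-b"}
    // The append runs NOW: the cleanup list is fixed at this point.
    defer fmt.Println("cleanup:", append(pods, "cadvisor"))
    fmt.Println("test body runs")
}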