From fc0955c26acbaecdd256443076843cbc7507d02f Mon Sep 17 00:00:00 2001
From: Francesco Romani
Date: Thu, 15 Oct 2020 17:31:25 +0200
Subject: [PATCH] e2e: topomgr: use deletePodSync for faster delete

Previously the code used to delete pods serially.
In this patch we factor out code to do that in parallel, using
goroutines.

This shaves some time in the e2e tm test run with no intended
changes in behaviour.

Signed-off-by: Francesco Romani
---
 test/e2e_node/podresources_test.go     | 15 +-----------
 test/e2e_node/topology_manager_test.go | 34 +++++++++++++++++---------
 2 files changed, 23 insertions(+), 26 deletions(-)

diff --git a/test/e2e_node/podresources_test.go b/test/e2e_node/podresources_test.go
index b4190d75e84..2384cd9d68c 100644
--- a/test/e2e_node/podresources_test.go
+++ b/test/e2e_node/podresources_test.go
@@ -21,7 +21,6 @@ import (
 	"fmt"
 	"io/ioutil"
 	"strings"
-	"sync"
 	"time"
 
 	v1 "k8s.io/api/core/v1"
@@ -148,19 +147,7 @@ func (tpd *testPodData) createPodsForTest(f *framework.Framework, podReqs []podD
 
 /* deletePodsForTest clean up all the pods run for a testcase. Must ensure proper cleanup */
 func (tpd *testPodData) deletePodsForTest(f *framework.Framework) {
-	podNS := f.Namespace.Name
-	var wg sync.WaitGroup
-	for podName := range tpd.PodMap {
-		wg.Add(1)
-		go func(podName string) {
-			defer ginkgo.GinkgoRecover()
-			defer wg.Done()
-
-			deletePodSyncByName(f, podName)
-			waitForAllContainerRemoval(podName, podNS)
-		}(podName)
-	}
-	wg.Wait()
+	deletePodsAsync(f, tpd.PodMap)
 }
 
 /* deletePod removes pod during a test. Should do a best-effort clean up */
diff --git a/test/e2e_node/topology_manager_test.go b/test/e2e_node/topology_manager_test.go
index 21a80d7c028..1e3b338c5b4 100644
--- a/test/e2e_node/topology_manager_test.go
+++ b/test/e2e_node/topology_manager_test.go
@@ -24,6 +24,7 @@ import (
 	"regexp"
 	"strconv"
 	"strings"
+	"sync"
 	"time"
 
 	testutils "k8s.io/kubernetes/test/utils"
@@ -414,7 +415,7 @@ func waitForAllContainerRemoval(podName, podNS string) {
 }
 
 func runTopologyManagerPositiveTest(f *framework.Framework, numPods int, ctnAttrs, initCtnAttrs []tmCtnAttribute, envInfo *testEnvInfo) {
-	var pods []*v1.Pod
+	podMap := make(map[string]*v1.Pod)
 
 	for podID := 0; podID < numPods; podID++ {
 		podName := fmt.Sprintf("gu-pod-%d", podID)
@@ -422,30 +423,39 @@ func runTopologyManagerPositiveTest(f *framework.Framework, numPods int, ctnAttr
 		pod := makeTopologyManagerTestPod(podName, ctnAttrs, initCtnAttrs)
 		pod = f.PodClient().CreateSync(pod)
 		framework.Logf("created pod %s", podName)
-		pods = append(pods, pod)
+		podMap[podName] = pod
 	}
 
 	// per https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/693-topology-manager/README.md#multi-numa-systems-tests
 	// we can do a menaingful validation only when using the single-numa node policy
 	if envInfo.policy == topologymanager.PolicySingleNumaNode {
-		for podID := 0; podID < numPods; podID++ {
-			validatePodAlignment(f, pods[podID], envInfo)
+		for _, pod := range podMap {
+			validatePodAlignment(f, pod, envInfo)
 		}
 		if envInfo.scope == podScopeTopology {
-			for podID := 0; podID < numPods; podID++ {
-				err := validatePodAlignmentWithPodScope(f, pods[podID], envInfo)
+			for _, pod := range podMap {
+				err := validatePodAlignmentWithPodScope(f, pod, envInfo)
 				framework.ExpectNoError(err)
 			}
 		}
 	}
 
-	for podID := 0; podID < numPods; podID++ {
-		pod := pods[podID]
-		framework.Logf("deleting the pod %s/%s and waiting for container removal",
-			pod.Namespace, pod.Name)
-		deletePodSyncByName(f, pod.Name)
-		waitForAllContainerRemoval(pod.Name, pod.Namespace)
+	deletePodsAsync(f, podMap)
+}
+
+func deletePodsAsync(f *framework.Framework, podMap map[string]*v1.Pod) {
+	var wg sync.WaitGroup
+	for _, pod := range podMap {
+		wg.Add(1)
+		go func(podNS, podName string) {
+			defer ginkgo.GinkgoRecover()
+			defer wg.Done()
+
+			deletePodSyncByName(f, podName)
+			waitForAllContainerRemoval(podName, podNS)
+		}(pod.Namespace, pod.Name)
 	}
+	wg.Wait()
 }
 
 func runTopologyManagerNegativeTest(f *framework.Framework, ctnAttrs, initCtnAttrs []tmCtnAttribute, envInfo *testEnvInfo) {
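
Note: the heart of the change is the new deletePodsAsync helper, which fans out one
goroutine per pod and joins them on a sync.WaitGroup (with ginkgo.GinkgoRecover
deferred so assertion failures inside a goroutine are reported to Ginkgo rather than
crashing the suite). For readers outside the kubernetes test tree, below is a minimal,
self-contained sketch of the same fan-out-and-wait cleanup pattern; deletePod,
waitForRemoval and deletePodsParallel are hypothetical stand-ins for the framework
helpers (deletePodSyncByName, waitForAllContainerRemoval) and are not part of the patch.

package main

import (
	"fmt"
	"sync"
	"time"
)

// deletePod and waitForRemoval stand in for the blocking per-pod cleanup
// helpers the e2e test calls; here they just sleep to simulate the wait.
func deletePod(podNS, podName string)      { time.Sleep(100 * time.Millisecond) }
func waitForRemoval(podNS, podName string) { time.Sleep(100 * time.Millisecond) }

// deletePodsParallel mirrors the fan-out-and-wait shape of deletePodsAsync:
// one goroutine per pod, joined on a WaitGroup so the caller returns only
// once every pod has been deleted and its containers removed.
func deletePodsParallel(podMap map[string]string) {
	var wg sync.WaitGroup
	for podName, podNS := range podMap {
		wg.Add(1)
		// Pass namespace/name as arguments so each goroutine gets its own
		// copy of the loop variables.
		go func(ns, name string) {
			defer wg.Done()
			deletePod(ns, name)
			waitForRemoval(ns, name)
		}(podNS, podName)
	}
	wg.Wait()
}

func main() {
	pods := map[string]string{
		"gu-pod-0": "e2e-tests-topomgr",
		"gu-pod-1": "e2e-tests-topomgr",
		"gu-pod-2": "e2e-tests-topomgr",
	}
	start := time.Now()
	deletePodsParallel(pods)
	// With the simulated 200ms per-pod cleanup above, this prints roughly
	// 200ms rather than 3x that, which is the point of deleting in parallel.
	fmt.Printf("deleted %d pods in %v\n", len(pods), time.Since(start))
}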