e2e: topomgr: use deletePodSync for faster delete

Previously, the test cleanup deleted pods serially.
This patch factors that code out into a helper which deletes
the pods in parallel, using one goroutine per pod.

This shaves some time off the e2e topology manager test run,
with no intended change in behaviour.

Signed-off-by: Francesco Romani <fromani@redhat.com>
commit fc0955c26a
parent 0c6d922812
Author: Francesco Romani
Date:   2020-10-15 17:31:25 +02:00

2 changed files with 23 additions and 26 deletions
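The fan-out/wait pattern the patch introduces is easy to reproduce in isolation. The following is a minimal, self-contained sketch of the same idea, not the actual e2e helper: deletePod, deletePodsParallel, the namespace, and the pod names are hypothetical stand-ins for the real deletePodSyncByName/waitForAllContainerRemoval helpers and the test pods in the diff below.

// A minimal sketch of the fan-out/wait cleanup pattern (assumptions noted
// above): delete every pod concurrently, then block until all are gone.
package main

import (
	"fmt"
	"sync"
	"time"
)

// deletePod is a hypothetical stand-in for the synchronous per-pod cleanup
// (delete the pod, then wait for its containers to be removed).
func deletePod(namespace, name string) {
	time.Sleep(100 * time.Millisecond) // stand-in for the real wait
	fmt.Printf("deleted %s/%s\n", namespace, name)
}

// deletePodsParallel mirrors the shape of deletePodsAsync: one goroutine per
// pod, then wait for every cleanup to finish before returning.
func deletePodsParallel(namespace string, podNames []string) {
	var wg sync.WaitGroup
	for _, name := range podNames {
		wg.Add(1)
		// Pass name as an argument so each goroutine gets its own copy of
		// the loop variable, as the patch does with pod.Namespace/pod.Name.
		go func(name string) {
			defer wg.Done()
			deletePod(namespace, name)
		}(name)
	}
	wg.Wait() // wall time is roughly the slowest pod, not the sum of all pods
}

func main() {
	deletePodsParallel("test-ns", []string{"gu-pod-0", "gu-pod-1", "gu-pod-2"})
}

In the real helper each goroutine also defers ginkgo.GinkgoRecover(), so that framework assertion failures raised inside the goroutine are reported to Ginkgo instead of crashing the test process.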


@@ -21,7 +21,6 @@ import (
 	"fmt"
 	"io/ioutil"
 	"strings"
-	"sync"
 	"time"

 	v1 "k8s.io/api/core/v1"
@@ -148,19 +147,7 @@ func (tpd *testPodData) createPodsForTest(f *framework.Framework, podReqs []podD
 /* deletePodsForTest clean up all the pods run for a testcase. Must ensure proper cleanup */
 func (tpd *testPodData) deletePodsForTest(f *framework.Framework) {
-	podNS := f.Namespace.Name
-	var wg sync.WaitGroup
-	for podName := range tpd.PodMap {
-		wg.Add(1)
-		go func(podName string) {
-			defer ginkgo.GinkgoRecover()
-			defer wg.Done()
-			deletePodSyncByName(f, podName)
-			waitForAllContainerRemoval(podName, podNS)
-		}(podName)
-	}
-	wg.Wait()
+	deletePodsAsync(f, tpd.PodMap)
 }

 /* deletePod removes pod during a test. Should do a best-effort clean up */


@@ -24,6 +24,7 @@ import (
 	"regexp"
 	"strconv"
 	"strings"
+	"sync"
 	"time"

 	testutils "k8s.io/kubernetes/test/utils"
@@ -414,7 +415,7 @@ func waitForAllContainerRemoval(podName, podNS string) {
 }

 func runTopologyManagerPositiveTest(f *framework.Framework, numPods int, ctnAttrs, initCtnAttrs []tmCtnAttribute, envInfo *testEnvInfo) {
-	var pods []*v1.Pod
+	podMap := make(map[string]*v1.Pod)

 	for podID := 0; podID < numPods; podID++ {
 		podName := fmt.Sprintf("gu-pod-%d", podID)
@@ -422,30 +423,39 @@ func runTopologyManagerPositiveTest(f *framework.Framework, numPods int, ctnAttr
 		pod := makeTopologyManagerTestPod(podName, ctnAttrs, initCtnAttrs)
 		pod = f.PodClient().CreateSync(pod)
 		framework.Logf("created pod %s", podName)
-		pods = append(pods, pod)
+		podMap[podName] = pod
 	}

 	// per https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/693-topology-manager/README.md#multi-numa-systems-tests
 	// we can do a menaingful validation only when using the single-numa node policy
 	if envInfo.policy == topologymanager.PolicySingleNumaNode {
-		for podID := 0; podID < numPods; podID++ {
-			validatePodAlignment(f, pods[podID], envInfo)
+		for _, pod := range podMap {
+			validatePodAlignment(f, pod, envInfo)
 		}
 		if envInfo.scope == podScopeTopology {
-			for podID := 0; podID < numPods; podID++ {
-				err := validatePodAlignmentWithPodScope(f, pods[podID], envInfo)
+			for _, pod := range podMap {
+				err := validatePodAlignmentWithPodScope(f, pod, envInfo)
 				framework.ExpectNoError(err)
 			}
 		}
 	}

-	for podID := 0; podID < numPods; podID++ {
-		pod := pods[podID]
-		framework.Logf("deleting the pod %s/%s and waiting for container removal",
-			pod.Namespace, pod.Name)
-		deletePodSyncByName(f, pod.Name)
-		waitForAllContainerRemoval(pod.Name, pod.Namespace)
-	}
+	deletePodsAsync(f, podMap)
 }

+func deletePodsAsync(f *framework.Framework, podMap map[string]*v1.Pod) {
+	var wg sync.WaitGroup
+	for _, pod := range podMap {
+		wg.Add(1)
+		go func(podNS, podName string) {
+			defer ginkgo.GinkgoRecover()
+			defer wg.Done()
+			deletePodSyncByName(f, podName)
+			waitForAllContainerRemoval(podName, podNS)
+		}(pod.Namespace, pod.Name)
+	}
+	wg.Wait()
+}
+
 func runTopologyManagerNegativeTest(f *framework.Framework, ctnAttrs, initCtnAttrs []tmCtnAttribute, envInfo *testEnvInfo) {