e2e: topomgr: use deletePodSync for faster delete

Previously the code deleted pods serially. This patch factors the
deletion logic out into a deletePodsAsync helper that deletes the
pods in parallel, using goroutines.

This shaves some time off the e2e topology manager test run, with no
intended change in behaviour.

Signed-off-by: Francesco Romani <fromani@redhat.com>
Author: Francesco Romani
Date:   2020-10-15 17:31:25 +02:00
Parent: 0c6d922812
Commit: fc0955c26a
2 changed files with 23 additions and 26 deletions
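
For illustration only, a minimal standalone sketch of the fan-out-and-wait pattern the patch factors out into deletePodsAsync. The names deletePodStandIn and deletePodsAsyncSketch are made up for this sketch, and the stand-in merely simulates the blocking work that the real deletePodSyncByName and waitForAllContainerRemoval helpers do in the diff below.

package main

import (
	"fmt"
	"sync"
	"time"
)

// deletePodStandIn simulates the blocking per-pod cleanup that the real
// e2e helpers (deletePodSyncByName + waitForAllContainerRemoval) perform.
func deletePodStandIn(podNS, podName string) {
	time.Sleep(100 * time.Millisecond) // pretend the delete and the wait take a while
	fmt.Printf("deleted %s/%s\n", podNS, podName)
}

// deletePodsAsyncSketch mirrors the shape of the deletePodsAsync helper
// added by the patch: one goroutine per pod, then wait for all of them.
func deletePodsAsyncSketch(podNS string, podNames []string) {
	var wg sync.WaitGroup
	for _, podName := range podNames {
		wg.Add(1)
		go func(podName string) {
			defer wg.Done()
			deletePodStandIn(podNS, podName)
		}(podName)
	}
	wg.Wait()
}

func main() {
	deletePodsAsyncSketch("e2e-tests", []string{"gu-pod-0", "gu-pod-1", "gu-pod-2"})
}

Passing the pod name as a goroutine argument (the real helper passes pod.Namespace and pod.Name) avoids the loop-variable capture pitfall, and wg.Wait() makes total cleanup time roughly that of the slowest pod rather than the sum over all pods.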

@@ -21,7 +21,6 @@ import (
 	"fmt"
 	"io/ioutil"
 	"strings"
-	"sync"
 	"time"

 	v1 "k8s.io/api/core/v1"
@@ -148,19 +147,7 @@ func (tpd *testPodData) createPodsForTest(f *framework.Framework, podReqs []podD

 /* deletePodsForTest clean up all the pods run for a testcase. Must ensure proper cleanup */
 func (tpd *testPodData) deletePodsForTest(f *framework.Framework) {
-	podNS := f.Namespace.Name
-	var wg sync.WaitGroup
-	for podName := range tpd.PodMap {
-		wg.Add(1)
-		go func(podName string) {
-			defer ginkgo.GinkgoRecover()
-			defer wg.Done()
-
-			deletePodSyncByName(f, podName)
-			waitForAllContainerRemoval(podName, podNS)
-		}(podName)
-	}
-	wg.Wait()
+	deletePodsAsync(f, tpd.PodMap)
 }

 /* deletePod removes pod during a test. Should do a best-effort clean up */

@@ -24,6 +24,7 @@ import (
 	"regexp"
 	"strconv"
 	"strings"
+	"sync"
 	"time"

 	testutils "k8s.io/kubernetes/test/utils"
@@ -414,7 +415,7 @@ func waitForAllContainerRemoval(podName, podNS string) {
 }

 func runTopologyManagerPositiveTest(f *framework.Framework, numPods int, ctnAttrs, initCtnAttrs []tmCtnAttribute, envInfo *testEnvInfo) {
-	var pods []*v1.Pod
+	podMap := make(map[string]*v1.Pod)

 	for podID := 0; podID < numPods; podID++ {
 		podName := fmt.Sprintf("gu-pod-%d", podID)
@@ -422,30 +423,39 @@ func runTopologyManagerPositiveTest(f *framework.Framework, numPods int, ctnAttr
 		pod := makeTopologyManagerTestPod(podName, ctnAttrs, initCtnAttrs)
 		pod = f.PodClient().CreateSync(pod)
 		framework.Logf("created pod %s", podName)
-		pods = append(pods, pod)
+		podMap[podName] = pod
 	}

 	// per https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/693-topology-manager/README.md#multi-numa-systems-tests
 	// we can do a menaingful validation only when using the single-numa node policy
 	if envInfo.policy == topologymanager.PolicySingleNumaNode {
-		for podID := 0; podID < numPods; podID++ {
-			validatePodAlignment(f, pods[podID], envInfo)
+		for _, pod := range podMap {
+			validatePodAlignment(f, pod, envInfo)
 		}
 		if envInfo.scope == podScopeTopology {
-			for podID := 0; podID < numPods; podID++ {
-				err := validatePodAlignmentWithPodScope(f, pods[podID], envInfo)
+			for _, pod := range podMap {
+				err := validatePodAlignmentWithPodScope(f, pod, envInfo)
 				framework.ExpectNoError(err)
 			}
 		}
 	}

-	for podID := 0; podID < numPods; podID++ {
-		pod := pods[podID]
-		framework.Logf("deleting the pod %s/%s and waiting for container removal",
-			pod.Namespace, pod.Name)
-		deletePodSyncByName(f, pod.Name)
-		waitForAllContainerRemoval(pod.Name, pod.Namespace)
-	}
+	deletePodsAsync(f, podMap)
 }

+func deletePodsAsync(f *framework.Framework, podMap map[string]*v1.Pod) {
+	var wg sync.WaitGroup
+	for _, pod := range podMap {
+		wg.Add(1)
+		go func(podNS, podName string) {
+			defer ginkgo.GinkgoRecover()
+			defer wg.Done()
+
+			deletePodSyncByName(f, podName)
+			waitForAllContainerRemoval(podName, podNS)
+		}(pod.Namespace, pod.Name)
+	}
+	wg.Wait()
+}
+
 func runTopologyManagerNegativeTest(f *framework.Framework, ctnAttrs, initCtnAttrs []tmCtnAttribute, envInfo *testEnvInfo) {