mirror of https://github.com/k3s-io/kubernetes.git
Merge pull request #95609 from fromanirh/tm-e2e-faster-delete
e2e: topology manager: use deletePodSync for faster delete
commit 92aff21558
@@ -21,7 +21,6 @@ import (
 	"fmt"
 	"io/ioutil"
 	"strings"
-	"sync"
 	"time"
 
 	v1 "k8s.io/api/core/v1"
@@ -148,19 +147,7 @@ func (tpd *testPodData) createPodsForTest(f *framework.Framework, podReqs []podD
 
 /* deletePodsForTest clean up all the pods run for a testcase. Must ensure proper cleanup */
 func (tpd *testPodData) deletePodsForTest(f *framework.Framework) {
-	podNS := f.Namespace.Name
-	var wg sync.WaitGroup
-	for podName := range tpd.PodMap {
-		wg.Add(1)
-		go func(podName string) {
-			defer ginkgo.GinkgoRecover()
-			defer wg.Done()
-
-			deletePodSyncByName(f, podName)
-			waitForAllContainerRemoval(podName, podNS)
-		}(podName)
-	}
-	wg.Wait()
+	deletePodsAsync(f, tpd.PodMap)
 }
 
 /* deletePod removes pod during a test. Should do a best-effort clean up */
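The hunks above are the first of the two files touched: testPodData.deletePodsForTest drops its own goroutine fan-out and delegates to a shared deletePodsAsync helper, which the second file below introduces. A minimal, framework-free sketch of that delegation pattern, with stand-in names in place of the real e2e helpers:

package main

import (
	"fmt"
	"sync"
)

// testPods mirrors the idea behind testPodData: it tracks the pods a test case created.
// All names here are illustrative stand-ins, not the actual e2e framework API.
type testPods struct {
	pods map[string]string // pod name -> namespace
}

// deleteAll no longer carries its own goroutine fan-out; it delegates to the shared helper,
// just as deletePodsForTest now delegates to deletePodsAsync.
func (t *testPods) deleteAll() {
	deleteAllAsync(t.pods)
}

// deleteAllAsync deletes every tracked pod concurrently and blocks until all deletions finish.
func deleteAllAsync(pods map[string]string) {
	var wg sync.WaitGroup
	for name, ns := range pods {
		wg.Add(1)
		go func(ns, name string) {
			defer wg.Done()
			// Stand-in for deletePodSyncByName + waitForAllContainerRemoval.
			fmt.Printf("deleting pod %s/%s and waiting for container removal\n", ns, name)
		}(ns, name)
	}
	wg.Wait()
}

func main() {
	t := &testPods{pods: map[string]string{"gu-pod-0": "default", "gu-pod-1": "default"}}
	t.deleteAll()
}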
@@ -24,6 +24,7 @@ import (
 	"regexp"
 	"strconv"
 	"strings"
+	"sync"
 	"time"
 
 	testutils "k8s.io/kubernetes/test/utils"
@@ -414,7 +415,7 @@ func waitForAllContainerRemoval(podName, podNS string) {
 }
 
 func runTopologyManagerPositiveTest(f *framework.Framework, numPods int, ctnAttrs, initCtnAttrs []tmCtnAttribute, envInfo *testEnvInfo) {
-	var pods []*v1.Pod
+	podMap := make(map[string]*v1.Pod)
 
 	for podID := 0; podID < numPods; podID++ {
 		podName := fmt.Sprintf("gu-pod-%d", podID)
@@ -422,30 +423,39 @@ func runTopologyManagerPositiveTest(f *framework.Framework, numPods int, ctnAttr
 		pod := makeTopologyManagerTestPod(podName, ctnAttrs, initCtnAttrs)
 		pod = f.PodClient().CreateSync(pod)
 		framework.Logf("created pod %s", podName)
-		pods = append(pods, pod)
+		podMap[podName] = pod
 	}
 
 	// per https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/693-topology-manager/README.md#multi-numa-systems-tests
 	// we can do a menaingful validation only when using the single-numa node policy
 	if envInfo.policy == topologymanager.PolicySingleNumaNode {
-		for podID := 0; podID < numPods; podID++ {
-			validatePodAlignment(f, pods[podID], envInfo)
+		for _, pod := range podMap {
+			validatePodAlignment(f, pod, envInfo)
 		}
 		if envInfo.scope == podScopeTopology {
-			for podID := 0; podID < numPods; podID++ {
-				err := validatePodAlignmentWithPodScope(f, pods[podID], envInfo)
+			for _, pod := range podMap {
+				err := validatePodAlignmentWithPodScope(f, pod, envInfo)
 				framework.ExpectNoError(err)
 			}
 		}
 	}
 
-	for podID := 0; podID < numPods; podID++ {
-		pod := pods[podID]
-		framework.Logf("deleting the pod %s/%s and waiting for container removal",
-			pod.Namespace, pod.Name)
-		deletePodSyncByName(f, pod.Name)
-		waitForAllContainerRemoval(pod.Name, pod.Namespace)
+	deletePodsAsync(f, podMap)
+}
+
+func deletePodsAsync(f *framework.Framework, podMap map[string]*v1.Pod) {
+	var wg sync.WaitGroup
+	for _, pod := range podMap {
+		wg.Add(1)
+		go func(podNS, podName string) {
+			defer ginkgo.GinkgoRecover()
+			defer wg.Done()
+
+			deletePodSyncByName(f, podName)
+			waitForAllContainerRemoval(podName, podNS)
+		}(pod.Namespace, pod.Name)
 	}
+	wg.Wait()
 }
 
 func runTopologyManagerNegativeTest(f *framework.Framework, ctnAttrs, initCtnAttrs []tmCtnAttribute, envInfo *testEnvInfo) {
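To see why the commit message promises a faster delete, here is a toy comparison of the old serial cleanup loop against the new WaitGroup fan-out; fakeDelete is an illustrative stand-in for the deletePodSyncByName plus waitForAllContainerRemoval pair, and the sleep duration is arbitrary:

package main

import (
	"fmt"
	"sync"
	"time"
)

// fakeDelete stands in for deleting one pod and waiting for its containers to be removed,
// which can take a noticeable amount of time per pod on a real node.
func fakeDelete(name string) {
	time.Sleep(200 * time.Millisecond)
	fmt.Println("deleted", name)
}

func main() {
	pods := []string{"gu-pod-0", "gu-pod-1", "gu-pod-2", "gu-pod-3"}

	// Serial cleanup (the old loop): total time grows linearly with the pod count.
	start := time.Now()
	for _, name := range pods {
		fakeDelete(name)
	}
	fmt.Println("serial:", time.Since(start).Round(time.Millisecond))

	// Concurrent cleanup (the new helper): total time is roughly the slowest single delete.
	start = time.Now()
	var wg sync.WaitGroup
	for _, name := range pods {
		wg.Add(1)
		go func(name string) {
			defer wg.Done()
			fakeDelete(name)
		}(name)
	}
	wg.Wait()
	fmt.Println("concurrent:", time.Since(start).Round(time.Millisecond))
}

With four pods the serial loop takes roughly four times the single-pod delete time, while the concurrent version finishes in about the time of the slowest delete. In the real helper each goroutine also defers ginkgo.GinkgoRecover(), so an assertion failure inside a worker is reported as a test failure instead of crashing the test process.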