Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-18 16:21:13 +00:00)
e2e: topomgr: introduce sriov setup/teardown funcs
Reorganize the code with setup and teardown functions, to make room for the future addition of more device plugin support and to make the code a bit tidier.

Signed-off-by: Francesco Romani <fromani@redhat.com>
parent 2f0a6d2c76
commit 70cce5e3f1

@@ -598,7 +598,7 @@ func getSRIOVDevicePluginConfigMap(cmFile string) *v1.ConfigMap {
     return readConfigMapV1OrDie(cmData)
 }
 
-func runTopologyManagerNodeAlignmentSuiteTests(f *framework.Framework, configMap *v1.ConfigMap, reservedSystemCPUs string, numaNodes, coreCount int) {
+func setupSRIOVConfigOrFail(f *framework.Framework, configMap *v1.ConfigMap) (*v1.Pod, string, int64) {
     var err error
 
     ginkgo.By(fmt.Sprintf("Creating configMap %v/%v", metav1.NamespaceSystem, configMap.Name))
@@ -632,10 +632,24 @@ func runTopologyManagerNodeAlignmentSuiteTests(f *framework.Framework, configMap
     }, 2*time.Minute, framework.Poll).Should(gomega.BeTrue())
     framework.Logf("Successfully created device plugin pod, detected %d SRIOV device %q", sriovResourceAmount, sriovResourceName)
+
+    return dpPod, sriovResourceName, sriovResourceAmount
+}
+
+func teardownSRIOVConfigOrFail(f *framework.Framework, dpPod *v1.Pod) {
+    framework.Logf("deleting the SRIOV device plugin pod %s/%s and waiting for container %s removal",
+        dpPod.Namespace, dpPod.Name, dpPod.Spec.Containers[0].Name)
+    deletePodInNamespace(f, dpPod.Namespace, dpPod.Name)
+    waitForContainerRemoval(dpPod.Spec.Containers[0].Name, dpPod.Name, dpPod.Namespace)
+}
+
+func runTopologyManagerNodeAlignmentSuiteTests(f *framework.Framework, configMap *v1.ConfigMap, reservedSystemCPUs string, numaNodes, coreCount int) {
     threadsPerCore := 1
     if isHTEnabled() {
         threadsPerCore = 2
     }
 
+    dpPod, sriovResourceName, sriovResourceAmount := setupSRIOVConfigOrFail(f, configMap)
+
     // could have been a loop, we unroll it to explain the testcases
 
     // simplest case
@@ -670,10 +684,7 @@ func runTopologyManagerNodeAlignmentSuiteTests(f *framework.Framework, configMap
     ginkgo.By(fmt.Sprintf("Trying to admit a guaranteed pods, with %d cores, 1 %s device - and it should be rejected", numCores, sriovResourceName))
     runTopologyManagerNegativeTest(f, numaNodes, 1, fmt.Sprintf("%dm", numCores*1000), sriovResourceName, "1")
 
-    framework.Logf("deleting the SRIOV device plugin pod %s/%s and waiting for container %s removal",
-        dpPod.Namespace, dpPod.Name, dpPod.Spec.Containers[0].Name)
-    deletePodInNamespace(f, dpPod.Namespace, dpPod.Name)
-    waitForContainerRemoval(dpPod.Spec.Containers[0].Name, dpPod.Name, dpPod.Namespace)
+    teardownSRIOVConfigOrFail(f, dpPod)
 }
 
 func runTopologyManagerTests(f *framework.Framework) {
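
Below is a minimal sketch of how the two helpers introduced by this commit are meant to compose in a node e2e test. The wrapper name runSRIOVNodeAlignmentOrFail is hypothetical, and the defer-based cleanup is only one option; the commit itself calls teardownSRIOVConfigOrFail explicitly at the end of runTopologyManagerNodeAlignmentSuiteTests, as shown in the last hunk above.

// Hypothetical wrapper, for illustration only. It assumes the same imports as
// the surrounding test file (framework, v1) and the helpers added by this commit.
func runSRIOVNodeAlignmentOrFail(f *framework.Framework, configMap *v1.ConfigMap, numaNodes int) {
    // Bring up the SRIOV device plugin pod and wait until the node
    // advertises at least one SRIOV resource.
    dpPod, sriovResourceName, sriovResourceAmount := setupSRIOVConfigOrFail(f, configMap)
    // Remove the device plugin pod even if an assertion below fails
    // (the real suite calls teardownSRIOVConfigOrFail explicitly instead).
    defer teardownSRIOVConfigOrFail(f, dpPod)

    framework.Logf("running alignment tests with %d %q devices on %d NUMA nodes",
        sriovResourceAmount, sriovResourceName, numaNodes)
    // ... positive and negative topology manager cases go here, e.g.
    // runTopologyManagerNegativeTest as in the diff above ...
}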