Merge pull request #96219 from fromanirh/tm-e2e-sriovdp-usage

node: e2e: bring up/down SRIOV DP just once
Author: Kubernetes Prow Robot (2020-12-08 16:29:53 -08:00), committed by GitHub
commit dcc863a8b3
GPG Key ID: 4AEE18F83AFDEB23

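In short: the SRIOV device plugin pod is now created and torn down once per suite, and the new waitForSRIOVResources helper lets each per-policy run simply wait until the plugin's resources are advertised again. A minimal sketch of the resulting flow, assuming the helpers shown in the diff below (the wrapper function and loop wiring here are illustrative, not the actual test code):

// Sketch only: setupSRIOVConfigOrFail, waitForSRIOVResources and
// teardownSRIOVConfigOrFail are the helpers from the diff below; the
// wrapper and the per-policy call are illustrative placeholders.
func runSuiteSketch(f *framework.Framework, configMap *v1.ConfigMap, policies []string) {
    // Bring the SRIOV device plugin up once for the whole suite...
    sd := setupSRIOVConfigOrFail(f, configMap)
    // ...and tear it down once, after every policy has been exercised.
    defer teardownSRIOVConfigOrFail(f, sd)

    for _, policy := range policies {
        // Each run now only waits for the SRIOV resources to show up,
        // instead of redeploying the device plugin pod.
        waitForSRIOVResources(f, sd)
        runPolicyTests(f, sd, policy) // hypothetical stand-in for the per-policy suite
    }
}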

@@ -531,6 +531,16 @@ func setupSRIOVConfigOrFail(f *framework.Framework, configMap *v1.ConfigMap) *sr
}
framework.ExpectNoError(err)
return &sriovData{
configMap: configMap,
serviceAccount: serviceAccount,
pod: dpPod,
}
}
// waitForSRIOVResources waits until enough SRIOV resources are available, expecting to complete within the timeout.
// If it exits successfully, it updates the sriovData with the resources that were found.
func waitForSRIOVResources(f *framework.Framework, sd *sriovData) {
sriovResourceName := ""
var sriovResourceAmount int64
ginkgo.By("Waiting for devices to become available on the local node")
@@ -539,15 +549,10 @@ func setupSRIOVConfigOrFail(f *framework.Framework, configMap *v1.ConfigMap) *sr
sriovResourceName, sriovResourceAmount = findSRIOVResource(node)
return sriovResourceAmount > minSriovResource
}, 2*time.Minute, framework.Poll).Should(gomega.BeTrue())
framework.Logf("Successfully created device plugin pod, detected %d SRIOV allocatable devices %q", sriovResourceAmount, sriovResourceName)
return &sriovData{
configMap: configMap,
serviceAccount: serviceAccount,
pod: dpPod,
resourceName: sriovResourceName,
resourceAmount: sriovResourceAmount,
}
sd.resourceName = sriovResourceName
sd.resourceAmount = sriovResourceAmount
framework.Logf("Detected SRIOV allocatable devices name=%q amount=%d", sd.resourceName, sd.resourceAmount)
}
func teardownSRIOVConfigOrFail(f *framework.Framework, sd *sriovData) {
@@ -672,14 +677,13 @@ func runTMScopeResourceAlignmentTestSuite(f *framework.Framework, configMap *v1.
teardownSRIOVConfigOrFail(f, sd)
}
func runTopologyManagerNodeAlignmentSuiteTests(f *framework.Framework, configMap *v1.ConfigMap, reservedSystemCPUs, policy string, numaNodes, coreCount int) {
func runTopologyManagerNodeAlignmentSuiteTests(f *framework.Framework, sd *sriovData, reservedSystemCPUs, policy string, numaNodes, coreCount int) {
threadsPerCore := 1
if isHTEnabled() {
threadsPerCore = 2
}
sd := setupSRIOVConfigOrFail(f, configMap)
defer teardownSRIOVConfigOrFail(f, sd)
waitForSRIOVResources(f, sd)
envInfo := &testEnvInfo{
numaNodes: numaNodes,
@@ -855,13 +859,17 @@ func runTopologyManagerTests(f *framework.Framework) {
var oldCfg *kubeletconfig.KubeletConfiguration
var err error
var policies = []string{
topologymanager.PolicySingleNumaNode,
topologymanager.PolicyRestricted,
topologymanager.PolicyBestEffort,
topologymanager.PolicyNone,
}
ginkgo.It("run Topology Manager policy test suite", func() {
oldCfg, err = getCurrentKubeletConfig()
framework.ExpectNoError(err)
var policies = []string{topologymanager.PolicySingleNumaNode, topologymanager.PolicyRestricted,
topologymanager.PolicyBestEffort, topologymanager.PolicyNone}
scope := containerScopeTopology
for _, policy := range policies {
// Configure Topology Manager
@@ -901,8 +909,8 @@ func runTopologyManagerTests(f *framework.Framework) {
oldCfg, err = getCurrentKubeletConfig()
framework.ExpectNoError(err)
var policies = []string{topologymanager.PolicySingleNumaNode, topologymanager.PolicyRestricted,
topologymanager.PolicyBestEffort, topologymanager.PolicyNone}
sd := setupSRIOVConfigOrFail(f, configMap)
defer teardownSRIOVConfigOrFail(f, sd)
scope := containerScopeTopology
for _, policy := range policies {
@@ -912,7 +920,7 @@ func runTopologyManagerTests(f *framework.Framework) {
reservedSystemCPUs := configureTopologyManagerInKubelet(f, oldCfg, policy, scope, configMap, numaNodes)
runTopologyManagerNodeAlignmentSuiteTests(f, configMap, reservedSystemCPUs, policy, numaNodes, coreCount)
runTopologyManagerNodeAlignmentSuiteTests(f, sd, reservedSystemCPUs, policy, numaNodes, coreCount)
}
// restore kubelet config
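For reference, waitForSRIOVResources (hunk -539/+549 above) builds on gomega's Eventually polling. A self-contained sketch of that idiom, where countAllocatable is a hypothetical stand-in for findSRIOVResource and the two-minute bound mirrors the one used in the test:

package sriov_test

import (
    "testing"
    "time"

    "github.com/onsi/gomega"
)

// countAllocatable is a hypothetical stand-in for findSRIOVResource: it would
// report how many SRIOV devices the node currently advertises.
func countAllocatable() int64 { return 1 }

func TestWaitForSRIOVResources(t *testing.T) {
    g := gomega.NewWithT(t)

    // Poll every two seconds until at least one device is allocatable,
    // failing the test after the same two-minute bound the e2e test uses.
    g.Eventually(func() bool {
        return countAllocatable() > 0
    }, 2*time.Minute, 2*time.Second).Should(gomega.BeTrue())
}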