mirror of https://github.com/k3s-io/kubernetes.git
synced 2025-07-27 05:27:21 +00:00
Merge pull request #96219 from fromanirh/tm-e2e-sriovdp-usage
node: e2e: bring up/down SRIOV DP just once
commit dcc863a8b3
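In short, the diff below splits the old setupSRIOVConfigOrFail into a pure setup step plus a new waitForSRIOVResources helper, and moves SRIOV device plugin bring-up/teardown out of runTopologyManagerNodeAlignmentSuiteTests into the enclosing ginkgo.It block, so the plugin pod is created and deleted once per test suite rather than once per policy. A minimal, self-contained Go sketch of that pattern follows; the names in it are illustrative stand-ins, not the kubernetes e2e framework API.

package main

import "fmt"

// sriovFixture stands in for the suite's sriovData: it records what the
// device plugin advertised once it came up.
type sriovFixture struct {
	resourceName   string
	resourceAmount int64
}

// setupFixture models setupSRIOVConfigOrFail: deploy the expensive fixture once.
func setupFixture() *sriovFixture {
	fmt.Println("setup: deploying device plugin pod once")
	return &sriovFixture{}
}

// waitForResources models waitForSRIOVResources: a cheap per-run check that the
// already-deployed fixture reports resources, updating the fixture in place.
func waitForResources(sd *sriovFixture) {
	sd.resourceName = "example.com/sriov" // illustrative resource name
	sd.resourceAmount = 4                 // illustrative device count
	fmt.Printf("detected %d allocatable devices of %q\n", sd.resourceAmount, sd.resourceName)
}

// teardownFixture models teardownSRIOVConfigOrFail: delete the fixture once.
func teardownFixture(sd *sriovFixture) {
	fmt.Println("teardown: deleting device plugin pod once")
}

func main() {
	policies := []string{"single-numa-node", "restricted", "best-effort", "none"}

	sd := setupFixture() // once per suite, not once per policy
	defer teardownFixture(sd)

	for _, policy := range policies {
		waitForResources(sd) // the per-policy run reuses the same fixture
		fmt.Printf("running node alignment tests with policy %q\n", policy)
	}
}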
@@ -531,6 +531,16 @@ func setupSRIOVConfigOrFail(f *framework.Framework, configMap *v1.ConfigMap) *sr
 	}
 	framework.ExpectNoError(err)
 
+	return &sriovData{
+		configMap:      configMap,
+		serviceAccount: serviceAccount,
+		pod:            dpPod,
+	}
+}
+
+// waitForSRIOVResources waits until enough SRIOV resources are avaailable, expecting to complete within the timeout.
+// if exits successfully, updates the sriovData with the resources which were found.
+func waitForSRIOVResources(f *framework.Framework, sd *sriovData) {
 	sriovResourceName := ""
 	var sriovResourceAmount int64
 	ginkgo.By("Waiting for devices to become available on the local node")
@@ -539,15 +549,10 @@ func setupSRIOVConfigOrFail(f *framework.Framework, configMap *v1.ConfigMap) *sr
 		sriovResourceName, sriovResourceAmount = findSRIOVResource(node)
 		return sriovResourceAmount > minSriovResource
 	}, 2*time.Minute, framework.Poll).Should(gomega.BeTrue())
-	framework.Logf("Successfully created device plugin pod, detected %d SRIOV allocatable devices %q", sriovResourceAmount, sriovResourceName)
 
-	return &sriovData{
-		configMap:      configMap,
-		serviceAccount: serviceAccount,
-		pod:            dpPod,
-		resourceName:   sriovResourceName,
-		resourceAmount: sriovResourceAmount,
-	}
+	sd.resourceName = sriovResourceName
+	sd.resourceAmount = sriovResourceAmount
+	framework.Logf("Detected SRIOV allocatable devices name=%q amount=%d", sd.resourceName, sd.resourceAmount)
 }
 
 func teardownSRIOVConfigOrFail(f *framework.Framework, sd *sriovData) {
@@ -672,14 +677,13 @@ func runTMScopeResourceAlignmentTestSuite(f *framework.Framework, configMap *v1.
 	teardownSRIOVConfigOrFail(f, sd)
 }
 
-func runTopologyManagerNodeAlignmentSuiteTests(f *framework.Framework, configMap *v1.ConfigMap, reservedSystemCPUs, policy string, numaNodes, coreCount int) {
+func runTopologyManagerNodeAlignmentSuiteTests(f *framework.Framework, sd *sriovData, reservedSystemCPUs, policy string, numaNodes, coreCount int) {
 	threadsPerCore := 1
 	if isHTEnabled() {
 		threadsPerCore = 2
 	}
 
-	sd := setupSRIOVConfigOrFail(f, configMap)
-	defer teardownSRIOVConfigOrFail(f, sd)
+	waitForSRIOVResources(f, sd)
 
 	envInfo := &testEnvInfo{
 		numaNodes: numaNodes,
@@ -855,13 +859,17 @@ func runTopologyManagerTests(f *framework.Framework) {
 	var oldCfg *kubeletconfig.KubeletConfiguration
 	var err error
 
+	var policies = []string{
+		topologymanager.PolicySingleNumaNode,
+		topologymanager.PolicyRestricted,
+		topologymanager.PolicyBestEffort,
+		topologymanager.PolicyNone,
+	}
+
 	ginkgo.It("run Topology Manager policy test suite", func() {
 		oldCfg, err = getCurrentKubeletConfig()
 		framework.ExpectNoError(err)
 
-		var policies = []string{topologymanager.PolicySingleNumaNode, topologymanager.PolicyRestricted,
-			topologymanager.PolicyBestEffort, topologymanager.PolicyNone}
-
 		scope := containerScopeTopology
 		for _, policy := range policies {
 			// Configure Topology Manager
@@ -901,8 +909,8 @@ func runTopologyManagerTests(f *framework.Framework) {
 		oldCfg, err = getCurrentKubeletConfig()
 		framework.ExpectNoError(err)
 
-		var policies = []string{topologymanager.PolicySingleNumaNode, topologymanager.PolicyRestricted,
-			topologymanager.PolicyBestEffort, topologymanager.PolicyNone}
+		sd := setupSRIOVConfigOrFail(f, configMap)
+		defer teardownSRIOVConfigOrFail(f, sd)
 
 		scope := containerScopeTopology
 		for _, policy := range policies {
@@ -912,7 +920,7 @@ func runTopologyManagerTests(f *framework.Framework) {
 
 			reservedSystemCPUs := configureTopologyManagerInKubelet(f, oldCfg, policy, scope, configMap, numaNodes)
 
-			runTopologyManagerNodeAlignmentSuiteTests(f, configMap, reservedSystemCPUs, policy, numaNodes, coreCount)
+			runTopologyManagerNodeAlignmentSuiteTests(f, sd, reservedSystemCPUs, policy, numaNodes, coreCount)
 		}
 
 		// restore kubelet config