e2e: topomgr: extend tests to all the policies
Per https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/0035-20190130-topology-manager.md#multi-numa-systems-tests we validate only the results for the single-numa-node policy, because there is no simple and reliable way to validate the allocations performed by the other policies.

Signed-off-by: Francesco Romani <fromani@redhat.com>
commit 64904d0ab8
parent a249b93687
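At a glance, the change threads the active policy through the test environment and runs the node-alignment suite once per Topology Manager policy, keeping the alignment and rejection assertions only for single-numa-node. A condensed sketch of the resulting flow, reusing names from the diff below (not a verbatim excerpt):

    // Condensed sketch of the new control flow; names come from the diff below.
    policies := []string{
        topologymanager.PolicySingleNumaNode,
        topologymanager.PolicyRestricted,
        topologymanager.PolicyBestEffort,
        topologymanager.PolicyNone,
    }
    for _, policy := range policies {
        reservedSystemCPUs := configureTopologyManagerInKubelet(f, oldCfg, policy, configMap, numaNodes)
        // The suite receives the policy so it can decide which assertions are meaningful.
        runTopologyManagerNodeAlignmentSuiteTests(f, configMap, reservedSystemCPUs, numaNodes, coreCount, policy)
    }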
@@ -180,6 +180,7 @@ func makeEnvMap(logs string) (map[string]string, error) {
 type testEnvInfo struct {
     numaNodes         int
     sriovResourceName string
+    policy            string
 }
 
 func containerWantsDevices(cnt *v1.Container, envInfo *testEnvInfo) bool {
@@ -568,8 +568,12 @@ func runTopologyManagerPositiveTest(f *framework.Framework, numPods int, ctnAttr
         pods = append(pods, pod)
     }
 
-    for podID := 0; podID < numPods; podID++ {
-        validatePodAlignment(f, pods[podID], envInfo)
+    // per https://github.com/kubernetes/enhancements/blob/master/keps/sig-node/0035-20190130-topology-manager.md#multi-numa-systems-tests
+    // we can do a meaningful validation only when using the single-numa node policy
+    if envInfo.policy == topologymanager.PolicySingleNumaNode {
+        for podID := 0; podID < numPods; podID++ {
+            validatePodAlignment(f, pods[podID], envInfo)
+        }
     }
 
     for podID := 0; podID < numPods; podID++ {
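The gate above reduces to one comparison; a hypothetical helper (not part of this commit) spells out the intent:

    // Hypothetical helper, not in the commit: only the single-numa-node policy
    // produces allocations the e2e test can verify deterministically.
    func canValidateAlignment(envInfo *testEnvInfo) bool {
        return envInfo.policy == topologymanager.PolicySingleNumaNode
    }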
@@ -703,7 +707,7 @@ func teardownSRIOVConfigOrFail(f *framework.Framework, sd *sriovData) {
     framework.ExpectNoError(err)
 }
 
-func runTopologyManagerNodeAlignmentSuiteTests(f *framework.Framework, configMap *v1.ConfigMap, reservedSystemCPUs string, numaNodes, coreCount int) {
+func runTopologyManagerNodeAlignmentSuiteTests(f *framework.Framework, configMap *v1.ConfigMap, reservedSystemCPUs string, numaNodes, coreCount int, policy string) {
     threadsPerCore := 1
     if isHTEnabled() {
         threadsPerCore = 2
@@ -713,6 +717,7 @@ func runTopologyManagerNodeAlignmentSuiteTests(f *framework.Framework, configMap
     envInfo := &testEnvInfo{
         numaNodes:         numaNodes,
         sriovResourceName: sd.resourceName,
+        policy:            policy,
     }
 
     // could have been a loop, we unroll it to explain the testcases
@@ -859,22 +864,24 @@ func runTopologyManagerNodeAlignmentSuiteTests(f *framework.Framework, configMap
         runTopologyManagerPositiveTest(f, 2, ctnAttrs, envInfo)
     }
 
-    // overflow NUMA node capacity: cores
-    numCores := 1 + (threadsPerCore * coreCount)
-    excessCoresReq := fmt.Sprintf("%dm", numCores*1000)
-    ginkgo.By(fmt.Sprintf("Trying to admit a guaranteed pods, with %d cores, 1 %s device - and it should be rejected", numCores, sd.resourceName))
-    ctnAttrs = []tmCtnAttribute{
-        {
-            ctnName:       "gu-container",
-            cpuRequest:    excessCoresReq,
-            cpuLimit:      excessCoresReq,
-            deviceName:    sd.resourceName,
-            deviceRequest: "1",
-            deviceLimit:   "1",
-        },
+    // this is the only policy that can guarantee reliable rejects
+    if policy == topologymanager.PolicySingleNumaNode {
+        // overflow NUMA node capacity: cores
+        numCores := 1 + (threadsPerCore * coreCount)
+        excessCoresReq := fmt.Sprintf("%dm", numCores*1000)
+        ginkgo.By(fmt.Sprintf("Trying to admit a guaranteed pods, with %d cores, 1 %s device - and it should be rejected", numCores, sd.resourceName))
+        ctnAttrs = []tmCtnAttribute{
+            {
+                ctnName:       "gu-container",
+                cpuRequest:    excessCoresReq,
+                cpuLimit:      excessCoresReq,
+                deviceName:    sd.resourceName,
+                deviceRequest: "1",
+                deviceLimit:   "1",
+            },
+        }
+        runTopologyManagerNegativeTest(f, 1, ctnAttrs, envInfo)
     }
-    runTopologyManagerNegativeTest(f, 1, ctnAttrs, envInfo)
 
     teardownSRIOVConfigOrFail(f, sd)
 }
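To make the overflow arithmetic concrete (illustrative numbers, and assuming coreCount counts the physical cores of a single NUMA node, as the "overflow NUMA node capacity" comment suggests): with coreCount = 36 and hyper-threading enabled, threadsPerCore = 2, so numCores = 1 + 2*36 = 73 and excessCoresReq = "73000m", one CPU more than one NUMA node can supply. Of the four policies, only single-numa-node can guarantee that such a pod is rejected at admission, which is why the negative test is now gated on it.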
@@ -927,15 +934,18 @@ func runTopologyManagerTests(f *framework.Framework) {
         oldCfg, err = getCurrentKubeletConfig()
         framework.ExpectNoError(err)
 
-        policy := topologymanager.PolicySingleNumaNode
+        var policies = []string{topologymanager.PolicySingleNumaNode, topologymanager.PolicyRestricted,
+            topologymanager.PolicyBestEffort, topologymanager.PolicyNone}
 
-        // Configure Topology Manager
-        ginkgo.By(fmt.Sprintf("by configuring Topology Manager policy to %s", policy))
-        framework.Logf("Configuring topology Manager policy to %s", policy)
+        for _, policy := range policies {
+            // Configure Topology Manager
+            ginkgo.By(fmt.Sprintf("by configuring Topology Manager policy to %s", policy))
+            framework.Logf("Configuring topology Manager policy to %s", policy)
 
-        reservedSystemCPUs := configureTopologyManagerInKubelet(f, oldCfg, policy, configMap, numaNodes)
+            reservedSystemCPUs := configureTopologyManagerInKubelet(f, oldCfg, policy, configMap, numaNodes)
 
-        runTopologyManagerNodeAlignmentSuiteTests(f, configMap, reservedSystemCPUs, numaNodes, coreCount)
+            runTopologyManagerNodeAlignmentSuiteTests(f, configMap, reservedSystemCPUs, numaNodes, coreCount, policy)
+        }
 
         // restore kubelet config
         setOldKubeletConfig(f, oldCfg)
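As a usage note (the invocation is an assumption based on the common kubernetes e2e-node workflow, not something this commit states), the extended suite would typically be run on a multi-NUMA machine with SR-IOV devices via something like make test-e2e-node FOCUS="Topology Manager", adjusting the focus and skip expressions to the local environment.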