e2e_node: fix tests after Kubelet dynamic configuration removal

- CPU manager
- Memory Manager
- Topology Manager

Signed-off-by: Artyom Lukianov <alukiano@redhat.com>
This commit is contained in:
Artyom Lukianov 2021-11-07 18:01:14 +02:00
parent d92a443ca7
commit 117141eee3
5 changed files with 22 additions and 21 deletions

View File

@@ -228,7 +228,7 @@ func configureCPUManagerInKubelet(oldCfg *kubeletconfig.KubeletConfiguration, ku
}
}
return oldCfg
return newCfg
}
func runGuPodTest(f *framework.Framework, cpuCount int) {
@@ -532,8 +532,10 @@ func runCPUManagerTests(f *framework.Framework) {
ginkgo.BeforeEach(func() {
var err error
oldCfg, err = getCurrentKubeletConfig()
framework.ExpectNoError(err)
if oldCfg == nil {
oldCfg, err = getCurrentKubeletConfig()
framework.ExpectNoError(err)
}
})
ginkgo.It("should assign CPUs as expected based on the Pod spec", func() {

View File

@@ -85,7 +85,7 @@ func registerNodeFlags(flags *flag.FlagSet) {
// It is hard and unnecessary to deal with the complexity inside the test suite.
flags.BoolVar(&framework.TestContext.NodeConformance, "conformance", false, "If true, the test suite will not start kubelet, and fetch system log (kernel, docker, kubelet log etc.) to the report directory.")
flags.BoolVar(&framework.TestContext.PrepullImages, "prepull-images", true, "If true, prepull images so image pull failures do not cause test failures.")
flags.BoolVar(&framework.TestContext.RestartKubelet, "restart-kubelet", true, "If true, restart Kubelet unit when the process is killed.")
flags.BoolVar(&framework.TestContext.RestartKubelet, "restart-kubelet", false, "If true, restart Kubelet unit when the process is killed.")
flags.StringVar(&framework.TestContext.ImageDescription, "image-description", "", "The description of the image which the test will be running on.")
flags.StringVar(&framework.TestContext.SystemSpecName, "system-spec-name", "", "The name of the system spec (e.g., gke) that's used in the node e2e test. The system specs are in test/e2e_node/system/specs/. This is used by the test framework to determine which tests to run for validating the system requirements.")
flags.Var(cliflag.NewMapStringString(&framework.TestContext.ExtraEnvs), "extra-envs", "The extra environment variables needed for node e2e tests. Format: a list of key=value pairs, e.g., env1=val1,env2=val2")

View File

@@ -53,6 +53,7 @@ const (
resourceMemory = "memory"
staticPolicy = "Static"
nonePolicy = "None"
hugepages2MiCount = 8
)
// Helper for makeMemoryManagerPod().
@@ -318,23 +319,22 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager
if len(allNUMANodes) == 0 {
allNUMANodes = getAllNUMANodes()
}
})
// dynamically update the kubelet configuration
ginkgo.JustBeforeEach(func() {
hugepagesCount := 8
// allocate hugepages
if *is2MiHugepagesSupported {
ginkgo.By("Configuring hugepages")
gomega.Eventually(func() error {
return configureHugePages(hugepagesSize2M, hugepagesCount)
return configureHugePages(hugepagesSize2M, hugepages2MiCount)
}, 30*time.Second, framework.Poll).Should(gomega.BeNil())
}
})
restartKubelet(true)
// dynamically update the kubelet configuration
ginkgo.JustBeforeEach(func() {
// allocate hugepages
if *is2MiHugepagesSupported {
ginkgo.By("Waiting for hugepages resource to become available on the local node")
waitingForHugepages(hugepagesCount)
waitingForHugepages(hugepages2MiCount)
for i := 0; i < len(ctnParams); i++ {
ctnParams[i].hugepages2Mi = "8Mi"
@@ -358,10 +358,6 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager
gomega.Eventually(func() error {
return configureHugePages(hugepagesSize2M, 0)
}, 90*time.Second, 15*time.Second).ShouldNot(gomega.HaveOccurred(), "failed to release hugepages")
restartKubelet(true)
waitingForHugepages(0)
}
})

View File

@@ -923,7 +923,7 @@ func runTopologyManagerTests(f *framework.Framework) {
configMap := getSRIOVDevicePluginConfigMap(framework.TestContext.SriovdpConfigMapFile)
oldCfg, err := getCurrentKubeletConfig()
oldCfg, err = getCurrentKubeletConfig()
framework.ExpectNoError(err)
policy := topologymanager.PolicySingleNumaNode
@@ -936,8 +936,10 @@ func runTopologyManagerTests(f *framework.Framework) {
})
ginkgo.AfterEach(func() {
// restore kubelet config
updateKubeletConfig(f, oldCfg, true)
if oldCfg != nil {
// restore kubelet config
updateKubeletConfig(f, oldCfg, true)
}
})
}

View File

@@ -163,7 +163,8 @@ func tempSetCurrentKubeletConfig(f *framework.Framework, updateFunction func(ini
var oldCfg *kubeletconfig.KubeletConfiguration
ginkgo.BeforeEach(func() {
oldCfg, err := getCurrentKubeletConfig()
var err error
oldCfg, err = getCurrentKubeletConfig()
framework.ExpectNoError(err)
newCfg := oldCfg.DeepCopy()