diff --git a/test/e2e_node/device_plugin_test.go b/test/e2e_node/device_plugin_test.go
index 9e4493f3a21..c579c7a5ed8 100644
--- a/test/e2e_node/device_plugin_test.go
+++ b/test/e2e_node/device_plugin_test.go
@@ -220,7 +220,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 
 		restartTime := time.Now()
 		ginkgo.By("Restarting Kubelet")
-		restartKubelet()
+		restartKubelet(true)
 
 		// We need to wait for node to be ready before re-registering stub device plugin.
 		// Otherwise, Kubelet DeviceManager may remove the re-registered sockets after it starts.
diff --git a/test/e2e_node/gpu_device_plugin_test.go b/test/e2e_node/gpu_device_plugin_test.go
index 20b410b3326..54f787846da 100644
--- a/test/e2e_node/gpu_device_plugin_test.go
+++ b/test/e2e_node/gpu_device_plugin_test.go
@@ -103,7 +103,7 @@ var _ = SIGDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugin][NodeFeat
 
 			f.PodClient().DeleteSync(p.Name, metav1.DeleteOptions{}, 2*time.Minute)
 		}
-		restartKubelet()
+		restartKubelet(true)
 
 		ginkgo.By("Waiting for GPUs to become unavailable on the local node")
 		gomega.Eventually(func() bool {
@@ -142,7 +142,7 @@ var _ = SIGDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugin][NodeFeat
 		framework.ExpectEqual(devIDRestart1, devID1)
 
 		ginkgo.By("Restarting Kubelet")
-		restartKubelet()
+		restartKubelet(true)
 		framework.WaitForAllNodesSchedulable(f.ClientSet, 30*time.Minute)
 
 		ginkgo.By("Checking that scheduled pods can continue to run even after we delete device plugin and restart Kubelet.")
@@ -172,7 +172,7 @@ var _ = SIGDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugin][NodeFeat
 		}
 
 		ginkgo.By("Restarting Kubelet")
-		restartKubelet()
+		restartKubelet(true)
 
 		ginkgo.By("Confirming that after a kubelet and pod restart, GPU assignment is kept")
 		ensurePodContainerRestart(f, p1.Name, p1.Name)
@@ -181,7 +181,7 @@ var _ = SIGDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugin][NodeFeat
 
 		ginkgo.By("Restarting Kubelet and creating another pod")
 
-		restartKubelet()
+		restartKubelet(true)
 		framework.WaitForAllNodesSchedulable(f.ClientSet, 30*time.Minute)
 
 		ensurePodContainerRestart(f, p1.Name, p1.Name)
diff --git a/test/e2e_node/hugepages_test.go b/test/e2e_node/hugepages_test.go
index e62376d8c38..74c715333cc 100644
--- a/test/e2e_node/hugepages_test.go
+++ b/test/e2e_node/hugepages_test.go
@@ -207,7 +207,7 @@ var _ = SIGDescribe("HugePages [Serial] [Feature:HugePages][NodeSpecialFeature:H
 		framework.ExpectEqual(value.String(), "9Mi", "huge pages with size 3Mi should be supported")
 
 		ginkgo.By("restarting the node and verifying that huge pages with size 3Mi are not supported")
-		restartKubelet()
+		restartKubelet(true)
 
 		ginkgo.By("verifying that the hugepages-3Mi resource no longer is present")
 		gomega.Eventually(func() bool {
diff --git a/test/e2e_node/memory_manager_test.go b/test/e2e_node/memory_manager_test.go
index eee9674de48..6bdfd3a5340 100644
--- a/test/e2e_node/memory_manager_test.go
+++ b/test/e2e_node/memory_manager_test.go
@@ -348,7 +348,7 @@ var _ = SIGDescribe("Memory Manager [Serial] [Feature:MemoryManager]", func() {
 			return kubeletHealthCheck(kubeletHealthCheckURL)
 		}, time.Minute, time.Second).Should(gomega.BeFalse())
 
-		restartKubelet()
+		restartKubelet(false)
 
 		// wait until the kubelet health check will pass
 		gomega.Eventually(func() bool {
diff --git a/test/e2e_node/podresources_test.go b/test/e2e_node/podresources_test.go
index 591060c8af2..3a010e0f0c0 100644
--- a/test/e2e_node/podresources_test.go
+++ b/test/e2e_node/podresources_test.go
@@ -731,7 +731,7 @@ var _ = SIGDescribe("POD Resources [Serial] [Feature:PodResources][NodeFeature:P
 			expectPodResources(1, cli, []podDesc{desc})
 
 			ginkgo.By("Restarting Kubelet")
-			restartKubelet()
+			restartKubelet(true)
 			framework.WaitForAllNodesSchedulable(f.ClientSet, framework.TestContext.NodeSchedulableTimeout)
 			expectPodResources(1, cli, []podDesc{desc})
 			tpd.deletePodsForTest(f)
diff --git a/test/e2e_node/util.go b/test/e2e_node/util.go
index 5a3599ee7fc..411b8e02529 100644
--- a/test/e2e_node/util.go
+++ b/test/e2e_node/util.go
@@ -414,8 +414,8 @@ func findKubletServiceName(running bool) string {
 	return kubeletServiceName
 }
 
-func restartKubelet() {
-	kubeletServiceName := findKubletServiceName(false)
+func restartKubelet(running bool) {
+	kubeletServiceName := findKubletServiceName(running)
 	// reset the kubelet service start-limit-hit
 	stdout, err := exec.Command("sudo", "systemctl", "reset-failed", kubeletServiceName).CombinedOutput()
 	framework.ExpectNoError(err, "Failed to reset kubelet start-limit-hit with systemctl: %v, %s", err, string(stdout))