diff --git a/test/e2e_node/device_plugin_test.go b/test/e2e_node/device_plugin_test.go
index 9a4e3382f93..755589c1578 100644
--- a/test/e2e_node/device_plugin_test.go
+++ b/test/e2e_node/device_plugin_test.go
@@ -130,14 +130,16 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 
 		ginkgo.By("Waiting for devices to become available on the local node")
 		gomega.Eventually(func() bool {
-			return numberOfSampleResources(getLocalNode(f)) > 0
+			node, ready := getLocalTestNode(f)
+			return ready && numberOfSampleResources(node) > 0
 		}, 5*time.Minute, framework.Poll).Should(gomega.BeTrue())
 		framework.Logf("Successfully created device plugin pod")
 
 		ginkgo.By("Waiting for the resource exported by the sample device plugin to become available on the local node")
 		gomega.Eventually(func() bool {
-			node := getLocalNode(f)
-			return numberOfDevicesCapacity(node, resourceName) == devsLen &&
+			node, ready := getLocalTestNode(f)
+			return ready &&
+				numberOfDevicesCapacity(node, resourceName) == devsLen &&
 				numberOfDevicesAllocatable(node, resourceName) == devsLen
 		}, 30*time.Second, framework.Poll).Should(gomega.BeTrue())
 	})
@@ -162,8 +164,11 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 
 		ginkgo.By("Waiting for devices to become unavailable on the local node")
 		gomega.Eventually(func() bool {
-			return numberOfSampleResources(getLocalNode(f)) <= 0
+			node, ready := getLocalTestNode(f)
+			return ready && numberOfSampleResources(node) <= 0
 		}, 5*time.Minute, framework.Poll).Should(gomega.BeTrue())
+
+		ginkgo.By("devices now unavailable on the local node")
 	})
 
 	ginkgo.It("Can schedule a pod that requires a device", func() {
@@ -284,8 +289,9 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 
 		ginkgo.By("Waiting for resource to become available on the local node after re-registration")
 		gomega.Eventually(func() bool {
-			node := getLocalNode(f)
-			return numberOfDevicesCapacity(node, resourceName) == devsLen &&
+			node, ready := getLocalTestNode(f)
+			return ready &&
+				numberOfDevicesCapacity(node, resourceName) == devsLen &&
 				numberOfDevicesAllocatable(node, resourceName) == devsLen
 		}, 30*time.Second, framework.Poll).Should(gomega.BeTrue())
 
diff --git a/test/e2e_node/util.go b/test/e2e_node/util.go
index 25185255c94..220fc3b15d6 100644
--- a/test/e2e_node/util.go
+++ b/test/e2e_node/util.go
@@ -254,6 +254,19 @@ func getLocalNode(f *framework.Framework) *v1.Node {
 	return &nodeList.Items[0]
 }
 
+// getLocalTestNode fetches the node object describing the local worker node set up by the e2e_node infra, along with its ready state.
+// getLocalTestNode is a variant of `getLocalNode` which reports but does not set any requirement about the node readiness state, letting
+// the caller decide. The check is intentionally done like `getLocalNode` does.
+// Note `getLocalNode` aborts (as in ginkgo.Expect) the test implicitly if the worker node is not ready.
+func getLocalTestNode(f *framework.Framework) (*v1.Node, bool) {
+	node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), framework.TestContext.NodeName, metav1.GetOptions{})
+	framework.ExpectNoError(err)
+	ready := e2enode.IsNodeReady(node)
+	schedulable := e2enode.IsNodeSchedulable(node)
+	framework.Logf("node %q ready=%v schedulable=%v", node.Name, ready, schedulable)
+	return node, ready && schedulable
+}
+
 // logKubeletLatencyMetrics logs KubeletLatencyMetrics computed from the Prometheus
 // metrics exposed on the current node and identified by the metricNames.
 // The Kubelet subsystem prefix is automatically prepended to these metric names.