Merge pull request #56519 from jiayingz/e2e-node-readiness

Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Make sure the node is ready before calling `getLocalNode`, to fix the test failure.

**What this PR does / why we need it**:

**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*:
Fixes https://github.com/kubernetes/kubernetes/issues/56518

**Special notes for your reviewer**:

**Release note**:

```release-note

```
Commit f875ee596d, merged by Kubernetes Submit Queue on 2017-12-06 18:29:19 -08:00, committed by GitHub.

```diff
@@ -59,6 +59,8 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugin]
 			Skip("Nvidia GPUs do not exist on the node. Skipping test.")
 		}
+		framework.WaitForAllNodesSchedulable(f.ClientSet, framework.TestContext.NodeSchedulableTimeout)
+
 		By("Creating the Google Device Plugin pod for NVIDIA GPU in GKE")
 		devicePluginPod = f.PodClient().CreateSync(framework.NVIDIADevicePlugin(f.Namespace.Name))
@@ -111,7 +113,9 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugin]
 		f.PodClient().Delete(devicePluginPod.Name, &metav1.DeleteOptions{})
 		By("Waiting for GPUs to become unavailable on the local node")
 		Eventually(func() bool {
-			return framework.NumberOfNVIDIAGPUs(getLocalNode(f)) <= 0
+			node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
+			framework.ExpectNoError(err)
+			return framework.NumberOfNVIDIAGPUs(node) <= 0
 		}, 10*time.Minute, framework.Poll).Should(BeTrue())
 		By("Checking that scheduled pods can continue to run even after we delete device plugin.")
 		count1, devIdRestart1 = getDeviceId(f, p1.Name, p1.Name, count1+1)
```
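
To make the intent of the change concrete, below is a minimal, self-contained sketch (not from the PR) of the same pattern: wait until the target node reports Ready, then read a freshly fetched Node object from the API server instead of a cached local node. The helper names `waitForNodeReady` and `numberOfNVIDIAGPUs` are hypothetical; the client-go calls use the 2017-era signatures seen in the diff above (newer releases add a `context.Context` parameter), and `nvidia.com/gpu` is the resource name advertised by the NVIDIA device plugin.

```go
package example

import (
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForNodeReady polls the API server until the named node reports the
// Ready condition, then returns the freshly fetched Node object. This mirrors
// the test's move away from relying on a cached getLocalNode result.
func waitForNodeReady(cs kubernetes.Interface, nodeName string, timeout time.Duration) (*v1.Node, error) {
	var node *v1.Node
	err := wait.PollImmediate(2*time.Second, timeout, func() (bool, error) {
		// 2017-era client-go signature; newer versions take a context first.
		n, err := cs.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
		if err != nil {
			// Keep polling on transient API errors.
			return false, nil
		}
		for _, cond := range n.Status.Conditions {
			if cond.Type == v1.NodeReady && cond.Status == v1.ConditionTrue {
				node = n
				return true, nil
			}
		}
		return false, nil
	})
	return node, err
}

// numberOfNVIDIAGPUs reads the node's allocatable count for the
// "nvidia.com/gpu" resource exposed by the device plugin.
func numberOfNVIDIAGPUs(node *v1.Node) int64 {
	q, ok := node.Status.Allocatable["nvidia.com/gpu"]
	if !ok {
		return 0
	}
	return q.Value()
}

// example ties the two helpers together: wait for readiness, then inspect
// the up-to-date Node object rather than a stale cached copy.
func example(cs kubernetes.Interface, nodeName string) error {
	node, err := waitForNodeReady(cs, nodeName, 5*time.Minute)
	if err != nil {
		return err
	}
	fmt.Printf("node %s reports %d allocatable NVIDIA GPUs\n", node.Name, numberOfNVIDIAGPUs(node))
	return nil
}
```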