Mirror of https://github.com/k3s-io/kubernetes.git
e2e_node: install gpu pod with PodClient
Prior to this change, the pod was not getting scheduled onto the node because e2e_node does not run a scheduler. PodClient solves this by manually assigning the pod to the node (a sketch of the idea follows the commit metadata below).
Parent: 0cc8af82a1
Commit: 7d7884c0e6
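For context, here is a minimal sketch of the trick PodClient relies on: a pod created with spec.nodeName already set bypasses the scheduler entirely, and the kubelet on that node picks it up directly. This assumes a plain client-go clientset; createPinnedPod and nodeName are illustrative names, not the e2e framework's actual API.

package main

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// createPinnedPod pre-assigns the pod to a node before creation.
// With spec.nodeName set, no scheduler is needed: the kubelet on
// nodeName sees the pod via its watch and runs it directly.
// Hypothetical helper for illustration only.
func createPinnedPod(cs kubernetes.Interface, pod *v1.Pod, nodeName string) (*v1.Pod, error) {
	pod.Spec.NodeName = nodeName // manual node assignment; scheduler bypassed
	return cs.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod, metav1.CreateOptions{})
}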
@@ -54,8 +54,7 @@ func NVIDIADevicePlugin() *v1.Pod {
 	framework.ExpectNoError(err)
 	p := &v1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "device-plugin-nvidia-gpu-" + string(uuid.NewUUID()),
-			Namespace: metav1.NamespaceSystem,
 		},
 		Spec: ds.Spec.Template.Spec,
 	}
@@ -70,7 +69,6 @@ var _ = SIGDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugin][NodeFeat
 
 	ginkgo.Context("DevicePlugin", func() {
 		var devicePluginPod *v1.Pod
-		var err error
 		ginkgo.BeforeEach(func() {
 			ginkgo.By("Ensuring that Nvidia GPUs exists on the node")
 			if !checkIfNvidiaGPUsExistOnNode() {
@@ -81,14 +79,13 @@ var _ = SIGDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugin][NodeFeat
 				ginkgo.Skip("Test works only with in-tree dockershim. Skipping test.")
 			}
 
-			ginkgo.By("Creating the Google Device Plugin pod for NVIDIA GPU in GKE")
-			devicePluginPod, err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Create(context.TODO(), NVIDIADevicePlugin(), metav1.CreateOptions{})
-			framework.ExpectNoError(err)
+			ginkgo.By("Creating the Google Device Plugin pod for NVIDIA GPU")
+			devicePluginPod = f.PodClient().Create(NVIDIADevicePlugin())
 
 			ginkgo.By("Waiting for GPUs to become available on the local node")
 			gomega.Eventually(func() bool {
 				return numberOfNVIDIAGPUs(getLocalNode(f)) > 0
-			}, 5*time.Minute, framework.Poll).Should(gomega.BeTrue())
+			}, 5*time.Minute, framework.Poll).Should(gomega.BeTrue(), "GPUs never became available on the local node")
 
 			if numberOfNVIDIAGPUs(getLocalNode(f)) < 2 {
 				ginkgo.Skip("Not enough GPUs to execute this test (at least two needed)")