Merge pull request #109820 from fromanirh/e2e-node-enable-device-plugin-test
e2e: node: re-enable the device plugin tests
Commit 19ca12cb3e
@@ -19,18 +19,20 @@ package e2enode
 import (
     "context"
     "path/filepath"
+    "regexp"
     "time"
 
+    "github.com/onsi/ginkgo"
+    "github.com/onsi/gomega"
+
     appsv1 "k8s.io/api/apps/v1"
     v1 "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/apimachinery/pkg/runtime/serializer"
-    e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
+    kubeletdevicepluginv1beta1 "k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1"
     e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
     admissionapi "k8s.io/pod-security-admission/api"
 
-    "regexp"
-
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/uuid"
@@ -39,9 +41,6 @@ import (
     "k8s.io/kubernetes/test/e2e/framework"
     e2enode "k8s.io/kubernetes/test/e2e/framework/node"
     e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
-
-    "github.com/onsi/ginkgo"
-    "github.com/onsi/gomega"
 )
 
 const (
@@ -64,7 +63,7 @@ var (
 var _ = SIGDescribe("Device Plugin [Feature:DevicePluginProbe][NodeFeature:DevicePluginProbe][Serial]", func() {
     f := framework.NewDefaultFramework("device-plugin-errors")
     f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
-    testDevicePlugin(f, "/var/lib/kubelet/plugins_registry")
+    testDevicePlugin(f, kubeletdevicepluginv1beta1.DevicePluginPath)
 })
 
 // numberOfSampleResources returns the number of resources advertised by a node.
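The functional change above swaps a hard-coded path for the constant exported by the device plugin API itself: the old value, /var/lib/kubelet/plugins_registry, is the kubelet's plugin registration directory, while the v1beta1 device plugin API serves and watches sockets under its own directory. A minimal sketch of what the new argument resolves to, assuming the k8s.io/kubelet module is on the import path:

package main

import (
    "fmt"

    kubeletdevicepluginv1beta1 "k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1"
)

func main() {
    // The directory the kubelet watches for device plugin sockets
    // ("/var/lib/kubelet/device-plugins/" in the v1beta1 API).
    fmt.Println(kubeletdevicepluginv1beta1.DevicePluginPath)
    // The kubelet's own registration socket inside that directory.
    fmt.Println(kubeletdevicepluginv1beta1.KubeletSocket)
}

Using the API's constant also keeps the test in sync should the socket directory change in a future API revision.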
@@ -97,8 +96,6 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
     var devicePluginPod, dptemplate *v1.Pod
 
     ginkgo.BeforeEach(func() {
-        e2eskipper.Skipf("Device Plugin tests are currently broken and being investigated")
-
         ginkgo.By("Wait for node to be ready")
         gomega.Eventually(func() bool {
             nodes, err := e2enode.TotalReady(f.ClientSet)
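This hunk is the heart of the re-enable: an unconditional skip in BeforeEach marks every spec in the enclosing Describe block as skipped before any of them can run. A sketch of the guard that was removed, using only helpers this file previously imported (the Describe block itself is illustrative):

package e2enode

import (
    "github.com/onsi/ginkgo"

    e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
)

var _ = ginkgo.Describe("illustration of the removed guard", func() {
    ginkgo.BeforeEach(func() {
        // Skipf short-circuits the current spec; placed unconditionally in
        // BeforeEach, it skips every spec in the enclosing block.
        e2eskipper.Skipf("Device Plugin tests are currently broken and being investigated")
    })

    ginkgo.It("never runs while the guard is in place", func() {})
})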
@@ -125,19 +122,21 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
                 dp.Spec.Containers[0].Env[i].Value = pluginSockDir
             }
         }
-        dptemplate = dp
+        dptemplate = dp.DeepCopy()
         devicePluginPod = f.PodClient().CreateSync(dp)
 
         ginkgo.By("Waiting for devices to become available on the local node")
         gomega.Eventually(func() bool {
-            return numberOfSampleResources(getLocalNode(f)) > 0
+            node, ready := getLocalTestNode(f)
+            return ready && numberOfSampleResources(node) > 0
         }, 5*time.Minute, framework.Poll).Should(gomega.BeTrue())
         framework.Logf("Successfully created device plugin pod")
 
         ginkgo.By("Waiting for the resource exported by the sample device plugin to become available on the local node")
         gomega.Eventually(func() bool {
-            node := getLocalNode(f)
-            return numberOfDevicesCapacity(node, resourceName) == devsLen &&
+            node, ready := getLocalTestNode(f)
+            return ready &&
+                numberOfDevicesCapacity(node, resourceName) == devsLen &&
                 numberOfDevicesAllocatable(node, resourceName) == devsLen
         }, 30*time.Second, framework.Poll).Should(gomega.BeTrue())
     })
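The dptemplate change fixes an aliasing bug: the old assignment stored a pointer to the same Pod object that CreateSync then submits, and that object gets mutated on the way through (defaults, UID, resource version), so a later re-create from the "template" no longer started from a pristine spec. DeepCopy snapshots it first. A minimal, self-contained sketch of the difference (names are illustrative):

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
    dp := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "sample-device-plugin"}}

    alias := dp               // same underlying Pod; mutations are shared
    snapshot := dp.DeepCopy() // independent copy; safe to reuse as a template

    dp.Name = "mutated-during-create"

    fmt.Println(alias.Name)    // "mutated-during-create"
    fmt.Println(snapshot.Name) // "sample-device-plugin"
}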
@@ -162,8 +161,11 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 
         ginkgo.By("Waiting for devices to become unavailable on the local node")
         gomega.Eventually(func() bool {
-            return numberOfSampleResources(getLocalNode(f)) <= 0
+            node, ready := getLocalTestNode(f)
+            return ready && numberOfSampleResources(node) <= 0
         }, 5*time.Minute, framework.Poll).Should(gomega.BeTrue())
 
+        ginkgo.By("devices now unavailable on the local node")
     })
 
     ginkgo.It("Can schedule a pod that requires a device", func() {
@@ -284,8 +286,9 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 
         ginkgo.By("Waiting for resource to become available on the local node after re-registration")
         gomega.Eventually(func() bool {
-            node := getLocalNode(f)
-            return numberOfDevicesCapacity(node, resourceName) == devsLen &&
+            node, ready := getLocalTestNode(f)
+            return ready &&
+                numberOfDevicesCapacity(node, resourceName) == devsLen &&
                 numberOfDevicesAllocatable(node, resourceName) == devsLen
         }, 30*time.Second, framework.Poll).Should(gomega.BeTrue())
 
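The same transformation appears in all three polls above: getLocalNode asserts node readiness and aborts the spec if the node is not ready, while getLocalTestNode reports readiness, so a node that is momentarily not ready makes Eventually retry instead of killing the test. A hypothetical helper (pollNodeResource is not in the tree) distilling the pattern:

package e2enode

import (
    "time"

    "github.com/onsi/gomega"

    v1 "k8s.io/api/core/v1"
    "k8s.io/kubernetes/test/e2e/framework"
)

// pollNodeResource keeps polling until the node is both ready and satisfies
// the predicate; an unready node counts as "not yet", not as a failure.
func pollNodeResource(getNode func() (*v1.Node, bool), predicate func(*v1.Node) bool) {
    gomega.Eventually(func() bool {
        node, ready := getNode()
        return ready && predicate(node)
    }, 5*time.Minute, framework.Poll).Should(gomega.BeTrue())
}

The first wait above would then read pollNodeResource(func() (*v1.Node, bool) { return getLocalTestNode(f) }, func(n *v1.Node) bool { return numberOfSampleResources(n) > 0 }).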
@@ -254,6 +254,19 @@ func getLocalNode(f *framework.Framework) *v1.Node {
     return &nodeList.Items[0]
 }
 
+// getLocalTestNode fetches the node object describing the local worker node set up by the e2e_node infra, along with its ready state.
+// getLocalTestNode is a variant of `getLocalNode` which reports the node's readiness but does not require it, letting the caller
+// decide. The check is intentionally done the same way `getLocalNode` does it.
+// Note `getLocalNode` aborts the test implicitly (as in ginkgo.Expect) if the worker node is not ready.
+func getLocalTestNode(f *framework.Framework) (*v1.Node, bool) {
+    node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), framework.TestContext.NodeName, metav1.GetOptions{})
+    framework.ExpectNoError(err)
+    ready := e2enode.IsNodeReady(node)
+    schedulable := e2enode.IsNodeSchedulable(node)
+    framework.Logf("node %q ready=%v schedulable=%v", node.Name, ready, schedulable)
+    return node, ready && schedulable
+}
+
 // logKubeletLatencyMetrics logs KubeletLatencyMetrics computed from the Prometheus
 // metrics exposed on the current node and identified by the metricNames.
 // The Kubelet subsystem prefix is automatically prepended to these metric names.
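getLocalTestNode leans on e2enode.IsNodeReady and e2enode.IsNodeSchedulable from the e2e framework. For readers outside the framework, the ready check boils down to inspecting the node's NodeReady condition; the simplified stand-in below (the real helpers also consider taints and the Unschedulable flag) shows the idea:

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
)

// isNodeReady is a simplified stand-in for e2enode.IsNodeReady: a node is
// ready when its NodeReady condition reports status True.
func isNodeReady(node *v1.Node) bool {
    for _, cond := range node.Status.Conditions {
        if cond.Type == v1.NodeReady {
            return cond.Status == v1.ConditionTrue
        }
    }
    return false
}

func main() {
    node := &v1.Node{
        Status: v1.NodeStatus{
            Conditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionTrue}},
        },
    }
    fmt.Println(isNodeReady(node)) // true
}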