From f609aa8310faad2c0eb203dd1fbc8def02f2842d Mon Sep 17 00:00:00 2001
From: Ed Bartosh
Date: Tue, 30 Apr 2024 13:58:07 +0300
Subject: [PATCH 1/8] e2e: test-driver: add new matchers

---
 test/e2e/dra/test-driver/app/gomega.go | 36 ++++++++++++++++++++------
 test/e2e_node/dra_test.go              |  2 +-
 2 files changed, 29 insertions(+), 9 deletions(-)

diff --git a/test/e2e/dra/test-driver/app/gomega.go b/test/e2e/dra/test-driver/app/gomega.go
index 45f4ceff5b5..70bc1bde9ea 100644
--- a/test/e2e/dra/test-driver/app/gomega.go
+++ b/test/e2e/dra/test-driver/app/gomega.go
@@ -33,22 +33,42 @@ var BeRegistered = gcustom.MakeMatcher(func(actualCalls []GRPCCall) (bool, error)
 	return false, nil
 }).WithMessage("contain successful NotifyRegistrationStatus call")
 
-// NodePrepareResouceCalled checks that NodePrepareResource API has been called
-var NodePrepareResourceCalled = gcustom.MakeMatcher(func(actualCalls []GRPCCall) (bool, error) {
+// NodePrepareResourcesSucceeded checks that NodePrepareResources API has been called and succeeded
+var NodePrepareResourcesSucceeded = gcustom.MakeMatcher(func(actualCalls []GRPCCall) (bool, error) {
 	for _, call := range actualCalls {
-		if strings.HasSuffix(call.FullMethod, "/NodePrepareResource") && call.Err == nil {
+		if strings.HasSuffix(call.FullMethod, "/NodePrepareResources") && call.Response != nil && call.Err == nil {
 			return true, nil
 		}
 	}
 	return false, nil
-}).WithMessage("contain NodePrepareResource call")
+}).WithMessage("contain successful NodePrepareResources call")
 
-// NodePrepareResoucesCalled checks that NodePrepareResources API has been called
-var NodePrepareResourcesCalled = gcustom.MakeMatcher(func(actualCalls []GRPCCall) (bool, error) {
+// NodePrepareResourcesFailed checks that NodePrepareResources API has been called and returned an error
+var NodePrepareResourcesFailed = gcustom.MakeMatcher(func(actualCalls []GRPCCall) (bool, error) {
 	for _, call := range actualCalls {
-		if strings.HasSuffix(call.FullMethod, "/NodePrepareResources") && call.Err == nil {
+		if strings.HasSuffix(call.FullMethod, "/NodePrepareResources") && call.Err != nil {
 			return true, nil
 		}
 	}
 	return false, nil
-}).WithMessage("contain NodePrepareResources call")
+}).WithMessage("contain unsuccessful NodePrepareResources call")
+
+// NodeUnprepareResourcesSucceeded checks that NodeUnprepareResources API has been called and succeeded
+var NodeUnprepareResourcesSucceeded = gcustom.MakeMatcher(func(actualCalls []GRPCCall) (bool, error) {
+	for _, call := range actualCalls {
+		if strings.HasSuffix(call.FullMethod, "/NodeUnprepareResources") && call.Response != nil && call.Err == nil {
+			return true, nil
+		}
+	}
+	return false, nil
+}).WithMessage("contain successful NodeUnprepareResources call")
+
+// NodeUnprepareResourcesFailed checks that NodeUnprepareResources API has been called and returned an error
+var NodeUnprepareResourcesFailed = gcustom.MakeMatcher(func(actualCalls []GRPCCall) (bool, error) {
+	for _, call := range actualCalls {
+		if strings.HasSuffix(call.FullMethod, "/NodeUnprepareResources") && call.Err != nil {
+			return true, nil
+		}
+	}
+	return false, nil
+}).WithMessage("contain unsuccessful NodeUnprepareResources call")
diff --git a/test/e2e_node/dra_test.go b/test/e2e_node/dra_test.go
index ad2fab45d6d..4bf4d368c48 100644
--- a/test/e2e_node/dra_test.go
+++ b/test/e2e_node/dra_test.go
@@ -122,7 +122,7 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
 		framework.ExpectNoError(err)
 
 		ginkgo.By("wait for NodePrepareResources call")
-
gomega.Eventually(kubeletPlugin.GetGRPCCalls).WithTimeout(dra.PluginClientTimeout * 2).Should(testdriver.NodePrepareResourcesCalled) + gomega.Eventually(kubeletPlugin.GetGRPCCalls).WithTimeout(dra.PluginClientTimeout * 2).Should(testdriver.NodePrepareResourcesSucceeded) // TODO: Check condition or event when implemented // see https://github.com/kubernetes/kubernetes/issues/118468 for details From d6c78f853a77f77cf74fe93a8407b425a27d35f0 Mon Sep 17 00:00:00 2001 From: Ed Bartosh Date: Sat, 25 May 2024 00:16:27 +0300 Subject: [PATCH 2/8] e2e_node: add deferPodDeletion parameter --- test/e2e_node/dra_test.go | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/test/e2e_node/dra_test.go b/test/e2e_node/dra_test.go index 4bf4d368c48..a41ae79b4a5 100644 --- a/test/e2e_node/dra_test.go +++ b/test/e2e_node/dra_test.go @@ -97,7 +97,7 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation, ginkgo.It("must process pod created when kubelet is not running", func(ctx context.Context) { // Stop Kubelet startKubelet := stopKubelet() - pod := createTestObjects(ctx, f.ClientSet, getNodeName(ctx, f), f.Namespace.Name, "draclass", "external-claim", "drapod") + pod := createTestObjects(ctx, f.ClientSet, getNodeName(ctx, f), f.Namespace.Name, "draclass", "external-claim", "drapod", true) // Pod must be in pending state err := e2epod.WaitForPodCondition(ctx, f.ClientSet, f.Namespace.Name, pod.Name, "Pending", framework.PodStartShortTimeout, func(pod *v1.Pod) (bool, error) { return pod.Status.Phase == v1.PodPending, nil @@ -113,7 +113,7 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation, ginkgo.It("must keep pod in pending state if NodePrepareResources times out", func(ctx context.Context) { ginkgo.By("set delay for the NodePrepareResources call") kubeletPlugin.Block() - pod := createTestObjects(ctx, f.ClientSet, getNodeName(ctx, f), f.Namespace.Name, "draclass", "external-claim", "drapod") + pod := createTestObjects(ctx, f.ClientSet, getNodeName(ctx, f), f.Namespace.Name, "draclass", "external-claim", "drapod", true) ginkgo.By("wait for pod to be in Pending state") err := e2epod.WaitForPodCondition(ctx, f.ClientSet, f.Namespace.Name, pod.Name, "Pending", framework.PodStartShortTimeout, func(pod *v1.Pod) (bool, error) { @@ -170,7 +170,7 @@ func newKubeletPlugin(ctx context.Context, nodeName string) *testdriver.ExampleP // NOTE: as scheduler and controller manager are not running by the Node e2e, // the objects must contain all required data to be processed correctly by the API server // and placed on the node without involving the scheduler and the DRA controller -func createTestObjects(ctx context.Context, clientSet kubernetes.Interface, nodename, namespace, className, claimName, podName string) *v1.Pod { +func createTestObjects(ctx context.Context, clientSet kubernetes.Interface, nodename, namespace, className, claimName, podName string, deferPodDeletion bool) *v1.Pod { // ResourceClass class := &resourcev1alpha2.ResourceClass{ ObjectMeta: metav1.ObjectMeta{ @@ -231,7 +231,9 @@ func createTestObjects(ctx context.Context, clientSet kubernetes.Interface, node createdPod, err := clientSet.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err) - ginkgo.DeferCleanup(clientSet.CoreV1().Pods(namespace).Delete, podName, metav1.DeleteOptions{}) + if deferPodDeletion { + ginkgo.DeferCleanup(clientSet.CoreV1().Pods(namespace).Delete, podName, metav1.DeleteOptions{}) + } // Update claim status: set 
ReservedFor and AllocationResult // NOTE: This is usually done by the DRA controller From 2ea2fb3166f325ff3ad532b7ec538d5dd110e2a5 Mon Sep 17 00:00:00 2001 From: Ed Bartosh Date: Sat, 25 May 2024 00:22:05 +0300 Subject: [PATCH 3/8] e2e: test-driver: implement failure mode --- test/e2e/dra/test-driver/app/kubeletplugin.go | 62 +++++++++++++++++++ 1 file changed, 62 insertions(+) diff --git a/test/e2e/dra/test-driver/app/kubeletplugin.go b/test/e2e/dra/test-driver/app/kubeletplugin.go index ac1f2bfa69f..47d9dbfb00e 100644 --- a/test/e2e/dra/test-driver/app/kubeletplugin.go +++ b/test/e2e/dra/test-driver/app/kubeletplugin.go @@ -55,6 +55,12 @@ type ExamplePlugin struct { gRPCCalls []GRPCCall block bool + + prepareResourcesFailure error + failPrepareResourcesMutex sync.Mutex + + unprepareResourcesFailure error + failUnprepareResourcesMutex sync.Mutex } type GRPCCall struct { @@ -168,6 +174,52 @@ func (ex *ExamplePlugin) Block() { ex.block = true } +func (ex *ExamplePlugin) withLock(mutex *sync.Mutex, f func()) { + mutex.Lock() + f() + mutex.Unlock() +} + +// SetNodePrepareResourcesFailureMode sets the failure mode for NodePrepareResources call +// and returns a function to unset the failure mode +func (ex *ExamplePlugin) SetNodePrepareResourcesFailureMode() func() { + ex.failPrepareResourcesMutex.Lock() + ex.prepareResourcesFailure = errors.New("simulated PrepareResources failure") + ex.failPrepareResourcesMutex.Unlock() + + return func() { + ex.failPrepareResourcesMutex.Lock() + ex.prepareResourcesFailure = nil + ex.failPrepareResourcesMutex.Unlock() + } +} + +func (ex *ExamplePlugin) getPrepareResourcesFailure() error { + ex.failPrepareResourcesMutex.Lock() + defer ex.failPrepareResourcesMutex.Unlock() + return ex.prepareResourcesFailure +} + +// SetNodeUnprepareResourcesFailureMode sets the failure mode for NodeUnprepareResources call +// and returns a function to unset the failure mode +func (ex *ExamplePlugin) SetNodeUnprepareResourcesFailureMode() func() { + ex.failUnprepareResourcesMutex.Lock() + ex.unprepareResourcesFailure = errors.New("simulated UnprepareResources failure") + ex.failUnprepareResourcesMutex.Unlock() + + return func() { + ex.failUnprepareResourcesMutex.Lock() + ex.unprepareResourcesFailure = nil + ex.failUnprepareResourcesMutex.Unlock() + } +} + +func (ex *ExamplePlugin) getUnprepareResourcesFailure() error { + ex.failUnprepareResourcesMutex.Lock() + defer ex.failUnprepareResourcesMutex.Unlock() + return ex.unprepareResourcesFailure +} + // NodePrepareResource ensures that the CDI file for the claim exists. 
It uses // a deterministic name to simplify NodeUnprepareResource (no need to remember // or discover the name) and idempotency (when called again, the file simply @@ -309,6 +361,11 @@ func (ex *ExamplePlugin) NodePrepareResources(ctx context.Context, req *drapbv1a resp := &drapbv1alpha3.NodePrepareResourcesResponse{ Claims: make(map[string]*drapbv1alpha3.NodePrepareResourceResponse), } + + if failure := ex.getPrepareResourcesFailure(); failure != nil { + return resp, failure + } + for _, claimReq := range req.Claims { cdiDevices, err := ex.nodePrepareResource(ctx, claimReq.Name, claimReq.Uid, claimReq.ResourceHandle, claimReq.StructuredResourceHandle) if err != nil { @@ -381,6 +438,11 @@ func (ex *ExamplePlugin) NodeUnprepareResources(ctx context.Context, req *drapbv resp := &drapbv1alpha3.NodeUnprepareResourcesResponse{ Claims: make(map[string]*drapbv1alpha3.NodeUnprepareResourceResponse), } + + if failure := ex.getUnprepareResourcesFailure(); failure != nil { + return resp, failure + } + for _, claimReq := range req.Claims { err := ex.nodeUnprepareResource(ctx, claimReq.Name, claimReq.Uid, claimReq.ResourceHandle, claimReq.StructuredResourceHandle) if err != nil { From ffc407b4dd960d99e6bd2aaca632aac02293c7ff Mon Sep 17 00:00:00 2001 From: Ed Bartosh Date: Sat, 25 May 2024 00:32:45 +0300 Subject: [PATCH 4/8] e2e_node: DRA: reimplement call blocking --- test/e2e/dra/test-driver/app/kubeletplugin.go | 42 +++++++++---------- test/e2e_node/dra_test.go | 10 ++--- 2 files changed, 22 insertions(+), 30 deletions(-) diff --git a/test/e2e/dra/test-driver/app/kubeletplugin.go b/test/e2e/dra/test-driver/app/kubeletplugin.go index 47d9dbfb00e..66df9885fd4 100644 --- a/test/e2e/dra/test-driver/app/kubeletplugin.go +++ b/test/e2e/dra/test-driver/app/kubeletplugin.go @@ -54,7 +54,8 @@ type ExamplePlugin struct { prepared map[ClaimID]any gRPCCalls []GRPCCall - block bool + blockPrepareResourcesMutex sync.Mutex + blockUnprepareResourcesMutex sync.Mutex prepareResourcesFailure error failPrepareResourcesMutex sync.Mutex @@ -168,16 +169,20 @@ func (ex *ExamplePlugin) IsRegistered() bool { return status.PluginRegistered } -// Block sets a flag to block Node[Un]PrepareResources -// to emulate time consuming or stuck calls -func (ex *ExamplePlugin) Block() { - ex.block = true +// BlockNodePrepareResources locks blockPrepareResourcesMutex and returns unlocking function for it +func (ex *ExamplePlugin) BlockNodePrepareResources() func() { + ex.blockPrepareResourcesMutex.Lock() + return func() { + ex.blockPrepareResourcesMutex.Unlock() + } } -func (ex *ExamplePlugin) withLock(mutex *sync.Mutex, f func()) { - mutex.Lock() - f() - mutex.Unlock() +// BlockNodeUnprepareResources locks blockUnprepareResourcesMutex and returns unlocking function for it +func (ex *ExamplePlugin) BlockNodeUnprepareResources() func() { + ex.blockUnprepareResourcesMutex.Lock() + return func() { + ex.blockUnprepareResourcesMutex.Unlock() + } } // SetNodePrepareResourcesFailureMode sets the failure mode for NodePrepareResources call @@ -227,15 +232,10 @@ func (ex *ExamplePlugin) getUnprepareResourcesFailure() error { func (ex *ExamplePlugin) nodePrepareResource(ctx context.Context, claimName string, claimUID string, resourceHandle string, structuredResourceHandle []*resourceapi.StructuredResourceHandle) ([]string, error) { logger := klog.FromContext(ctx) - // Block to emulate plugin stuckness or slowness. - // By default the call will not be blocked as ex.block = false. 
- if ex.block { - <-ctx.Done() - return nil, ctx.Err() - } - ex.mutex.Lock() defer ex.mutex.Unlock() + ex.blockPrepareResourcesMutex.Lock() + defer ex.blockPrepareResourcesMutex.Unlock() deviceName := "claim-" + claimUID vendor := ex.driverName @@ -385,14 +385,10 @@ func (ex *ExamplePlugin) NodePrepareResources(ctx context.Context, req *drapbv1a // NodePrepareResource. It's idempotent, therefore it is not an error when that // file is already gone. func (ex *ExamplePlugin) nodeUnprepareResource(ctx context.Context, claimName string, claimUID string, resourceHandle string, structuredResourceHandle []*resourceapi.StructuredResourceHandle) error { - logger := klog.FromContext(ctx) + ex.blockUnprepareResourcesMutex.Lock() + defer ex.blockUnprepareResourcesMutex.Unlock() - // Block to emulate plugin stuckness or slowness. - // By default the call will not be blocked as ex.block = false. - if ex.block { - <-ctx.Done() - return ctx.Err() - } + logger := klog.FromContext(ctx) filePath := ex.getJSONFilePath(claimUID) if err := ex.fileOps.Remove(filePath); err != nil { diff --git a/test/e2e_node/dra_test.go b/test/e2e_node/dra_test.go index a41ae79b4a5..399fc4a1ec5 100644 --- a/test/e2e_node/dra_test.go +++ b/test/e2e_node/dra_test.go @@ -39,7 +39,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" "k8s.io/klog/v2" - dra "k8s.io/kubernetes/pkg/kubelet/cm/dra/plugin" admissionapi "k8s.io/pod-security-admission/api" "k8s.io/kubernetes/test/e2e/feature" @@ -111,9 +110,9 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation, }) ginkgo.It("must keep pod in pending state if NodePrepareResources times out", func(ctx context.Context) { - ginkgo.By("set delay for the NodePrepareResources call") - kubeletPlugin.Block() - pod := createTestObjects(ctx, f.ClientSet, getNodeName(ctx, f), f.Namespace.Name, "draclass", "external-claim", "drapod", true) + unblock := kubeletPlugin.BlockNodePrepareResources() + defer unblock() + pod := createTestObjects(ctx, f.ClientSet, getNodeName(ctx, f), f.Namespace.Name, "draclass", "external-claim", "drapod", true, []string{driverName}) ginkgo.By("wait for pod to be in Pending state") err := e2epod.WaitForPodCondition(ctx, f.ClientSet, f.Namespace.Name, pod.Name, "Pending", framework.PodStartShortTimeout, func(pod *v1.Pod) (bool, error) { @@ -121,9 +120,6 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation, }) framework.ExpectNoError(err) - ginkgo.By("wait for NodePrepareResources call") - gomega.Eventually(kubeletPlugin.GetGRPCCalls).WithTimeout(dra.PluginClientTimeout * 2).Should(testdriver.NodePrepareResourcesSucceeded) - // TODO: Check condition or event when implemented // see https://github.com/kubernetes/kubernetes/issues/118468 for details ginkgo.By("check that pod is consistently in Pending state") From c8c7ae85e5310dec6a4bb2d93f4792349726c631 Mon Sep 17 00:00:00 2001 From: Ed Bartosh Date: Fri, 3 May 2024 23:27:48 +0300 Subject: [PATCH 5/8] e2e_node: DRA: add CountCalls API --- test/e2e/dra/test-driver/app/kubeletplugin.go | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/test/e2e/dra/test-driver/app/kubeletplugin.go b/test/e2e/dra/test-driver/app/kubeletplugin.go index 66df9885fd4..2656a45e10d 100644 --- a/test/e2e/dra/test-driver/app/kubeletplugin.go +++ b/test/e2e/dra/test-driver/app/kubeletplugin.go @@ -23,6 +23,7 @@ import ( "fmt" "os" "path/filepath" + "strings" "sync" "github.com/google/go-cmp/cmp" @@ -545,3 +546,14 @@ func (ex *ExamplePlugin) 
GetGRPCCalls() []GRPCCall { calls = append(calls, ex.gRPCCalls...) return calls } + +// CountCalls counts GRPC calls with the given method suffix. +func (ex *ExamplePlugin) CountCalls(methodSuffix string) int { + count := 0 + for _, call := range ex.GetGRPCCalls() { + if strings.HasSuffix(call.FullMethod, methodSuffix) { + count += 1 + } + } + return count +} From 118158d8dfabf646ceb8cceee603b9c32c180ffc Mon Sep 17 00:00:00 2001 From: Ed Bartosh Date: Sat, 25 May 2024 01:18:19 +0300 Subject: [PATCH 6/8] e2e_node: DRA: test plugin failures --- test/e2e_node/dra_test.go | 175 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 175 insertions(+) diff --git a/test/e2e_node/dra_test.go b/test/e2e_node/dra_test.go index 399fc4a1ec5..3f2fda44329 100644 --- a/test/e2e_node/dra_test.go +++ b/test/e2e_node/dra_test.go @@ -39,6 +39,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" "k8s.io/klog/v2" + draplugin "k8s.io/kubernetes/pkg/kubelet/cm/dra/plugin" admissionapi "k8s.io/pod-security-admission/api" "k8s.io/kubernetes/test/e2e/feature" @@ -126,6 +127,180 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation, gomega.Consistently(ctx, e2epod.Get(f.ClientSet, pod)).WithTimeout(podInPendingStateTimeout).Should(e2epod.BeInPhase(v1.PodPending), "Pod should be in Pending state as resource preparation time outed") }) + + ginkgo.It("must run pod if NodePrepareResources fails and then succeeds", func(ctx context.Context) { + unset := kubeletPlugin.SetNodePrepareResourcesFailureMode() + pod := createTestObjects(ctx, f.ClientSet, getNodeName(ctx, f), f.Namespace.Name, "draclass", "external-claim", "drapod", true, []string{driverName}) + + ginkgo.By("wait for pod to be in Pending state") + err := e2epod.WaitForPodCondition(ctx, f.ClientSet, f.Namespace.Name, pod.Name, "Pending", framework.PodStartShortTimeout, func(pod *v1.Pod) (bool, error) { + return pod.Status.Phase == v1.PodPending, nil + }) + framework.ExpectNoError(err) + + ginkgo.By("wait for NodePrepareResources call to fail") + gomega.Eventually(kubeletPlugin.GetGRPCCalls).WithTimeout(draplugin.PluginClientTimeout * 2).Should(testdriver.NodePrepareResourcesFailed) + + unset() + + ginkgo.By("wait for NodePrepareResources call to succeed") + gomega.Eventually(kubeletPlugin.GetGRPCCalls).WithTimeout(draplugin.PluginClientTimeout * 2).Should(testdriver.NodePrepareResourcesSucceeded) + + ginkgo.By("wait for pod to succeed") + err = e2epod.WaitForPodSuccessInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name) + framework.ExpectNoError(err) + }) + + ginkgo.It("must run pod if NodeUnprepareResources fails and then succeeds", func(ctx context.Context) { + unset := kubeletPlugin.SetNodeUnprepareResourcesFailureMode() + pod := createTestObjects(ctx, f.ClientSet, getNodeName(ctx, f), f.Namespace.Name, "draclass", "external-claim", "drapod", true, []string{driverName}) + + ginkgo.By("wait for NodePrepareResources call to succeed") + gomega.Eventually(kubeletPlugin.GetGRPCCalls).WithTimeout(draplugin.PluginClientTimeout * 2).Should(testdriver.NodePrepareResourcesSucceeded) + + ginkgo.By("wait for NodeUnprepareResources call to fail") + gomega.Eventually(kubeletPlugin.GetGRPCCalls).WithTimeout(draplugin.PluginClientTimeout * 2).Should(testdriver.NodeUnprepareResourcesFailed) + + unset() + + ginkgo.By("wait for NodeUnprepareResources call to succeed") + gomega.Eventually(kubeletPlugin.GetGRPCCalls).WithTimeout(draplugin.PluginClientTimeout * 
2).Should(testdriver.NodeUnprepareResourcesSucceeded) + + ginkgo.By("wait for pod to succeed") + err := e2epod.WaitForPodSuccessInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name) + framework.ExpectNoError(err) + }) + + ginkgo.It("must retry NodePrepareResources after Kubelet restart", func(ctx context.Context) { + unset := kubeletPlugin.SetNodePrepareResourcesFailureMode() + pod := createTestObjects(ctx, f.ClientSet, getNodeName(ctx, f), f.Namespace.Name, "draclass", "external-claim", "drapod", true, []string{driverName}) + + ginkgo.By("wait for pod to be in Pending state") + err := e2epod.WaitForPodCondition(ctx, f.ClientSet, f.Namespace.Name, pod.Name, "Pending", framework.PodStartShortTimeout, func(pod *v1.Pod) (bool, error) { + return pod.Status.Phase == v1.PodPending, nil + }) + framework.ExpectNoError(err) + + ginkgo.By("wait for NodePrepareResources call to fail") + gomega.Eventually(kubeletPlugin.GetGRPCCalls).WithTimeout(draplugin.PluginClientTimeout * 2).Should(testdriver.NodePrepareResourcesFailed) + + ginkgo.By("stop Kubelet") + startKubelet := stopKubelet() + + unset() + + ginkgo.By("start Kubelet") + startKubelet() + + ginkgo.By("wait for NodePrepareResources call to succeed") + gomega.Eventually(kubeletPlugin.GetGRPCCalls).WithTimeout(draplugin.PluginClientTimeout * 2).Should(testdriver.NodePrepareResourcesSucceeded) + + ginkgo.By("wait for pod to succeed") + err = e2epod.WaitForPodSuccessInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name) + framework.ExpectNoError(err) + }) + + ginkgo.It("must retry NodeUnprepareResources after Kubelet restart", func(ctx context.Context) { + unset := kubeletPlugin.SetNodeUnprepareResourcesFailureMode() + pod := createTestObjects(ctx, f.ClientSet, getNodeName(ctx, f), f.Namespace.Name, "draclass", "external-claim", "drapod", true, []string{driverName}) + ginkgo.By("wait for NodePrepareResources call to succeed") + gomega.Eventually(kubeletPlugin.GetGRPCCalls).WithTimeout(draplugin.PluginClientTimeout * 2).Should(testdriver.NodePrepareResourcesSucceeded) + + ginkgo.By("wait for NodeUnprepareResources call to fail") + gomega.Eventually(kubeletPlugin.GetGRPCCalls).WithTimeout(draplugin.PluginClientTimeout * 2).Should(testdriver.NodeUnprepareResourcesFailed) + + ginkgo.By("stop Kubelet") + startKubelet := stopKubelet() + + unset() + + ginkgo.By("start Kubelet") + startKubelet() + + ginkgo.By("wait for NodeUnprepareResources call to succeed") + gomega.Eventually(kubeletPlugin.GetGRPCCalls).WithTimeout(draplugin.PluginClientTimeout * 2).Should(testdriver.NodeUnprepareResourcesSucceeded) + + ginkgo.By("wait for pod to succeed") + err := e2epod.WaitForPodSuccessInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name) + framework.ExpectNoError(err) + }) + + ginkgo.It("must call NodeUnprepareResources for deleted pod", func(ctx context.Context) { + unset := kubeletPlugin.SetNodeUnprepareResourcesFailureMode() + pod := createTestObjects(ctx, f.ClientSet, getNodeName(ctx, f), f.Namespace.Name, "draclass", "external-claim", "drapod", false, []string{driverName}) + + ginkgo.By("wait for NodePrepareResources call to succeed") + gomega.Eventually(kubeletPlugin.GetGRPCCalls).WithTimeout(draplugin.PluginClientTimeout * 2).Should(testdriver.NodePrepareResourcesSucceeded) + + ginkgo.By("wait for NodeUnprepareResources call to fail") + gomega.Eventually(kubeletPlugin.GetGRPCCalls).WithTimeout(draplugin.PluginClientTimeout * 2).Should(testdriver.NodeUnprepareResourcesFailed) + + ginkgo.By("delete pod") + e2epod.DeletePodOrFail(ctx, f.ClientSet, 
f.Namespace.Name, pod.Name) + + ginkgo.By("wait for NodeUnprepareResources call to fail") + gomega.Eventually(kubeletPlugin.GetGRPCCalls).WithTimeout(draplugin.PluginClientTimeout * 2).Should(testdriver.NodeUnprepareResourcesFailed) + + unset() + + ginkgo.By("wait for NodeUnprepareResources call to succeed") + gomega.Eventually(kubeletPlugin.GetGRPCCalls).WithTimeout(draplugin.PluginClientTimeout * 2).Should(testdriver.NodeUnprepareResourcesSucceeded) + }) + + ginkgo.It("must call NodeUnprepareResources for deleted pod after Kubelet restart", func(ctx context.Context) { + unset := kubeletPlugin.SetNodeUnprepareResourcesFailureMode() + pod := createTestObjects(ctx, f.ClientSet, getNodeName(ctx, f), f.Namespace.Name, "draclass", "external-claim", "drapod", false, []string{driverName}) + + ginkgo.By("wait for NodePrepareResources call to succeed") + gomega.Eventually(kubeletPlugin.GetGRPCCalls).WithTimeout(draplugin.PluginClientTimeout * 2).Should(testdriver.NodePrepareResourcesSucceeded) + + ginkgo.By("wait for NodeUnprepareResources call to fail") + gomega.Eventually(kubeletPlugin.GetGRPCCalls).WithTimeout(draplugin.PluginClientTimeout * 2).Should(testdriver.NodeUnprepareResourcesFailed) + + ginkgo.By("delete pod") + err := e2epod.DeletePodWithGracePeriod(ctx, f.ClientSet, pod, 0) + framework.ExpectNoError(err) + + ginkgo.By("wait for NodeUnprepareResources call to fail") + gomega.Eventually(kubeletPlugin.GetGRPCCalls).WithTimeout(draplugin.PluginClientTimeout * 2).Should(testdriver.NodeUnprepareResourcesFailed) + + ginkgo.By("restart Kubelet") + stopKubelet()() + + ginkgo.By("wait for NodeUnprepareResources call to fail") + gomega.Eventually(kubeletPlugin.GetGRPCCalls).WithTimeout(draplugin.PluginClientTimeout * 2).Should(testdriver.NodeUnprepareResourcesFailed) + + unset() + + ginkgo.By("wait for NodeUnprepareResources call to succeed") + gomega.Eventually(kubeletPlugin.GetGRPCCalls).WithTimeout(draplugin.PluginClientTimeout * 2).Should(testdriver.NodeUnprepareResourcesSucceeded) + }) + + ginkgo.It("must not call NodePrepareResources for deleted pod after Kubelet restart", func(ctx context.Context) { + unblock := kubeletPlugin.BlockNodePrepareResources() + pod := createTestObjects(ctx, f.ClientSet, getNodeName(ctx, f), f.Namespace.Name, "draclass", "external-claim", "drapod", false, []string{driverName}) + + ginkgo.By("wait for pod to be in Pending state") + err := e2epod.WaitForPodCondition(ctx, f.ClientSet, f.Namespace.Name, pod.Name, "Pending", framework.PodStartShortTimeout, func(pod *v1.Pod) (bool, error) { + return pod.Status.Phase == v1.PodPending, nil + }) + framework.ExpectNoError(err) + + ginkgo.By("stop Kubelet") + startKubelet := stopKubelet() + + ginkgo.By("delete pod") + e2epod.DeletePodOrFail(ctx, f.ClientSet, f.Namespace.Name, pod.Name) + + unblock() + + ginkgo.By("start Kubelet") + startKubelet() + + calls := kubeletPlugin.CountCalls("/NodePrepareResources") + ginkgo.By("make sure NodePrepareResources is not called again") + gomega.Consistently(kubeletPlugin.CountCalls("/NodePrepareResources")).WithTimeout(draplugin.PluginClientTimeout).Should(gomega.Equal(calls)) + }) }) }) From ce6faef8d8aa3316132a42ce6d7aa697d6cdc32a Mon Sep 17 00:00:00 2001 From: Ed Bartosh Date: Thu, 2 May 2024 15:32:35 +0300 Subject: [PATCH 7/8] e2e_node: change DRA test APIs to work with multiple plugins --- test/e2e_node/dra_test.go | 41 +++++++++++++++++++++------------------ 1 file changed, 22 insertions(+), 19 deletions(-) diff --git a/test/e2e_node/dra_test.go b/test/e2e_node/dra_test.go index 
3f2fda44329..d96e5e176d3 100644 --- a/test/e2e_node/dra_test.go +++ b/test/e2e_node/dra_test.go @@ -26,6 +26,7 @@ package e2enode import ( "context" + "fmt" "os" "path" "path/filepath" @@ -53,9 +54,8 @@ import ( const ( driverName = "test-driver.cdi.k8s.io" cdiDir = "/var/run/cdi" - endpoint = "/var/lib/kubelet/plugins/test-driver/dra.sock" + endpointTemplate = "/var/lib/kubelet/plugins/%s/dra.sock" pluginRegistrationPath = "/var/lib/kubelet/plugins_registry" - draAddress = "/var/lib/kubelet/plugins/test-driver/dra.sock" pluginRegistrationTimeout = time.Second * 60 // how long to wait for a node plugin to be registered podInPendingStateTimeout = time.Second * 60 // how long to wait for a pod to stay in pending state ) @@ -68,7 +68,7 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation, f.Context("Resource Kubelet Plugin", f.WithSerial(), func() { ginkgo.BeforeEach(func(ctx context.Context) { - kubeletPlugin = newKubeletPlugin(ctx, getNodeName(ctx, f)) + kubeletPlugin = newKubeletPlugin(ctx, getNodeName(ctx, f), driverName) }) ginkgo.It("must register after Kubelet restart", func(ctx context.Context) { @@ -88,7 +88,7 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation, ginkgo.It("must register after plugin restart", func(ctx context.Context) { ginkgo.By("restart Kubelet Plugin") kubeletPlugin.Stop() - kubeletPlugin = newKubeletPlugin(ctx, getNodeName(ctx, f)) + kubeletPlugin = newKubeletPlugin(ctx, getNodeName(ctx, f), driverName) ginkgo.By("wait for Kubelet plugin re-registration") gomega.Eventually(kubeletPlugin.GetGRPCCalls).WithTimeout(pluginRegistrationTimeout).Should(testdriver.BeRegistered) @@ -97,7 +97,7 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation, ginkgo.It("must process pod created when kubelet is not running", func(ctx context.Context) { // Stop Kubelet startKubelet := stopKubelet() - pod := createTestObjects(ctx, f.ClientSet, getNodeName(ctx, f), f.Namespace.Name, "draclass", "external-claim", "drapod", true) + pod := createTestObjects(ctx, f.ClientSet, getNodeName(ctx, f), f.Namespace.Name, "draclass", "external-claim", "drapod", true, []string{driverName}) // Pod must be in pending state err := e2epod.WaitForPodCondition(ctx, f.ClientSet, f.Namespace.Name, pod.Name, "Pending", framework.PodStartShortTimeout, func(pod *v1.Pod) (bool, error) { return pod.Status.Phase == v1.PodPending, nil @@ -305,9 +305,9 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation, }) // Run Kubelet plugin and wait until it's registered -func newKubeletPlugin(ctx context.Context, nodeName string) *testdriver.ExamplePlugin { +func newKubeletPlugin(ctx context.Context, nodeName, pluginName string) *testdriver.ExamplePlugin { ginkgo.By("start Kubelet plugin") - logger := klog.LoggerWithValues(klog.LoggerWithName(klog.Background(), "kubelet plugin"), "node", nodeName) + logger := klog.LoggerWithValues(klog.LoggerWithName(klog.Background(), "kubelet plugin "+pluginName), "node", nodeName) ctx = klog.NewContext(ctx, logger) // Ensure that directories exist, creating them if necessary. We want @@ -315,18 +315,19 @@ func newKubeletPlugin(ctx context.Context, nodeName string) *testdriver.ExampleP // creating those directories. 
err := os.MkdirAll(cdiDir, os.FileMode(0750)) framework.ExpectNoError(err, "create CDI directory") + endpoint := fmt.Sprintf(endpointTemplate, pluginName) err = os.MkdirAll(filepath.Dir(endpoint), 0750) framework.ExpectNoError(err, "create socket directory") plugin, err := testdriver.StartPlugin( ctx, cdiDir, - driverName, + pluginName, "", testdriver.FileOperations{}, kubeletplugin.PluginSocketPath(endpoint), - kubeletplugin.RegistrarSocketPath(path.Join(pluginRegistrationPath, driverName+"-reg.sock")), - kubeletplugin.KubeletPluginSocketPath(draAddress), + kubeletplugin.RegistrarSocketPath(path.Join(pluginRegistrationPath, pluginName+"-reg.sock")), + kubeletplugin.KubeletPluginSocketPath(endpoint), ) framework.ExpectNoError(err) @@ -341,13 +342,13 @@ func newKubeletPlugin(ctx context.Context, nodeName string) *testdriver.ExampleP // NOTE: as scheduler and controller manager are not running by the Node e2e, // the objects must contain all required data to be processed correctly by the API server // and placed on the node without involving the scheduler and the DRA controller -func createTestObjects(ctx context.Context, clientSet kubernetes.Interface, nodename, namespace, className, claimName, podName string, deferPodDeletion bool) *v1.Pod { +func createTestObjects(ctx context.Context, clientSet kubernetes.Interface, nodename, namespace, className, claimName, podName string, deferPodDeletion bool, pluginNames []string) *v1.Pod { // ResourceClass class := &resourcev1alpha2.ResourceClass{ ObjectMeta: metav1.ObjectMeta{ Name: className, }, - DriverName: driverName, + DriverName: "controller", } _, err := clientSet.ResourceV1alpha2().ResourceClasses().Create(ctx, class, metav1.CreateOptions{}) framework.ExpectNoError(err) @@ -408,18 +409,20 @@ func createTestObjects(ctx context.Context, clientSet kubernetes.Interface, node // Update claim status: set ReservedFor and AllocationResult // NOTE: This is usually done by the DRA controller + resourceHandlers := make([]resourcev1alpha2.ResourceHandle, len(pluginNames)) + for i, pluginName := range pluginNames { + resourceHandlers[i] = resourcev1alpha2.ResourceHandle{ + DriverName: pluginName, + Data: "{\"EnvVars\":{\"DRA_PARAM1\":\"PARAM1_VALUE\"},\"NodeName\":\"\"}", + } + } createdClaim.Status = resourcev1alpha2.ResourceClaimStatus{ - DriverName: driverName, + DriverName: "controller", ReservedFor: []resourcev1alpha2.ResourceClaimConsumerReference{ {Resource: "pods", Name: podName, UID: createdPod.UID}, }, Allocation: &resourcev1alpha2.AllocationResult{ - ResourceHandles: []resourcev1alpha2.ResourceHandle{ - { - DriverName: driverName, - Data: "{\"EnvVars\":{\"DRA_PARAM1\":\"PARAM1_VALUE\"},\"NodeName\":\"\"}", - }, - }, + ResourceHandles: resourceHandlers, }, } _, err = clientSet.ResourceV1alpha2().ResourceClaims(namespace).UpdateStatus(ctx, createdClaim, metav1.UpdateOptions{}) From ee0340a8281636a67f09a30cd1a77da23e835a56 Mon Sep 17 00:00:00 2001 From: Ed Bartosh Date: Sat, 25 May 2024 01:45:40 +0300 Subject: [PATCH 8/8] e2e_node: add tests for 2 Kubelet plugins --- test/e2e_node/dra_test.go | 123 +++++++++++++++++++++++++++++++++++++- 1 file changed, 122 insertions(+), 1 deletion(-) diff --git a/test/e2e_node/dra_test.go b/test/e2e_node/dra_test.go index d96e5e176d3..ec53ac6fb07 100644 --- a/test/e2e_node/dra_test.go +++ b/test/e2e_node/dra_test.go @@ -53,6 +53,8 @@ import ( const ( driverName = "test-driver.cdi.k8s.io" + kubeletPlugin1Name = "test-driver1.cdi.k8s.io" + kubeletPlugin2Name = "test-driver2.cdi.k8s.io" cdiDir = "/var/run/cdi" 
endpointTemplate = "/var/lib/kubelet/plugins/%s/dra.sock" pluginRegistrationPath = "/var/lib/kubelet/plugins_registry" @@ -64,7 +66,7 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation, f := framework.NewDefaultFramework("dra-node") f.NamespacePodSecurityLevel = admissionapi.LevelBaseline - var kubeletPlugin *testdriver.ExamplePlugin + var kubeletPlugin, kubeletPlugin1, kubeletPlugin2 *testdriver.ExamplePlugin f.Context("Resource Kubelet Plugin", f.WithSerial(), func() { ginkgo.BeforeEach(func(ctx context.Context) { @@ -302,6 +304,125 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation, gomega.Consistently(kubeletPlugin.CountCalls("/NodePrepareResources")).WithTimeout(draplugin.PluginClientTimeout).Should(gomega.Equal(calls)) }) }) + + f.Context("Two resource Kubelet Plugins", f.WithSerial(), func() { + ginkgo.BeforeEach(func(ctx context.Context) { + kubeletPlugin1 = newKubeletPlugin(ctx, getNodeName(ctx, f), kubeletPlugin1Name) + kubeletPlugin2 = newKubeletPlugin(ctx, getNodeName(ctx, f), kubeletPlugin2Name) + + ginkgo.By("wait for Kubelet plugin registration") + gomega.Eventually(kubeletPlugin1.GetGRPCCalls()).WithTimeout(pluginRegistrationTimeout).Should(testdriver.BeRegistered) + gomega.Eventually(kubeletPlugin2.GetGRPCCalls()).WithTimeout(pluginRegistrationTimeout).Should(testdriver.BeRegistered) + }) + + ginkgo.It("must prepare and unprepare resources", func(ctx context.Context) { + pod := createTestObjects(ctx, f.ClientSet, getNodeName(ctx, f), f.Namespace.Name, "draclass", "external-claim", "drapod", true, []string{kubeletPlugin1Name, kubeletPlugin2Name}) + + ginkgo.By("wait for pod to succeed") + err := e2epod.WaitForPodSuccessInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name) + framework.ExpectNoError(err) + + ginkgo.By("wait for NodePrepareResources calls to succeed") + gomega.Eventually(kubeletPlugin1.GetGRPCCalls).WithTimeout(draplugin.PluginClientTimeout * 2).Should(testdriver.NodePrepareResourcesSucceeded) + gomega.Eventually(kubeletPlugin2.GetGRPCCalls).WithTimeout(draplugin.PluginClientTimeout * 2).Should(testdriver.NodePrepareResourcesSucceeded) + + ginkgo.By("wait for NodeUnprepareResources calls to succeed") + gomega.Eventually(kubeletPlugin1.GetGRPCCalls).WithTimeout(draplugin.PluginClientTimeout * 2).Should(testdriver.NodeUnprepareResourcesSucceeded) + gomega.Eventually(kubeletPlugin2.GetGRPCCalls).WithTimeout(draplugin.PluginClientTimeout * 2).Should(testdriver.NodeUnprepareResourcesSucceeded) + }) + + ginkgo.It("must run pod if NodePrepareResources fails for one plugin and then succeeds", func(ctx context.Context) { + unset := kubeletPlugin2.SetNodePrepareResourcesFailureMode() + pod := createTestObjects(ctx, f.ClientSet, getNodeName(ctx, f), f.Namespace.Name, "draclass", "external-claim", "drapod", true, []string{kubeletPlugin1Name, kubeletPlugin2Name}) + + ginkgo.By("wait for pod to be in Pending state") + err := e2epod.WaitForPodCondition(ctx, f.ClientSet, f.Namespace.Name, pod.Name, "Pending", framework.PodStartShortTimeout, func(pod *v1.Pod) (bool, error) { + return pod.Status.Phase == v1.PodPending, nil + }) + framework.ExpectNoError(err) + + ginkgo.By("wait for plugin2 NodePrepareResources call to fail") + gomega.Eventually(kubeletPlugin2.GetGRPCCalls).WithTimeout(draplugin.PluginClientTimeout * 2).Should(testdriver.NodePrepareResourcesFailed) + + unset() + + ginkgo.By("wait for plugin2 NodePrepareResources call to succeed") + 
gomega.Eventually(kubeletPlugin2.GetGRPCCalls).WithTimeout(draplugin.PluginClientTimeout * 2).Should(testdriver.NodePrepareResourcesSucceeded) + + ginkgo.By("wait for pod to succeed") + err = e2epod.WaitForPodSuccessInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name) + framework.ExpectNoError(err) + }) + + ginkgo.It("must run pod if NodeUnprepareResources fails for one plugin and then succeeds", func(ctx context.Context) { + unset := kubeletPlugin2.SetNodeUnprepareResourcesFailureMode() + pod := createTestObjects(ctx, f.ClientSet, getNodeName(ctx, f), f.Namespace.Name, "draclass", "external-claim", "drapod", true, []string{kubeletPlugin1Name, kubeletPlugin2Name}) + + ginkgo.By("wait for plugin1 NodePrepareResources call to succeed") + gomega.Eventually(kubeletPlugin1.GetGRPCCalls).WithTimeout(draplugin.PluginClientTimeout * 2).Should(testdriver.NodePrepareResourcesSucceeded) + + ginkgo.By("wait for plugin2 NodePrepareResources call to succeed") + gomega.Eventually(kubeletPlugin2.GetGRPCCalls).WithTimeout(draplugin.PluginClientTimeout * 2).Should(testdriver.NodePrepareResourcesSucceeded) + + ginkgo.By("wait for plugin2 NodeUnprepareResources call to fail") + gomega.Eventually(kubeletPlugin2.GetGRPCCalls).WithTimeout(draplugin.PluginClientTimeout * 2).Should(testdriver.NodeUnprepareResourcesFailed) + + unset() + + ginkgo.By("wait for plugin2 NodeUnprepareResources call to succeed") + gomega.Eventually(kubeletPlugin2.GetGRPCCalls).WithTimeout(draplugin.PluginClientTimeout * 2).Should(testdriver.NodeUnprepareResourcesSucceeded) + + ginkgo.By("wait for pod to succeed") + err := e2epod.WaitForPodSuccessInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name) + framework.ExpectNoError(err) + }) + + ginkgo.It("must run pod if NodePrepareResources is in progress for one plugin when Kubelet restarts", func(ctx context.Context) { + unblock := kubeletPlugin.BlockNodePrepareResources() + pod := createTestObjects(ctx, f.ClientSet, getNodeName(ctx, f), f.Namespace.Name, "draclass", "external-claim", "drapod", true, []string{kubeletPlugin1Name, kubeletPlugin2Name}) + + ginkgo.By("wait for pod to be in Pending state") + err := e2epod.WaitForPodCondition(ctx, f.ClientSet, f.Namespace.Name, pod.Name, "Pending", framework.PodStartShortTimeout, func(pod *v1.Pod) (bool, error) { + return pod.Status.Phase == v1.PodPending, nil + }) + framework.ExpectNoError(err) + + ginkgo.By("restart Kubelet") + restartKubelet(true) + + unblock() + + ginkgo.By("wait for plugin2 NodePrepareResources call to succeed") + gomega.Eventually(kubeletPlugin2.GetGRPCCalls).WithTimeout(draplugin.PluginClientTimeout * 2).Should(testdriver.NodePrepareResourcesSucceeded) + + ginkgo.By("wait for pod to succeed") + err = e2epod.WaitForPodSuccessInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name) + framework.ExpectNoError(err) + }) + + ginkgo.It("must call NodeUnprepareResources again if it's in progress for one plugin when Kubelet restarts", func(ctx context.Context) { + unblock := kubeletPlugin2.BlockNodeUnprepareResources() + pod := createTestObjects(ctx, f.ClientSet, getNodeName(ctx, f), f.Namespace.Name, "draclass", "external-claim", "drapod", true, []string{kubeletPlugin1Name, kubeletPlugin2Name}) + + ginkgo.By("wait for plugin1 NodePrepareResources call to succeed") + gomega.Eventually(kubeletPlugin1.GetGRPCCalls).WithTimeout(draplugin.PluginClientTimeout * 2).Should(testdriver.NodePrepareResourcesSucceeded) + + ginkgo.By("wait for plugin2 NodePrepareResources call to succeed") + 
gomega.Eventually(kubeletPlugin2.GetGRPCCalls).WithTimeout(draplugin.PluginClientTimeout * 2).Should(testdriver.NodePrepareResourcesSucceeded) + + ginkgo.By("restart Kubelet") + restartKubelet(true) + + unblock() + + ginkgo.By("wait for plugin2 NodeUnprepareResources call to succeed") + gomega.Eventually(kubeletPlugin2.GetGRPCCalls).WithTimeout(draplugin.PluginClientTimeout * 2).Should(testdriver.NodeUnprepareResourcesSucceeded) + + ginkgo.By("wait for pod to succeed") + err := e2epod.WaitForPodSuccessInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name) + framework.ExpectNoError(err) + }) + }) }) // Run Kubelet plugin and wait until it's registered