diff --git a/test/e2e/dra/dra.go b/test/e2e/dra/dra.go
index cb2324e0a5e..7fa3aab928a 100644
--- a/test/e2e/dra/dra.go
+++ b/test/e2e/dra/dra.go
@@ -403,6 +403,73 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
 				return b.f.ClientSet.ResourceV1beta1().ResourceClaims(b.f.Namespace.Name).Get(ctx, claim.Name, metav1.GetOptions{})
 			}).WithTimeout(f.Timeouts.PodDelete).Should(gomega.HaveField("Status.Allocation", (*resourceapi.AllocationResult)(nil)))
 		})
+
+		f.It("must be possible for the driver to update the ResourceClaim.Status.Devices once allocated", feature.DRAResourceClaimDeviceStatus, func(ctx context.Context) {
+			pod := b.podExternal()
+			claim := b.externalClaim()
+			b.create(ctx, claim, pod)
+
+			// Waits for the ResourceClaim to be allocated and the pod to be scheduled.
+			var allocatedResourceClaim *resourceapi.ResourceClaim
+			var scheduledPod *v1.Pod
+
+			gomega.Eventually(ctx, func(ctx context.Context) (*resourceapi.ResourceClaim, error) {
+				var err error
+				allocatedResourceClaim, err = b.f.ClientSet.ResourceV1beta1().ResourceClaims(b.f.Namespace.Name).Get(ctx, claim.Name, metav1.GetOptions{})
+				return allocatedResourceClaim, err
+			}).WithTimeout(f.Timeouts.PodDelete).ShouldNot(gomega.HaveField("Status.Allocation", (*resourceapi.AllocationResult)(nil)))
+
+			gomega.Eventually(ctx, func(ctx context.Context) error {
+				var err error
+				scheduledPod, err = b.f.ClientSet.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
+				if err != nil || scheduledPod.Spec.NodeName == "" {
+					return fmt.Errorf("expected the test pod %s to exist and to be scheduled on a node: %w", pod.Name, err)
+				}
+				return nil
+			}).WithTimeout(f.Timeouts.PodDelete).Should(gomega.BeNil())
+
+			gomega.Expect(allocatedResourceClaim.Status.Allocation).ToNot(gomega.BeNil())
+			gomega.Expect(allocatedResourceClaim.Status.Allocation.Devices.Results).To(gomega.HaveLen(1))
+
+			ginkgo.By("Setting the device status a first time")
+			allocatedResourceClaim.Status.Devices = append(allocatedResourceClaim.Status.Devices,
+				resourceapi.AllocatedDeviceStatus{
+					Driver:     allocatedResourceClaim.Status.Allocation.Devices.Results[0].Driver,
+					Pool:       allocatedResourceClaim.Status.Allocation.Devices.Results[0].Pool,
+					Device:     allocatedResourceClaim.Status.Allocation.Devices.Results[0].Device,
+					Conditions: []metav1.Condition{{Type: "a", Status: "True", Message: "c", Reason: "d", LastTransitionTime: metav1.NewTime(time.Now().Truncate(time.Second))}},
+					Data:       &runtime.RawExtension{Raw: []byte(`{"foo":"bar"}`)},
+					NetworkData: &resourceapi.NetworkDeviceData{
+						InterfaceName:   "inf1",
+						IPs:             []string{"10.9.8.0/24", "2001:db8::/64"},
+						HardwareAddress: "bc:1c:b6:3e:b8:25",
+					},
+				})
+			// Updates the ResourceClaim from the driver on the same node as the pod.
+			updatedResourceClaim, err := driver.Nodes[scheduledPod.Spec.NodeName].ExamplePlugin.UpdateStatus(ctx, allocatedResourceClaim)
+			framework.ExpectNoError(err)
+			gomega.Expect(updatedResourceClaim).ToNot(gomega.BeNil())
+			gomega.Expect(updatedResourceClaim.Status.Devices).To(gomega.Equal(allocatedResourceClaim.Status.Devices))
+
+			ginkgo.By("Updating the device status")
+			updatedResourceClaim.Status.Devices[0] = resourceapi.AllocatedDeviceStatus{
+				Driver:     allocatedResourceClaim.Status.Allocation.Devices.Results[0].Driver,
+				Pool:       allocatedResourceClaim.Status.Allocation.Devices.Results[0].Pool,
+				Device:     allocatedResourceClaim.Status.Allocation.Devices.Results[0].Device,
+				Conditions: []metav1.Condition{{Type: "e", Status: "False", Message: "g", Reason: "h", LastTransitionTime: metav1.NewTime(time.Now().Truncate(time.Second))}},
+				Data:       &runtime.RawExtension{Raw: []byte(`{"bar":"foo"}`)},
+				NetworkData: &resourceapi.NetworkDeviceData{
+					InterfaceName:   "inf2",
+					IPs:             []string{"10.9.8.1/24", "2001:db8::1/64"},
+					HardwareAddress: "bc:1c:b6:3e:b8:26",
+				},
+			}
+			updatedResourceClaim2, err := driver.Nodes[scheduledPod.Spec.NodeName].ExamplePlugin.UpdateStatus(ctx, updatedResourceClaim)
+			framework.ExpectNoError(err)
+			gomega.Expect(updatedResourceClaim2).ToNot(gomega.BeNil())
+			gomega.Expect(updatedResourceClaim2.Status.Devices).To(gomega.Equal(updatedResourceClaim.Status.Devices))
+
+		})
 	}
 
 	singleNodeTests := func() {
diff --git a/test/e2e/dra/test-driver/app/kubeletplugin.go b/test/e2e/dra/test-driver/app/kubeletplugin.go
index 242832bc21d..04b6c1cfde5 100644
--- a/test/e2e/dra/test-driver/app/kubeletplugin.go
+++ b/test/e2e/dra/test-driver/app/kubeletplugin.go
@@ -560,3 +560,7 @@ func (ex *ExamplePlugin) CountCalls(methodSuffix string) int {
 	}
 	return count
 }
+
+func (ex *ExamplePlugin) UpdateStatus(ctx context.Context, resourceClaim *resourceapi.ResourceClaim) (*resourceapi.ResourceClaim, error) {
+	return ex.kubeClient.ResourceV1beta1().ResourceClaims(resourceClaim.Namespace).UpdateStatus(ctx, resourceClaim, metav1.UpdateOptions{})
+}
diff --git a/test/e2e/dra/test-driver/deploy/example/plugin-permissions.yaml b/test/e2e/dra/test-driver/deploy/example/plugin-permissions.yaml
index 14c897ac08b..0c35cbacfbb 100644
--- a/test/e2e/dra/test-driver/deploy/example/plugin-permissions.yaml
+++ b/test/e2e/dra/test-driver/deploy/example/plugin-permissions.yaml
@@ -15,6 +15,9 @@ rules:
 - apiGroups: ["resource.k8s.io"]
   resources: ["resourceclaims"]
   verbs: ["get"]
+- apiGroups: ["resource.k8s.io"]
+  resources: ["resourceclaims/status"]
+  verbs: ["update"]
 - apiGroups: [""]
   resources: ["nodes"]
   verbs: ["get"]
diff --git a/test/e2e/feature/feature.go b/test/e2e/feature/feature.go
index 557d0930cee..81d4fa67336 100644
--- a/test/e2e/feature/feature.go
+++ b/test/e2e/feature/feature.go
@@ -91,6 +91,15 @@ var (
 	// TODO: document the feature (owning SIG, when to use this feature for a test)
 	Downgrade = framework.WithFeature(framework.ValidFeatures.Add("Downgrade"))
 
+	// owning-sig: sig-node
+	// kep: https://kep.k8s.io/4817
+	// test-infra jobs:
+	// - "dra-alpha" in https://testgrid.k8s.io/sig-node-dynamic-resource-allocation
+	//
+	// This label is used for tests which need:
+	// - the DynamicResourceAllocation *and* DRAResourceClaimDeviceStatus feature gates
+	DRAResourceClaimDeviceStatus = framework.WithFeature(framework.ValidFeatures.Add("DRAResourceClaimDeviceStatus"))
+
 	// owning-sig: sig-node
 	// kep: https://kep.k8s.io/4381
 	// test-infra jobs: