Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-23 10:32:03 +00:00)
[KEP-4817] E2E: Update ResourceClaim.Status.Devices

Signed-off-by: Lionel Jouin <lionel.jouin@est.tech>

commit 8be335a755 (parent c59359289f)
@ -403,6 +403,73 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
				return b.f.ClientSet.ResourceV1beta1().ResourceClaims(b.f.Namespace.Name).Get(ctx, claim.Name, metav1.GetOptions{})
			}).WithTimeout(f.Timeouts.PodDelete).Should(gomega.HaveField("Status.Allocation", (*resourceapi.AllocationResult)(nil)))
		})

		f.It("must be possible for the driver to update the ResourceClaim.Status.Devices once allocated", feature.DRAResourceClaimDeviceStatus, func(ctx context.Context) {
			pod := b.podExternal()
			claim := b.externalClaim()
			b.create(ctx, claim, pod)

			// Waits for the ResourceClaim to be allocated and the pod to be scheduled.
			var allocatedResourceClaim *resourceapi.ResourceClaim
			var scheduledPod *v1.Pod

			gomega.Eventually(ctx, func(ctx context.Context) (*resourceapi.ResourceClaim, error) {
				var err error
				allocatedResourceClaim, err = b.f.ClientSet.ResourceV1alpha3().ResourceClaims(b.f.Namespace.Name).Get(ctx, claim.Name, metav1.GetOptions{})
				return allocatedResourceClaim, err
			}).WithTimeout(f.Timeouts.PodDelete).ShouldNot(gomega.HaveField("Status.Allocation", (*resourceapi.AllocationResult)(nil)))

			gomega.Eventually(ctx, func(ctx context.Context) error {
				var err error
				scheduledPod, err = b.f.ClientSet.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
				if err != nil || scheduledPod.Spec.NodeName == "" {
					return fmt.Errorf("expected the test pod %s to exist and to be scheduled on a node: %w", pod.Name, err)
				}
				return nil
			}).WithTimeout(f.Timeouts.PodDelete).Should(gomega.BeNil())

			gomega.Expect(allocatedResourceClaim.Status.Allocation).ToNot(gomega.BeNil())
			gomega.Expect(allocatedResourceClaim.Status.Allocation.Devices.Results).To(gomega.HaveLen(1))

			ginkgo.By("Setting the device status a first time")
			allocatedResourceClaim.Status.Devices = append(allocatedResourceClaim.Status.Devices,
				resourceapi.AllocatedDeviceStatus{
					Driver:     allocatedResourceClaim.Status.Allocation.Devices.Results[0].Driver,
					Pool:       allocatedResourceClaim.Status.Allocation.Devices.Results[0].Pool,
					Device:     allocatedResourceClaim.Status.Allocation.Devices.Results[0].Device,
					Conditions: []metav1.Condition{{Type: "a", Status: "b", Message: "c", Reason: "d"}},
					Data:       &runtime.RawExtension{Raw: []byte(`{"foo":"bar"}`)},
					NetworkData: &resourceapi.NetworkDeviceData{
						InterfaceName: ptr.To("inf1"),
						Addresses:     []string{"10.9.8.0/24", "2001:db8::/64"},
						HWAddress:     ptr.To("bc:1c:b6:3e:b8:25"),
					},
				})

			// Updates the ResourceClaim from the driver on the same node as the pod.
			updatedResourceClaim, err := driver.Nodes[scheduledPod.Spec.NodeName].ExamplePlugin.UpdateStatus(ctx, allocatedResourceClaim)
			framework.ExpectNoError(err)
			gomega.Expect(updatedResourceClaim).ToNot(gomega.BeNil())
			gomega.Expect(updatedResourceClaim.Status.Devices).To(gomega.Equal(allocatedResourceClaim.Status.Devices))

			ginkgo.By("Updating the device status")
			updatedResourceClaim.Status.Devices[0] = resourceapi.AllocatedDeviceStatus{
				Driver:     allocatedResourceClaim.Status.Allocation.Devices.Results[0].Driver,
				Pool:       allocatedResourceClaim.Status.Allocation.Devices.Results[0].Pool,
				Device:     allocatedResourceClaim.Status.Allocation.Devices.Results[0].Device,
				Conditions: []metav1.Condition{{Type: "e", Status: "f", Message: "g", Reason: "h"}},
				Data:       &runtime.RawExtension{Raw: []byte(`{"bar":"foo"}`)},
				NetworkData: &resourceapi.NetworkDeviceData{
					InterfaceName: ptr.To("inf2"),
					Addresses:     []string{"10.9.8.1/24", "2001:db8::1/64"},
					HWAddress:     ptr.To("bc:1c:b6:3e:b8:26"),
				},
			}

			updatedResourceClaim2, err := driver.Nodes[scheduledPod.Spec.NodeName].ExamplePlugin.UpdateStatus(ctx, updatedResourceClaim)
			framework.ExpectNoError(err)
			gomega.Expect(updatedResourceClaim2).ToNot(gomega.BeNil())
			gomega.Expect(updatedResourceClaim2.Status.Devices).To(gomega.Equal(updatedResourceClaim.Status.Devices))
		})
	}

	singleNodeTests := func() {
@ -560,3 +560,7 @@ func (ex *ExamplePlugin) CountCalls(methodSuffix string) int {
	}
	return count
}

func (ex *ExamplePlugin) UpdateStatus(ctx context.Context, resourceClaim *resourceapi.ResourceClaim) (*resourceapi.ResourceClaim, error) {
	return ex.kubeClient.ResourceV1alpha3().ResourceClaims(resourceClaim.Namespace).UpdateStatus(ctx, resourceClaim, metav1.UpdateOptions{})
}
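UpdateStatus goes through the status subresource, so the API server persists only the Status of the object passed in. The helper is deliberately a single unconditional write, which is all the e2e test needs; a production driver would typically also guard against resourceVersion conflicts. A sketch of such a variant under that assumption; the helper name publishDeviceStatus and the "Ready"/"DeviceConfigured" condition payload are invented for illustration:

package example

import (
	"context"

	resourceapi "k8s.io/api/resource/v1alpha3"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/util/retry"
)

// publishDeviceStatus is a hypothetical conflict-safe variant of the
// ExamplePlugin.UpdateStatus helper above: it re-reads the claim on every
// attempt so a stale resourceVersion cannot make the update fail permanently.
func publishDeviceStatus(ctx context.Context, kubeClient kubernetes.Interface, namespace, claimName string) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		claim, err := kubeClient.ResourceV1alpha3().ResourceClaims(namespace).Get(ctx, claimName, metav1.GetOptions{})
		if err != nil {
			return err
		}
		if claim.Status.Allocation == nil || len(claim.Status.Allocation.Devices.Results) == 0 {
			return nil // not allocated yet, nothing to report
		}
		// Driver/Pool/Device must name a device actually allocated to this
		// claim; here we report on the first allocation result.
		result := claim.Status.Allocation.Devices.Results[0]
		claim.Status.Devices = []resourceapi.AllocatedDeviceStatus{{
			Driver: result.Driver,
			Pool:   result.Pool,
			Device: result.Device,
			Conditions: []metav1.Condition{{
				Type:               "Ready", // assumed condition type
				Status:             metav1.ConditionTrue,
				Reason:             "DeviceConfigured", // assumed reason
				Message:            "device is ready",
				LastTransitionTime: metav1.Now(),
			}},
		}}
		_, err = kubeClient.ResourceV1alpha3().ResourceClaims(namespace).UpdateStatus(ctx, claim, metav1.UpdateOptions{})
		return err
	})
}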
@ -15,6 +15,9 @@ rules:
|
||||
- apiGroups: ["resource.k8s.io"]
|
||||
resources: ["resourceclaims"]
|
||||
verbs: ["get"]
|
||||
- apiGroups: ["resource.k8s.io"]
|
||||
resources: ["resourceclaims/status"]
|
||||
verbs: ["update"]
|
||||
- apiGroups: [""]
|
||||
resources: ["nodes"]
|
||||
verbs: ["get"]
|
||||
|
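Of these rules, the resourceclaims/status one is the addition (the hunk grows from 6 to 9 lines): update on the resourceclaims/status subresource is exactly the verb the plugin's UpdateStatus call needs, while get on resourceclaims was already granted.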
@ -91,6 +91,15 @@ var (
	// TODO: document the feature (owning SIG, when to use this feature for a test)
	Downgrade = framework.WithFeature(framework.ValidFeatures.Add("Downgrade"))

	// owning-sig: sig-node
	// kep: https://kep.k8s.io/4817
	// test-infra jobs:
	// - "dra-alpha" in https://testgrid.k8s.io/sig-node-dynamic-resource-allocation
	//
	// This label is used for tests which need:
	// - the DynamicResourceAllocation *and* DRAResourceClaimDeviceStatus feature gates
	DRAResourceClaimDeviceStatus = framework.WithFeature(framework.ValidFeatures.Add("DRAResourceClaimDeviceStatus"))

	// owning-sig: sig-node
	// kep: https://kep.k8s.io/4381
	// test-infra jobs:
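The new e2e test opts in through this label (the feature.DRAResourceClaimDeviceStatus argument to f.It above), so it is only selected by jobs that enable both the DynamicResourceAllocation and DRAResourceClaimDeviceStatus feature gates, such as the "dra-alpha" job referenced in the comment.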