mirror of
https://github.com/k3s-io/kubernetes.git
synced 2026-01-13 11:25:19 +00:00
dra api: rename NodeResourceSlice -> ResourceSlice
While currently those objects only get published by the kubelet for node-local resources, this could change once we also support network-attached resources. Dropping the "Node" prefix enables such a future extension. The NodeName in ResourceSlice and StructuredResourceHandle then becomes optional. The kubelet still needs to provide one and it must match its own node name, otherwise it doesn't have permission to access ResourceSlice objects.
This commit is contained in:
@@ -454,9 +454,9 @@ func GetEtcdStorageDataForNamespace(namespace string) map[schema.GroupVersionRes
 			Stub:             `{"metadata": {"name": "claim1parameters"}}`,
 			ExpectedEtcdPath: "/registry/resourceclaimparameters/" + namespace + "/claim1parameters",
 		},
-		gvr("resource.k8s.io", "v1alpha2", "noderesourceslices"): {
+		gvr("resource.k8s.io", "v1alpha2", "resourceslices"): {
 			Stub:             `{"metadata": {"name": "node1slice"}, "nodeName": "worker1", "driverName": "dra.example.com", "namedResources": {}}`,
-			ExpectedEtcdPath: "/registry/noderesourceslices/node1slice",
+			ExpectedEtcdPath: "/registry/resourceslices/node1slice",
 		},
 		// --
@@ -128,7 +128,7 @@ type createResourceDriverOp struct {
 	Nodes string
 	// StructuredParameters is true if the controller that is built into the scheduler
 	// is used and the control-plane controller is not needed.
-	// Because we don't run the kubelet plugin, NodeResourceSlices must
+	// Because we don't run the kubelet plugin, ResourceSlices must
 	// get created for all nodes.
 	StructuredParameters bool
 }
@@ -195,12 +195,12 @@ func (op *createResourceDriverOp) run(tCtx ktesting.TContext) {

 	if op.StructuredParameters {
 		for _, nodeName := range resources.Nodes {
-			slice := nodeResourceSlice(op.DriverName, nodeName, op.MaxClaimsPerNode)
-			_, err := tCtx.Client().ResourceV1alpha2().NodeResourceSlices().Create(tCtx, slice, metav1.CreateOptions{})
+			slice := resourceSlice(op.DriverName, nodeName, op.MaxClaimsPerNode)
+			_, err := tCtx.Client().ResourceV1alpha2().ResourceSlices().Create(tCtx, slice, metav1.CreateOptions{})
 			tCtx.ExpectNoError(err, "create node resource slice")
 		}
 		tCtx.CleanupCtx(func(tCtx ktesting.TContext) {
-			err := tCtx.Client().ResourceV1alpha2().NodeResourceSlices().DeleteCollection(tCtx,
+			err := tCtx.Client().ResourceV1alpha2().ResourceSlices().DeleteCollection(tCtx,
 				metav1.DeleteOptions{},
 				metav1.ListOptions{FieldSelector: "driverName=" + op.DriverName},
 			)
@@ -228,8 +228,8 @@ func (op *createResourceDriverOp) run(tCtx ktesting.TContext) {
 		})
 	}

-func nodeResourceSlice(driverName, nodeName string, capacity int) *resourcev1alpha2.NodeResourceSlice {
-	slice := &resourcev1alpha2.NodeResourceSlice{
+func resourceSlice(driverName, nodeName string, capacity int) *resourcev1alpha2.ResourceSlice {
+	slice := &resourcev1alpha2.ResourceSlice{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: nodeName,
 		},
Reference in New Issue
Block a user