From f84eb5ecf894fa0fc4e0d05da52ef51d4cd723d9 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Fri, 11 Oct 2024 16:08:54 +0200 Subject: [PATCH] DRA: remove "classic DRA" This removes the DRAControlPlaneController feature gate, the fields controlled by it (claim.spec.controller, claim.status.deallocationRequested, claim.status.allocation.controller, class.spec.suitableNodes), the PodSchedulingContext type, and all code related to the feature. The feature gets removed because there is no path towards beta and GA and DRA with "structured parameters" should be able to replace it. --- api/discovery/aggregated_v2.json | 35 - .../apis__resource.k8s.io__v1alpha3.json | 28 - api/openapi-spec/swagger.json | 1128 ---------- ...is__resource.k8s.io__v1alpha3_openapi.json | 1934 ----------------- cmd/kube-controller-manager/app/core.go | 1 - pkg/apis/resource/register.go | 2 - pkg/apis/resource/types.go | 170 +- .../v1alpha3/zz_generated.conversion.go | 176 -- pkg/apis/resource/validation/validation.go | 84 +- .../validation/validation_deviceclass_test.go | 40 - .../validation_podschedulingcontext_test.go | 342 --- .../validation_resourceclaim_test.go | 87 - pkg/apis/resource/zz_generated.deepcopy.go | 131 -- pkg/controller/resourceclaim/controller.go | 110 +- .../resourceclaim/controller_test.go | 88 +- .../apiserver/options/validation.go | 8 - pkg/features/kube_features.go | 8 - pkg/features/versioned_kube_features.go | 4 - pkg/generated/openapi/zz_generated.openapi.go | 255 +-- pkg/printers/internalversion/printers.go | 31 - pkg/printers/internalversion/printers_test.go | 133 +- pkg/registry/resource/deviceclass/strategy.go | 19 +- .../resource/deviceclass/strategy_test.go | 73 +- .../podschedulingcontext/storage/storage.go | 101 - .../storage/storage_test.go | 184 -- .../resource/podschedulingcontext/strategy.go | 165 -- .../podschedulingcontext/strategy_test.go | 84 - .../resource/resourceclaim/strategy.go | 33 +- .../resource/resourceclaim/strategy_test.go | 136 +- .../resource/rest/storage_resource.go | 18 +- pkg/scheduler/eventhandlers.go | 9 - pkg/scheduler/eventhandlers_test.go | 40 +- .../dynamicresources/dynamicresources.go | 686 +----- .../dynamicresources/dynamicresources_test.go | 523 +---- .../framework/plugins/feature/feature.go | 1 - pkg/scheduler/framework/plugins/registry.go | 1 - pkg/scheduler/framework/types.go | 2 - pkg/scheduler/scheduler_test.go | 4 - pkg/scheduler/testing/wrappers.go | 106 +- .../rbac/bootstrappolicy/controller_policy.go | 1 - .../authorizer/rbac/bootstrappolicy/policy.go | 2 - .../api/resource/v1alpha3/generated.pb.go | 1515 ++----------- .../api/resource/v1alpha3/generated.proto | 143 -- .../k8s.io/api/resource/v1alpha3/register.go | 2 - .../src/k8s.io/api/resource/v1alpha3/types.go | 173 +- .../v1alpha3/types_swagger_doc_generated.go | 70 +- .../v1alpha3/zz_generated.deepcopy.go | 131 -- .../zz_generated.prerelease-lifecycle.go | 36 - .../resource.k8s.io.v1alpha3.DeviceClass.json | 26 +- .../resource.k8s.io.v1alpha3.DeviceClass.pb | Bin 636 -> 552 bytes .../resource.k8s.io.v1alpha3.DeviceClass.yaml | 12 - ...esource.k8s.io.v1alpha3.ResourceClaim.json | 9 +- .../resource.k8s.io.v1alpha3.ResourceClaim.pb | Bin 1056 -> 1020 bytes ...esource.k8s.io.v1alpha3.ResourceClaim.yaml | 3 - ...k8s.io.v1alpha3.ResourceClaimTemplate.json | 3 +- ...e.k8s.io.v1alpha3.ResourceClaimTemplate.pb | Bin 1054 -> 1037 bytes ...k8s.io.v1alpha3.ResourceClaimTemplate.yaml | 1 - ...v1alpha3.DeviceClass.after_roundtrip.json} | 34 +- ...o.v1alpha3.DeviceClass.after_roundtrip.pb} | Bin 
495 -> 552 bytes ...v1alpha3.DeviceClass.after_roundtrip.yaml} | 23 +- ...1alpha3.ResourceClaim.after_roundtrip.json | 161 ++ ....v1alpha3.ResourceClaim.after_roundtrip.pb | Bin 0 -> 1020 bytes ...1alpha3.ResourceClaim.after_roundtrip.yaml | 99 + ...ResourceClaimTemplate.after_roundtrip.json | 138 ++ ...3.ResourceClaimTemplate.after_roundtrip.pb | Bin 0 -> 1037 bytes ...ResourceClaimTemplate.after_roundtrip.yaml | 94 + .../applyconfigurations/internal/internal.go | 69 - .../resource/v1alpha3/allocationresult.go | 9 - .../resource/v1alpha3/deviceclassspec.go | 17 +- .../resource/v1alpha3/podschedulingcontext.go | 264 --- .../v1alpha3/podschedulingcontextspec.go | 50 - .../v1alpha3/podschedulingcontextstatus.go | 44 - .../v1alpha3/resourceclaimschedulingstatus.go | 50 - .../resource/v1alpha3/resourceclaimspec.go | 11 +- .../resource/v1alpha3/resourceclaimstatus.go | 13 +- .../client-go/applyconfigurations/utils.go | 8 - .../src/k8s.io/client-go/informers/generic.go | 2 - .../informers/resource/v1alpha3/interface.go | 7 - .../resource/v1alpha3/podschedulingcontext.go | 90 - .../fake/fake_podschedulingcontext.go | 197 -- .../v1alpha3/fake/fake_resource_client.go | 4 - .../resource/v1alpha3/generated_expansion.go | 2 - .../resource/v1alpha3/podschedulingcontext.go | 73 - .../resource/v1alpha3/resource_client.go | 5 - .../resource/v1alpha3/expansion_generated.go | 8 - .../resource/v1alpha3/podschedulingcontext.go | 70 - .../controller/controller.go | 880 -------- .../controller/controller_test.go | 617 ------ .../controller/mock_queue_test.go | 165 -- .../k8s.io/dynamic-resource-allocation/go.mod | 2 +- .../structured/allocator_test.go | 1 - test/e2e/dra/deploy.go | 128 +- test/e2e/dra/dra.go | 520 +---- test/e2e/dra/kind-classic-dra.yaml | 45 - test/e2e/dra/test-driver/app/controller.go | 377 ---- test/e2e/dra/test-driver/app/server.go | 91 - test/e2e/feature/feature.go | 12 - .../test_data/versioned_feature_list.yaml | 6 - .../apiserver/apply/reset_fields_test.go | 8 +- .../apiserver/apply/status_test.go | 3 +- test/integration/etcd/data.go | 4 - test/integration/scheduler/scheduler_test.go | 123 -- .../config/dra/deviceclass-structured.yaml | 8 - .../config/dra/deviceclass.yaml | 4 + .../config/dra/resourceclaim-structured.yaml | 9 - .../config/dra/resourceclaim.yaml | 1 - .../dra/resourceclaimtemplate-structured.yaml | 10 - .../config/dra/resourceclaimtemplate.yaml | 1 - .../config/performance-config.yaml | 177 +- test/integration/scheduler_perf/dra.go | 59 +- test/integration/util/util.go | 3 +- 111 files changed, 1053 insertions(+), 12850 deletions(-) delete mode 100644 pkg/apis/resource/validation/validation_podschedulingcontext_test.go delete mode 100644 pkg/registry/resource/podschedulingcontext/storage/storage.go delete mode 100644 pkg/registry/resource/podschedulingcontext/storage/storage_test.go delete mode 100644 pkg/registry/resource/podschedulingcontext/strategy.go delete mode 100644 pkg/registry/resource/podschedulingcontext/strategy_test.go rename staging/src/k8s.io/api/testdata/{HEAD/resource.k8s.io.v1alpha3.PodSchedulingContext.json => v1.31.0/resource.k8s.io.v1alpha3.DeviceClass.after_roundtrip.json} (71%) rename staging/src/k8s.io/api/testdata/{HEAD/resource.k8s.io.v1alpha3.PodSchedulingContext.pb => v1.31.0/resource.k8s.io.v1alpha3.DeviceClass.after_roundtrip.pb} (61%) rename staging/src/k8s.io/api/testdata/{HEAD/resource.k8s.io.v1alpha3.PodSchedulingContext.yaml => v1.31.0/resource.k8s.io.v1alpha3.DeviceClass.after_roundtrip.yaml} (75%) create mode 100644 
staging/src/k8s.io/api/testdata/v1.31.0/resource.k8s.io.v1alpha3.ResourceClaim.after_roundtrip.json create mode 100644 staging/src/k8s.io/api/testdata/v1.31.0/resource.k8s.io.v1alpha3.ResourceClaim.after_roundtrip.pb create mode 100644 staging/src/k8s.io/api/testdata/v1.31.0/resource.k8s.io.v1alpha3.ResourceClaim.after_roundtrip.yaml create mode 100644 staging/src/k8s.io/api/testdata/v1.31.0/resource.k8s.io.v1alpha3.ResourceClaimTemplate.after_roundtrip.json create mode 100644 staging/src/k8s.io/api/testdata/v1.31.0/resource.k8s.io.v1alpha3.ResourceClaimTemplate.after_roundtrip.pb create mode 100644 staging/src/k8s.io/api/testdata/v1.31.0/resource.k8s.io.v1alpha3.ResourceClaimTemplate.after_roundtrip.yaml delete mode 100644 staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontext.go delete mode 100644 staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontextspec.go delete mode 100644 staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontextstatus.go delete mode 100644 staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimschedulingstatus.go delete mode 100644 staging/src/k8s.io/client-go/informers/resource/v1alpha3/podschedulingcontext.go delete mode 100644 staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_podschedulingcontext.go delete mode 100644 staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/podschedulingcontext.go delete mode 100644 staging/src/k8s.io/client-go/listers/resource/v1alpha3/podschedulingcontext.go delete mode 100644 staging/src/k8s.io/dynamic-resource-allocation/controller/controller.go delete mode 100644 staging/src/k8s.io/dynamic-resource-allocation/controller/controller_test.go delete mode 100644 staging/src/k8s.io/dynamic-resource-allocation/controller/mock_queue_test.go delete mode 100644 test/e2e/dra/kind-classic-dra.yaml delete mode 100644 test/e2e/dra/test-driver/app/controller.go delete mode 100644 test/integration/scheduler_perf/config/dra/deviceclass-structured.yaml delete mode 100644 test/integration/scheduler_perf/config/dra/resourceclaim-structured.yaml delete mode 100644 test/integration/scheduler_perf/config/dra/resourceclaimtemplate-structured.yaml diff --git a/api/discovery/aggregated_v2.json b/api/discovery/aggregated_v2.json index 6172d0bbbc0..e0eb36ce418 100644 --- a/api/discovery/aggregated_v2.json +++ b/api/discovery/aggregated_v2.json @@ -1966,41 +1966,6 @@ "watch" ] }, - { - "resource": "podschedulingcontexts", - "responseKind": { - "group": "", - "kind": "PodSchedulingContext", - "version": "" - }, - "scope": "Namespaced", - "singularResource": "podschedulingcontext", - "subresources": [ - { - "responseKind": { - "group": "", - "kind": "PodSchedulingContext", - "version": "" - }, - "subresource": "status", - "verbs": [ - "get", - "patch", - "update" - ] - } - ], - "verbs": [ - "create", - "delete", - "deletecollection", - "get", - "list", - "patch", - "update", - "watch" - ] - }, { "resource": "resourceclaims", "responseKind": { diff --git a/api/discovery/apis__resource.k8s.io__v1alpha3.json b/api/discovery/apis__resource.k8s.io__v1alpha3.json index 535ae34c33f..1cdc3b522e7 100644 --- a/api/discovery/apis__resource.k8s.io__v1alpha3.json +++ b/api/discovery/apis__resource.k8s.io__v1alpha3.json @@ -20,34 +20,6 @@ "watch" ] }, - { - "kind": "PodSchedulingContext", - "name": "podschedulingcontexts", - "namespaced": true, - "singularName": "podschedulingcontext", - "storageVersionHash": 
"SkSsa067T7g=", - "verbs": [ - "create", - "delete", - "deletecollection", - "get", - "list", - "patch", - "update", - "watch" - ] - }, - { - "kind": "PodSchedulingContext", - "name": "podschedulingcontexts/status", - "namespaced": true, - "singularName": "", - "verbs": [ - "get", - "patch", - "update" - ] - }, { "kind": "ResourceClaim", "name": "resourceclaims", diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index fe48ff6d4ec..f7f8f2c7e88 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -15224,10 +15224,6 @@ "io.k8s.api.resource.v1alpha3.AllocationResult": { "description": "AllocationResult contains attributes of an allocated resource.", "properties": { - "controller": { - "description": "Controller is the name of the DRA driver which handled the allocation. That driver is also responsible for deallocating the claim. It is empty when the claim can be deallocated without involving a driver.\n\nA driver may allocate devices provided by other drivers, so this driver name here can be different from the driver names listed for the results.\n\nThis is an alpha field and requires enabling the DRAControlPlaneController feature gate.", - "type": "string" - }, "devices": { "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.DeviceAllocationResult", "description": "Devices is the result of allocating devices." @@ -15502,10 +15498,6 @@ }, "type": "array", "x-kubernetes-list-type": "atomic" - }, - "suitableNodes": { - "$ref": "#/definitions/io.k8s.api.core.v1.NodeSelector", - "description": "Only nodes matching the selector will be considered by the scheduler when trying to find a Node that fits a Pod when that Pod uses a claim that has not been allocated yet *and* that claim gets allocated through a control plane controller. It is ignored when the claim does not use a control plane controller for allocation.\n\nSetting this field is optional. If unset, all Nodes are candidates.\n\nThis is an alpha field and requires enabling the DRAControlPlaneController feature gate." } }, "type": "object" @@ -15623,112 +15615,6 @@ ], "type": "object" }, - "io.k8s.api.resource.v1alpha3.PodSchedulingContext": { - "description": "PodSchedulingContext objects hold information that is needed to schedule a Pod with ResourceClaims that use \"WaitForFirstConsumer\" allocation mode.\n\nThis is an alpha type and requires enabling the DRAControlPlaneController feature gate.", - "properties": { - "apiVersion": { - "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - "type": "string" - }, - "kind": { - "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - "type": "string" - }, - "metadata": { - "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", - "description": "Standard object metadata" - }, - "spec": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.PodSchedulingContextSpec", - "description": "Spec describes where resources for the Pod are needed." 
- }, - "status": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.PodSchedulingContextStatus", - "description": "Status describes where resources for the Pod can be allocated." - } - }, - "required": [ - "spec" - ], - "type": "object", - "x-kubernetes-group-version-kind": [ - { - "group": "resource.k8s.io", - "kind": "PodSchedulingContext", - "version": "v1alpha3" - } - ] - }, - "io.k8s.api.resource.v1alpha3.PodSchedulingContextList": { - "description": "PodSchedulingContextList is a collection of Pod scheduling objects.", - "properties": { - "apiVersion": { - "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - "type": "string" - }, - "items": { - "description": "Items is the list of PodSchedulingContext objects.", - "items": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - }, - "type": "array" - }, - "kind": { - "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - "type": "string" - }, - "metadata": { - "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", - "description": "Standard list metadata" - } - }, - "required": [ - "items" - ], - "type": "object", - "x-kubernetes-group-version-kind": [ - { - "group": "resource.k8s.io", - "kind": "PodSchedulingContextList", - "version": "v1alpha3" - } - ] - }, - "io.k8s.api.resource.v1alpha3.PodSchedulingContextSpec": { - "description": "PodSchedulingContextSpec describes where resources for the Pod are needed.", - "properties": { - "potentialNodes": { - "description": "PotentialNodes lists nodes where the Pod might be able to run.\n\nThe size of this field is limited to 128. This is large enough for many clusters. Larger clusters may need more attempts to find a node that suits all pending resources. This may get increased in the future, but not reduced.", - "items": { - "type": "string" - }, - "type": "array", - "x-kubernetes-list-type": "atomic" - }, - "selectedNode": { - "description": "SelectedNode is the node for which allocation of ResourceClaims that are referenced by the Pod and that use \"WaitForFirstConsumer\" allocation is to be attempted.", - "type": "string" - } - }, - "type": "object" - }, - "io.k8s.api.resource.v1alpha3.PodSchedulingContextStatus": { - "description": "PodSchedulingContextStatus describes where resources for the Pod can be allocated.", - "properties": { - "resourceClaims": { - "description": "ResourceClaims describes resource availability for each pod.spec.resourceClaim entry where the corresponding ResourceClaim uses \"WaitForFirstConsumer\" allocation mode.", - "items": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.ResourceClaimSchedulingStatus" - }, - "type": "array", - "x-kubernetes-list-map-keys": [ - "name" - ], - "x-kubernetes-list-type": "map" - } - }, - "type": "object" - }, "io.k8s.api.resource.v1alpha3.ResourceClaim": { "description": "ResourceClaim describes a request for access to resources in the cluster, for use by workloads. 
For example, if a workload needs an accelerator device with specific properties, this is how that request is expressed. The status stanza tracks whether this claim has been satisfied and what specific resources have been allocated.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", "properties": { @@ -15827,34 +15713,9 @@ } ] }, - "io.k8s.api.resource.v1alpha3.ResourceClaimSchedulingStatus": { - "description": "ResourceClaimSchedulingStatus contains information about one particular ResourceClaim with \"WaitForFirstConsumer\" allocation mode.", - "properties": { - "name": { - "description": "Name matches the pod.spec.resourceClaims[*].Name field.", - "type": "string" - }, - "unsuitableNodes": { - "description": "UnsuitableNodes lists nodes that the ResourceClaim cannot be allocated for.\n\nThe size of this field is limited to 128, the same as for PodSchedulingSpec.PotentialNodes. This may get increased in the future, but not reduced.", - "items": { - "type": "string" - }, - "type": "array", - "x-kubernetes-list-type": "atomic" - } - }, - "required": [ - "name" - ], - "type": "object" - }, "io.k8s.api.resource.v1alpha3.ResourceClaimSpec": { "description": "ResourceClaimSpec defines what is being requested in a ResourceClaim and how to configure it.", "properties": { - "controller": { - "description": "Controller is the name of the DRA driver that is meant to handle allocation of this claim. If empty, allocation is handled by the scheduler while scheduling a pod.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver.\n\nThis is an alpha field and requires enabling the DRAControlPlaneController feature gate.", - "type": "string" - }, "devices": { "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.DeviceClaim", "description": "Devices defines how to request devices." @@ -15869,10 +15730,6 @@ "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.AllocationResult", "description": "Allocation is set once the claim has been allocated successfully." }, - "deallocationRequested": { - "description": "Indicates that a claim is to be deallocated. While this is set, no new consumers may be added to ReservedFor.\n\nThis is only used if the claim needs to be deallocated by a DRA driver. That driver then must deallocate this claim and reset the field together with clearing the Allocation field.\n\nThis is an alpha field and requires enabling the DRAControlPlaneController feature gate.", - "type": "boolean" - }, "reservedFor": { "description": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated.\n\nIn a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled.\n\nBoth schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again.\n\nThere can be at most 32 such reservations. 
This may get increased in the future, but not reduced.", "items": { @@ -72003,681 +71860,6 @@ } } }, - "/apis/resource.k8s.io/v1alpha3/namespaces/{namespace}/podschedulingcontexts": { - "delete": { - "consumes": [ - "*/*" - ], - "description": "delete collection of PodSchedulingContext", - "operationId": "deleteResourceV1alpha3CollectionNamespacedPodSchedulingContext", - "parameters": [ - { - "$ref": "#/parameters/body-2Y1dVQaQ" - }, - { - "$ref": "#/parameters/continue-QfD61s0i" - }, - { - "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", - "in": "query", - "name": "dryRun", - "type": "string", - "uniqueItems": true - }, - { - "$ref": "#/parameters/fieldSelector-xIcQKXFG" - }, - { - "$ref": "#/parameters/gracePeriodSeconds--K5HaBOS" - }, - { - "$ref": "#/parameters/labelSelector-5Zw57w4C" - }, - { - "$ref": "#/parameters/limit-1NfNmdNH" - }, - { - "$ref": "#/parameters/orphanDependents-uRB25kX5" - }, - { - "$ref": "#/parameters/propagationPolicy-6jk3prlO" - }, - { - "$ref": "#/parameters/resourceVersion-5WAnf1kx" - }, - { - "$ref": "#/parameters/resourceVersionMatch-t8XhRHeC" - }, - { - "$ref": "#/parameters/sendInitialEvents-rLXlEK_k" - }, - { - "$ref": "#/parameters/timeoutSeconds-yvYezaOC" - } - ], - "produces": [ - "application/json", - "application/yaml", - "application/vnd.kubernetes.protobuf" - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status" - } - }, - "401": { - "description": "Unauthorized" - } - }, - "schemes": [ - "https" - ], - "tags": [ - "resource_v1alpha3" - ], - "x-kubernetes-action": "deletecollection", - "x-kubernetes-group-version-kind": { - "group": "resource.k8s.io", - "kind": "PodSchedulingContext", - "version": "v1alpha3" - } - }, - "get": { - "consumes": [ - "*/*" - ], - "description": "list or watch objects of kind PodSchedulingContext", - "operationId": "listResourceV1alpha3NamespacedPodSchedulingContext", - "parameters": [ - { - "$ref": "#/parameters/allowWatchBookmarks-HC2hJt-J" - }, - { - "$ref": "#/parameters/continue-QfD61s0i" - }, - { - "$ref": "#/parameters/fieldSelector-xIcQKXFG" - }, - { - "$ref": "#/parameters/labelSelector-5Zw57w4C" - }, - { - "$ref": "#/parameters/limit-1NfNmdNH" - }, - { - "$ref": "#/parameters/resourceVersion-5WAnf1kx" - }, - { - "$ref": "#/parameters/resourceVersionMatch-t8XhRHeC" - }, - { - "$ref": "#/parameters/sendInitialEvents-rLXlEK_k" - }, - { - "$ref": "#/parameters/timeoutSeconds-yvYezaOC" - }, - { - "$ref": "#/parameters/watch-XNNPZGbK" - } - ], - "produces": [ - "application/json", - "application/yaml", - "application/vnd.kubernetes.protobuf", - "application/json;stream=watch", - "application/vnd.kubernetes.protobuf;stream=watch" - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.PodSchedulingContextList" - } - }, - "401": { - "description": "Unauthorized" - } - }, - "schemes": [ - "https" - ], - "tags": [ - "resource_v1alpha3" - ], - "x-kubernetes-action": "list", - "x-kubernetes-group-version-kind": { - "group": "resource.k8s.io", - "kind": "PodSchedulingContext", - "version": "v1alpha3" - } - }, - "parameters": [ - { - "$ref": "#/parameters/namespace-vgWSWtn3" - }, - { - "$ref": "#/parameters/pretty-tJGM1-ng" - } - ], - "post": { - "consumes": [ 
- "*/*" - ], - "description": "create a PodSchedulingContext", - "operationId": "createResourceV1alpha3NamespacedPodSchedulingContext", - "parameters": [ - { - "in": "body", - "name": "body", - "required": true, - "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - }, - { - "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", - "in": "query", - "name": "dryRun", - "type": "string", - "uniqueItems": true - }, - { - "$ref": "#/parameters/fieldManager-Qy4HdaTW" - }, - { - "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.", - "in": "query", - "name": "fieldValidation", - "type": "string", - "uniqueItems": true - } - ], - "produces": [ - "application/json", - "application/yaml", - "application/vnd.kubernetes.protobuf" - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - }, - "201": { - "description": "Created", - "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - }, - "202": { - "description": "Accepted", - "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - }, - "401": { - "description": "Unauthorized" - } - }, - "schemes": [ - "https" - ], - "tags": [ - "resource_v1alpha3" - ], - "x-kubernetes-action": "post", - "x-kubernetes-group-version-kind": { - "group": "resource.k8s.io", - "kind": "PodSchedulingContext", - "version": "v1alpha3" - } - } - }, - "/apis/resource.k8s.io/v1alpha3/namespaces/{namespace}/podschedulingcontexts/{name}": { - "delete": { - "consumes": [ - "*/*" - ], - "description": "delete a PodSchedulingContext", - "operationId": "deleteResourceV1alpha3NamespacedPodSchedulingContext", - "parameters": [ - { - "$ref": "#/parameters/body-2Y1dVQaQ" - }, - { - "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed", - "in": "query", - "name": "dryRun", - "type": "string", - "uniqueItems": true - }, - { - "$ref": "#/parameters/gracePeriodSeconds--K5HaBOS" - }, - { - "$ref": "#/parameters/orphanDependents-uRB25kX5" - }, - { - "$ref": "#/parameters/propagationPolicy-6jk3prlO" - } - ], - "produces": [ - "application/json", - "application/yaml", - "application/vnd.kubernetes.protobuf" - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - }, - "202": { - "description": "Accepted", - "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - }, - "401": { - "description": "Unauthorized" - } - }, - "schemes": [ - "https" - ], - "tags": [ - "resource_v1alpha3" - ], - "x-kubernetes-action": "delete", - "x-kubernetes-group-version-kind": { - "group": "resource.k8s.io", - "kind": "PodSchedulingContext", - "version": "v1alpha3" - } - }, - "get": { - "consumes": [ - "*/*" - ], - "description": "read the specified PodSchedulingContext", - "operationId": "readResourceV1alpha3NamespacedPodSchedulingContext", - "produces": [ - "application/json", - "application/yaml", - "application/vnd.kubernetes.protobuf" - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - }, - "401": { - "description": "Unauthorized" - } - }, - "schemes": [ - "https" - ], - "tags": [ - "resource_v1alpha3" - ], - "x-kubernetes-action": "get", - "x-kubernetes-group-version-kind": { - "group": "resource.k8s.io", - "kind": "PodSchedulingContext", - "version": "v1alpha3" - } - }, - "parameters": [ - { - "description": "name of the PodSchedulingContext", - "in": "path", - "name": "name", - "required": true, - "type": "string", - "uniqueItems": true - }, - { - "$ref": "#/parameters/namespace-vgWSWtn3" - }, - { - "$ref": "#/parameters/pretty-tJGM1-ng" - } - ], - "patch": { - "consumes": [ - "application/json-patch+json", - "application/merge-patch+json", - "application/strategic-merge-patch+json", - "application/apply-patch+yaml" - ], - "description": "partially update the specified PodSchedulingContext", - "operationId": "patchResourceV1alpha3NamespacedPodSchedulingContext", - "parameters": [ - { - "$ref": "#/parameters/body-78PwaGsr" - }, - { - "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", - "in": "query", - "name": "dryRun", - "type": "string", - "uniqueItems": true - }, - { - "$ref": "#/parameters/fieldManager-7c6nTn1T" - }, - { - "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. 
This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.", - "in": "query", - "name": "fieldValidation", - "type": "string", - "uniqueItems": true - }, - { - "$ref": "#/parameters/force-tOGGb0Yi" - } - ], - "produces": [ - "application/json", - "application/yaml", - "application/vnd.kubernetes.protobuf" - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - }, - "201": { - "description": "Created", - "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - }, - "401": { - "description": "Unauthorized" - } - }, - "schemes": [ - "https" - ], - "tags": [ - "resource_v1alpha3" - ], - "x-kubernetes-action": "patch", - "x-kubernetes-group-version-kind": { - "group": "resource.k8s.io", - "kind": "PodSchedulingContext", - "version": "v1alpha3" - } - }, - "put": { - "consumes": [ - "*/*" - ], - "description": "replace the specified PodSchedulingContext", - "operationId": "replaceResourceV1alpha3NamespacedPodSchedulingContext", - "parameters": [ - { - "in": "body", - "name": "body", - "required": true, - "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - }, - { - "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", - "in": "query", - "name": "dryRun", - "type": "string", - "uniqueItems": true - }, - { - "$ref": "#/parameters/fieldManager-Qy4HdaTW" - }, - { - "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. 
The error returned from the server will contain all unknown and duplicate fields encountered.", - "in": "query", - "name": "fieldValidation", - "type": "string", - "uniqueItems": true - } - ], - "produces": [ - "application/json", - "application/yaml", - "application/vnd.kubernetes.protobuf" - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - }, - "201": { - "description": "Created", - "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - }, - "401": { - "description": "Unauthorized" - } - }, - "schemes": [ - "https" - ], - "tags": [ - "resource_v1alpha3" - ], - "x-kubernetes-action": "put", - "x-kubernetes-group-version-kind": { - "group": "resource.k8s.io", - "kind": "PodSchedulingContext", - "version": "v1alpha3" - } - } - }, - "/apis/resource.k8s.io/v1alpha3/namespaces/{namespace}/podschedulingcontexts/{name}/status": { - "get": { - "consumes": [ - "*/*" - ], - "description": "read status of the specified PodSchedulingContext", - "operationId": "readResourceV1alpha3NamespacedPodSchedulingContextStatus", - "produces": [ - "application/json", - "application/yaml", - "application/vnd.kubernetes.protobuf" - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - }, - "401": { - "description": "Unauthorized" - } - }, - "schemes": [ - "https" - ], - "tags": [ - "resource_v1alpha3" - ], - "x-kubernetes-action": "get", - "x-kubernetes-group-version-kind": { - "group": "resource.k8s.io", - "kind": "PodSchedulingContext", - "version": "v1alpha3" - } - }, - "parameters": [ - { - "description": "name of the PodSchedulingContext", - "in": "path", - "name": "name", - "required": true, - "type": "string", - "uniqueItems": true - }, - { - "$ref": "#/parameters/namespace-vgWSWtn3" - }, - { - "$ref": "#/parameters/pretty-tJGM1-ng" - } - ], - "patch": { - "consumes": [ - "application/json-patch+json", - "application/merge-patch+json", - "application/strategic-merge-patch+json", - "application/apply-patch+yaml" - ], - "description": "partially update status of the specified PodSchedulingContext", - "operationId": "patchResourceV1alpha3NamespacedPodSchedulingContextStatus", - "parameters": [ - { - "$ref": "#/parameters/body-78PwaGsr" - }, - { - "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", - "in": "query", - "name": "dryRun", - "type": "string", - "uniqueItems": true - }, - { - "$ref": "#/parameters/fieldManager-7c6nTn1T" - }, - { - "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. 
This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.", - "in": "query", - "name": "fieldValidation", - "type": "string", - "uniqueItems": true - }, - { - "$ref": "#/parameters/force-tOGGb0Yi" - } - ], - "produces": [ - "application/json", - "application/yaml", - "application/vnd.kubernetes.protobuf" - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - }, - "201": { - "description": "Created", - "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - }, - "401": { - "description": "Unauthorized" - } - }, - "schemes": [ - "https" - ], - "tags": [ - "resource_v1alpha3" - ], - "x-kubernetes-action": "patch", - "x-kubernetes-group-version-kind": { - "group": "resource.k8s.io", - "kind": "PodSchedulingContext", - "version": "v1alpha3" - } - }, - "put": { - "consumes": [ - "*/*" - ], - "description": "replace status of the specified PodSchedulingContext", - "operationId": "replaceResourceV1alpha3NamespacedPodSchedulingContextStatus", - "parameters": [ - { - "in": "body", - "name": "body", - "required": true, - "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - }, - { - "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", - "in": "query", - "name": "dryRun", - "type": "string", - "uniqueItems": true - }, - { - "$ref": "#/parameters/fieldManager-Qy4HdaTW" - }, - { - "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. 
The error returned from the server will contain all unknown and duplicate fields encountered.", - "in": "query", - "name": "fieldValidation", - "type": "string", - "uniqueItems": true - } - ], - "produces": [ - "application/json", - "application/yaml", - "application/vnd.kubernetes.protobuf" - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - }, - "201": { - "description": "Created", - "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - }, - "401": { - "description": "Unauthorized" - } - }, - "schemes": [ - "https" - ], - "tags": [ - "resource_v1alpha3" - ], - "x-kubernetes-action": "put", - "x-kubernetes-group-version-kind": { - "group": "resource.k8s.io", - "kind": "PodSchedulingContext", - "version": "v1alpha3" - } - } - }, "/apis/resource.k8s.io/v1alpha3/namespaces/{namespace}/resourceclaims": { "delete": { "consumes": [ @@ -73838,80 +73020,6 @@ } } }, - "/apis/resource.k8s.io/v1alpha3/podschedulingcontexts": { - "get": { - "consumes": [ - "*/*" - ], - "description": "list or watch objects of kind PodSchedulingContext", - "operationId": "listResourceV1alpha3PodSchedulingContextForAllNamespaces", - "produces": [ - "application/json", - "application/yaml", - "application/vnd.kubernetes.protobuf", - "application/json;stream=watch", - "application/vnd.kubernetes.protobuf;stream=watch" - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.PodSchedulingContextList" - } - }, - "401": { - "description": "Unauthorized" - } - }, - "schemes": [ - "https" - ], - "tags": [ - "resource_v1alpha3" - ], - "x-kubernetes-action": "list", - "x-kubernetes-group-version-kind": { - "group": "resource.k8s.io", - "kind": "PodSchedulingContext", - "version": "v1alpha3" - } - }, - "parameters": [ - { - "$ref": "#/parameters/allowWatchBookmarks-HC2hJt-J" - }, - { - "$ref": "#/parameters/continue-QfD61s0i" - }, - { - "$ref": "#/parameters/fieldSelector-xIcQKXFG" - }, - { - "$ref": "#/parameters/labelSelector-5Zw57w4C" - }, - { - "$ref": "#/parameters/limit-1NfNmdNH" - }, - { - "$ref": "#/parameters/pretty-tJGM1-ng" - }, - { - "$ref": "#/parameters/resourceVersion-5WAnf1kx" - }, - { - "$ref": "#/parameters/resourceVersionMatch-t8XhRHeC" - }, - { - "$ref": "#/parameters/sendInitialEvents-rLXlEK_k" - }, - { - "$ref": "#/parameters/timeoutSeconds-yvYezaOC" - }, - { - "$ref": "#/parameters/watch-XNNPZGbK" - } - ] - }, "/apis/resource.k8s.io/v1alpha3/resourceclaims": { "get": { "consumes": [ @@ -74695,168 +73803,6 @@ } ] }, - "/apis/resource.k8s.io/v1alpha3/watch/namespaces/{namespace}/podschedulingcontexts": { - "get": { - "consumes": [ - "*/*" - ], - "description": "watch individual changes to a list of PodSchedulingContext. 
deprecated: use the 'watch' parameter with a list operation instead.", - "operationId": "watchResourceV1alpha3NamespacedPodSchedulingContextList", - "produces": [ - "application/json", - "application/yaml", - "application/vnd.kubernetes.protobuf", - "application/json;stream=watch", - "application/vnd.kubernetes.protobuf;stream=watch" - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - }, - "401": { - "description": "Unauthorized" - } - }, - "schemes": [ - "https" - ], - "tags": [ - "resource_v1alpha3" - ], - "x-kubernetes-action": "watchlist", - "x-kubernetes-group-version-kind": { - "group": "resource.k8s.io", - "kind": "PodSchedulingContext", - "version": "v1alpha3" - } - }, - "parameters": [ - { - "$ref": "#/parameters/allowWatchBookmarks-HC2hJt-J" - }, - { - "$ref": "#/parameters/continue-QfD61s0i" - }, - { - "$ref": "#/parameters/fieldSelector-xIcQKXFG" - }, - { - "$ref": "#/parameters/labelSelector-5Zw57w4C" - }, - { - "$ref": "#/parameters/limit-1NfNmdNH" - }, - { - "$ref": "#/parameters/namespace-vgWSWtn3" - }, - { - "$ref": "#/parameters/pretty-tJGM1-ng" - }, - { - "$ref": "#/parameters/resourceVersion-5WAnf1kx" - }, - { - "$ref": "#/parameters/resourceVersionMatch-t8XhRHeC" - }, - { - "$ref": "#/parameters/sendInitialEvents-rLXlEK_k" - }, - { - "$ref": "#/parameters/timeoutSeconds-yvYezaOC" - }, - { - "$ref": "#/parameters/watch-XNNPZGbK" - } - ] - }, - "/apis/resource.k8s.io/v1alpha3/watch/namespaces/{namespace}/podschedulingcontexts/{name}": { - "get": { - "consumes": [ - "*/*" - ], - "description": "watch changes to an object of kind PodSchedulingContext. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.", - "operationId": "watchResourceV1alpha3NamespacedPodSchedulingContext", - "produces": [ - "application/json", - "application/yaml", - "application/vnd.kubernetes.protobuf", - "application/json;stream=watch", - "application/vnd.kubernetes.protobuf;stream=watch" - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - }, - "401": { - "description": "Unauthorized" - } - }, - "schemes": [ - "https" - ], - "tags": [ - "resource_v1alpha3" - ], - "x-kubernetes-action": "watch", - "x-kubernetes-group-version-kind": { - "group": "resource.k8s.io", - "kind": "PodSchedulingContext", - "version": "v1alpha3" - } - }, - "parameters": [ - { - "$ref": "#/parameters/allowWatchBookmarks-HC2hJt-J" - }, - { - "$ref": "#/parameters/continue-QfD61s0i" - }, - { - "$ref": "#/parameters/fieldSelector-xIcQKXFG" - }, - { - "$ref": "#/parameters/labelSelector-5Zw57w4C" - }, - { - "$ref": "#/parameters/limit-1NfNmdNH" - }, - { - "description": "name of the PodSchedulingContext", - "in": "path", - "name": "name", - "required": true, - "type": "string", - "uniqueItems": true - }, - { - "$ref": "#/parameters/namespace-vgWSWtn3" - }, - { - "$ref": "#/parameters/pretty-tJGM1-ng" - }, - { - "$ref": "#/parameters/resourceVersion-5WAnf1kx" - }, - { - "$ref": "#/parameters/resourceVersionMatch-t8XhRHeC" - }, - { - "$ref": "#/parameters/sendInitialEvents-rLXlEK_k" - }, - { - "$ref": "#/parameters/timeoutSeconds-yvYezaOC" - }, - { - "$ref": "#/parameters/watch-XNNPZGbK" - } - ] - }, "/apis/resource.k8s.io/v1alpha3/watch/namespaces/{namespace}/resourceclaims": { "get": { "consumes": [ @@ -75181,80 +74127,6 @@ } ] }, - 
"/apis/resource.k8s.io/v1alpha3/watch/podschedulingcontexts": { - "get": { - "consumes": [ - "*/*" - ], - "description": "watch individual changes to a list of PodSchedulingContext. deprecated: use the 'watch' parameter with a list operation instead.", - "operationId": "watchResourceV1alpha3PodSchedulingContextListForAllNamespaces", - "produces": [ - "application/json", - "application/yaml", - "application/vnd.kubernetes.protobuf", - "application/json;stream=watch", - "application/vnd.kubernetes.protobuf;stream=watch" - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - }, - "401": { - "description": "Unauthorized" - } - }, - "schemes": [ - "https" - ], - "tags": [ - "resource_v1alpha3" - ], - "x-kubernetes-action": "watchlist", - "x-kubernetes-group-version-kind": { - "group": "resource.k8s.io", - "kind": "PodSchedulingContext", - "version": "v1alpha3" - } - }, - "parameters": [ - { - "$ref": "#/parameters/allowWatchBookmarks-HC2hJt-J" - }, - { - "$ref": "#/parameters/continue-QfD61s0i" - }, - { - "$ref": "#/parameters/fieldSelector-xIcQKXFG" - }, - { - "$ref": "#/parameters/labelSelector-5Zw57w4C" - }, - { - "$ref": "#/parameters/limit-1NfNmdNH" - }, - { - "$ref": "#/parameters/pretty-tJGM1-ng" - }, - { - "$ref": "#/parameters/resourceVersion-5WAnf1kx" - }, - { - "$ref": "#/parameters/resourceVersionMatch-t8XhRHeC" - }, - { - "$ref": "#/parameters/sendInitialEvents-rLXlEK_k" - }, - { - "$ref": "#/parameters/timeoutSeconds-yvYezaOC" - }, - { - "$ref": "#/parameters/watch-XNNPZGbK" - } - ] - }, "/apis/resource.k8s.io/v1alpha3/watch/resourceclaims": { "get": { "consumes": [ diff --git a/api/openapi-spec/v3/apis__resource.k8s.io__v1alpha3_openapi.json b/api/openapi-spec/v3/apis__resource.k8s.io__v1alpha3_openapi.json index 88128b5fb67..c107e1d1f2f 100644 --- a/api/openapi-spec/v3/apis__resource.k8s.io__v1alpha3_openapi.json +++ b/api/openapi-spec/v3/apis__resource.k8s.io__v1alpha3_openapi.json @@ -89,10 +89,6 @@ "io.k8s.api.resource.v1alpha3.AllocationResult": { "description": "AllocationResult contains attributes of an allocated resource.", "properties": { - "controller": { - "description": "Controller is the name of the DRA driver which handled the allocation. That driver is also responsible for deallocating the claim. It is empty when the claim can be deallocated without involving a driver.\n\nA driver may allocate devices provided by other drivers, so this driver name here can be different from the driver names listed for the results.\n\nThis is an alpha field and requires enabling the DRAControlPlaneController feature gate.", - "type": "string" - }, "devices": { "allOf": [ { @@ -457,14 +453,6 @@ }, "type": "array", "x-kubernetes-list-type": "atomic" - }, - "suitableNodes": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.api.core.v1.NodeSelector" - } - ], - "description": "Only nodes matching the selector will be considered by the scheduler when trying to find a Node that fits a Pod when that Pod uses a claim that has not been allocated yet *and* that claim gets allocated through a control plane controller. It is ignored when the claim does not use a control plane controller for allocation.\n\nSetting this field is optional. If unset, all Nodes are candidates.\n\nThis is an alpha field and requires enabling the DRAControlPlaneController feature gate." 
} }, "type": "object" @@ -604,143 +592,6 @@ ], "type": "object" }, - "io.k8s.api.resource.v1alpha3.PodSchedulingContext": { - "description": "PodSchedulingContext objects hold information that is needed to schedule a Pod with ResourceClaims that use \"WaitForFirstConsumer\" allocation mode.\n\nThis is an alpha type and requires enabling the DRAControlPlaneController feature gate.", - "properties": { - "apiVersion": { - "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - "type": "string" - }, - "kind": { - "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - "type": "string" - }, - "metadata": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" - } - ], - "default": {}, - "description": "Standard object metadata" - }, - "spec": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContextSpec" - } - ], - "default": {}, - "description": "Spec describes where resources for the Pod are needed." - }, - "status": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContextStatus" - } - ], - "default": {}, - "description": "Status describes where resources for the Pod can be allocated." - } - }, - "required": [ - "spec" - ], - "type": "object", - "x-kubernetes-group-version-kind": [ - { - "group": "resource.k8s.io", - "kind": "PodSchedulingContext", - "version": "v1alpha3" - } - ] - }, - "io.k8s.api.resource.v1alpha3.PodSchedulingContextList": { - "description": "PodSchedulingContextList is a collection of Pod scheduling objects.", - "properties": { - "apiVersion": { - "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - "type": "string" - }, - "items": { - "description": "Items is the list of PodSchedulingContext objects.", - "items": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - ], - "default": {} - }, - "type": "array" - }, - "kind": { - "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - "type": "string" - }, - "metadata": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" - } - ], - "default": {}, - "description": "Standard list metadata" - } - }, - "required": [ - "items" - ], - "type": "object", - "x-kubernetes-group-version-kind": [ - { - "group": "resource.k8s.io", - "kind": "PodSchedulingContextList", - "version": "v1alpha3" - } - ] - }, - "io.k8s.api.resource.v1alpha3.PodSchedulingContextSpec": { - "description": "PodSchedulingContextSpec describes where resources for the Pod are needed.", - "properties": { - "potentialNodes": { - "description": "PotentialNodes lists nodes where the Pod might be able to run.\n\nThe size of this field is limited to 128. This is large enough for many clusters. Larger clusters may need more attempts to find a node that suits all pending resources. This may get increased in the future, but not reduced.", - "items": { - "default": "", - "type": "string" - }, - "type": "array", - "x-kubernetes-list-type": "atomic" - }, - "selectedNode": { - "description": "SelectedNode is the node for which allocation of ResourceClaims that are referenced by the Pod and that use \"WaitForFirstConsumer\" allocation is to be attempted.", - "type": "string" - } - }, - "type": "object" - }, - "io.k8s.api.resource.v1alpha3.PodSchedulingContextStatus": { - "description": "PodSchedulingContextStatus describes where resources for the Pod can be allocated.", - "properties": { - "resourceClaims": { - "description": "ResourceClaims describes resource availability for each pod.spec.resourceClaim entry where the corresponding ResourceClaim uses \"WaitForFirstConsumer\" allocation mode.", - "items": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.ResourceClaimSchedulingStatus" - } - ], - "default": {} - }, - "type": "array", - "x-kubernetes-list-map-keys": [ - "name" - ], - "x-kubernetes-list-type": "map" - } - }, - "type": "object" - }, "io.k8s.api.resource.v1alpha3.ResourceClaim": { "description": "ResourceClaim describes a request for access to resources in the cluster, for use by workloads. For example, if a workload needs an accelerator device with specific properties, this is how that request is expressed. The status stanza tracks whether this claim has been satisfied and what specific resources have been allocated.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", "properties": { @@ -867,36 +718,9 @@ } ] }, - "io.k8s.api.resource.v1alpha3.ResourceClaimSchedulingStatus": { - "description": "ResourceClaimSchedulingStatus contains information about one particular ResourceClaim with \"WaitForFirstConsumer\" allocation mode.", - "properties": { - "name": { - "default": "", - "description": "Name matches the pod.spec.resourceClaims[*].Name field.", - "type": "string" - }, - "unsuitableNodes": { - "description": "UnsuitableNodes lists nodes that the ResourceClaim cannot be allocated for.\n\nThe size of this field is limited to 128, the same as for PodSchedulingSpec.PotentialNodes. 
This may get increased in the future, but not reduced.", - "items": { - "default": "", - "type": "string" - }, - "type": "array", - "x-kubernetes-list-type": "atomic" - } - }, - "required": [ - "name" - ], - "type": "object" - }, "io.k8s.api.resource.v1alpha3.ResourceClaimSpec": { "description": "ResourceClaimSpec defines what is being requested in a ResourceClaim and how to configure it.", "properties": { - "controller": { - "description": "Controller is the name of the DRA driver that is meant to handle allocation of this claim. If empty, allocation is handled by the scheduler while scheduling a pod.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver.\n\nThis is an alpha field and requires enabling the DRAControlPlaneController feature gate.", - "type": "string" - }, "devices": { "allOf": [ { @@ -920,10 +744,6 @@ ], "description": "Allocation is set once the claim has been allocated successfully." }, - "deallocationRequested": { - "description": "Indicates that a claim is to be deallocated. While this is set, no new consumers may be added to ReservedFor.\n\nThis is only used if the claim needs to be deallocated by a DRA driver. That driver then must deallocate this claim and reset the field together with clearing the Allocation field.\n\nThis is an alpha field and requires enabling the DRAControlPlaneController feature gate.", - "type": "boolean" - }, "reservedFor": { "description": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated.\n\nIn a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled.\n\nBoth schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again.\n\nThere can be at most 32 such reservations. This may get increased in the future, but not reduced.", "items": { @@ -3247,1126 +3067,6 @@ } } }, - "/apis/resource.k8s.io/v1alpha3/namespaces/{namespace}/podschedulingcontexts": { - "delete": { - "description": "delete collection of PodSchedulingContext", - "operationId": "deleteResourceV1alpha3CollectionNamespacedPodSchedulingContext", - "parameters": [ - { - "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", - "in": "query", - "name": "continue", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", - "in": "query", - "name": "dryRun", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", - "in": "query", - "name": "fieldSelector", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.", - "in": "query", - "name": "gracePeriodSeconds", - "schema": { - "type": "integer", - "uniqueItems": true - } - }, - { - "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", - "in": "query", - "name": "labelSelector", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", - "in": "query", - "name": "limit", - "schema": { - "type": "integer", - "uniqueItems": true - } - }, - { - "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. 
Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.", - "in": "query", - "name": "orphanDependents", - "schema": { - "type": "boolean", - "uniqueItems": true - } - }, - { - "description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.", - "in": "query", - "name": "propagationPolicy", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", - "in": "query", - "name": "resourceVersion", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", - "in": "query", - "name": "resourceVersionMatch", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "`sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched.\n\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan\n is interpreted as \"data at least as new as the provided `resourceVersion`\"\n and the bookmark event is send when the state is synced\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\n If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the\n bookmark event is send when the state is synced at least to the moment\n when request started being processed.\n- `resourceVersionMatch` set to any other value or unset\n Invalid error is returned.\n\nDefaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.", - "in": "query", - "name": "sendInitialEvents", - "schema": { - "type": "boolean", - "uniqueItems": true - } - }, - { - "description": "Timeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity.", - "in": "query", - "name": "timeoutSeconds", - "schema": { - "type": "integer", - "uniqueItems": true - } - } - ], - "requestBody": { - "content": { - "*/*": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions" - } - } - } - }, - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" - } - } - }, - "description": "OK" - }, - "401": { - "description": "Unauthorized" - } - }, - "tags": [ - "resource_v1alpha3" - ], - "x-kubernetes-action": "deletecollection", - "x-kubernetes-group-version-kind": { - "group": "resource.k8s.io", - "kind": "PodSchedulingContext", - "version": "v1alpha3" - } - }, - "get": { - "description": "list or watch objects of kind PodSchedulingContext", - "operationId": "listResourceV1alpha3NamespacedPodSchedulingContext", - "parameters": [ - { - "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.", - "in": "query", - "name": "allowWatchBookmarks", - "schema": { - "type": "boolean", - "uniqueItems": true - } - }, - { - "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", - "in": "query", - "name": "continue", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", - "in": "query", - "name": "fieldSelector", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "A selector to restrict the list of returned objects by their labels. 
Defaults to everything.", - "in": "query", - "name": "labelSelector", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", - "in": "query", - "name": "limit", - "schema": { - "type": "integer", - "uniqueItems": true - } - }, - { - "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", - "in": "query", - "name": "resourceVersion", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", - "in": "query", - "name": "resourceVersionMatch", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "`sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched.\n\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. 
The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan\n is interpreted as \"data at least as new as the provided `resourceVersion`\"\n and the bookmark event is send when the state is synced\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\n If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the\n bookmark event is send when the state is synced at least to the moment\n when request started being processed.\n- `resourceVersionMatch` set to any other value or unset\n Invalid error is returned.\n\nDefaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.", - "in": "query", - "name": "sendInitialEvents", - "schema": { - "type": "boolean", - "uniqueItems": true - } - }, - { - "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", - "in": "query", - "name": "timeoutSeconds", - "schema": { - "type": "integer", - "uniqueItems": true - } - }, - { - "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", - "in": "query", - "name": "watch", - "schema": { - "type": "boolean", - "uniqueItems": true - } - } - ], - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContextList" - } - }, - "application/json;stream=watch": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContextList" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContextList" - } - }, - "application/vnd.kubernetes.protobuf;stream=watch": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContextList" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContextList" - } - } - }, - "description": "OK" - }, - "401": { - "description": "Unauthorized" - } - }, - "tags": [ - "resource_v1alpha3" - ], - "x-kubernetes-action": "list", - "x-kubernetes-group-version-kind": { - "group": "resource.k8s.io", - "kind": "PodSchedulingContext", - "version": "v1alpha3" - } - }, - "parameters": [ - { - "description": "object name and auth scope, such as for teams and projects", - "in": "path", - "name": "namespace", - "required": true, - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", - "in": "query", - "name": "pretty", - "schema": { - "type": "string", - "uniqueItems": true - } - } - ], - "post": { - "description": "create a PodSchedulingContext", - "operationId": "createResourceV1alpha3NamespacedPodSchedulingContext", - "parameters": [ - { - "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed", - "in": "query", - "name": "dryRun", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.", - "in": "query", - "name": "fieldManager", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.", - "in": "query", - "name": "fieldValidation", - "schema": { - "type": "string", - "uniqueItems": true - } - } - ], - "requestBody": { - "content": { - "*/*": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - } - }, - "required": true - }, - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - } - }, - "description": "OK" - }, - "201": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - } - }, - "description": "Created" - }, - "202": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - } - }, - "description": "Accepted" - }, - "401": { - "description": "Unauthorized" - } - }, - "tags": [ - "resource_v1alpha3" - ], - "x-kubernetes-action": "post", - "x-kubernetes-group-version-kind": { - "group": "resource.k8s.io", - "kind": "PodSchedulingContext", - "version": "v1alpha3" - } - } - }, - 
"/apis/resource.k8s.io/v1alpha3/namespaces/{namespace}/podschedulingcontexts/{name}": { - "delete": { - "description": "delete a PodSchedulingContext", - "operationId": "deleteResourceV1alpha3NamespacedPodSchedulingContext", - "parameters": [ - { - "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", - "in": "query", - "name": "dryRun", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.", - "in": "query", - "name": "gracePeriodSeconds", - "schema": { - "type": "integer", - "uniqueItems": true - } - }, - { - "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.", - "in": "query", - "name": "orphanDependents", - "schema": { - "type": "boolean", - "uniqueItems": true - } - }, - { - "description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.", - "in": "query", - "name": "propagationPolicy", - "schema": { - "type": "string", - "uniqueItems": true - } - } - ], - "requestBody": { - "content": { - "*/*": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions" - } - } - } - }, - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - } - }, - "description": "OK" - }, - "202": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - } - }, - "description": "Accepted" - }, - "401": { - "description": "Unauthorized" - } - }, - "tags": [ - "resource_v1alpha3" - ], - "x-kubernetes-action": "delete", - "x-kubernetes-group-version-kind": { - "group": "resource.k8s.io", - "kind": "PodSchedulingContext", - "version": "v1alpha3" - } - }, - "get": { - "description": "read the specified PodSchedulingContext", - 
"operationId": "readResourceV1alpha3NamespacedPodSchedulingContext", - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - } - }, - "description": "OK" - }, - "401": { - "description": "Unauthorized" - } - }, - "tags": [ - "resource_v1alpha3" - ], - "x-kubernetes-action": "get", - "x-kubernetes-group-version-kind": { - "group": "resource.k8s.io", - "kind": "PodSchedulingContext", - "version": "v1alpha3" - } - }, - "parameters": [ - { - "description": "name of the PodSchedulingContext", - "in": "path", - "name": "name", - "required": true, - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "object name and auth scope, such as for teams and projects", - "in": "path", - "name": "namespace", - "required": true, - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", - "in": "query", - "name": "pretty", - "schema": { - "type": "string", - "uniqueItems": true - } - } - ], - "patch": { - "description": "partially update the specified PodSchedulingContext", - "operationId": "patchResourceV1alpha3NamespacedPodSchedulingContext", - "parameters": [ - { - "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", - "in": "query", - "name": "dryRun", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).", - "in": "query", - "name": "fieldManager", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. 
The error returned from the server will contain all unknown and duplicate fields encountered.", - "in": "query", - "name": "fieldValidation", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.", - "in": "query", - "name": "force", - "schema": { - "type": "boolean", - "uniqueItems": true - } - } - ], - "requestBody": { - "content": { - "application/apply-patch+yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" - } - }, - "application/json-patch+json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" - } - }, - "application/merge-patch+json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" - } - }, - "application/strategic-merge-patch+json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" - } - } - }, - "required": true - }, - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - } - }, - "description": "OK" - }, - "201": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - } - }, - "description": "Created" - }, - "401": { - "description": "Unauthorized" - } - }, - "tags": [ - "resource_v1alpha3" - ], - "x-kubernetes-action": "patch", - "x-kubernetes-group-version-kind": { - "group": "resource.k8s.io", - "kind": "PodSchedulingContext", - "version": "v1alpha3" - } - }, - "put": { - "description": "replace the specified PodSchedulingContext", - "operationId": "replaceResourceV1alpha3NamespacedPodSchedulingContext", - "parameters": [ - { - "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", - "in": "query", - "name": "dryRun", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.", - "in": "query", - "name": "fieldManager", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. 
Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.", - "in": "query", - "name": "fieldValidation", - "schema": { - "type": "string", - "uniqueItems": true - } - } - ], - "requestBody": { - "content": { - "*/*": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - } - }, - "required": true - }, - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - } - }, - "description": "OK" - }, - "201": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - } - }, - "description": "Created" - }, - "401": { - "description": "Unauthorized" - } - }, - "tags": [ - "resource_v1alpha3" - ], - "x-kubernetes-action": "put", - "x-kubernetes-group-version-kind": { - "group": "resource.k8s.io", - "kind": "PodSchedulingContext", - "version": "v1alpha3" - } - } - }, - "/apis/resource.k8s.io/v1alpha3/namespaces/{namespace}/podschedulingcontexts/{name}/status": { - "get": { - "description": "read status of the specified PodSchedulingContext", - "operationId": "readResourceV1alpha3NamespacedPodSchedulingContextStatus", - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - } - }, - "description": "OK" - }, - "401": { - "description": "Unauthorized" - } - }, - "tags": [ - "resource_v1alpha3" - ], - "x-kubernetes-action": "get", - "x-kubernetes-group-version-kind": { - "group": "resource.k8s.io", - "kind": "PodSchedulingContext", - "version": "v1alpha3" - } - }, - "parameters": [ - { - "description": "name of the PodSchedulingContext", - "in": "path", - "name": "name", - "required": true, - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "object name and auth 
scope, such as for teams and projects", - "in": "path", - "name": "namespace", - "required": true, - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", - "in": "query", - "name": "pretty", - "schema": { - "type": "string", - "uniqueItems": true - } - } - ], - "patch": { - "description": "partially update status of the specified PodSchedulingContext", - "operationId": "patchResourceV1alpha3NamespacedPodSchedulingContextStatus", - "parameters": [ - { - "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", - "in": "query", - "name": "dryRun", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).", - "in": "query", - "name": "fieldManager", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.", - "in": "query", - "name": "fieldValidation", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. 
Force flag must be unset for non-apply patch requests.", - "in": "query", - "name": "force", - "schema": { - "type": "boolean", - "uniqueItems": true - } - } - ], - "requestBody": { - "content": { - "application/apply-patch+yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" - } - }, - "application/json-patch+json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" - } - }, - "application/merge-patch+json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" - } - }, - "application/strategic-merge-patch+json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" - } - } - }, - "required": true - }, - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - } - }, - "description": "OK" - }, - "201": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - } - }, - "description": "Created" - }, - "401": { - "description": "Unauthorized" - } - }, - "tags": [ - "resource_v1alpha3" - ], - "x-kubernetes-action": "patch", - "x-kubernetes-group-version-kind": { - "group": "resource.k8s.io", - "kind": "PodSchedulingContext", - "version": "v1alpha3" - } - }, - "put": { - "description": "replace status of the specified PodSchedulingContext", - "operationId": "replaceResourceV1alpha3NamespacedPodSchedulingContextStatus", - "parameters": [ - { - "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", - "in": "query", - "name": "dryRun", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.", - "in": "query", - "name": "fieldManager", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. 
The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.", - "in": "query", - "name": "fieldValidation", - "schema": { - "type": "string", - "uniqueItems": true - } - } - ], - "requestBody": { - "content": { - "*/*": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - } - }, - "required": true - }, - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - } - }, - "description": "OK" - }, - "201": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContext" - } - } - }, - "description": "Created" - }, - "401": { - "description": "Unauthorized" - } - }, - "tags": [ - "resource_v1alpha3" - ], - "x-kubernetes-action": "put", - "x-kubernetes-group-version-kind": { - "group": "resource.k8s.io", - "kind": "PodSchedulingContext", - "version": "v1alpha3" - } - } - }, "/apis/resource.k8s.io/v1alpha3/namespaces/{namespace}/resourceclaims": { "delete": { "description": "delete collection of ResourceClaim", @@ -6318,157 +5018,6 @@ } } }, - "/apis/resource.k8s.io/v1alpha3/podschedulingcontexts": { - "get": { - "description": "list or watch objects of kind PodSchedulingContext", - "operationId": "listResourceV1alpha3PodSchedulingContextForAllNamespaces", - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContextList" - } - }, - "application/json;stream=watch": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContextList" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContextList" - } - }, - "application/vnd.kubernetes.protobuf;stream=watch": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContextList" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.resource.v1alpha3.PodSchedulingContextList" - } - } - }, - "description": "OK" - }, - "401": { - "description": "Unauthorized" - } - }, - "tags": [ - "resource_v1alpha3" - ], - "x-kubernetes-action": "list", - "x-kubernetes-group-version-kind": { - "group": "resource.k8s.io", - "kind": "PodSchedulingContext", - "version": "v1alpha3" - } - }, - "parameters": [ - { - "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". 
Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.", - "in": "query", - "name": "allowWatchBookmarks", - "schema": { - "type": "boolean", - "uniqueItems": true - } - }, - { - "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", - "in": "query", - "name": "continue", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", - "in": "query", - "name": "fieldSelector", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", - "in": "query", - "name": "labelSelector", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. 
If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", - "in": "query", - "name": "limit", - "schema": { - "type": "integer", - "uniqueItems": true - } - }, - { - "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", - "in": "query", - "name": "pretty", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", - "in": "query", - "name": "resourceVersion", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", - "in": "query", - "name": "resourceVersionMatch", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "`sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched.\n\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan\n is interpreted as \"data at least as new as the provided `resourceVersion`\"\n and the bookmark event is send when the state is synced\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\n If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the\n bookmark event is send when the state is synced at least to the moment\n when request started being processed.\n- `resourceVersionMatch` set to any other value or unset\n Invalid error is returned.\n\nDefaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.", - "in": "query", - "name": "sendInitialEvents", - "schema": { - "type": "boolean", - "uniqueItems": true - } - }, - { - "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", - "in": "query", - "name": "timeoutSeconds", - "schema": { - "type": "integer", - "uniqueItems": true - } - }, - { - "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. 
Specify resourceVersion.", - "in": "query", - "name": "watch", - "schema": { - "type": "boolean", - "uniqueItems": true - } - } - ] - }, "/apis/resource.k8s.io/v1alpha3/resourceclaims": { "get": { "description": "list or watch objects of kind ResourceClaim", @@ -7894,338 +6443,6 @@ } ] }, - "/apis/resource.k8s.io/v1alpha3/watch/namespaces/{namespace}/podschedulingcontexts": { - "get": { - "description": "watch individual changes to a list of PodSchedulingContext. deprecated: use the 'watch' parameter with a list operation instead.", - "operationId": "watchResourceV1alpha3NamespacedPodSchedulingContextList", - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - }, - "application/json;stream=watch": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - }, - "application/vnd.kubernetes.protobuf;stream=watch": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - } - }, - "description": "OK" - }, - "401": { - "description": "Unauthorized" - } - }, - "tags": [ - "resource_v1alpha3" - ], - "x-kubernetes-action": "watchlist", - "x-kubernetes-group-version-kind": { - "group": "resource.k8s.io", - "kind": "PodSchedulingContext", - "version": "v1alpha3" - } - }, - "parameters": [ - { - "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.", - "in": "query", - "name": "allowWatchBookmarks", - "schema": { - "type": "boolean", - "uniqueItems": true - } - }, - { - "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. 
Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", - "in": "query", - "name": "continue", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", - "in": "query", - "name": "fieldSelector", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", - "in": "query", - "name": "labelSelector", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", - "in": "query", - "name": "limit", - "schema": { - "type": "integer", - "uniqueItems": true - } - }, - { - "description": "object name and auth scope, such as for teams and projects", - "in": "path", - "name": "namespace", - "required": true, - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", - "in": "query", - "name": "pretty", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", - "in": "query", - "name": "resourceVersion", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", - "in": "query", - "name": "resourceVersionMatch", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "`sendInitialEvents=true` may be set together with `watch=true`. 
In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched.\n\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan\n is interpreted as \"data at least as new as the provided `resourceVersion`\"\n and the bookmark event is send when the state is synced\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\n If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the\n bookmark event is send when the state is synced at least to the moment\n when request started being processed.\n- `resourceVersionMatch` set to any other value or unset\n Invalid error is returned.\n\nDefaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.", - "in": "query", - "name": "sendInitialEvents", - "schema": { - "type": "boolean", - "uniqueItems": true - } - }, - { - "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", - "in": "query", - "name": "timeoutSeconds", - "schema": { - "type": "integer", - "uniqueItems": true - } - }, - { - "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", - "in": "query", - "name": "watch", - "schema": { - "type": "boolean", - "uniqueItems": true - } - } - ] - }, - "/apis/resource.k8s.io/v1alpha3/watch/namespaces/{namespace}/podschedulingcontexts/{name}": { - "get": { - "description": "watch changes to an object of kind PodSchedulingContext. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.", - "operationId": "watchResourceV1alpha3NamespacedPodSchedulingContext", - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - }, - "application/json;stream=watch": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - }, - "application/vnd.kubernetes.protobuf;stream=watch": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - } - }, - "description": "OK" - }, - "401": { - "description": "Unauthorized" - } - }, - "tags": [ - "resource_v1alpha3" - ], - "x-kubernetes-action": "watch", - "x-kubernetes-group-version-kind": { - "group": "resource.k8s.io", - "kind": "PodSchedulingContext", - "version": "v1alpha3" - } - }, - "parameters": [ - { - "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". 
Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.", - "in": "query", - "name": "allowWatchBookmarks", - "schema": { - "type": "boolean", - "uniqueItems": true - } - }, - { - "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", - "in": "query", - "name": "continue", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", - "in": "query", - "name": "fieldSelector", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", - "in": "query", - "name": "labelSelector", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. 
If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", - "in": "query", - "name": "limit", - "schema": { - "type": "integer", - "uniqueItems": true - } - }, - { - "description": "name of the PodSchedulingContext", - "in": "path", - "name": "name", - "required": true, - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "object name and auth scope, such as for teams and projects", - "in": "path", - "name": "namespace", - "required": true, - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", - "in": "query", - "name": "pretty", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", - "in": "query", - "name": "resourceVersion", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", - "in": "query", - "name": "resourceVersionMatch", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "`sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched.\n\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan\n is interpreted as \"data at least as new as the provided `resourceVersion`\"\n and the bookmark event is send when the state is synced\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\n If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the\n bookmark event is send when the state is synced at least to the moment\n when request started being processed.\n- `resourceVersionMatch` set to any other value or unset\n Invalid error is returned.\n\nDefaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.", - "in": "query", - "name": "sendInitialEvents", - "schema": { - "type": "boolean", - "uniqueItems": true - } - }, - { - "description": "Timeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity.", - "in": "query", - "name": "timeoutSeconds", - "schema": { - "type": "integer", - "uniqueItems": true - } - }, - { - "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", - "in": "query", - "name": "watch", - "schema": { - "type": "boolean", - "uniqueItems": true - } - } - ] - }, "/apis/resource.k8s.io/v1alpha3/watch/namespaces/{namespace}/resourceclaims": { "get": { "description": "watch individual changes to a list of ResourceClaim. deprecated: use the 'watch' parameter with a list operation instead.", @@ -8890,157 +7107,6 @@ } ] }, - "/apis/resource.k8s.io/v1alpha3/watch/podschedulingcontexts": { - "get": { - "description": "watch individual changes to a list of PodSchedulingContext. deprecated: use the 'watch' parameter with a list operation instead.", - "operationId": "watchResourceV1alpha3PodSchedulingContextListForAllNamespaces", - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - }, - "application/json;stream=watch": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - }, - "application/vnd.kubernetes.protobuf;stream=watch": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - } - }, - "description": "OK" - }, - "401": { - "description": "Unauthorized" - } - }, - "tags": [ - "resource_v1alpha3" - ], - "x-kubernetes-action": "watchlist", - "x-kubernetes-group-version-kind": { - "group": "resource.k8s.io", - "kind": "PodSchedulingContext", - "version": "v1alpha3" - } - }, - "parameters": [ - { - "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.", - "in": "query", - "name": "allowWatchBookmarks", - "schema": { - "type": "boolean", - "uniqueItems": true - } - }, - { - "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", - "in": "query", - "name": "continue", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", - "in": "query", - "name": "fieldSelector", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", - "in": "query", - "name": "labelSelector", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", - "in": "query", - "name": "limit", - "schema": { - "type": "integer", - "uniqueItems": true - } - }, - { - "description": "If 'true', then the output is pretty printed. Defaults to 'false' unless the user-agent indicates a browser or command-line HTTP tool (curl and wget).", - "in": "query", - "name": "pretty", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", - "in": "query", - "name": "resourceVersion", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. 
It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", - "in": "query", - "name": "resourceVersionMatch", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "`sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched.\n\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan\n is interpreted as \"data at least as new as the provided `resourceVersion`\"\n and the bookmark event is send when the state is synced\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\n If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the\n bookmark event is send when the state is synced at least to the moment\n when request started being processed.\n- `resourceVersionMatch` set to any other value or unset\n Invalid error is returned.\n\nDefaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.", - "in": "query", - "name": "sendInitialEvents", - "schema": { - "type": "boolean", - "uniqueItems": true - } - }, - { - "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", - "in": "query", - "name": "timeoutSeconds", - "schema": { - "type": "integer", - "uniqueItems": true - } - }, - { - "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", - "in": "query", - "name": "watch", - "schema": { - "type": "boolean", - "uniqueItems": true - } - } - ] - }, "/apis/resource.k8s.io/v1alpha3/watch/resourceclaims": { "get": { "description": "watch individual changes to a list of ResourceClaim. 
deprecated: use the 'watch' parameter with a list operation instead.", diff --git a/cmd/kube-controller-manager/app/core.go b/cmd/kube-controller-manager/app/core.go index b3637f273af..d0a967edaeb 100644 --- a/cmd/kube-controller-manager/app/core.go +++ b/cmd/kube-controller-manager/app/core.go @@ -467,7 +467,6 @@ func startResourceClaimController(ctx context.Context, controllerContext Control klog.FromContext(ctx), controllerContext.ClientBuilder.ClientOrDie("resource-claim-controller"), controllerContext.InformerFactory.Core().V1().Pods(), - controllerContext.InformerFactory.Resource().V1alpha3().PodSchedulingContexts(), controllerContext.InformerFactory.Resource().V1alpha3().ResourceClaims(), controllerContext.InformerFactory.Resource().V1alpha3().ResourceClaimTemplates()) if err != nil { diff --git a/pkg/apis/resource/register.go b/pkg/apis/resource/register.go index 37136ac1f9e..986dc2168be 100644 --- a/pkg/apis/resource/register.go +++ b/pkg/apis/resource/register.go @@ -58,8 +58,6 @@ func addKnownTypes(scheme *runtime.Scheme) error { &ResourceClaimList{}, &ResourceClaimTemplate{}, &ResourceClaimTemplateList{}, - &PodSchedulingContext{}, - &PodSchedulingContextList{}, &ResourceSlice{}, &ResourceSliceList{}, ) diff --git a/pkg/apis/resource/types.go b/pkg/apis/resource/types.go index 2c03a304fdf..dc63be713f8 100644 --- a/pkg/apis/resource/types.go +++ b/pkg/apis/resource/types.go @@ -324,19 +324,10 @@ type ResourceClaimSpec struct { // +optional Devices DeviceClaim - // Controller is the name of the DRA driver that is meant - // to handle allocation of this claim. If empty, allocation is handled - // by the scheduler while scheduling a pod. - // - // Must be a DNS subdomain and should end with a DNS domain owned by the - // vendor of the driver. - // - // This is an alpha field and requires enabling the DRAControlPlaneController - // feature gate. - // - // +optional - // +featureGate=DRAControlPlaneController - Controller string + // Controller is tombstoned since Kubernetes 1.32 where + // it got removed. May be reused once decoding v1alpha3 is no longer + // supported. + // Controller string } // DeviceClaim defines how to request devices with a ResourceClaim. @@ -652,19 +643,10 @@ type ResourceClaimStatus struct { // +patchMergeKey=uid ReservedFor []ResourceClaimConsumerReference - // Indicates that a claim is to be deallocated. While this is set, - // no new consumers may be added to ReservedFor. - // - // This is only used if the claim needs to be deallocated by a DRA driver. - // That driver then must deallocate this claim and reset the field - // together with clearing the Allocation field. - // - // This is an alpha field and requires enabling the DRAControlPlaneController - // feature gate. - // - // +optional - // +featureGate=DRAControlPlaneController - DeallocationRequested bool + // DeallocationRequested is tombstoned since Kubernetes 1.32 where + // it got removed. May be reused once decoding v1alpha3 is no longer + // supported. + // DeallocationRequested bool } // ReservedForMaxSize is the maximum number of entries in @@ -704,21 +686,10 @@ type AllocationResult struct { // +optional NodeSelector *core.NodeSelector - // Controller is the name of the DRA driver which handled the - // allocation. That driver is also responsible for deallocating the - // claim. It is empty when the claim can be deallocated without - // involving a driver. 
- // - // A driver may allocate devices provided by other drivers, so this - // driver name here can be different from the driver names listed for - // the results. - // - // This is an alpha field and requires enabling the DRAControlPlaneController - // feature gate. - // - // +optional - // +featureGate=DRAControlPlaneController - Controller string + // Controller is tombstoned since Kubernetes 1.32 where + // it got removed. May be reused once decoding v1alpha3 is no longer + // supported. + // Controller string } // DeviceAllocationResult is the result of allocating devices. @@ -823,104 +794,6 @@ type ResourceClaimList struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// PodSchedulingContext objects hold information that is needed to schedule -// a Pod with ResourceClaims that use "WaitForFirstConsumer" allocation -// mode. -// -// This is an alpha type and requires enabling the DRAControlPlaneController -// feature gate. -type PodSchedulingContext struct { - metav1.TypeMeta - // Standard object metadata - // +optional - metav1.ObjectMeta - - // Spec describes where resources for the Pod are needed. - Spec PodSchedulingContextSpec - - // Status describes where resources for the Pod can be allocated. - // - // +optional - Status PodSchedulingContextStatus -} - -// PodSchedulingContextSpec describes where resources for the Pod are needed. -type PodSchedulingContextSpec struct { - // SelectedNode is the node for which allocation of ResourceClaims that - // are referenced by the Pod and that use "WaitForFirstConsumer" - // allocation is to be attempted. - // - // +optional - SelectedNode string - - // PotentialNodes lists nodes where the Pod might be able to run. - // - // The size of this field is limited to 128. This is large enough for - // many clusters. Larger clusters may need more attempts to find a node - // that suits all pending resources. This may get increased in the - // future, but not reduced. - // - // +optional - // +listType=atomic - PotentialNodes []string -} - -// PodSchedulingContextStatus describes where resources for the Pod can be allocated. -type PodSchedulingContextStatus struct { - // ResourceClaims describes resource availability for each - // pod.spec.resourceClaim entry where the corresponding ResourceClaim - // uses "WaitForFirstConsumer" allocation mode. - // - // +listType=map - // +listMapKey=name - // +optional - ResourceClaims []ResourceClaimSchedulingStatus - - // If there ever is a need to support other kinds of resources - // than ResourceClaim, then new fields could get added here - // for those other resources. -} - -// ResourceClaimSchedulingStatus contains information about one particular -// ResourceClaim with "WaitForFirstConsumer" allocation mode. -type ResourceClaimSchedulingStatus struct { - // Name matches the pod.spec.resourceClaims[*].Name field. - // - // +required - Name string - - // UnsuitableNodes lists nodes that the ResourceClaim cannot be - // allocated for. - // - // The size of this field is limited to 128, the same as for - // PodSchedulingSpec.PotentialNodes. This may get increased in the - // future, but not reduced. - // - // +optional - // +listType=atomic - UnsuitableNodes []string -} - -// PodSchedulingNodeListMaxSize defines the maximum number of entries in the -// node lists that are stored in PodSchedulingContext objects. This limit is part -// of the API. 
-const PodSchedulingNodeListMaxSize = 128 - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodSchedulingContextList is a collection of Pod scheduling objects. -type PodSchedulingContextList struct { - metav1.TypeMeta - // Standard list metadata - // +optional - metav1.ListMeta - - // Items is the list of PodSchedulingContext objects. - Items []PodSchedulingContext -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - // DeviceClass is a vendor- or admin-provided resource that contains // device configuration and selectors. It can be referenced in // the device requests of a claim to apply these presets. @@ -964,21 +837,10 @@ type DeviceClassSpec struct { // +listType=atomic Config []DeviceClassConfiguration - // Only nodes matching the selector will be considered by the scheduler - // when trying to find a Node that fits a Pod when that Pod uses - // a claim that has not been allocated yet *and* that claim - // gets allocated through a control plane controller. It is ignored - // when the claim does not use a control plane controller - // for allocation. - // - // Setting this field is optional. If unset, all Nodes are candidates. - // - // This is an alpha field and requires enabling the DRAControlPlaneController - // feature gate. - // - // +optional - // +featureGate=DRAControlPlaneController - SuitableNodes *core.NodeSelector + // SuitableNodes is tombstoned since Kubernetes 1.32 where + // it got removed. May be reused once decoding v1alpha3 is no longer + // supported. + // SuitableNodes *v1.NodeSelector `json:"suitableNodes,omitempty" protobuf:"bytes,3,opt,name=suitableNodes"` } // DeviceClassConfiguration is used in DeviceClass. diff --git a/pkg/apis/resource/v1alpha3/zz_generated.conversion.go b/pkg/apis/resource/v1alpha3/zz_generated.conversion.go index 15725f7a0a7..a998aac2ac5 100644 --- a/pkg/apis/resource/v1alpha3/zz_generated.conversion.go +++ b/pkg/apis/resource/v1alpha3/zz_generated.conversion.go @@ -231,46 +231,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*resourcev1alpha3.PodSchedulingContext)(nil), (*resource.PodSchedulingContext)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_PodSchedulingContext_To_resource_PodSchedulingContext(a.(*resourcev1alpha3.PodSchedulingContext), b.(*resource.PodSchedulingContext), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*resource.PodSchedulingContext)(nil), (*resourcev1alpha3.PodSchedulingContext)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_resource_PodSchedulingContext_To_v1alpha3_PodSchedulingContext(a.(*resource.PodSchedulingContext), b.(*resourcev1alpha3.PodSchedulingContext), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*resourcev1alpha3.PodSchedulingContextList)(nil), (*resource.PodSchedulingContextList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_PodSchedulingContextList_To_resource_PodSchedulingContextList(a.(*resourcev1alpha3.PodSchedulingContextList), b.(*resource.PodSchedulingContextList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*resource.PodSchedulingContextList)(nil), (*resourcev1alpha3.PodSchedulingContextList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_resource_PodSchedulingContextList_To_v1alpha3_PodSchedulingContextList(a.(*resource.PodSchedulingContextList), b.(*resourcev1alpha3.PodSchedulingContextList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*resourcev1alpha3.PodSchedulingContextSpec)(nil), (*resource.PodSchedulingContextSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_PodSchedulingContextSpec_To_resource_PodSchedulingContextSpec(a.(*resourcev1alpha3.PodSchedulingContextSpec), b.(*resource.PodSchedulingContextSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*resource.PodSchedulingContextSpec)(nil), (*resourcev1alpha3.PodSchedulingContextSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_resource_PodSchedulingContextSpec_To_v1alpha3_PodSchedulingContextSpec(a.(*resource.PodSchedulingContextSpec), b.(*resourcev1alpha3.PodSchedulingContextSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*resourcev1alpha3.PodSchedulingContextStatus)(nil), (*resource.PodSchedulingContextStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_PodSchedulingContextStatus_To_resource_PodSchedulingContextStatus(a.(*resourcev1alpha3.PodSchedulingContextStatus), b.(*resource.PodSchedulingContextStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*resource.PodSchedulingContextStatus)(nil), (*resourcev1alpha3.PodSchedulingContextStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_resource_PodSchedulingContextStatus_To_v1alpha3_PodSchedulingContextStatus(a.(*resource.PodSchedulingContextStatus), b.(*resourcev1alpha3.PodSchedulingContextStatus), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*resourcev1alpha3.ResourceClaim)(nil), (*resource.ResourceClaim)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha3_ResourceClaim_To_resource_ResourceClaim(a.(*resourcev1alpha3.ResourceClaim), b.(*resource.ResourceClaim), scope) }); err != nil { @@ -301,16 +261,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*resourcev1alpha3.ResourceClaimSchedulingStatus)(nil), (*resource.ResourceClaimSchedulingStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_ResourceClaimSchedulingStatus_To_resource_ResourceClaimSchedulingStatus(a.(*resourcev1alpha3.ResourceClaimSchedulingStatus), b.(*resource.ResourceClaimSchedulingStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*resource.ResourceClaimSchedulingStatus)(nil), (*resourcev1alpha3.ResourceClaimSchedulingStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_resource_ResourceClaimSchedulingStatus_To_v1alpha3_ResourceClaimSchedulingStatus(a.(*resource.ResourceClaimSchedulingStatus), b.(*resourcev1alpha3.ResourceClaimSchedulingStatus), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*resourcev1alpha3.ResourceClaimSpec)(nil), (*resource.ResourceClaimSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha3_ResourceClaimSpec_To_resource_ResourceClaimSpec(a.(*resourcev1alpha3.ResourceClaimSpec), b.(*resource.ResourceClaimSpec), scope) }); err != nil { @@ -409,7 +359,6 @@ func 
autoConvert_v1alpha3_AllocationResult_To_resource_AllocationResult(in *reso return err } out.NodeSelector = (*core.NodeSelector)(unsafe.Pointer(in.NodeSelector)) - out.Controller = in.Controller return nil } @@ -423,7 +372,6 @@ func autoConvert_resource_AllocationResult_To_v1alpha3_AllocationResult(in *reso return err } out.NodeSelector = (*v1.NodeSelector)(unsafe.Pointer(in.NodeSelector)) - out.Controller = in.Controller return nil } @@ -697,7 +645,6 @@ func Convert_resource_DeviceClassList_To_v1alpha3_DeviceClassList(in *resource.D func autoConvert_v1alpha3_DeviceClassSpec_To_resource_DeviceClassSpec(in *resourcev1alpha3.DeviceClassSpec, out *resource.DeviceClassSpec, s conversion.Scope) error { out.Selectors = *(*[]resource.DeviceSelector)(unsafe.Pointer(&in.Selectors)) out.Config = *(*[]resource.DeviceClassConfiguration)(unsafe.Pointer(&in.Config)) - out.SuitableNodes = (*core.NodeSelector)(unsafe.Pointer(in.SuitableNodes)) return nil } @@ -709,7 +656,6 @@ func Convert_v1alpha3_DeviceClassSpec_To_resource_DeviceClassSpec(in *resourcev1 func autoConvert_resource_DeviceClassSpec_To_v1alpha3_DeviceClassSpec(in *resource.DeviceClassSpec, out *resourcev1alpha3.DeviceClassSpec, s conversion.Scope) error { out.Selectors = *(*[]resourcev1alpha3.DeviceSelector)(unsafe.Pointer(&in.Selectors)) out.Config = *(*[]resourcev1alpha3.DeviceClassConfiguration)(unsafe.Pointer(&in.Config)) - out.SuitableNodes = (*v1.NodeSelector)(unsafe.Pointer(in.SuitableNodes)) return nil } @@ -858,102 +804,6 @@ func Convert_resource_OpaqueDeviceConfiguration_To_v1alpha3_OpaqueDeviceConfigur return autoConvert_resource_OpaqueDeviceConfiguration_To_v1alpha3_OpaqueDeviceConfiguration(in, out, s) } -func autoConvert_v1alpha3_PodSchedulingContext_To_resource_PodSchedulingContext(in *resourcev1alpha3.PodSchedulingContext, out *resource.PodSchedulingContext, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha3_PodSchedulingContextSpec_To_resource_PodSchedulingContextSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1alpha3_PodSchedulingContextStatus_To_resource_PodSchedulingContextStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha3_PodSchedulingContext_To_resource_PodSchedulingContext is an autogenerated conversion function. -func Convert_v1alpha3_PodSchedulingContext_To_resource_PodSchedulingContext(in *resourcev1alpha3.PodSchedulingContext, out *resource.PodSchedulingContext, s conversion.Scope) error { - return autoConvert_v1alpha3_PodSchedulingContext_To_resource_PodSchedulingContext(in, out, s) -} - -func autoConvert_resource_PodSchedulingContext_To_v1alpha3_PodSchedulingContext(in *resource.PodSchedulingContext, out *resourcev1alpha3.PodSchedulingContext, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_resource_PodSchedulingContextSpec_To_v1alpha3_PodSchedulingContextSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_resource_PodSchedulingContextStatus_To_v1alpha3_PodSchedulingContextStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_resource_PodSchedulingContext_To_v1alpha3_PodSchedulingContext is an autogenerated conversion function. 
-func Convert_resource_PodSchedulingContext_To_v1alpha3_PodSchedulingContext(in *resource.PodSchedulingContext, out *resourcev1alpha3.PodSchedulingContext, s conversion.Scope) error { - return autoConvert_resource_PodSchedulingContext_To_v1alpha3_PodSchedulingContext(in, out, s) -} - -func autoConvert_v1alpha3_PodSchedulingContextList_To_resource_PodSchedulingContextList(in *resourcev1alpha3.PodSchedulingContextList, out *resource.PodSchedulingContextList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]resource.PodSchedulingContext)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1alpha3_PodSchedulingContextList_To_resource_PodSchedulingContextList is an autogenerated conversion function. -func Convert_v1alpha3_PodSchedulingContextList_To_resource_PodSchedulingContextList(in *resourcev1alpha3.PodSchedulingContextList, out *resource.PodSchedulingContextList, s conversion.Scope) error { - return autoConvert_v1alpha3_PodSchedulingContextList_To_resource_PodSchedulingContextList(in, out, s) -} - -func autoConvert_resource_PodSchedulingContextList_To_v1alpha3_PodSchedulingContextList(in *resource.PodSchedulingContextList, out *resourcev1alpha3.PodSchedulingContextList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]resourcev1alpha3.PodSchedulingContext)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_resource_PodSchedulingContextList_To_v1alpha3_PodSchedulingContextList is an autogenerated conversion function. -func Convert_resource_PodSchedulingContextList_To_v1alpha3_PodSchedulingContextList(in *resource.PodSchedulingContextList, out *resourcev1alpha3.PodSchedulingContextList, s conversion.Scope) error { - return autoConvert_resource_PodSchedulingContextList_To_v1alpha3_PodSchedulingContextList(in, out, s) -} - -func autoConvert_v1alpha3_PodSchedulingContextSpec_To_resource_PodSchedulingContextSpec(in *resourcev1alpha3.PodSchedulingContextSpec, out *resource.PodSchedulingContextSpec, s conversion.Scope) error { - out.SelectedNode = in.SelectedNode - out.PotentialNodes = *(*[]string)(unsafe.Pointer(&in.PotentialNodes)) - return nil -} - -// Convert_v1alpha3_PodSchedulingContextSpec_To_resource_PodSchedulingContextSpec is an autogenerated conversion function. -func Convert_v1alpha3_PodSchedulingContextSpec_To_resource_PodSchedulingContextSpec(in *resourcev1alpha3.PodSchedulingContextSpec, out *resource.PodSchedulingContextSpec, s conversion.Scope) error { - return autoConvert_v1alpha3_PodSchedulingContextSpec_To_resource_PodSchedulingContextSpec(in, out, s) -} - -func autoConvert_resource_PodSchedulingContextSpec_To_v1alpha3_PodSchedulingContextSpec(in *resource.PodSchedulingContextSpec, out *resourcev1alpha3.PodSchedulingContextSpec, s conversion.Scope) error { - out.SelectedNode = in.SelectedNode - out.PotentialNodes = *(*[]string)(unsafe.Pointer(&in.PotentialNodes)) - return nil -} - -// Convert_resource_PodSchedulingContextSpec_To_v1alpha3_PodSchedulingContextSpec is an autogenerated conversion function. 
-func Convert_resource_PodSchedulingContextSpec_To_v1alpha3_PodSchedulingContextSpec(in *resource.PodSchedulingContextSpec, out *resourcev1alpha3.PodSchedulingContextSpec, s conversion.Scope) error { - return autoConvert_resource_PodSchedulingContextSpec_To_v1alpha3_PodSchedulingContextSpec(in, out, s) -} - -func autoConvert_v1alpha3_PodSchedulingContextStatus_To_resource_PodSchedulingContextStatus(in *resourcev1alpha3.PodSchedulingContextStatus, out *resource.PodSchedulingContextStatus, s conversion.Scope) error { - out.ResourceClaims = *(*[]resource.ResourceClaimSchedulingStatus)(unsafe.Pointer(&in.ResourceClaims)) - return nil -} - -// Convert_v1alpha3_PodSchedulingContextStatus_To_resource_PodSchedulingContextStatus is an autogenerated conversion function. -func Convert_v1alpha3_PodSchedulingContextStatus_To_resource_PodSchedulingContextStatus(in *resourcev1alpha3.PodSchedulingContextStatus, out *resource.PodSchedulingContextStatus, s conversion.Scope) error { - return autoConvert_v1alpha3_PodSchedulingContextStatus_To_resource_PodSchedulingContextStatus(in, out, s) -} - -func autoConvert_resource_PodSchedulingContextStatus_To_v1alpha3_PodSchedulingContextStatus(in *resource.PodSchedulingContextStatus, out *resourcev1alpha3.PodSchedulingContextStatus, s conversion.Scope) error { - out.ResourceClaims = *(*[]resourcev1alpha3.ResourceClaimSchedulingStatus)(unsafe.Pointer(&in.ResourceClaims)) - return nil -} - -// Convert_resource_PodSchedulingContextStatus_To_v1alpha3_PodSchedulingContextStatus is an autogenerated conversion function. -func Convert_resource_PodSchedulingContextStatus_To_v1alpha3_PodSchedulingContextStatus(in *resource.PodSchedulingContextStatus, out *resourcev1alpha3.PodSchedulingContextStatus, s conversion.Scope) error { - return autoConvert_resource_PodSchedulingContextStatus_To_v1alpha3_PodSchedulingContextStatus(in, out, s) -} - func autoConvert_v1alpha3_ResourceClaim_To_resource_ResourceClaim(in *resourcev1alpha3.ResourceClaim, out *resource.ResourceClaim, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_v1alpha3_ResourceClaimSpec_To_resource_ResourceClaimSpec(&in.Spec, &out.Spec, s); err != nil { @@ -1034,33 +884,10 @@ func Convert_resource_ResourceClaimList_To_v1alpha3_ResourceClaimList(in *resour return autoConvert_resource_ResourceClaimList_To_v1alpha3_ResourceClaimList(in, out, s) } -func autoConvert_v1alpha3_ResourceClaimSchedulingStatus_To_resource_ResourceClaimSchedulingStatus(in *resourcev1alpha3.ResourceClaimSchedulingStatus, out *resource.ResourceClaimSchedulingStatus, s conversion.Scope) error { - out.Name = in.Name - out.UnsuitableNodes = *(*[]string)(unsafe.Pointer(&in.UnsuitableNodes)) - return nil -} - -// Convert_v1alpha3_ResourceClaimSchedulingStatus_To_resource_ResourceClaimSchedulingStatus is an autogenerated conversion function. 
-func Convert_v1alpha3_ResourceClaimSchedulingStatus_To_resource_ResourceClaimSchedulingStatus(in *resourcev1alpha3.ResourceClaimSchedulingStatus, out *resource.ResourceClaimSchedulingStatus, s conversion.Scope) error { - return autoConvert_v1alpha3_ResourceClaimSchedulingStatus_To_resource_ResourceClaimSchedulingStatus(in, out, s) -} - -func autoConvert_resource_ResourceClaimSchedulingStatus_To_v1alpha3_ResourceClaimSchedulingStatus(in *resource.ResourceClaimSchedulingStatus, out *resourcev1alpha3.ResourceClaimSchedulingStatus, s conversion.Scope) error { - out.Name = in.Name - out.UnsuitableNodes = *(*[]string)(unsafe.Pointer(&in.UnsuitableNodes)) - return nil -} - -// Convert_resource_ResourceClaimSchedulingStatus_To_v1alpha3_ResourceClaimSchedulingStatus is an autogenerated conversion function. -func Convert_resource_ResourceClaimSchedulingStatus_To_v1alpha3_ResourceClaimSchedulingStatus(in *resource.ResourceClaimSchedulingStatus, out *resourcev1alpha3.ResourceClaimSchedulingStatus, s conversion.Scope) error { - return autoConvert_resource_ResourceClaimSchedulingStatus_To_v1alpha3_ResourceClaimSchedulingStatus(in, out, s) -} - func autoConvert_v1alpha3_ResourceClaimSpec_To_resource_ResourceClaimSpec(in *resourcev1alpha3.ResourceClaimSpec, out *resource.ResourceClaimSpec, s conversion.Scope) error { if err := Convert_v1alpha3_DeviceClaim_To_resource_DeviceClaim(&in.Devices, &out.Devices, s); err != nil { return err } - out.Controller = in.Controller return nil } @@ -1073,7 +900,6 @@ func autoConvert_resource_ResourceClaimSpec_To_v1alpha3_ResourceClaimSpec(in *re if err := Convert_resource_DeviceClaim_To_v1alpha3_DeviceClaim(&in.Devices, &out.Devices, s); err != nil { return err } - out.Controller = in.Controller return nil } @@ -1085,7 +911,6 @@ func Convert_resource_ResourceClaimSpec_To_v1alpha3_ResourceClaimSpec(in *resour func autoConvert_v1alpha3_ResourceClaimStatus_To_resource_ResourceClaimStatus(in *resourcev1alpha3.ResourceClaimStatus, out *resource.ResourceClaimStatus, s conversion.Scope) error { out.Allocation = (*resource.AllocationResult)(unsafe.Pointer(in.Allocation)) out.ReservedFor = *(*[]resource.ResourceClaimConsumerReference)(unsafe.Pointer(&in.ReservedFor)) - out.DeallocationRequested = in.DeallocationRequested return nil } @@ -1097,7 +922,6 @@ func Convert_v1alpha3_ResourceClaimStatus_To_resource_ResourceClaimStatus(in *re func autoConvert_resource_ResourceClaimStatus_To_v1alpha3_ResourceClaimStatus(in *resource.ResourceClaimStatus, out *resourcev1alpha3.ResourceClaimStatus, s conversion.Scope) error { out.Allocation = (*resourcev1alpha3.AllocationResult)(unsafe.Pointer(in.Allocation)) out.ReservedFor = *(*[]resourcev1alpha3.ResourceClaimConsumerReference)(unsafe.Pointer(&in.ReservedFor)) - out.DeallocationRequested = in.DeallocationRequested return nil } diff --git a/pkg/apis/resource/validation/validation.go b/pkg/apis/resource/validation/validation.go index 4b4567345f5..2e2a4ee8212 100644 --- a/pkg/apis/resource/validation/validation.go +++ b/pkg/apis/resource/validation/validation.go @@ -91,9 +91,6 @@ func ValidateResourceClaimStatusUpdate(resourceClaim, oldClaim *resource.Resourc func validateResourceClaimSpec(spec *resource.ResourceClaimSpec, fldPath *field.Path, stored bool) field.ErrorList { allErrs := field.ErrorList{} allErrs = append(allErrs, validateDeviceClaim(&spec.Devices, fldPath.Child("devices"), stored)...) - if spec.Controller != "" { - allErrs = append(allErrs, validateDriverName(spec.Controller, fldPath.Child("controller"))...) 
- } return allErrs } @@ -268,7 +265,7 @@ func validateResourceClaimStatusUpdate(status, oldStatus *resource.ResourceClaim } else { // Items may be removed from ReservedFor while the claim is meant to be deallocated, // but not added. - if claimDeleted || status.DeallocationRequested { + if claimDeleted { oldSet := sets.New(oldStatus.ReservedFor...) newSet := sets.New(status.ReservedFor...) newItems := newSet.Difference(oldSet) @@ -284,30 +281,6 @@ func validateResourceClaimStatusUpdate(status, oldStatus *resource.ResourceClaim allErrs = append(allErrs, apimachineryvalidation.ValidateImmutableField(status.Allocation, oldStatus.Allocation, fldPath.Child("allocation"))...) } - if !oldStatus.DeallocationRequested && - status.DeallocationRequested && - len(status.ReservedFor) > 0 { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("deallocationRequested"), "deallocation cannot be requested while `reservedFor` is set")) - } - - if status.Allocation == nil && - status.DeallocationRequested { - // Either one or the other field was modified incorrectly. - // For the sake of simplicity this only reports the invalid - // end result. - allErrs = append(allErrs, field.Forbidden(fldPath, "`allocation` must be set when `deallocationRequested` is set")) - } - - // Once deallocation has been requested, that request cannot be removed - // anymore because the deallocation may already have started. The field - // can only get reset by the driver together with removing the - // allocation. - if oldStatus.DeallocationRequested && - !status.DeallocationRequested && - status.Allocation != nil { - allErrs = append(allErrs, field.Forbidden(fldPath.Child("deallocationRequested"), "may not be cleared when `allocation` is set")) - } - return allErrs } @@ -335,9 +308,6 @@ func validateAllocationResult(allocation *resource.AllocationResult, fldPath *fi if allocation.NodeSelector != nil { allErrs = append(allErrs, corevalidation.ValidateNodeSelector(allocation.NodeSelector, fldPath.Child("nodeSelector"))...) } - if allocation.Controller != "" { - allErrs = append(allErrs, validateDriverName(allocation.Controller, fldPath.Child("controller"))...) - } return allErrs } @@ -416,9 +386,6 @@ func validateDeviceClassSpec(spec, oldSpec *resource.DeviceClassSpec, fldPath *f }, fldPath.Child("selectors"))...) allErrs = append(allErrs, validateSlice(spec.Config, resource.DeviceConfigMaxSize, validateDeviceClassConfiguration, fldPath.Child("config"))...) - if spec.SuitableNodes != nil { - allErrs = append(allErrs, corevalidation.ValidateNodeSelector(spec.SuitableNodes, field.NewPath("suitableNodes"))...) - } return allErrs } @@ -426,55 +393,6 @@ func validateDeviceClassConfiguration(config resource.DeviceClassConfiguration, return validateDeviceConfiguration(config.DeviceConfiguration, fldPath) } -// ValidatePodSchedulingContext validates a PodSchedulingContext. -func ValidatePodSchedulingContexts(schedulingCtx *resource.PodSchedulingContext) field.ErrorList { - allErrs := corevalidation.ValidateObjectMeta(&schedulingCtx.ObjectMeta, true, corevalidation.ValidatePodName, field.NewPath("metadata")) - allErrs = append(allErrs, validatePodSchedulingSpec(&schedulingCtx.Spec, field.NewPath("spec"))...) 
- return allErrs -} - -func validatePodSchedulingSpec(spec *resource.PodSchedulingContextSpec, fldPath *field.Path) field.ErrorList { - allErrs := validateSet(spec.PotentialNodes, resource.PodSchedulingNodeListMaxSize, validateNodeName, stringKey, fldPath.Child("potentialNodes")) - return allErrs -} - -// ValidatePodSchedulingContextUpdate tests if an update to PodSchedulingContext is valid. -func ValidatePodSchedulingContextUpdate(schedulingCtx, oldSchedulingCtx *resource.PodSchedulingContext) field.ErrorList { - allErrs := corevalidation.ValidateObjectMetaUpdate(&schedulingCtx.ObjectMeta, &oldSchedulingCtx.ObjectMeta, field.NewPath("metadata")) - allErrs = append(allErrs, ValidatePodSchedulingContexts(schedulingCtx)...) - return allErrs -} - -// ValidatePodSchedulingContextStatusUpdate tests if an update to the status of a PodSchedulingContext is valid. -func ValidatePodSchedulingContextStatusUpdate(schedulingCtx, oldSchedulingCtx *resource.PodSchedulingContext) field.ErrorList { - allErrs := corevalidation.ValidateObjectMetaUpdate(&schedulingCtx.ObjectMeta, &oldSchedulingCtx.ObjectMeta, field.NewPath("metadata")) - allErrs = append(allErrs, validatePodSchedulingStatus(&schedulingCtx.Status, field.NewPath("status"))...) - return allErrs -} - -func validatePodSchedulingStatus(status *resource.PodSchedulingContextStatus, fldPath *field.Path) field.ErrorList { - return validatePodSchedulingClaims(status.ResourceClaims, fldPath.Child("claims")) -} - -func validatePodSchedulingClaims(claimStatuses []resource.ResourceClaimSchedulingStatus, fldPath *field.Path) field.ErrorList { - var allErrs field.ErrorList - names := sets.NewString() - for i, claimStatus := range claimStatuses { - allErrs = append(allErrs, validatePodSchedulingClaim(claimStatus, fldPath.Index(i))...) - if names.Has(claimStatus.Name) { - allErrs = append(allErrs, field.Duplicate(fldPath.Index(i), claimStatus.Name)) - } else { - names.Insert(claimStatus.Name) - } - } - return allErrs -} - -func validatePodSchedulingClaim(status resource.ResourceClaimSchedulingStatus, fldPath *field.Path) field.ErrorList { - allErrs := validateSet(status.UnsuitableNodes, resource.PodSchedulingNodeListMaxSize, validateNodeName, stringKey, fldPath.Child("unsuitableNodes")) - return allErrs -} - // ValidateResourceClaimTemplate validates a ResourceClaimTemplate. func ValidateResourceClaimTemplate(template *resource.ResourceClaimTemplate) field.ErrorList { allErrs := corevalidation.ValidateObjectMeta(&template.ObjectMeta, true, corevalidation.ValidateResourceClaimTemplateName, field.NewPath("metadata")) diff --git a/pkg/apis/resource/validation/validation_deviceclass_test.go b/pkg/apis/resource/validation/validation_deviceclass_test.go index 3acc4be9d18..f51abea4373 100644 --- a/pkg/apis/resource/validation/validation_deviceclass_test.go +++ b/pkg/apis/resource/validation/validation_deviceclass_test.go @@ -23,7 +23,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/resource" "k8s.io/utils/ptr" ) @@ -174,30 +173,6 @@ func TestValidateClass(t *testing.T) { return class }(), }, - "invalid-node-selector": { - wantFailures: field.ErrorList{field.Required(field.NewPath("suitableNodes", "nodeSelectorTerms"), "must have at least one node selector term")}, - class: func() *resource.DeviceClass { - class := testClass(goodName) - class.Spec.SuitableNodes = &core.NodeSelector{ - // Must not be empty. 
- } - return class - }(), - }, - "valid-node-selector": { - class: func() *resource.DeviceClass { - class := testClass(goodName) - class.Spec.SuitableNodes = &core.NodeSelector{ - NodeSelectorTerms: []core.NodeSelectorTerm{{ - MatchExpressions: []core.NodeSelectorRequirement{{ - Key: "foo", - Operator: core.NodeSelectorOpDoesNotExist, - }}, - }}, - } - return class - }(), - }, } for name, scenario := range scenarios { @@ -220,21 +195,6 @@ func TestValidateClassUpdate(t *testing.T) { oldClass: validClass, update: func(class *resource.DeviceClass) *resource.DeviceClass { return class }, }, - "update-node-selector": { - oldClass: validClass, - update: func(class *resource.DeviceClass) *resource.DeviceClass { - class = class.DeepCopy() - class.Spec.SuitableNodes = &core.NodeSelector{ - NodeSelectorTerms: []core.NodeSelectorTerm{{ - MatchExpressions: []core.NodeSelectorRequirement{{ - Key: "foo", - Operator: core.NodeSelectorOpDoesNotExist, - }}, - }}, - } - return class - }, - }, } for name, scenario := range scenarios { diff --git a/pkg/apis/resource/validation/validation_podschedulingcontext_test.go b/pkg/apis/resource/validation/validation_podschedulingcontext_test.go deleted file mode 100644 index 6568c0ac69e..00000000000 --- a/pkg/apis/resource/validation/validation_podschedulingcontext_test.go +++ /dev/null @@ -1,342 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package validation - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/assert" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/kubernetes/pkg/apis/resource" - "k8s.io/utils/pointer" -) - -func testPodSchedulingContexts(name, namespace string, spec resource.PodSchedulingContextSpec) *resource.PodSchedulingContext { - return &resource.PodSchedulingContext{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - Spec: spec, - } -} - -func TestValidatePodSchedulingContexts(t *testing.T) { - goodName := "foo" - goodNS := "ns" - goodPodSchedulingSpec := resource.PodSchedulingContextSpec{} - now := metav1.Now() - badName := "!@#$%^" - badValue := "spaces not allowed" - - scenarios := map[string]struct { - schedulingCtx *resource.PodSchedulingContext - wantFailures field.ErrorList - }{ - "good-schedulingCtx": { - schedulingCtx: testPodSchedulingContexts(goodName, goodNS, goodPodSchedulingSpec), - }, - "missing-name": { - wantFailures: field.ErrorList{field.Required(field.NewPath("metadata", "name"), "name or generateName is required")}, - schedulingCtx: testPodSchedulingContexts("", goodNS, goodPodSchedulingSpec), - }, - "bad-name": { - wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "name"), badName, "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 
'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')")}, - schedulingCtx: testPodSchedulingContexts(badName, goodNS, goodPodSchedulingSpec), - }, - "missing-namespace": { - wantFailures: field.ErrorList{field.Required(field.NewPath("metadata", "namespace"), "")}, - schedulingCtx: testPodSchedulingContexts(goodName, "", goodPodSchedulingSpec), - }, - "generate-name": { - schedulingCtx: func() *resource.PodSchedulingContext { - schedulingCtx := testPodSchedulingContexts(goodName, goodNS, goodPodSchedulingSpec) - schedulingCtx.GenerateName = "pvc-" - return schedulingCtx - }(), - }, - "uid": { - schedulingCtx: func() *resource.PodSchedulingContext { - schedulingCtx := testPodSchedulingContexts(goodName, goodNS, goodPodSchedulingSpec) - schedulingCtx.UID = "ac051fac-2ead-46d9-b8b4-4e0fbeb7455d" - return schedulingCtx - }(), - }, - "resource-version": { - schedulingCtx: func() *resource.PodSchedulingContext { - schedulingCtx := testPodSchedulingContexts(goodName, goodNS, goodPodSchedulingSpec) - schedulingCtx.ResourceVersion = "1" - return schedulingCtx - }(), - }, - "generation": { - schedulingCtx: func() *resource.PodSchedulingContext { - schedulingCtx := testPodSchedulingContexts(goodName, goodNS, goodPodSchedulingSpec) - schedulingCtx.Generation = 100 - return schedulingCtx - }(), - }, - "creation-timestamp": { - schedulingCtx: func() *resource.PodSchedulingContext { - schedulingCtx := testPodSchedulingContexts(goodName, goodNS, goodPodSchedulingSpec) - schedulingCtx.CreationTimestamp = now - return schedulingCtx - }(), - }, - "deletion-grace-period-seconds": { - schedulingCtx: func() *resource.PodSchedulingContext { - schedulingCtx := testPodSchedulingContexts(goodName, goodNS, goodPodSchedulingSpec) - schedulingCtx.DeletionGracePeriodSeconds = pointer.Int64(10) - return schedulingCtx - }(), - }, - "owner-references": { - schedulingCtx: func() *resource.PodSchedulingContext { - schedulingCtx := testPodSchedulingContexts(goodName, goodNS, goodPodSchedulingSpec) - schedulingCtx.OwnerReferences = []metav1.OwnerReference{ - { - APIVersion: "v1", - Kind: "pod", - Name: "foo", - UID: "ac051fac-2ead-46d9-b8b4-4e0fbeb7455d", - }, - } - return schedulingCtx - }(), - }, - "finalizers": { - schedulingCtx: func() *resource.PodSchedulingContext { - schedulingCtx := testPodSchedulingContexts(goodName, goodNS, goodPodSchedulingSpec) - schedulingCtx.Finalizers = []string{ - "example.com/foo", - } - return schedulingCtx - }(), - }, - "managed-fields": { - schedulingCtx: func() *resource.PodSchedulingContext { - schedulingCtx := testPodSchedulingContexts(goodName, goodNS, goodPodSchedulingSpec) - schedulingCtx.ManagedFields = []metav1.ManagedFieldsEntry{ - { - FieldsType: "FieldsV1", - Operation: "Apply", - APIVersion: "apps/v1", - Manager: "foo", - }, - } - return schedulingCtx - }(), - }, - "good-labels": { - schedulingCtx: func() *resource.PodSchedulingContext { - schedulingCtx := testPodSchedulingContexts(goodName, goodNS, goodPodSchedulingSpec) - schedulingCtx.Labels = map[string]string{ - "apps.kubernetes.io/name": "test", - } - return schedulingCtx - }(), - }, - "bad-labels": { - wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "labels"), badValue, "a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 
'MyValue', or 'my_value', or '12345', regex used for validation is '(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?')")}, - schedulingCtx: func() *resource.PodSchedulingContext { - schedulingCtx := testPodSchedulingContexts(goodName, goodNS, goodPodSchedulingSpec) - schedulingCtx.Labels = map[string]string{ - "hello-world": badValue, - } - return schedulingCtx - }(), - }, - "good-annotations": { - schedulingCtx: func() *resource.PodSchedulingContext { - schedulingCtx := testPodSchedulingContexts(goodName, goodNS, goodPodSchedulingSpec) - schedulingCtx.Annotations = map[string]string{ - "foo": "bar", - } - return schedulingCtx - }(), - }, - "bad-annotations": { - wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "annotations"), badName, "name part must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]')")}, - schedulingCtx: func() *resource.PodSchedulingContext { - schedulingCtx := testPodSchedulingContexts(goodName, goodNS, goodPodSchedulingSpec) - schedulingCtx.Annotations = map[string]string{ - badName: "hello world", - } - return schedulingCtx - }(), - }, - } - - for name, scenario := range scenarios { - t.Run(name, func(t *testing.T) { - errs := ValidatePodSchedulingContexts(scenario.schedulingCtx) - assert.Equal(t, scenario.wantFailures, errs) - }) - } -} - -func TestValidatePodSchedulingUpdate(t *testing.T) { - validScheduling := testPodSchedulingContexts("foo", "ns", resource.PodSchedulingContextSpec{}) - badName := "!@#$%^" - - scenarios := map[string]struct { - oldScheduling *resource.PodSchedulingContext - update func(schedulingCtx *resource.PodSchedulingContext) *resource.PodSchedulingContext - wantFailures field.ErrorList - }{ - "valid-no-op-update": { - oldScheduling: validScheduling, - update: func(schedulingCtx *resource.PodSchedulingContext) *resource.PodSchedulingContext { - return schedulingCtx - }, - }, - "add-selected-node": { - oldScheduling: validScheduling, - update: func(schedulingCtx *resource.PodSchedulingContext) *resource.PodSchedulingContext { - schedulingCtx.Spec.SelectedNode = "worker1" - return schedulingCtx - }, - }, - "add-potential-nodes": { - oldScheduling: validScheduling, - update: func(schedulingCtx *resource.PodSchedulingContext) *resource.PodSchedulingContext { - for i := 0; i < resource.PodSchedulingNodeListMaxSize; i++ { - schedulingCtx.Spec.PotentialNodes = append(schedulingCtx.Spec.PotentialNodes, fmt.Sprintf("worker%d", i)) - } - return schedulingCtx - }, - }, - "invalid-potential-nodes-too-long": { - wantFailures: field.ErrorList{field.TooLongMaxLength(field.NewPath("spec", "potentialNodes"), 129, resource.PodSchedulingNodeListMaxSize)}, - oldScheduling: validScheduling, - update: func(schedulingCtx *resource.PodSchedulingContext) *resource.PodSchedulingContext { - for i := 0; i < resource.PodSchedulingNodeListMaxSize+1; i++ { - schedulingCtx.Spec.PotentialNodes = append(schedulingCtx.Spec.PotentialNodes, fmt.Sprintf("worker%d", i)) - } - return schedulingCtx - }, - }, - "invalid-potential-nodes-name": { - wantFailures: field.ErrorList{field.Invalid(field.NewPath("spec", "potentialNodes").Index(0), badName, "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 
'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')")}, - oldScheduling: validScheduling, - update: func(schedulingCtx *resource.PodSchedulingContext) *resource.PodSchedulingContext { - schedulingCtx.Spec.PotentialNodes = append(schedulingCtx.Spec.PotentialNodes, badName) - return schedulingCtx - }, - }, - } - - for name, scenario := range scenarios { - t.Run(name, func(t *testing.T) { - scenario.oldScheduling.ResourceVersion = "1" - errs := ValidatePodSchedulingContextUpdate(scenario.update(scenario.oldScheduling.DeepCopy()), scenario.oldScheduling) - assert.Equal(t, scenario.wantFailures, errs) - }) - } -} - -func TestValidatePodSchedulingStatusUpdate(t *testing.T) { - validScheduling := testPodSchedulingContexts("foo", "ns", resource.PodSchedulingContextSpec{}) - badName := "!@#$%^" - - scenarios := map[string]struct { - oldScheduling *resource.PodSchedulingContext - update func(schedulingCtx *resource.PodSchedulingContext) *resource.PodSchedulingContext - wantFailures field.ErrorList - }{ - "valid-no-op-update": { - oldScheduling: validScheduling, - update: func(schedulingCtx *resource.PodSchedulingContext) *resource.PodSchedulingContext { - return schedulingCtx - }, - }, - "add-claim-status": { - oldScheduling: validScheduling, - update: func(schedulingCtx *resource.PodSchedulingContext) *resource.PodSchedulingContext { - schedulingCtx.Status.ResourceClaims = append(schedulingCtx.Status.ResourceClaims, - resource.ResourceClaimSchedulingStatus{ - Name: "my-claim", - }, - ) - for i := 0; i < resource.PodSchedulingNodeListMaxSize; i++ { - schedulingCtx.Status.ResourceClaims[0].UnsuitableNodes = append( - schedulingCtx.Status.ResourceClaims[0].UnsuitableNodes, - fmt.Sprintf("worker%d", i), - ) - } - return schedulingCtx - }, - }, - "invalid-duplicated-claim-status": { - wantFailures: field.ErrorList{field.Duplicate(field.NewPath("status", "claims").Index(1), "my-claim")}, - oldScheduling: validScheduling, - update: func(schedulingCtx *resource.PodSchedulingContext) *resource.PodSchedulingContext { - for i := 0; i < 2; i++ { - schedulingCtx.Status.ResourceClaims = append(schedulingCtx.Status.ResourceClaims, - resource.ResourceClaimSchedulingStatus{Name: "my-claim"}, - ) - } - return schedulingCtx - }, - }, - "invalid-too-long-claim-status": { - wantFailures: field.ErrorList{field.TooLongMaxLength(field.NewPath("status", "claims").Index(0).Child("unsuitableNodes"), 129, resource.PodSchedulingNodeListMaxSize)}, - oldScheduling: validScheduling, - update: func(schedulingCtx *resource.PodSchedulingContext) *resource.PodSchedulingContext { - schedulingCtx.Status.ResourceClaims = append(schedulingCtx.Status.ResourceClaims, - resource.ResourceClaimSchedulingStatus{ - Name: "my-claim", - }, - ) - for i := 0; i < resource.PodSchedulingNodeListMaxSize+1; i++ { - schedulingCtx.Status.ResourceClaims[0].UnsuitableNodes = append( - schedulingCtx.Status.ResourceClaims[0].UnsuitableNodes, - fmt.Sprintf("worker%d", i), - ) - } - return schedulingCtx - }, - }, - "invalid-node-name": { - wantFailures: field.ErrorList{field.Invalid(field.NewPath("status", "claims").Index(0).Child("unsuitableNodes").Index(0), badName, "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 
'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')")}, - oldScheduling: validScheduling, - update: func(schedulingCtx *resource.PodSchedulingContext) *resource.PodSchedulingContext { - schedulingCtx.Status.ResourceClaims = append(schedulingCtx.Status.ResourceClaims, - resource.ResourceClaimSchedulingStatus{ - Name: "my-claim", - }, - ) - schedulingCtx.Status.ResourceClaims[0].UnsuitableNodes = append( - schedulingCtx.Status.ResourceClaims[0].UnsuitableNodes, - badName, - ) - return schedulingCtx - }, - }, - } - - for name, scenario := range scenarios { - t.Run(name, func(t *testing.T) { - scenario.oldScheduling.ResourceVersion = "1" - errs := ValidatePodSchedulingContextStatusUpdate(scenario.update(scenario.oldScheduling.DeepCopy()), scenario.oldScheduling) - assert.Equal(t, scenario.wantFailures, errs) - }) - } -} diff --git a/pkg/apis/resource/validation/validation_resourceclaim_test.go b/pkg/apis/resource/validation/validation_resourceclaim_test.go index 64c1c75797e..1d8afee6b8f 100644 --- a/pkg/apis/resource/validation/validation_resourceclaim_test.go +++ b/pkg/apis/resource/validation/validation_resourceclaim_test.go @@ -569,55 +569,6 @@ func TestValidateClaimStatusUpdate(t *testing.T) { return claim }, }, - "invalid-reserved-deallocation-requested": { - wantFailures: field.ErrorList{field.Forbidden(field.NewPath("status", "reservedFor"), "new entries may not be added while `deallocationRequested` or `deletionTimestamp` are set")}, - oldClaim: func() *resource.ResourceClaim { - claim := validAllocatedClaim.DeepCopy() - claim.Status.DeallocationRequested = true - return claim - }(), - update: func(claim *resource.ResourceClaim) *resource.ResourceClaim { - claim.Status.ReservedFor = []resource.ResourceClaimConsumerReference{ - { - Resource: "pods", - Name: "foo", - UID: "1", - }, - } - return claim - }, - }, - "add-deallocation-requested": { - oldClaim: validAllocatedClaim, - update: func(claim *resource.ResourceClaim) *resource.ResourceClaim { - claim.Status.DeallocationRequested = true - return claim - }, - }, - "remove-allocation": { - oldClaim: func() *resource.ResourceClaim { - claim := validAllocatedClaim.DeepCopy() - claim.Status.DeallocationRequested = true - return claim - }(), - update: func(claim *resource.ResourceClaim) *resource.ResourceClaim { - claim.Status.DeallocationRequested = false - claim.Status.Allocation = nil - return claim - }, - }, - "invalid-deallocation-requested-removal": { - wantFailures: field.ErrorList{field.Forbidden(field.NewPath("status", "deallocationRequested"), "may not be cleared when `allocation` is set")}, - oldClaim: func() *resource.ResourceClaim { - claim := validAllocatedClaim.DeepCopy() - claim.Status.DeallocationRequested = true - return claim - }(), - update: func(claim *resource.ResourceClaim) *resource.ResourceClaim { - claim.Status.DeallocationRequested = false - return claim - }, - }, "invalid-allocation-modification": { wantFailures: field.ErrorList{field.Invalid(field.NewPath("status.allocation"), func() *resource.AllocationResult { claim := validAllocatedClaim.DeepCopy() @@ -630,44 +581,6 @@ func TestValidateClaimStatusUpdate(t *testing.T) { return claim }, }, - "invalid-deallocation-requested-in-use": { - wantFailures: field.ErrorList{field.Forbidden(field.NewPath("status", "deallocationRequested"), "deallocation cannot be requested while `reservedFor` is set")}, - oldClaim: func() *resource.ResourceClaim { - claim := validAllocatedClaim.DeepCopy() - 
claim.Status.ReservedFor = []resource.ResourceClaimConsumerReference{ - { - Resource: "pods", - Name: "foo", - UID: "1", - }, - } - return claim - }(), - update: func(claim *resource.ResourceClaim) *resource.ResourceClaim { - claim.Status.DeallocationRequested = true - return claim - }, - }, - "invalid-deallocation-not-allocated": { - wantFailures: field.ErrorList{field.Forbidden(field.NewPath("status"), "`allocation` must be set when `deallocationRequested` is set")}, - oldClaim: validClaim, - update: func(claim *resource.ResourceClaim) *resource.ResourceClaim { - claim.Status.DeallocationRequested = true - return claim - }, - }, - "invalid-allocation-removal-not-reset": { - wantFailures: field.ErrorList{field.Forbidden(field.NewPath("status"), "`allocation` must be set when `deallocationRequested` is set")}, - oldClaim: func() *resource.ResourceClaim { - claim := validAllocatedClaim.DeepCopy() - claim.Status.DeallocationRequested = true - return claim - }(), - update: func(claim *resource.ResourceClaim) *resource.ResourceClaim { - claim.Status.Allocation = nil - return claim - }, - }, "invalid-request-name": { wantFailures: field.ErrorList{ field.Invalid(field.NewPath("status", "allocation", "devices", "config").Index(0).Child("requests").Index(1), badName, "a lowercase RFC 1123 label must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character (e.g. 'my-name', or '123-abc', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?')"), diff --git a/pkg/apis/resource/zz_generated.deepcopy.go b/pkg/apis/resource/zz_generated.deepcopy.go index 54b43208f40..7243a553af6 100644 --- a/pkg/apis/resource/zz_generated.deepcopy.go +++ b/pkg/apis/resource/zz_generated.deepcopy.go @@ -355,11 +355,6 @@ func (in *DeviceClassSpec) DeepCopyInto(out *DeviceClassSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - if in.SuitableNodes != nil { - in, out := &in.SuitableNodes, &out.SuitableNodes - *out = new(core.NodeSelector) - (*in).DeepCopyInto(*out) - } return } @@ -497,111 +492,6 @@ func (in *OpaqueDeviceConfiguration) DeepCopy() *OpaqueDeviceConfiguration { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodSchedulingContext) DeepCopyInto(out *PodSchedulingContext) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContext. -func (in *PodSchedulingContext) DeepCopy() *PodSchedulingContext { - if in == nil { - return nil - } - out := new(PodSchedulingContext) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodSchedulingContext) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PodSchedulingContextList) DeepCopyInto(out *PodSchedulingContextList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PodSchedulingContext, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContextList. -func (in *PodSchedulingContextList) DeepCopy() *PodSchedulingContextList { - if in == nil { - return nil - } - out := new(PodSchedulingContextList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodSchedulingContextList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodSchedulingContextSpec) DeepCopyInto(out *PodSchedulingContextSpec) { - *out = *in - if in.PotentialNodes != nil { - in, out := &in.PotentialNodes, &out.PotentialNodes - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContextSpec. -func (in *PodSchedulingContextSpec) DeepCopy() *PodSchedulingContextSpec { - if in == nil { - return nil - } - out := new(PodSchedulingContextSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodSchedulingContextStatus) DeepCopyInto(out *PodSchedulingContextStatus) { - *out = *in - if in.ResourceClaims != nil { - in, out := &in.ResourceClaims, &out.ResourceClaims - *out = make([]ResourceClaimSchedulingStatus, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContextStatus. -func (in *PodSchedulingContextStatus) DeepCopy() *PodSchedulingContextStatus { - if in == nil { - return nil - } - out := new(PodSchedulingContextStatus) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ResourceClaim) DeepCopyInto(out *ResourceClaim) { *out = *in @@ -679,27 +569,6 @@ func (in *ResourceClaimList) DeepCopyObject() runtime.Object { return nil } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceClaimSchedulingStatus) DeepCopyInto(out *ResourceClaimSchedulingStatus) { - *out = *in - if in.UnsuitableNodes != nil { - in, out := &in.UnsuitableNodes, &out.UnsuitableNodes - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimSchedulingStatus. -func (in *ResourceClaimSchedulingStatus) DeepCopy() *ResourceClaimSchedulingStatus { - if in == nil { - return nil - } - out := new(ResourceClaimSchedulingStatus) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ResourceClaimSpec) DeepCopyInto(out *ResourceClaimSpec) { *out = *in diff --git a/pkg/controller/resourceclaim/controller.go b/pkg/controller/resourceclaim/controller.go index b810987608f..19cc61c2cc7 100644 --- a/pkg/controller/resourceclaim/controller.go +++ b/pkg/controller/resourceclaim/controller.go @@ -88,13 +88,6 @@ type Controller struct { podLister v1listers.PodLister podSynced cache.InformerSynced - // podSchedulingList is the shared PodSchedulingContext lister used to - // fetch scheduling objects from the API server. It is shared with other - // controllers and therefore the objects in its store should be treated - // as immutable. - podSchedulingLister resourcelisters.PodSchedulingContextLister - podSchedulingSynced cache.InformerSynced - // templateLister is the shared ResourceClaimTemplate lister used to // fetch template objects from the API server. It is shared with other // controllers and therefore the objects in its store should be treated @@ -127,21 +120,18 @@ func NewController( logger klog.Logger, kubeClient clientset.Interface, podInformer v1informers.PodInformer, - podSchedulingInformer resourceinformers.PodSchedulingContextInformer, claimInformer resourceinformers.ResourceClaimInformer, templateInformer resourceinformers.ResourceClaimTemplateInformer) (*Controller, error) { ec := &Controller{ - kubeClient: kubeClient, - podLister: podInformer.Lister(), - podIndexer: podInformer.Informer().GetIndexer(), - podSynced: podInformer.Informer().HasSynced, - podSchedulingLister: podSchedulingInformer.Lister(), - podSchedulingSynced: podSchedulingInformer.Informer().HasSynced, - claimLister: claimInformer.Lister(), - claimsSynced: claimInformer.Informer().HasSynced, - templateLister: templateInformer.Lister(), - templatesSynced: templateInformer.Informer().HasSynced, + kubeClient: kubeClient, + podLister: podInformer.Lister(), + podIndexer: podInformer.Informer().GetIndexer(), + podSynced: podInformer.Informer().HasSynced, + claimLister: claimInformer.Lister(), + claimsSynced: claimInformer.Informer().HasSynced, + templateLister: templateInformer.Lister(), + templatesSynced: templateInformer.Informer().HasSynced, queue: workqueue.NewTypedRateLimitingQueueWithConfig( workqueue.DefaultTypedControllerRateLimiter[string](), workqueue.TypedRateLimitingQueueConfig[string]{Name: "resource_claim"}, @@ -322,27 +312,6 @@ func (ec *Controller) podNeedsWork(pod *v1.Pod) (bool, string) { continue } - // Create PodSchedulingContext if the pod got scheduled without triggering - // delayed allocation. - // - // These can happen when: - // - a user created a pod with spec.nodeName set, perhaps for testing - // - some scheduler was used which is unaware of DRA - // - DRA was not enabled in kube-scheduler (version skew, configuration) - if claim.Status.Allocation == nil { - scheduling, err := ec.podSchedulingLister.PodSchedulingContexts(pod.Namespace).Get(pod.Name) - if apierrors.IsNotFound(err) { - return true, "need to create PodSchedulingContext for scheduled pod" - } - if err != nil { - // Shouldn't happen. - return true, fmt.Sprintf("internal error while checking for PodSchedulingContext: %v", err) - } - if scheduling.Spec.SelectedNode != pod.Spec.NodeName { - // Need to update PodSchedulingContext. 
- return true, fmt.Sprintf("need to update PodSchedulingContext %s for scheduled pod", klog.KObj(scheduling)) - } - } if claim.Status.Allocation != nil && !resourceclaim.IsReservedForPod(pod, claim) && resourceclaim.CanBeReserved(claim) { @@ -404,7 +373,7 @@ func (ec *Controller) Run(ctx context.Context, workers int) { ec.recorder = eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "resource_claim"}) defer eventBroadcaster.Shutdown() - if !cache.WaitForNamedCacheSync("resource_claim", ctx.Done(), ec.podSynced, ec.podSchedulingSynced, ec.claimsSynced, ec.templatesSynced) { + if !cache.WaitForNamedCacheSync("resource_claim", ctx.Done(), ec.podSynced, ec.claimsSynced, ec.templatesSynced) { return } @@ -505,7 +474,7 @@ func (ec *Controller) syncPod(ctx context.Context, namespace, name string) error } if pod.Spec.NodeName == "" { - // Scheduler will handle PodSchedulingContext and reservations. + // Scheduler will handle reservations. logger.V(5).Info("nothing to do for pod, scheduler will deal with it") return nil } @@ -532,10 +501,6 @@ func (ec *Controller) syncPod(ctx context.Context, namespace, name string) error return err } } - if claim.Status.Allocation == nil { - logger.V(5).Info("create PodSchedulingContext because claim needs to be allocated", "resourceClaim", klog.KObj(claim)) - return ec.ensurePodSchedulingContext(ctx, pod) - } if claim.Status.Allocation != nil && !resourceclaim.IsReservedForPod(pod, claim) && resourceclaim.CanBeReserved(claim) { @@ -703,50 +668,6 @@ func (ec *Controller) findPodResourceClaim(pod *v1.Pod, podClaim v1.PodResourceC return nil, nil } -func (ec *Controller) ensurePodSchedulingContext(ctx context.Context, pod *v1.Pod) error { - scheduling, err := ec.podSchedulingLister.PodSchedulingContexts(pod.Namespace).Get(pod.Name) - if err != nil && !apierrors.IsNotFound(err) { - return fmt.Errorf("retrieve PodSchedulingContext: %v", err) - } - if scheduling == nil { - scheduling = &resourceapi.PodSchedulingContext{ - ObjectMeta: metav1.ObjectMeta{ - Name: pod.Name, - Namespace: pod.Namespace, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: "v1", - Kind: "Pod", - Name: pod.Name, - UID: pod.UID, - Controller: ptr.To(true), - }, - }, - }, - Spec: resourceapi.PodSchedulingContextSpec{ - SelectedNode: pod.Spec.NodeName, - // There is no need for negotiation about - // potential and suitable nodes anymore, so - // PotentialNodes can be left empty. 
- }, - } - if _, err := ec.kubeClient.ResourceV1alpha3().PodSchedulingContexts(pod.Namespace).Create(ctx, scheduling, metav1.CreateOptions{}); err != nil { - return fmt.Errorf("create PodSchedulingContext %s: %w", klog.KObj(scheduling), err) - } - return nil - } - - if scheduling.Spec.SelectedNode != pod.Spec.NodeName { - scheduling := scheduling.DeepCopy() - scheduling.Spec.SelectedNode = pod.Spec.NodeName - if _, err := ec.kubeClient.ResourceV1alpha3().PodSchedulingContexts(pod.Namespace).Update(ctx, scheduling, metav1.UpdateOptions{}); err != nil { - return fmt.Errorf("update spec.selectedNode in PodSchedulingContext %s: %w", klog.KObj(scheduling), err) - } - } - - return nil -} - func (ec *Controller) reserveForPod(ctx context.Context, pod *v1.Pod, claim *resourceapi.ResourceClaim) error { claim = claim.DeepCopy() claim.Status.ReservedFor = append(claim.Status.ReservedFor, @@ -841,8 +762,8 @@ func (ec *Controller) syncClaim(ctx context.Context, namespace, name string) err claim := claim.DeepCopy() claim.Status.ReservedFor = valid - // When a ResourceClaim uses delayed allocation, then it makes sense to - // deallocate the claim as soon as the last consumer stops using + // DRA always performs delayed allocations. Relatedly, it also + // deallocates a claim as soon as the last consumer stops using // it. This ensures that the claim can be allocated again as needed by // some future consumer instead of trying to schedule that consumer // onto the node that was chosen for the previous consumer. It also @@ -858,14 +779,13 @@ func (ec *Controller) syncClaim(ctx context.Context, namespace, name string) err // pod is done. However, it doesn't hurt to also trigger deallocation // for such claims and not checking for them keeps this code simpler. if len(valid) == 0 { + // This is a sanity check. There shouldn't be any claims without this + // finalizer because there's no longer any other way of allocating claims. + // Classic DRA was the alternative earlier. if builtinControllerFinalizer >= 0 { // Allocated by scheduler with structured parameters. We can "deallocate" // by clearing the allocation. claim.Status.Allocation = nil - } else { - // DRA driver controller in the control plane - // needs to do the deallocation. 
- claim.Status.DeallocationRequested = true } } diff --git a/pkg/controller/resourceclaim/controller_test.go b/pkg/controller/resourceclaim/controller_test.go index 7dce9f36c12..28eb2dc3915 100644 --- a/pkg/controller/resourceclaim/controller_test.go +++ b/pkg/controller/resourceclaim/controller_test.go @@ -39,7 +39,6 @@ import ( "k8s.io/klog/v2" "k8s.io/kubernetes/pkg/controller" ephemeralvolumemetrics "k8s.io/kubernetes/pkg/controller/resourceclaim/metrics" - "k8s.io/utils/pointer" ) var ( @@ -78,25 +77,6 @@ var ( }) return pod }() - - podSchedulingContext = resourceapi.PodSchedulingContext{ - ObjectMeta: metav1.ObjectMeta{ - Name: testPodName, - Namespace: testNamespace, - OwnerReferences: []metav1.OwnerReference{ - { - APIVersion: "v1", - Kind: "Pod", - Name: testPodName, - UID: testPodUID, - Controller: pointer.Bool(true), - }, - }, - }, - Spec: resourceapi.PodSchedulingContextSpec{ - SelectedNode: nodeName, - }, - } ) func init() { @@ -105,18 +85,17 @@ func init() { func TestSyncHandler(t *testing.T) { tests := []struct { - name string - key string - claims []*resourceapi.ResourceClaim - claimsInCache []*resourceapi.ResourceClaim - pods []*v1.Pod - podsLater []*v1.Pod - templates []*resourceapi.ResourceClaimTemplate - expectedClaims []resourceapi.ResourceClaim - expectedPodSchedulingContexts []resourceapi.PodSchedulingContext - expectedStatuses map[string][]v1.PodResourceClaimStatus - expectedError bool - expectedMetrics expectedMetrics + name string + key string + claims []*resourceapi.ResourceClaim + claimsInCache []*resourceapi.ResourceClaim + pods []*v1.Pod + podsLater []*v1.Pod + templates []*resourceapi.ResourceClaimTemplate + expectedClaims []resourceapi.ResourceClaim + expectedStatuses map[string][]v1.PodResourceClaimStatus + expectedError bool + expectedMetrics expectedMetrics }{ { name: "create", @@ -267,18 +246,6 @@ func TestSyncHandler(t *testing.T) { expectedClaims: []resourceapi.ResourceClaim{*testClaimReserved}, expectedMetrics: expectedMetrics{0, 0}, }, - { - name: "clear-reserved", - pods: []*v1.Pod{}, - key: claimKey(testClaimReserved), - claims: []*resourceapi.ResourceClaim{testClaimReserved}, - expectedClaims: func() []resourceapi.ResourceClaim { - claim := testClaimAllocated.DeepCopy() - claim.Status.DeallocationRequested = true - return []resourceapi.ResourceClaim{*claim} - }(), - expectedMetrics: expectedMetrics{0, 0}, - }, { name: "clear-reserved-structured", pods: []*v1.Pod{}, @@ -356,7 +323,6 @@ func TestSyncHandler(t *testing.T) { expectedClaims: func() []resourceapi.ResourceClaim { claims := []resourceapi.ResourceClaim{*testClaimAllocated.DeepCopy()} claims[0].OwnerReferences = nil - claims[0].Status.DeallocationRequested = true return claims }(), expectedMetrics: expectedMetrics{0, 0}, @@ -381,21 +347,6 @@ func TestSyncHandler(t *testing.T) { expectedClaims: nil, expectedMetrics: expectedMetrics{0, 0}, }, - { - name: "trigger-allocation", - pods: []*v1.Pod{testPodWithNodeName}, - key: podKey(testPodWithNodeName), - templates: []*resourceapi.ResourceClaimTemplate{template}, - claims: []*resourceapi.ResourceClaim{generatedTestClaim}, - expectedClaims: []resourceapi.ResourceClaim{*generatedTestClaim}, - expectedStatuses: map[string][]v1.PodResourceClaimStatus{ - testPodWithNodeName.Name: { - {Name: testPodWithNodeName.Spec.ResourceClaims[0].Name, ResourceClaimName: &generatedTestClaim.Name}, - }, - }, - expectedPodSchedulingContexts: []resourceapi.PodSchedulingContext{podSchedulingContext}, - expectedMetrics: expectedMetrics{0, 0}, - }, { name: 
"add-reserved", pods: []*v1.Pod{testPodWithNodeName}, @@ -438,11 +389,10 @@ func TestSyncHandler(t *testing.T) { setupMetrics() informerFactory := informers.NewSharedInformerFactory(fakeKubeClient, controller.NoResyncPeriodFunc()) podInformer := informerFactory.Core().V1().Pods() - podSchedulingInformer := informerFactory.Resource().V1alpha3().PodSchedulingContexts() claimInformer := informerFactory.Resource().V1alpha3().ResourceClaims() templateInformer := informerFactory.Resource().V1alpha3().ResourceClaimTemplates() - ec, err := NewController(klog.FromContext(ctx), fakeKubeClient, podInformer, podSchedulingInformer, claimInformer, templateInformer) + ec, err := NewController(klog.FromContext(ctx), fakeKubeClient, podInformer, claimInformer, templateInformer) if err != nil { t.Fatalf("error creating ephemeral controller : %v", err) } @@ -500,12 +450,6 @@ func TestSyncHandler(t *testing.T) { } assert.Equal(t, tc.expectedStatuses, actualStatuses, "pod resource claim statuses") - scheduling, err := fakeKubeClient.ResourceV1alpha3().PodSchedulingContexts("").List(ctx, metav1.ListOptions{}) - if err != nil { - t.Fatalf("unexpected error while listing claims: %v", err) - } - assert.Equal(t, normalizeScheduling(tc.expectedPodSchedulingContexts), normalizeScheduling(scheduling.Items)) - expectMetrics(t, tc.expectedMetrics) }) } @@ -627,14 +571,6 @@ func normalizeClaims(claims []resourceapi.ResourceClaim) []resourceapi.ResourceC return claims } -func normalizeScheduling(scheduling []resourceapi.PodSchedulingContext) []resourceapi.PodSchedulingContext { - sort.Slice(scheduling, func(i, j int) bool { - return scheduling[i].Namespace < scheduling[j].Namespace || - scheduling[i].Name < scheduling[j].Name - }) - return scheduling -} - func createTestClient(objects ...runtime.Object) *fake.Clientset { fakeClient := fake.NewSimpleClientset(objects...) fakeClient.PrependReactor("create", "resourceclaims", createResourceClaimReactor()) diff --git a/pkg/controlplane/apiserver/options/validation.go b/pkg/controlplane/apiserver/options/validation.go index 7e2b1782f71..4838d1f29ef 100644 --- a/pkg/controlplane/apiserver/options/validation.go +++ b/pkg/controlplane/apiserver/options/validation.go @@ -77,13 +77,6 @@ func validateNodeSelectorAuthorizationFeature() []error { return nil } -func validateDRAControlPlaneControllerFeature() []error { - if utilfeature.DefaultFeatureGate.Enabled(features.DRAControlPlaneController) && !utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) { - return []error{fmt.Errorf("DRAControlPlaneController feature requires DynamicResourceAllocation feature to be enabled")} - } - return nil -} - func validateUnknownVersionInteroperabilityProxyFeature() []error { if utilfeature.DefaultFeatureGate.Enabled(features.UnknownVersionInteroperabilityProxy) { if utilfeature.DefaultFeatureGate.Enabled(genericfeatures.StorageVersionAPI) { @@ -128,7 +121,6 @@ func (s *Options) Validate() []error { errs = append(errs, validateUnknownVersionInteroperabilityProxyFeature()...) errs = append(errs, validateUnknownVersionInteroperabilityProxyFlags(s)...) errs = append(errs, validateNodeSelectorAuthorizationFeature()...) - errs = append(errs, validateDRAControlPlaneControllerFeature()...) 
return errs } diff --git a/pkg/features/kube_features.go b/pkg/features/kube_features.go index d7fd604e1dd..d76c6443047 100644 --- a/pkg/features/kube_features.go +++ b/pkg/features/kube_features.go @@ -194,14 +194,6 @@ const ( // DisableNodeKubeProxyVersion disable the status.nodeInfo.kubeProxyVersion field of v1.Node DisableNodeKubeProxyVersion featuregate.Feature = "DisableNodeKubeProxyVersion" - // owner: @pohly - // kep: http://kep.k8s.io/3063 - // - // Enables support for resources with custom parameters and a lifecycle - // that is independent of a Pod. Resource allocation is done by a DRA driver's - // "control plane controller" in cooperation with the scheduler. - DRAControlPlaneController featuregate.Feature = "DRAControlPlaneController" - // owner: @pohly // kep: http://kep.k8s.io/4381 // diff --git a/pkg/features/versioned_kube_features.go b/pkg/features/versioned_kube_features.go index 7db027c4614..523f5eca97b 100644 --- a/pkg/features/versioned_kube_features.go +++ b/pkg/features/versioned_kube_features.go @@ -165,10 +165,6 @@ var defaultVersionedKubernetesFeatureGates = map[featuregate.Feature]featuregate {Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Deprecated}, }, - DRAControlPlaneController: { - {Version: version.MustParse("1.26"), Default: false, PreRelease: featuregate.Alpha}, - }, - DynamicResourceAllocation: { {Version: version.MustParse("1.26"), Default: false, PreRelease: featuregate.Alpha}, }, diff --git a/pkg/generated/openapi/zz_generated.openapi.go b/pkg/generated/openapi/zz_generated.openapi.go index 840c0c1c6af..681a180b1f2 100644 --- a/pkg/generated/openapi/zz_generated.openapi.go +++ b/pkg/generated/openapi/zz_generated.openapi.go @@ -910,14 +910,9 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "k8s.io/api/resource/v1alpha3.DeviceRequestAllocationResult": schema_k8sio_api_resource_v1alpha3_DeviceRequestAllocationResult(ref), "k8s.io/api/resource/v1alpha3.DeviceSelector": schema_k8sio_api_resource_v1alpha3_DeviceSelector(ref), "k8s.io/api/resource/v1alpha3.OpaqueDeviceConfiguration": schema_k8sio_api_resource_v1alpha3_OpaqueDeviceConfiguration(ref), - "k8s.io/api/resource/v1alpha3.PodSchedulingContext": schema_k8sio_api_resource_v1alpha3_PodSchedulingContext(ref), - "k8s.io/api/resource/v1alpha3.PodSchedulingContextList": schema_k8sio_api_resource_v1alpha3_PodSchedulingContextList(ref), - "k8s.io/api/resource/v1alpha3.PodSchedulingContextSpec": schema_k8sio_api_resource_v1alpha3_PodSchedulingContextSpec(ref), - "k8s.io/api/resource/v1alpha3.PodSchedulingContextStatus": schema_k8sio_api_resource_v1alpha3_PodSchedulingContextStatus(ref), "k8s.io/api/resource/v1alpha3.ResourceClaim": schema_k8sio_api_resource_v1alpha3_ResourceClaim(ref), "k8s.io/api/resource/v1alpha3.ResourceClaimConsumerReference": schema_k8sio_api_resource_v1alpha3_ResourceClaimConsumerReference(ref), "k8s.io/api/resource/v1alpha3.ResourceClaimList": schema_k8sio_api_resource_v1alpha3_ResourceClaimList(ref), - "k8s.io/api/resource/v1alpha3.ResourceClaimSchedulingStatus": schema_k8sio_api_resource_v1alpha3_ResourceClaimSchedulingStatus(ref), "k8s.io/api/resource/v1alpha3.ResourceClaimSpec": schema_k8sio_api_resource_v1alpha3_ResourceClaimSpec(ref), "k8s.io/api/resource/v1alpha3.ResourceClaimStatus": schema_k8sio_api_resource_v1alpha3_ResourceClaimStatus(ref), "k8s.io/api/resource/v1alpha3.ResourceClaimTemplate": schema_k8sio_api_resource_v1alpha3_ResourceClaimTemplate(ref), @@ -45860,13 +45855,6 @@ func 
schema_k8sio_api_resource_v1alpha3_AllocationResult(ref common.ReferenceCal Ref: ref("k8s.io/api/core/v1.NodeSelector"), }, }, - "controller": { - SchemaProps: spec.SchemaProps{ - Description: "Controller is the name of the DRA driver which handled the allocation. That driver is also responsible for deallocating the claim. It is empty when the claim can be deallocated without involving a driver.\n\nA driver may allocate devices provided by other drivers, so this driver name here can be different from the driver names listed for the results.\n\nThis is an alpha field and requires enabling the DRAControlPlaneController feature gate.", - Type: []string{"string"}, - Format: "", - }, - }, }, }, }, @@ -46389,17 +46377,11 @@ func schema_k8sio_api_resource_v1alpha3_DeviceClassSpec(ref common.ReferenceCall }, }, }, - "suitableNodes": { - SchemaProps: spec.SchemaProps{ - Description: "Only nodes matching the selector will be considered by the scheduler when trying to find a Node that fits a Pod when that Pod uses a claim that has not been allocated yet *and* that claim gets allocated through a control plane controller. It is ignored when the claim does not use a control plane controller for allocation.\n\nSetting this field is optional. If unset, all Nodes are candidates.\n\nThis is an alpha field and requires enabling the DRAControlPlaneController feature gate.", - Ref: ref("k8s.io/api/core/v1.NodeSelector"), - }, - }, }, }, }, Dependencies: []string{ - "k8s.io/api/core/v1.NodeSelector", "k8s.io/api/resource/v1alpha3.DeviceClassConfiguration", "k8s.io/api/resource/v1alpha3.DeviceSelector"}, + "k8s.io/api/resource/v1alpha3.DeviceClassConfiguration", "k8s.io/api/resource/v1alpha3.DeviceSelector"}, } } @@ -46634,185 +46616,6 @@ func schema_k8sio_api_resource_v1alpha3_OpaqueDeviceConfiguration(ref common.Ref } } -func schema_k8sio_api_resource_v1alpha3_PodSchedulingContext(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PodSchedulingContext objects hold information that is needed to schedule a Pod with ResourceClaims that use \"WaitForFirstConsumer\" allocation mode.\n\nThis is an alpha type and requires enabling the DRAControlPlaneController feature gate.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard object metadata", - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "spec": { - SchemaProps: spec.SchemaProps{ - Description: "Spec describes where resources for the Pod are needed.", - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/resource/v1alpha3.PodSchedulingContextSpec"), - }, - }, - "status": { - SchemaProps: spec.SchemaProps{ - Description: "Status describes where resources for the Pod can be allocated.", - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/resource/v1alpha3.PodSchedulingContextStatus"), - }, - }, - }, - Required: []string{"spec"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/resource/v1alpha3.PodSchedulingContextSpec", "k8s.io/api/resource/v1alpha3.PodSchedulingContextStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_k8sio_api_resource_v1alpha3_PodSchedulingContextList(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PodSchedulingContextList is a collection of Pod scheduling objects.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard list metadata", - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), - }, - }, - "items": { - SchemaProps: spec.SchemaProps{ - Description: "Items is the list of PodSchedulingContext objects.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/resource/v1alpha3.PodSchedulingContext"), - }, - }, - }, - }, - }, - }, - Required: []string{"items"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/resource/v1alpha3.PodSchedulingContext", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, - } -} - -func schema_k8sio_api_resource_v1alpha3_PodSchedulingContextSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PodSchedulingContextSpec describes where resources for the Pod are needed.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "selectedNode": { - SchemaProps: spec.SchemaProps{ - Description: "SelectedNode is the node for which allocation of ResourceClaims that are referenced by the Pod and that use \"WaitForFirstConsumer\" allocation is to be attempted.", - Type: []string{"string"}, - Format: "", - }, - }, - "potentialNodes": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-list-type": "atomic", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "PotentialNodes lists nodes where the Pod might be able to run.\n\nThe size of this field is limited to 128. This is large enough for many clusters. Larger clusters may need more attempts to find a node that suits all pending resources. 
This may get increased in the future, but not reduced.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - }, - }, - }, - } -} - -func schema_k8sio_api_resource_v1alpha3_PodSchedulingContextStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "PodSchedulingContextStatus describes where resources for the Pod can be allocated.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "resourceClaims": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-list-map-keys": []interface{}{ - "name", - }, - "x-kubernetes-list-type": "map", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "ResourceClaims describes resource availability for each pod.spec.resourceClaim entry where the corresponding ResourceClaim uses \"WaitForFirstConsumer\" allocation mode.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/resource/v1alpha3.ResourceClaimSchedulingStatus"), - }, - }, - }, - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/resource/v1alpha3.ResourceClaimSchedulingStatus"}, - } -} - func schema_k8sio_api_resource_v1alpha3_ResourceClaim(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -46960,48 +46763,6 @@ func schema_k8sio_api_resource_v1alpha3_ResourceClaimList(ref common.ReferenceCa } } -func schema_k8sio_api_resource_v1alpha3_ResourceClaimSchedulingStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ResourceClaimSchedulingStatus contains information about one particular ResourceClaim with \"WaitForFirstConsumer\" allocation mode.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "name": { - SchemaProps: spec.SchemaProps{ - Description: "Name matches the pod.spec.resourceClaims[*].Name field.", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "unsuitableNodes": { - VendorExtensible: spec.VendorExtensible{ - Extensions: spec.Extensions{ - "x-kubernetes-list-type": "atomic", - }, - }, - SchemaProps: spec.SchemaProps{ - Description: "UnsuitableNodes lists nodes that the ResourceClaim cannot be allocated for.\n\nThe size of this field is limited to 128, the same as for PodSchedulingSpec.PotentialNodes. This may get increased in the future, but not reduced.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - }, - Required: []string{"name"}, - }, - }, - } -} - func schema_k8sio_api_resource_v1alpha3_ResourceClaimSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -47016,13 +46777,6 @@ func schema_k8sio_api_resource_v1alpha3_ResourceClaimSpec(ref common.ReferenceCa Ref: ref("k8s.io/api/resource/v1alpha3.DeviceClaim"), }, }, - "controller": { - SchemaProps: spec.SchemaProps{ - Description: "Controller is the name of the DRA driver that is meant to handle allocation of this claim. 
If empty, allocation is handled by the scheduler while scheduling a pod.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver.\n\nThis is an alpha field and requires enabling the DRAControlPlaneController feature gate.", - Type: []string{"string"}, - Format: "", - }, - }, }, }, }, @@ -47068,13 +46822,6 @@ func schema_k8sio_api_resource_v1alpha3_ResourceClaimStatus(ref common.Reference }, }, }, - "deallocationRequested": { - SchemaProps: spec.SchemaProps{ - Description: "Indicates that a claim is to be deallocated. While this is set, no new consumers may be added to ReservedFor.\n\nThis is only used if the claim needs to be deallocated by a DRA driver. That driver then must deallocate this claim and reset the field together with clearing the Allocation field.\n\nThis is an alpha field and requires enabling the DRAControlPlaneController feature gate.", - Type: []string{"boolean"}, - Format: "", - }, - }, }, }, }, diff --git a/pkg/printers/internalversion/printers.go b/pkg/printers/internalversion/printers.go index aef363988fc..00f23e34356 100644 --- a/pkg/printers/internalversion/printers.go +++ b/pkg/printers/internalversion/printers.go @@ -658,14 +658,6 @@ func AddHandlers(h printers.PrintHandler) { _ = h.TableHandler(resourceClaimTemplateColumnDefinitions, printResourceClaimTemplate) _ = h.TableHandler(resourceClaimTemplateColumnDefinitions, printResourceClaimTemplateList) - podSchedulingCtxColumnDefinitions := []metav1.TableColumnDefinition{ - {Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]}, - {Name: "SelectedNode", Type: "string", Description: resourceapi.PodSchedulingContextSpec{}.SwaggerDoc()["selectedNode"]}, - {Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]}, - } - _ = h.TableHandler(podSchedulingCtxColumnDefinitions, printPodSchedulingContext) - _ = h.TableHandler(podSchedulingCtxColumnDefinitions, printPodSchedulingContextList) - nodeResourceSliceColumnDefinitions := []metav1.TableColumnDefinition{ {Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]}, {Name: "Node", Type: "string", Description: resourceapi.ResourceSliceSpec{}.SwaggerDoc()["nodeName"]}, @@ -3044,8 +3036,6 @@ func resourceClaimState(obj *resource.ResourceClaim) string { states = append(states, "allocated") if len(obj.Status.ReservedFor) > 0 { states = append(states, "reserved") - } else if obj.DeletionTimestamp != nil || obj.Status.DeallocationRequested { - states = append(states, "deallocating") } } return strings.Join(states, ",") @@ -3084,27 +3074,6 @@ func printResourceClaimTemplateList(list *resource.ResourceClaimTemplateList, op return rows, nil } -func printPodSchedulingContext(obj *resource.PodSchedulingContext, options printers.GenerateOptions) ([]metav1.TableRow, error) { - row := metav1.TableRow{ - Object: runtime.RawExtension{Object: obj}, - } - row.Cells = append(row.Cells, obj.Name, obj.Spec.SelectedNode, translateTimestampSince(obj.CreationTimestamp)) - - return []metav1.TableRow{row}, nil -} - -func printPodSchedulingContextList(list *resource.PodSchedulingContextList, options printers.GenerateOptions) ([]metav1.TableRow, error) { - rows := make([]metav1.TableRow, 0, len(list.Items)) - for i := range list.Items { - r, err := printPodSchedulingContext(&list.Items[i], options) - if err != nil { - return nil, err - } - rows = append(rows, r...) 
- } - return rows, nil -} - func printResourceSlice(obj *resource.ResourceSlice, options printers.GenerateOptions) ([]metav1.TableRow, error) { row := metav1.TableRow{ Object: runtime.RawExtension{Object: obj}, diff --git a/pkg/printers/internalversion/printers_test.go b/pkg/printers/internalversion/printers_test.go index 3cdccbb8ffb..fff2f0ffb9e 100644 --- a/pkg/printers/internalversion/printers_test.go +++ b/pkg/printers/internalversion/printers_test.go @@ -6256,16 +6256,6 @@ func TestPrintDeviceClass(t *testing.T) { Name: "test-deviceclass", CreationTimestamp: metav1.Time{Time: time.Now().Add(-3e11)}, }, - Spec: resourceapis.DeviceClassSpec{ - SuitableNodes: &api.NodeSelector{ - NodeSelectorTerms: []api.NodeSelectorTerm{{ - MatchExpressions: []api.NodeSelectorRequirement{{ - Key: "foo", - Operator: api.NodeSelectorOpExists, - }}, - }}, - }, - }, }, // Columns: Name, Age expected: []metav1.TableRow{{Cells: []interface{}{"test-deviceclass", "5m"}}}, @@ -6277,16 +6267,6 @@ func TestPrintDeviceClass(t *testing.T) { Name: "test-deviceclass", CreationTimestamp: metav1.Time{}, }, - Spec: resourceapis.DeviceClassSpec{ - SuitableNodes: &api.NodeSelector{ - NodeSelectorTerms: []api.NodeSelectorTerm{{ - MatchExpressions: []api.NodeSelectorRequirement{{ - Key: "foo", - Operator: api.NodeSelectorOpExists, - }}, - }}, - }, - }, }, // Columns: Name, Age expected: []metav1.TableRow{{Cells: []interface{}{"test-deviceclass", ""}}}, @@ -6361,35 +6341,6 @@ func TestPrintResourceClaim(t *testing.T) { // Columns: Name, State, Age expected: []metav1.TableRow{{Cells: []interface{}{"test-resourceclaim", "pending", ""}}}, }, - { - name: "ResourceClaim with Allocated State and Deallocation State", - resourceClaim: resourceapis.ResourceClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-resourceclaim", - CreationTimestamp: metav1.Time{Time: time.Now().Add(-3e11)}, - }, - Spec: resourceapis.ResourceClaimSpec{ - Devices: resourceapis.DeviceClaim{ - Requests: []resourceapis.DeviceRequest{ - { - Name: "deviceRequest", - DeviceClassName: "deviceClass", - AllocationMode: resourceapis.DeviceAllocationModeExactCount, - Count: 1, - }, - }, - }, - }, - Status: resourceapis.ResourceClaimStatus{ - Allocation: &resourceapis.AllocationResult{ - Controller: "dra.example.com", - }, - DeallocationRequested: true, - }, - }, - // Columns: Name, State, Age - expected: []metav1.TableRow{{Cells: []interface{}{"test-resourceclaim", "allocated,deallocating", "5m"}}}, - }, { name: "ResourceClaim with Allocated and Reserved State", resourceClaim: resourceapis.ResourceClaim{ @@ -6410,9 +6361,7 @@ func TestPrintResourceClaim(t *testing.T) { }, }, Status: resourceapis.ResourceClaimStatus{ - Allocation: &resourceapis.AllocationResult{ - Controller: "dra.example.com", - }, + Allocation: &resourceapis.AllocationResult{}, ReservedFor: []resourceapis.ResourceClaimConsumerReference{ { Resource: "pods", @@ -6420,7 +6369,6 @@ func TestPrintResourceClaim(t *testing.T) { UID: types.UID("pod-test"), }, }, - DeallocationRequested: true, }, }, // Columns: Name, State, Age @@ -6533,79 +6481,6 @@ func TestPrintResourceClaimTemplate(t *testing.T) { } } -func TestPrintPodSchedulingContext(t *testing.T) { - - tests := []struct { - name string - podSchedulingContext resourceapis.PodSchedulingContext - expected []metav1.TableRow - }{ - { - name: "PodSchedulingContext with SelectedNode", - podSchedulingContext: resourceapis.PodSchedulingContext{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-podschedulercontext", - Namespace: "default", - CreationTimestamp: 
metav1.Time{Time: time.Now().Add(-3e11)}, - }, - Spec: resourceapis.PodSchedulingContextSpec{ - SelectedNode: "worker", - }, - Status: resourceapis.PodSchedulingContextStatus{}, - }, - - // Columns: Name, SelectedNode, Age - expected: []metav1.TableRow{{Cells: []interface{}{"test-podschedulercontext", "worker", "5m"}}}, - }, - { - // test case for Empty creation timestamp that is translated to - name: "PodSchedulingContext with SelectedNode and Empty creation timestamp", - podSchedulingContext: resourceapis.PodSchedulingContext{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-podschedulercontext", - Namespace: "default", - CreationTimestamp: metav1.Time{}, - }, - Spec: resourceapis.PodSchedulingContextSpec{ - SelectedNode: "worker", - }, - Status: resourceapis.PodSchedulingContextStatus{}, - }, - - // Columns: Name, SelectedNode, Age - expected: []metav1.TableRow{{Cells: []interface{}{"test-podschedulercontext", "worker", ""}}}, - }, - { - name: "PodSchedulingContext without SelectedNode", - podSchedulingContext: resourceapis.PodSchedulingContext{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-podschedulercontext", - Namespace: "default", - CreationTimestamp: metav1.Time{Time: time.Now().Add(-3e11)}, - }, - Spec: resourceapis.PodSchedulingContextSpec{}, - Status: resourceapis.PodSchedulingContextStatus{}, - }, - - // Columns: Name, SelectedNode, Age - expected: []metav1.TableRow{{Cells: []interface{}{"test-podschedulercontext", "", "5m"}}}, - }, - } - - for i, test := range tests { - rows, err := printPodSchedulingContext(&test.podSchedulingContext, printers.GenerateOptions{}) - if err != nil { - t.Fatal(err) - } - for i := range rows { - rows[i].Object.Object = nil - } - if !reflect.DeepEqual(test.expected, rows) { - t.Errorf("%d mismatch: %s", i, cmp.Diff(test.expected, rows)) - } - } -} - func TestPrintResourceSlice(t *testing.T) { tests := []struct { @@ -7211,12 +7086,6 @@ func TestTableRowDeepCopyShouldNotPanic(t *testing.T) { return printResourceClaimTemplate(&resourceapis.ResourceClaimTemplate{}, printers.GenerateOptions{}) }, }, - { - name: "PodSchedulingContext", - printer: func() ([]metav1.TableRow, error) { - return printPodSchedulingContext(&resourceapis.PodSchedulingContext{}, printers.GenerateOptions{}) - }, - }, { name: "ResourceSlice", printer: func() ([]metav1.TableRow, error) { diff --git a/pkg/registry/resource/deviceclass/strategy.go b/pkg/registry/resource/deviceclass/strategy.go index c42e29480d0..b14281bc2b8 100644 --- a/pkg/registry/resource/deviceclass/strategy.go +++ b/pkg/registry/resource/deviceclass/strategy.go @@ -23,11 +23,9 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/apiserver/pkg/storage/names" - utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/resource" "k8s.io/kubernetes/pkg/apis/resource/validation" - "k8s.io/kubernetes/pkg/features" ) // deviceClassStrategy implements behavior for DeviceClass objects @@ -89,21 +87,6 @@ func (deviceClassStrategy) AllowUnconditionalUpdate() bool { return true } -// dropDisabledFields removes fields which are covered by the optional DRAControlPlaneController feature gate. +// dropDisabledFields removes fields which are covered by a feature gate. func dropDisabledFields(newClass, oldClass *resource.DeviceClass) { - if utilfeature.DefaultFeatureGate.Enabled(features.DRAControlPlaneController) { - // No need to drop anything. - return - } - - if oldClass == nil { - // Always drop on create. 
- newClass.Spec.SuitableNodes = nil - return - } - - // Drop on update only if not already set. - if oldClass.Spec.SuitableNodes == nil { - newClass.Spec.SuitableNodes = nil - } } diff --git a/pkg/registry/resource/deviceclass/strategy_test.go b/pkg/registry/resource/deviceclass/strategy_test.go index 77fa7752e48..b19cf835867 100644 --- a/pkg/registry/resource/deviceclass/strategy_test.go +++ b/pkg/registry/resource/deviceclass/strategy_test.go @@ -23,11 +23,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" - utilfeature "k8s.io/apiserver/pkg/util/feature" - featuregatetesting "k8s.io/component-base/featuregate/testing" - "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/resource" - "k8s.io/kubernetes/pkg/features" ) var obj = &resource.DeviceClass{ @@ -37,23 +33,6 @@ var obj = &resource.DeviceClass{ }, } -var objWithGatedFields = &resource.DeviceClass{ - ObjectMeta: metav1.ObjectMeta{ - Name: "valid-class", - Generation: 1, - }, - Spec: resource.DeviceClassSpec{ - SuitableNodes: &core.NodeSelector{ - NodeSelectorTerms: []core.NodeSelectorTerm{{ - MatchExpressions: []core.NodeSelectorRequirement{{ - Key: "foo", - Operator: core.NodeSelectorOpExists, - }}, - }}, - }, - }, -} - func TestStrategy(t *testing.T) { if Strategy.NamespaceScoped() { t.Errorf("DeviceClass must not be namespace scoped") @@ -67,10 +46,9 @@ func TestStrategyCreate(t *testing.T) { ctx := genericapirequest.NewDefaultContext() testcases := map[string]struct { - obj *resource.DeviceClass - controlPlaneController bool - expectValidationError bool - expectObj *resource.DeviceClass + obj *resource.DeviceClass + expectValidationError bool + expectObj *resource.DeviceClass }{ "simple": { obj: obj, @@ -84,22 +62,10 @@ func TestStrategyCreate(t *testing.T) { }(), expectValidationError: true, }, - "drop-fields": { - obj: objWithGatedFields, - controlPlaneController: false, - expectObj: obj, - }, - "keep-fields": { - obj: objWithGatedFields, - controlPlaneController: true, - expectObj: objWithGatedFields, - }, } for name, tc := range testcases { t.Run(name, func(t *testing.T) { - featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DRAControlPlaneController, tc.controlPlaneController) - obj := tc.obj.DeepCopy() Strategy.PrepareForCreate(ctx, obj) if errs := Strategy.Validate(ctx, obj); len(errs) != 0 { @@ -123,11 +89,10 @@ func TestStrategyUpdate(t *testing.T) { ctx := genericapirequest.NewDefaultContext() testcases := map[string]struct { - oldObj *resource.DeviceClass - newObj *resource.DeviceClass - controlPlaneController bool - expectValidationError bool - expectObj *resource.DeviceClass + oldObj *resource.DeviceClass + newObj *resource.DeviceClass + expectValidationError bool + expectObj *resource.DeviceClass }{ "no-changes-okay": { oldObj: obj, @@ -143,34 +108,10 @@ func TestStrategyUpdate(t *testing.T) { }(), expectValidationError: true, }, - "drop-fields": { - oldObj: obj, - newObj: objWithGatedFields, - controlPlaneController: false, - expectObj: obj, - }, - "keep-fields": { - oldObj: obj, - newObj: objWithGatedFields, - controlPlaneController: true, - expectObj: func() *resource.DeviceClass { - obj := objWithGatedFields.DeepCopy() - // Spec changes -> generation gets bumped. 
- obj.Generation++ - return obj - }(), - }, - "keep-existing-fields": { - oldObj: objWithGatedFields, - newObj: objWithGatedFields, - controlPlaneController: false, - expectObj: objWithGatedFields, - }, } for name, tc := range testcases { t.Run(name, func(t *testing.T) { - featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DRAControlPlaneController, tc.controlPlaneController) oldObj := tc.oldObj.DeepCopy() newObj := tc.newObj.DeepCopy() newObj.ResourceVersion = "4" diff --git a/pkg/registry/resource/podschedulingcontext/storage/storage.go b/pkg/registry/resource/podschedulingcontext/storage/storage.go deleted file mode 100644 index 9dab4ef6e66..00000000000 --- a/pkg/registry/resource/podschedulingcontext/storage/storage.go +++ /dev/null @@ -1,101 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package storage - -import ( - "context" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apiserver/pkg/registry/generic" - genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" - "k8s.io/apiserver/pkg/registry/rest" - "k8s.io/kubernetes/pkg/apis/resource" - "k8s.io/kubernetes/pkg/printers" - printersinternal "k8s.io/kubernetes/pkg/printers/internalversion" - printerstorage "k8s.io/kubernetes/pkg/printers/storage" - "k8s.io/kubernetes/pkg/registry/resource/podschedulingcontext" - "sigs.k8s.io/structured-merge-diff/v4/fieldpath" -) - -// REST implements a RESTStorage for PodSchedulingContext. -type REST struct { - *genericregistry.Store -} - -// NewREST returns a RESTStorage object that will work against PodSchedulingContext. -func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST, error) { - store := &genericregistry.Store{ - NewFunc: func() runtime.Object { return &resource.PodSchedulingContext{} }, - NewListFunc: func() runtime.Object { return &resource.PodSchedulingContextList{} }, - PredicateFunc: podschedulingcontext.Match, - DefaultQualifiedResource: resource.Resource("podschedulingcontexts"), - SingularQualifiedResource: resource.Resource("podschedulingcontext"), - - CreateStrategy: podschedulingcontext.Strategy, - UpdateStrategy: podschedulingcontext.Strategy, - DeleteStrategy: podschedulingcontext.Strategy, - ReturnDeletedObject: true, - ResetFieldsStrategy: podschedulingcontext.Strategy, - - TableConvertor: printerstorage.TableConvertor{TableGenerator: printers.NewTableGenerator().With(printersinternal.AddHandlers)}, - } - options := &generic.StoreOptions{RESTOptions: optsGetter, AttrFunc: podschedulingcontext.GetAttrs} - if err := store.CompleteWithOptions(options); err != nil { - return nil, nil, err - } - - statusStore := *store - statusStore.UpdateStrategy = podschedulingcontext.StatusStrategy - statusStore.ResetFieldsStrategy = podschedulingcontext.StatusStrategy - - rest := &REST{store} - - return rest, &StatusREST{store: &statusStore}, nil -} - -// StatusREST implements the REST endpoint for changing the status of a PodSchedulingContext. 
-type StatusREST struct { - store *genericregistry.Store -} - -// New creates a new PodSchedulingContext object. -func (r *StatusREST) New() runtime.Object { - return &resource.PodSchedulingContext{} -} - -func (r *StatusREST) Destroy() { - // Given that underlying store is shared with REST, - // we don't destroy it here explicitly. -} - -// Get retrieves the object from the storage. It is required to support Patch. -func (r *StatusREST) Get(ctx context.Context, name string, options *metav1.GetOptions) (runtime.Object, error) { - return r.store.Get(ctx, name, options) -} - -// Update alters the status subset of an object. -func (r *StatusREST) Update(ctx context.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc, forceAllowCreate bool, options *metav1.UpdateOptions) (runtime.Object, bool, error) { - // We are explicitly setting forceAllowCreate to false in the call to the underlying storage because - // subresources should never allow create on update. - return r.store.Update(ctx, name, objInfo, createValidation, updateValidation, false, options) -} - -// GetResetFields implements rest.ResetFieldsStrategy -func (r *StatusREST) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set { - return r.store.GetResetFields() -} diff --git a/pkg/registry/resource/podschedulingcontext/storage/storage_test.go b/pkg/registry/resource/podschedulingcontext/storage/storage_test.go deleted file mode 100644 index 98f1840aef0..00000000000 --- a/pkg/registry/resource/podschedulingcontext/storage/storage_test.go +++ /dev/null @@ -1,184 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package storage - -import ( - "testing" - - "github.com/google/go-cmp/cmp" - apiequality "k8s.io/apimachinery/pkg/api/equality" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - genericapirequest "k8s.io/apiserver/pkg/endpoints/request" - "k8s.io/apiserver/pkg/registry/generic" - genericregistrytest "k8s.io/apiserver/pkg/registry/generic/testing" - "k8s.io/apiserver/pkg/registry/rest" - etcd3testing "k8s.io/apiserver/pkg/storage/etcd3/testing" - "k8s.io/kubernetes/pkg/apis/resource" - _ "k8s.io/kubernetes/pkg/apis/resource/install" - "k8s.io/kubernetes/pkg/registry/registrytest" -) - -func newStorage(t *testing.T) (*REST, *StatusREST, *etcd3testing.EtcdTestServer) { - etcdStorage, server := registrytest.NewEtcdStorage(t, resource.GroupName) - restOptions := generic.RESTOptions{ - StorageConfig: etcdStorage, - Decorator: generic.UndecoratedStorage, - DeleteCollectionWorkers: 1, - ResourcePrefix: "podschedulingcontexts", - } - podSchedulingStorage, statusStorage, err := NewREST(restOptions) - if err != nil { - t.Fatalf("unexpected error from REST storage: %v", err) - } - return podSchedulingStorage, statusStorage, server -} - -func validNewPodSchedulingContexts(name, ns string) *resource.PodSchedulingContext { - schedulingCtx := &resource.PodSchedulingContext{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: ns, - }, - Spec: resource.PodSchedulingContextSpec{ - SelectedNode: "worker", - }, - Status: resource.PodSchedulingContextStatus{}, - } - return schedulingCtx -} - -func TestCreate(t *testing.T) { - storage, _, server := newStorage(t) - defer server.Terminate(t) - defer storage.Store.DestroyFunc() - test := genericregistrytest.New(t, storage.Store) - schedulingCtx := validNewPodSchedulingContexts("foo", metav1.NamespaceDefault) - schedulingCtx.ObjectMeta = metav1.ObjectMeta{} - test.TestCreate( - // valid - schedulingCtx, - // invalid - &resource.PodSchedulingContext{ - ObjectMeta: metav1.ObjectMeta{Name: "*BadName!"}, - }, - ) -} - -func TestUpdate(t *testing.T) { - storage, _, server := newStorage(t) - defer server.Terminate(t) - defer storage.Store.DestroyFunc() - test := genericregistrytest.New(t, storage.Store) - test.TestUpdate( - // valid - validNewPodSchedulingContexts("foo", metav1.NamespaceDefault), - // updateFunc - func(obj runtime.Object) runtime.Object { - object := obj.(*resource.PodSchedulingContext) - if object.Labels == nil { - object.Labels = map[string]string{} - } - object.Labels["foo"] = "bar" - return object - }, - ) -} - -func TestDelete(t *testing.T) { - storage, _, server := newStorage(t) - defer server.Terminate(t) - defer storage.Store.DestroyFunc() - test := genericregistrytest.New(t, storage.Store).ReturnDeletedObject() - test.TestDelete(validNewPodSchedulingContexts("foo", metav1.NamespaceDefault)) -} - -func TestGet(t *testing.T) { - storage, _, server := newStorage(t) - defer server.Terminate(t) - defer storage.Store.DestroyFunc() - test := genericregistrytest.New(t, storage.Store) - test.TestGet(validNewPodSchedulingContexts("foo", metav1.NamespaceDefault)) -} - -func TestList(t *testing.T) { - storage, _, server := newStorage(t) - defer server.Terminate(t) - defer storage.Store.DestroyFunc() - test := genericregistrytest.New(t, storage.Store) - test.TestList(validNewPodSchedulingContexts("foo", metav1.NamespaceDefault)) -} - -func TestWatch(t *testing.T) { - storage, _, server := newStorage(t) - defer server.Terminate(t) - defer 
storage.Store.DestroyFunc() - test := genericregistrytest.New(t, storage.Store) - test.TestWatch( - validNewPodSchedulingContexts("foo", metav1.NamespaceDefault), - // matching labels - []labels.Set{}, - // not matching labels - []labels.Set{ - {"foo": "bar"}, - }, - // matching fields - []fields.Set{ - {"metadata.name": "foo"}, - }, - // not matching fields - []fields.Set{ - {"metadata.name": "bar"}, - }, - ) -} - -func TestUpdateStatus(t *testing.T) { - storage, statusStorage, server := newStorage(t) - defer server.Terminate(t) - defer storage.Store.DestroyFunc() - ctx := genericapirequest.NewDefaultContext() - - key, _ := storage.KeyFunc(ctx, "foo") - schedulingStart := validNewPodSchedulingContexts("foo", metav1.NamespaceDefault) - err := storage.Storage.Create(ctx, key, schedulingStart, nil, 0, false) - if err != nil { - t.Fatalf("Unexpected error: %v", err) - } - - schedulingCtx := schedulingStart.DeepCopy() - schedulingCtx.Status.ResourceClaims = append(schedulingCtx.Status.ResourceClaims, - resource.ResourceClaimSchedulingStatus{ - Name: "my-claim", - }, - ) - _, _, err = statusStorage.Update(ctx, schedulingCtx.Name, rest.DefaultUpdatedObjectInfo(schedulingCtx), rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc, false, &metav1.UpdateOptions{}) - if err != nil { - t.Fatalf("Unexpected error: %v", err) - } - obj, err := storage.Get(ctx, "foo", &metav1.GetOptions{}) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - schedulingOut := obj.(*resource.PodSchedulingContext) - // only compare relevant changes b/c of difference in metadata - if !apiequality.Semantic.DeepEqual(schedulingCtx.Status, schedulingOut.Status) { - t.Errorf("unexpected object: %s", cmp.Diff(schedulingCtx.Status, schedulingOut.Status)) - } -} diff --git a/pkg/registry/resource/podschedulingcontext/strategy.go b/pkg/registry/resource/podschedulingcontext/strategy.go deleted file mode 100644 index 5e26cafb47b..00000000000 --- a/pkg/registry/resource/podschedulingcontext/strategy.go +++ /dev/null @@ -1,165 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package podschedulingcontext - -import ( - "context" - "errors" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/apiserver/pkg/registry/generic" - "k8s.io/apiserver/pkg/storage" - "k8s.io/apiserver/pkg/storage/names" - "k8s.io/kubernetes/pkg/api/legacyscheme" - "k8s.io/kubernetes/pkg/apis/resource" - "k8s.io/kubernetes/pkg/apis/resource/validation" - "sigs.k8s.io/structured-merge-diff/v4/fieldpath" -) - -// podSchedulingStrategy implements behavior for PodSchedulingContext objects -type podSchedulingStrategy struct { - runtime.ObjectTyper - names.NameGenerator -} - -// Strategy is the default logic that applies when creating and updating -// ResourceClaim objects via the REST API. 
-var Strategy = podSchedulingStrategy{legacyscheme.Scheme, names.SimpleNameGenerator} - -func (podSchedulingStrategy) NamespaceScoped() bool { - return true -} - -// GetResetFields returns the set of fields that get reset by the strategy and -// should not be modified by the user. For a new PodSchedulingContext that is the -// status. -func (podSchedulingStrategy) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set { - fields := map[fieldpath.APIVersion]*fieldpath.Set{ - "resource.k8s.io/v1alpha3": fieldpath.NewSet( - fieldpath.MakePathOrDie("status"), - ), - } - - return fields -} - -func (podSchedulingStrategy) PrepareForCreate(ctx context.Context, obj runtime.Object) { - scheduling := obj.(*resource.PodSchedulingContext) - // Status must not be set by user on create. - scheduling.Status = resource.PodSchedulingContextStatus{} -} - -func (podSchedulingStrategy) Validate(ctx context.Context, obj runtime.Object) field.ErrorList { - scheduling := obj.(*resource.PodSchedulingContext) - return validation.ValidatePodSchedulingContexts(scheduling) -} - -func (podSchedulingStrategy) WarningsOnCreate(ctx context.Context, obj runtime.Object) []string { - return nil -} - -func (podSchedulingStrategy) Canonicalize(obj runtime.Object) { -} - -func (podSchedulingStrategy) AllowCreateOnUpdate() bool { - return false -} - -func (podSchedulingStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) { - newScheduling := obj.(*resource.PodSchedulingContext) - oldScheduling := old.(*resource.PodSchedulingContext) - newScheduling.Status = oldScheduling.Status -} - -func (podSchedulingStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList { - newScheduling := obj.(*resource.PodSchedulingContext) - oldScheduling := old.(*resource.PodSchedulingContext) - errorList := validation.ValidatePodSchedulingContexts(newScheduling) - return append(errorList, validation.ValidatePodSchedulingContextUpdate(newScheduling, oldScheduling)...) -} - -func (podSchedulingStrategy) WarningsOnUpdate(ctx context.Context, obj, old runtime.Object) []string { - return nil -} - -func (podSchedulingStrategy) AllowUnconditionalUpdate() bool { - return true -} - -type podSchedulingStatusStrategy struct { - podSchedulingStrategy -} - -var StatusStrategy = podSchedulingStatusStrategy{Strategy} - -// GetResetFields returns the set of fields that get reset by the strategy and -// should not be modified by the user. For a status update that is the spec. -func (podSchedulingStatusStrategy) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set { - fields := map[fieldpath.APIVersion]*fieldpath.Set{ - "resource.k8s.io/v1alpha3": fieldpath.NewSet( - fieldpath.MakePathOrDie("spec"), - ), - } - - return fields -} - -func (podSchedulingStatusStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) { - newScheduling := obj.(*resource.PodSchedulingContext) - oldScheduling := old.(*resource.PodSchedulingContext) - newScheduling.Spec = oldScheduling.Spec - metav1.ResetObjectMetaForStatus(&newScheduling.ObjectMeta, &oldScheduling.ObjectMeta) -} - -func (podSchedulingStatusStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList { - newScheduling := obj.(*resource.PodSchedulingContext) - oldScheduling := old.(*resource.PodSchedulingContext) - return validation.ValidatePodSchedulingContextStatusUpdate(newScheduling, oldScheduling) -} - -// WarningsOnUpdate returns warnings for the given update. 
-func (podSchedulingStatusStrategy) WarningsOnUpdate(ctx context.Context, obj, old runtime.Object) []string { - return nil -} - -// Match returns a generic matcher for a given label and field selector. -func Match(label labels.Selector, field fields.Selector) storage.SelectionPredicate { - return storage.SelectionPredicate{ - Label: label, - Field: field, - GetAttrs: GetAttrs, - } -} - -// GetAttrs returns labels and fields of a given object for filtering purposes. -func GetAttrs(obj runtime.Object) (labels.Set, fields.Set, error) { - scheduling, ok := obj.(*resource.PodSchedulingContext) - if !ok { - return nil, nil, errors.New("not a PodSchedulingContext") - } - return labels.Set(scheduling.Labels), toSelectableFields(scheduling), nil -} - -// toSelectableFields returns a field set that represents the object -func toSelectableFields(scheduling *resource.PodSchedulingContext) fields.Set { - fields := generic.ObjectMetaFieldsSet(&scheduling.ObjectMeta, true) - return fields -} diff --git a/pkg/registry/resource/podschedulingcontext/strategy_test.go b/pkg/registry/resource/podschedulingcontext/strategy_test.go deleted file mode 100644 index 37d3ceefaf0..00000000000 --- a/pkg/registry/resource/podschedulingcontext/strategy_test.go +++ /dev/null @@ -1,84 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package podschedulingcontext - -import ( - "testing" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - genericapirequest "k8s.io/apiserver/pkg/endpoints/request" - "k8s.io/kubernetes/pkg/apis/resource" -) - -var schedulingCtx = &resource.PodSchedulingContext{ - ObjectMeta: metav1.ObjectMeta{ - Name: "valid-pod", - Namespace: "default", - }, - Spec: resource.PodSchedulingContextSpec{ - SelectedNode: "worker", - }, -} - -func TestPodSchedulingStrategy(t *testing.T) { - if !Strategy.NamespaceScoped() { - t.Errorf("PodSchedulingContext must be namespace scoped") - } - if Strategy.AllowCreateOnUpdate() { - t.Errorf("PodSchedulingContext should not allow create on update") - } -} - -func TestPodSchedulingStrategyCreate(t *testing.T) { - ctx := genericapirequest.NewDefaultContext() - schedulingCtx := schedulingCtx.DeepCopy() - - Strategy.PrepareForCreate(ctx, schedulingCtx) - errs := Strategy.Validate(ctx, schedulingCtx) - if len(errs) != 0 { - t.Errorf("unexpected error validating for create %v", errs) - } -} - -func TestPodSchedulingStrategyUpdate(t *testing.T) { - t.Run("no-changes-okay", func(t *testing.T) { - ctx := genericapirequest.NewDefaultContext() - schedulingCtx := schedulingCtx.DeepCopy() - newSchedulingCtx := schedulingCtx.DeepCopy() - newSchedulingCtx.ResourceVersion = "4" - - Strategy.PrepareForUpdate(ctx, newSchedulingCtx, schedulingCtx) - errs := Strategy.ValidateUpdate(ctx, newSchedulingCtx, schedulingCtx) - if len(errs) != 0 { - t.Errorf("unexpected validation errors: %v", errs) - } - }) - - t.Run("name-change-not-allowed", func(t *testing.T) { - ctx := genericapirequest.NewDefaultContext() - schedulingCtx := schedulingCtx.DeepCopy() - newSchedulingCtx := schedulingCtx.DeepCopy() - newSchedulingCtx.Name = "valid-claim-2" - newSchedulingCtx.ResourceVersion = "4" - - Strategy.PrepareForUpdate(ctx, newSchedulingCtx, schedulingCtx) - errs := Strategy.ValidateUpdate(ctx, newSchedulingCtx, schedulingCtx) - if len(errs) == 0 { - t.Errorf("expected a validation error") - } - }) -} diff --git a/pkg/registry/resource/resourceclaim/strategy.go b/pkg/registry/resource/resourceclaim/strategy.go index 2e0f1e7dc3a..507446f9a07 100644 --- a/pkg/registry/resource/resourceclaim/strategy.go +++ b/pkg/registry/resource/resourceclaim/strategy.go @@ -28,11 +28,9 @@ import ( "k8s.io/apiserver/pkg/registry/generic" "k8s.io/apiserver/pkg/storage" "k8s.io/apiserver/pkg/storage/names" - utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/resource" "k8s.io/kubernetes/pkg/apis/resource/validation" - "k8s.io/kubernetes/pkg/features" "sigs.k8s.io/structured-merge-diff/v4/fieldpath" ) @@ -172,35 +170,6 @@ func toSelectableFields(claim *resource.ResourceClaim) fields.Set { return fields } -// dropDisabledFields removes fields which are covered by the optional DRAControlPlaneController feature gate. +// dropDisabledFields removes fields which are covered by a feature gate. func dropDisabledFields(newClaim, oldClaim *resource.ResourceClaim) { - if utilfeature.DefaultFeatureGate.Enabled(features.DRAControlPlaneController) { - // No need to drop anything. - return - } - - if oldClaim == nil { - // Always drop on create. There's no status yet, so nothing to do there. - newClaim.Spec.Controller = "" - return - } - - // Drop on (status) update only if not already set. - if oldClaim.Spec.Controller == "" { - newClaim.Spec.Controller = "" - } - // If the claim is handled by a control plane controller, allow - // setting it also in the status. 
Stripping that field would be bad. - if oldClaim.Spec.Controller == "" && - newClaim.Status.Allocation != nil && - oldClaim.Status.Allocation == nil && - (oldClaim.Status.Allocation == nil || oldClaim.Status.Allocation.Controller == "") { - newClaim.Status.Allocation.Controller = "" - } - // If there is an existing allocation which used a control plane controller, then - // allow requesting its deallocation. - if !oldClaim.Status.DeallocationRequested && - (newClaim.Status.Allocation == nil || newClaim.Status.Allocation.Controller == "") { - newClaim.Status.DeallocationRequested = false - } } diff --git a/pkg/registry/resource/resourceclaim/strategy_test.go b/pkg/registry/resource/resourceclaim/strategy_test.go index 29651180a85..fb7b3721368 100644 --- a/pkg/registry/resource/resourceclaim/strategy_test.go +++ b/pkg/registry/resource/resourceclaim/strategy_test.go @@ -23,10 +23,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" genericapirequest "k8s.io/apiserver/pkg/endpoints/request" - utilfeature "k8s.io/apiserver/pkg/util/feature" - featuregatetesting "k8s.io/component-base/featuregate/testing" "k8s.io/kubernetes/pkg/apis/resource" - "k8s.io/kubernetes/pkg/features" ) var obj = &resource.ResourceClaim{ @@ -36,42 +33,6 @@ var obj = &resource.ResourceClaim{ }, } -var objWithStatus = &resource.ResourceClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: "valid-claim", - Namespace: "default", - }, - Status: resource.ResourceClaimStatus{ - Allocation: &resource.AllocationResult{}, - }, -} - -var objWithGatedFields = &resource.ResourceClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: "valid-claim", - Namespace: "default", - }, - Spec: resource.ResourceClaimSpec{ - Controller: "dra.example.com", - }, -} - -var objWithGatedStatusFields = &resource.ResourceClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: "valid-claim", - Namespace: "default", - }, - Spec: resource.ResourceClaimSpec{ - Controller: "dra.example.com", - }, - Status: resource.ResourceClaimStatus{ - Allocation: &resource.AllocationResult{ - Controller: "dra.example.com", - }, - DeallocationRequested: true, - }, -} - func TestStrategy(t *testing.T) { if !Strategy.NamespaceScoped() { t.Errorf("ResourceClaim must be namespace scoped") @@ -85,10 +46,9 @@ func TestStrategyCreate(t *testing.T) { ctx := genericapirequest.NewDefaultContext() testcases := map[string]struct { - obj *resource.ResourceClaim - controlPlaneController bool - expectValidationError bool - expectObj *resource.ResourceClaim + obj *resource.ResourceClaim + expectValidationError bool + expectObj *resource.ResourceClaim }{ "simple": { obj: obj, @@ -102,22 +62,10 @@ func TestStrategyCreate(t *testing.T) { }(), expectValidationError: true, }, - "drop-fields": { - obj: objWithGatedFields, - controlPlaneController: false, - expectObj: obj, - }, - "keep-fields": { - obj: objWithGatedFields, - controlPlaneController: true, - expectObj: objWithGatedFields, - }, } for name, tc := range testcases { t.Run(name, func(t *testing.T) { - featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DRAControlPlaneController, tc.controlPlaneController) - obj := tc.obj.DeepCopy() Strategy.PrepareForCreate(ctx, obj) if errs := Strategy.Validate(ctx, obj); len(errs) != 0 { @@ -141,11 +89,10 @@ func TestStrategyUpdate(t *testing.T) { ctx := genericapirequest.NewDefaultContext() testcases := map[string]struct { - oldObj *resource.ResourceClaim - newObj *resource.ResourceClaim - controlPlaneController bool - expectValidationError bool - expectObj 
*resource.ResourceClaim + oldObj *resource.ResourceClaim + newObj *resource.ResourceClaim + expectValidationError bool + expectObj *resource.ResourceClaim }{ "no-changes-okay": { oldObj: obj, @@ -161,29 +108,10 @@ func TestStrategyUpdate(t *testing.T) { }(), expectValidationError: true, }, - "drop-fields": { - oldObj: obj, - newObj: objWithGatedFields, - controlPlaneController: false, - expectObj: obj, - }, - "keep-fields": { - oldObj: obj, - newObj: objWithGatedFields, - controlPlaneController: true, - expectValidationError: true, // Spec is immutable. - }, - "keep-existing-fields": { - oldObj: objWithGatedFields, - newObj: objWithGatedFields, - controlPlaneController: false, - expectObj: objWithGatedFields, - }, } for name, tc := range testcases { t.Run(name, func(t *testing.T) { - featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DRAControlPlaneController, tc.controlPlaneController) oldObj := tc.oldObj.DeepCopy() newObj := tc.newObj.DeepCopy() newObj.ResourceVersion = "4" @@ -213,11 +141,10 @@ func TestStatusStrategyUpdate(t *testing.T) { ctx := genericapirequest.NewDefaultContext() testcases := map[string]struct { - oldObj *resource.ResourceClaim - newObj *resource.ResourceClaim - controlPlaneController bool - expectValidationError bool - expectObj *resource.ResourceClaim + oldObj *resource.ResourceClaim + newObj *resource.ResourceClaim + expectValidationError bool + expectObj *resource.ResourceClaim }{ "no-changes-okay": { oldObj: obj, @@ -245,51 +172,10 @@ func TestStatusStrategyUpdate(t *testing.T) { }(), expectObj: obj, }, - "drop-fields": { - oldObj: obj, - newObj: objWithGatedStatusFields, - controlPlaneController: false, - expectObj: objWithStatus, - }, - "keep-fields": { - oldObj: obj, - newObj: objWithGatedStatusFields, - controlPlaneController: true, - expectObj: func() *resource.ResourceClaim { - expectObj := objWithGatedStatusFields.DeepCopy() - // Spec remains unchanged. - expectObj.Spec = obj.Spec - return expectObj - }(), - }, - "keep-fields-because-of-spec": { - oldObj: objWithGatedFields, - newObj: objWithGatedStatusFields, - controlPlaneController: false, - expectObj: objWithGatedStatusFields, - }, - // Normally a claim without a controller in the spec shouldn't - // have one in the status either, but it's not invalid and thus - // let's test this. 
- "keep-fields-because-of-status": { - oldObj: func() *resource.ResourceClaim { - oldObj := objWithGatedStatusFields.DeepCopy() - oldObj.Spec.Controller = "" - return oldObj - }(), - newObj: objWithGatedStatusFields, - controlPlaneController: false, - expectObj: func() *resource.ResourceClaim { - oldObj := objWithGatedStatusFields.DeepCopy() - oldObj.Spec.Controller = "" - return oldObj - }(), - }, } for name, tc := range testcases { t.Run(name, func(t *testing.T) { - featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DRAControlPlaneController, tc.controlPlaneController) oldObj := tc.oldObj.DeepCopy() newObj := tc.newObj.DeepCopy() newObj.ResourceVersion = "4" diff --git a/pkg/registry/resource/rest/storage_resource.go b/pkg/registry/resource/rest/storage_resource.go index 5d5bab1d18a..a829abeff79 100644 --- a/pkg/registry/resource/rest/storage_resource.go +++ b/pkg/registry/resource/rest/storage_resource.go @@ -25,12 +25,15 @@ import ( "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/resource" deviceclassstore "k8s.io/kubernetes/pkg/registry/resource/deviceclass/storage" - podschedulingcontextsstore "k8s.io/kubernetes/pkg/registry/resource/podschedulingcontext/storage" resourceclaimstore "k8s.io/kubernetes/pkg/registry/resource/resourceclaim/storage" resourceclaimtemplatestore "k8s.io/kubernetes/pkg/registry/resource/resourceclaimtemplate/storage" resourceslicestore "k8s.io/kubernetes/pkg/registry/resource/resourceslice/storage" ) +// The REST storage registers resource kinds also without the corresponding +// feature gate because it might be useful to provide access to these resources +// while their feature is off to allow cleaning them up. + type RESTStorageProvider struct{} func (p RESTStorageProvider) NewRESTStorage(apiResourceConfigSource serverstorage.APIResourceConfigSource, restOptionsGetter generic.RESTOptionsGetter) (genericapiserver.APIGroupInfo, error) { @@ -75,19 +78,6 @@ func (p RESTStorageProvider) v1alpha3Storage(apiResourceConfigSource serverstora storage[resource] = resourceClaimTemplateStorage } - // Registered also without the corresponding DRAControlPlaneController feature gate for the - // same reasons as registering the other types without a feature gate check: it might be - // useful to provide access to these resources while their feature is off to allow cleaning - // them up. 
- if resource := "podschedulingcontexts"; apiResourceConfigSource.ResourceEnabled(resourcev1alpha3.SchemeGroupVersion.WithResource(resource)) { - podSchedulingStorage, podSchedulingStatusStorage, err := podschedulingcontextsstore.NewREST(restOptionsGetter) - if err != nil { - return nil, err - } - storage[resource] = podSchedulingStorage - storage[resource+"/status"] = podSchedulingStatusStorage - } - if resource := "resourceslices"; apiResourceConfigSource.ResourceEnabled(resourcev1alpha3.SchemeGroupVersion.WithResource(resource)) { resourceSliceStorage, err := resourceslicestore.NewREST(restOptionsGetter) if err != nil { diff --git a/pkg/scheduler/eventhandlers.go b/pkg/scheduler/eventhandlers.go index f385bd8bd2c..a00adee5625 100644 --- a/pkg/scheduler/eventhandlers.go +++ b/pkg/scheduler/eventhandlers.go @@ -525,15 +525,6 @@ func addAllEventHandlers( return err } handlers = append(handlers, handlerRegistration) - case framework.PodSchedulingContext: - if utilfeature.DefaultFeatureGate.Enabled(features.DRAControlPlaneController) { - if handlerRegistration, err = informerFactory.Resource().V1alpha3().PodSchedulingContexts().Informer().AddEventHandler( - buildEvtResHandler(at, framework.PodSchedulingContext, "PodSchedulingContext"), - ); err != nil { - return err - } - handlers = append(handlers, handlerRegistration) - } case framework.ResourceClaim: if utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) { handlerRegistration = resourceClaimCache.AddEventHandler( diff --git a/pkg/scheduler/eventhandlers_test.go b/pkg/scheduler/eventhandlers_test.go index 4110ca5d33c..89bd7e331b7 100644 --- a/pkg/scheduler/eventhandlers_test.go +++ b/pkg/scheduler/eventhandlers_test.go @@ -244,7 +244,6 @@ func TestAddAllEventHandlers(t *testing.T) { name string gvkMap map[framework.GVK]framework.ActionType enableDRA bool - enableClassicDRA bool expectStaticInformers map[reflect.Type]bool expectDynamicInformers map[schema.GroupVersionResource]bool }{ @@ -261,10 +260,9 @@ func TestAddAllEventHandlers(t *testing.T) { { name: "DRA events disabled", gvkMap: map[framework.GVK]framework.ActionType{ - framework.PodSchedulingContext: framework.Add, - framework.ResourceClaim: framework.Add, - framework.ResourceSlice: framework.Add, - framework.DeviceClass: framework.Add, + framework.ResourceClaim: framework.Add, + framework.ResourceSlice: framework.Add, + framework.DeviceClass: framework.Add, }, expectStaticInformers: map[reflect.Type]bool{ reflect.TypeOf(&v1.Pod{}): true, @@ -274,12 +272,11 @@ func TestAddAllEventHandlers(t *testing.T) { expectDynamicInformers: map[schema.GroupVersionResource]bool{}, }, { - name: "some DRA events enabled", + name: "all DRA events enabled", gvkMap: map[framework.GVK]framework.ActionType{ - framework.PodSchedulingContext: framework.Add, - framework.ResourceClaim: framework.Add, - framework.ResourceSlice: framework.Add, - framework.DeviceClass: framework.Add, + framework.ResourceClaim: framework.Add, + framework.ResourceSlice: framework.Add, + framework.DeviceClass: framework.Add, }, enableDRA: true, expectStaticInformers: map[reflect.Type]bool{ @@ -292,27 +289,6 @@ func TestAddAllEventHandlers(t *testing.T) { }, expectDynamicInformers: map[schema.GroupVersionResource]bool{}, }, - { - name: "all DRA events enabled", - gvkMap: map[framework.GVK]framework.ActionType{ - framework.PodSchedulingContext: framework.Add, - framework.ResourceClaim: framework.Add, - framework.ResourceSlice: framework.Add, - framework.DeviceClass: framework.Add, - }, - enableDRA: true, - 
enableClassicDRA: true, - expectStaticInformers: map[reflect.Type]bool{ - reflect.TypeOf(&v1.Pod{}): true, - reflect.TypeOf(&v1.Node{}): true, - reflect.TypeOf(&v1.Namespace{}): true, - reflect.TypeOf(&resourceapi.PodSchedulingContext{}): true, - reflect.TypeOf(&resourceapi.ResourceClaim{}): true, - reflect.TypeOf(&resourceapi.ResourceSlice{}): true, - reflect.TypeOf(&resourceapi.DeviceClass{}): true, - }, - expectDynamicInformers: map[schema.GroupVersionResource]bool{}, - }, { name: "add GVKs handlers defined in framework dynamically", gvkMap: map[framework.GVK]framework.ActionType{ @@ -372,7 +348,7 @@ func TestAddAllEventHandlers(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DynamicResourceAllocation, tt.enableDRA) - featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DRAControlPlaneController, tt.enableClassicDRA) + logger, ctx := ktesting.NewTestContext(t) ctx, cancel := context.WithCancel(ctx) defer cancel() diff --git a/pkg/scheduler/framework/plugins/dynamicresources/dynamicresources.go b/pkg/scheduler/framework/plugins/dynamicresources/dynamicresources.go index 2bee649a323..27ea690296e 100644 --- a/pkg/scheduler/framework/plugins/dynamicresources/dynamicresources.go +++ b/pkg/scheduler/framework/plugins/dynamicresources/dynamicresources.go @@ -21,7 +21,6 @@ import ( "errors" "fmt" "slices" - "sort" "sync" "github.com/google/go-cmp/cmp" @@ -32,10 +31,8 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" - resourceapiapply "k8s.io/client-go/applyconfigurations/resource/v1alpha3" "k8s.io/client-go/kubernetes" resourcelisters "k8s.io/client-go/listers/resource/v1alpha3" "k8s.io/client-go/util/retry" @@ -48,7 +45,6 @@ import ( "k8s.io/kubernetes/pkg/scheduler/framework/plugins/names" schedutil "k8s.io/kubernetes/pkg/scheduler/util" "k8s.io/kubernetes/pkg/scheduler/util/assumecache" - "k8s.io/utils/ptr" ) const ( @@ -62,9 +58,6 @@ const ( // framework.CycleState, in the later phases we don't need to call Write method // to update the value type stateData struct { - // preScored is true if PreScore was invoked. - preScored bool - // A copy of all claims for the Pod (i.e. 1:1 match with // pod.Spec.ResourceClaims), initially with the status from the start // of the scheduling cycle. Each claim instance is read-only because it @@ -74,10 +67,6 @@ type stateData struct { // Empty if the Pod has no claims. claims []*resourceapi.ResourceClaim - // podSchedulingState keeps track of the PodSchedulingContext - // (if one exists) and the changes made to it. - podSchedulingState podSchedulingState - // Allocator handles claims with structured parameters. allocator *structured.Allocator @@ -104,181 +93,22 @@ func (d *stateData) Clone() framework.StateData { } type informationForClaim struct { - // Node selectors based on the claim status (single entry, key is empty) if allocated, - // otherwise the device class AvailableOnNodes selectors (potentially multiple entries, - // key is the device class name). - availableOnNodes map[string]*nodeaffinity.NodeSelector - - // The status of the claim got from the - // schedulingCtx by PreFilter for repeated - // evaluation in Filter. Nil for claim which don't have it. 
- status *resourceapi.ResourceClaimSchedulingStatus - - structuredParameters bool + // Node selector based on the claim status if allocated. + availableOnNodes *nodeaffinity.NodeSelector // Set by Reserved, published by PreBind. allocation *resourceapi.AllocationResult } -type podSchedulingState struct { - // A pointer to the PodSchedulingContext object for the pod, if one exists - // in the API server. - // - // Conceptually, this object belongs into the scheduler framework - // where it might get shared by different plugins. But in practice, - // it is currently only used by dynamic provisioning and thus - // managed entirely here. - schedulingCtx *resourceapi.PodSchedulingContext - - // selectedNode is set if (and only if) a node has been selected. - selectedNode *string - - // potentialNodes is set if (and only if) the potential nodes field - // needs to be updated or set. - potentialNodes *[]string -} - -func (p *podSchedulingState) isDirty() bool { - return p.selectedNode != nil || - p.potentialNodes != nil -} - -// init checks whether there is already a PodSchedulingContext object. -// Must not be called concurrently, -func (p *podSchedulingState) init(ctx context.Context, pod *v1.Pod, podSchedulingContextLister resourcelisters.PodSchedulingContextLister) error { - if podSchedulingContextLister == nil { - return nil - } - schedulingCtx, err := podSchedulingContextLister.PodSchedulingContexts(pod.Namespace).Get(pod.Name) - switch { - case apierrors.IsNotFound(err): - return nil - case err != nil: - return err - default: - // We have an object, but it might be obsolete. - if !metav1.IsControlledBy(schedulingCtx, pod) { - return fmt.Errorf("PodSchedulingContext object with UID %s is not owned by Pod %s/%s", schedulingCtx.UID, pod.Namespace, pod.Name) - } - } - p.schedulingCtx = schedulingCtx - return nil -} - -// publish creates or updates the PodSchedulingContext object, if necessary. -// Must not be called concurrently. -func (p *podSchedulingState) publish(ctx context.Context, pod *v1.Pod, clientset kubernetes.Interface) error { - if !p.isDirty() { - return nil - } - - var err error - logger := klog.FromContext(ctx) - if p.schedulingCtx != nil { - // Update it. - schedulingCtx := p.schedulingCtx.DeepCopy() - if p.selectedNode != nil { - schedulingCtx.Spec.SelectedNode = *p.selectedNode - } - if p.potentialNodes != nil { - schedulingCtx.Spec.PotentialNodes = *p.potentialNodes - } - if loggerV := logger.V(6); loggerV.Enabled() { - // At a high enough log level, dump the entire object. - loggerV.Info("Updating PodSchedulingContext", "podSchedulingCtx", klog.KObj(schedulingCtx), "podSchedulingCtxObject", klog.Format(schedulingCtx)) - } else { - logger.V(5).Info("Updating PodSchedulingContext", "podSchedulingCtx", klog.KObj(schedulingCtx)) - } - _, err = clientset.ResourceV1alpha3().PodSchedulingContexts(schedulingCtx.Namespace).Update(ctx, schedulingCtx, metav1.UpdateOptions{}) - if apierrors.IsConflict(err) { - // We don't use SSA by default for performance reasons - // (https://github.com/kubernetes/kubernetes/issues/113700#issuecomment-1698563918) - // because most of the time an Update doesn't encounter - // a conflict and is faster. - // - // We could return an error here and rely on - // backoff+retry, but scheduling attempts are expensive - // and the backoff delay would cause a (small) - // slowdown. Therefore we fall back to SSA here if needed. - // - // Using SSA instead of Get+Update has the advantage that - // there is no delay for the Get. 
SSA is safe because only - // the scheduler updates these fields. - spec := resourceapiapply.PodSchedulingContextSpec() - spec.SelectedNode = p.selectedNode - if p.potentialNodes != nil { - spec.PotentialNodes = *p.potentialNodes - } else { - // Unchanged. Has to be set because the object that we send - // must represent the "fully specified intent". Not sending - // the list would clear it. - spec.PotentialNodes = p.schedulingCtx.Spec.PotentialNodes - } - schedulingCtxApply := resourceapiapply.PodSchedulingContext(pod.Name, pod.Namespace).WithSpec(spec) - - if loggerV := logger.V(6); loggerV.Enabled() { - // At a high enough log level, dump the entire object. - loggerV.Info("Patching PodSchedulingContext", "podSchedulingCtx", klog.KObj(pod), "podSchedulingCtxApply", klog.Format(schedulingCtxApply)) - } else { - logger.V(5).Info("Patching PodSchedulingContext", "podSchedulingCtx", klog.KObj(pod)) - } - _, err = clientset.ResourceV1alpha3().PodSchedulingContexts(pod.Namespace).Apply(ctx, schedulingCtxApply, metav1.ApplyOptions{FieldManager: "kube-scheduler", Force: true}) - } - - } else { - // Create it. - schedulingCtx := &resourceapi.PodSchedulingContext{ - ObjectMeta: metav1.ObjectMeta{ - Name: pod.Name, - Namespace: pod.Namespace, - OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(pod, schema.GroupVersionKind{Version: "v1", Kind: "Pod"})}, - }, - } - if p.selectedNode != nil { - schedulingCtx.Spec.SelectedNode = *p.selectedNode - } - if p.potentialNodes != nil { - schedulingCtx.Spec.PotentialNodes = *p.potentialNodes - } - if loggerV := logger.V(6); loggerV.Enabled() { - // At a high enough log level, dump the entire object. - loggerV.Info("Creating PodSchedulingContext", "podSchedulingCtx", klog.KObj(schedulingCtx), "podSchedulingCtxObject", klog.Format(schedulingCtx)) - } else { - logger.V(5).Info("Creating PodSchedulingContext", "podSchedulingCtx", klog.KObj(schedulingCtx)) - } - _, err = clientset.ResourceV1alpha3().PodSchedulingContexts(schedulingCtx.Namespace).Create(ctx, schedulingCtx, metav1.CreateOptions{}) - } - if err != nil { - return err - } - p.potentialNodes = nil - p.selectedNode = nil - return nil -} - -func statusForClaim(schedulingCtx *resourceapi.PodSchedulingContext, podClaimName string) *resourceapi.ResourceClaimSchedulingStatus { - if schedulingCtx == nil { - return nil - } - for _, status := range schedulingCtx.Status.ResourceClaims { - if status.Name == podClaimName { - return &status - } - } - return nil -} - // dynamicResources is a plugin that ensures that ResourceClaims are allocated. 
type dynamicResources struct { - enabled bool - enableSchedulingQueueHint bool - controlPlaneControllerEnabled bool + enabled bool + enableSchedulingQueueHint bool - fh framework.Handle - clientset kubernetes.Interface - classLister resourcelisters.DeviceClassLister - podSchedulingContextLister resourcelisters.PodSchedulingContextLister // nil if and only if DRAControlPlaneController is disabled - sliceLister resourcelisters.ResourceSliceLister + fh framework.Handle + clientset kubernetes.Interface + classLister resourcelisters.DeviceClassLister + sliceLister resourcelisters.ResourceSliceLister // claimAssumeCache enables temporarily storing a newer claim object // while the scheduler has allocated it and the corresponding object @@ -344,9 +174,8 @@ func New(ctx context.Context, plArgs runtime.Object, fh framework.Handle, fts fe } pl := &dynamicResources{ - enabled: true, - controlPlaneControllerEnabled: fts.EnableDRAControlPlaneController, - enableSchedulingQueueHint: fts.EnableSchedulingQueueHint, + enabled: true, + enableSchedulingQueueHint: fts.EnableSchedulingQueueHint, fh: fh, clientset: fh.ClientSet(), @@ -354,9 +183,6 @@ func New(ctx context.Context, plArgs runtime.Object, fh framework.Handle, fts fe sliceLister: fh.SharedInformerFactory().Resource().V1alpha3().ResourceSlices().Lister(), claimAssumeCache: fh.ResourceClaimCache(), } - if pl.controlPlaneControllerEnabled { - pl.podSchedulingContextLister = fh.SharedInformerFactory().Resource().V1alpha3().PodSchedulingContexts().Lister() - } return pl, nil } @@ -365,11 +191,9 @@ var _ framework.PreEnqueuePlugin = &dynamicResources{} var _ framework.PreFilterPlugin = &dynamicResources{} var _ framework.FilterPlugin = &dynamicResources{} var _ framework.PostFilterPlugin = &dynamicResources{} -var _ framework.PreScorePlugin = &dynamicResources{} var _ framework.ReservePlugin = &dynamicResources{} var _ framework.EnqueueExtensions = &dynamicResources{} var _ framework.PreBindPlugin = &dynamicResources{} -var _ framework.PostBindPlugin = &dynamicResources{} // Name returns name of the plugin. It is used in logs, etc. func (pl *dynamicResources) Name() string { @@ -407,14 +231,6 @@ func (pl *dynamicResources) EventsToRegister(_ context.Context) ([]framework.Clu {Event: framework.ClusterEvent{Resource: framework.ResourceSlice, ActionType: framework.Add | framework.Update}, QueueingHintFn: pl.isSchedulableAfterResourceSliceChange}, } - if pl.podSchedulingContextLister != nil { - events = append(events, - // When a driver has provided additional information, a pod waiting for that information - // may be schedulable. - framework.ClusterEventWithHint{Event: framework.ClusterEvent{Resource: framework.PodSchedulingContext, ActionType: framework.Add | framework.Update}, QueueingHintFn: pl.isSchedulableAfterPodSchedulingContextChange}, - ) - } - return events, nil } @@ -461,13 +277,9 @@ func (pl *dynamicResources) isSchedulableAfterClaimChange(logger klog.Logger, po if originalClaim != nil && originalClaim.Status.Allocation != nil && - originalClaim.Status.Allocation.Controller == "" && modifiedClaim.Status.Allocation == nil { // A claim with structured parameters was deallocated. This might have made // resources available for other pods. - // - // TODO (https://github.com/kubernetes/kubernetes/issues/123697): - // check that the pending claims depend on structured parameters (depends on refactoring foreachPodResourceClaim, see other TODO). 
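Much of what this hunk deletes existed to feed the control plane controller through the PodSchedulingContext API, and the long comment in the removed publish() explains the central tradeoff: a plain Update is cheaper in the common, conflict-free case, and server-side apply is used only as the conflict fallback. Condensed to its skeleton (a sketch of the removed flow, reusing the names p, pod, schedulingCtx, clientset and resourceapiapply from the deleted function; logging and the create branch are elided):

	// Prefer a plain Update; it avoids the SSA overhead when there is no conflict.
	_, err := clientset.ResourceV1alpha3().PodSchedulingContexts(schedulingCtx.Namespace).
		Update(ctx, schedulingCtx, metav1.UpdateOptions{})
	if apierrors.IsConflict(err) {
		// Conflict: fall back to server-side apply. No extra Get round trip is needed,
		// and forcing is safe because only the scheduler owns these spec fields.
		spec := resourceapiapply.PodSchedulingContextSpec()
		spec.SelectedNode = p.selectedNode
		if p.potentialNodes != nil {
			spec.PotentialNodes = *p.potentialNodes
		} else {
			// Unchanged, but must still be sent: the apply is the fully specified intent,
			// and omitting the list would clear it on the server.
			spec.PotentialNodes = p.schedulingCtx.Spec.PotentialNodes
		}
		applyCfg := resourceapiapply.PodSchedulingContext(pod.Name, pod.Namespace).WithSpec(spec)
		_, err = clientset.ResourceV1alpha3().PodSchedulingContexts(pod.Namespace).
			Apply(ctx, applyCfg, metav1.ApplyOptions{FieldManager: "kube-scheduler", Force: true})
	}
	return err
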
logger.V(6).Info("claim with structured parameters got deallocated", "pod", klog.KObj(pod), "claim", klog.KObj(modifiedClaim)) return framework.Queue, nil } @@ -560,129 +372,6 @@ func (pl *dynamicResources) isSchedulableAfterResourceSliceChange(logger klog.Lo return framework.Queue, nil } -// isSchedulableAfterPodSchedulingContextChange is invoked for all -// PodSchedulingContext events reported by an informer. It checks whether that -// change made a previously unschedulable pod schedulable (updated) or a new -// attempt is needed to re-create the object (deleted). It errs on the side of -// letting a pod scheduling attempt happen. -func (pl *dynamicResources) isSchedulableAfterPodSchedulingContextChange(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (framework.QueueingHint, error) { - // Deleted? That can happen because we ourselves delete the PodSchedulingContext while - // working on the pod. This can be ignored. - if oldObj != nil && newObj == nil { - logger.V(5).Info("PodSchedulingContext got deleted") - return framework.QueueSkip, nil - } - - oldPodScheduling, newPodScheduling, err := schedutil.As[*resourceapi.PodSchedulingContext](oldObj, newObj) - if err != nil { - // Shouldn't happen. - return framework.Queue, fmt.Errorf("unexpected object in isSchedulableAfterPodSchedulingContextChange: %w", err) - } - podScheduling := newPodScheduling // Never nil because deletes are handled above. - - if podScheduling.Name != pod.Name || podScheduling.Namespace != pod.Namespace { - logger.V(7).Info("PodSchedulingContext for unrelated pod got modified", "pod", klog.KObj(pod), "podScheduling", klog.KObj(podScheduling)) - return framework.QueueSkip, nil - } - - // If the drivers have provided information about all - // unallocated claims with delayed allocation, then the next - // scheduling attempt is able to pick a node, so we let it run - // immediately if this occurred for the first time, otherwise - // we allow backoff. - pendingDelayedClaims := 0 - if err := pl.foreachPodResourceClaim(pod, func(podResourceName string, claim *resourceapi.ResourceClaim) { - if claim.Status.Allocation == nil && - !podSchedulingHasClaimInfo(podScheduling, podResourceName) { - pendingDelayedClaims++ - } - }); err != nil { - // This is not an unexpected error: we know that - // foreachPodResourceClaim only returns errors for "not - // schedulable". - logger.V(5).Info("pod is not schedulable, keep waiting", "pod", klog.KObj(pod), "reason", err.Error()) - return framework.QueueSkip, nil - } - - // Some driver responses missing? - if pendingDelayedClaims > 0 { - // We could start a pod scheduling attempt to refresh the - // potential nodes list. But pod scheduling attempts are - // expensive and doing them too often causes the pod to enter - // backoff. Let's wait instead for all drivers to reply. - if loggerV := logger.V(6); loggerV.Enabled() { - loggerV.Info("PodSchedulingContext with missing resource claim information, keep waiting", "pod", klog.KObj(pod), "podSchedulingDiff", cmp.Diff(oldPodScheduling, podScheduling)) - } else { - logger.V(5).Info("PodSchedulingContext with missing resource claim information, keep waiting", "pod", klog.KObj(pod)) - } - return framework.QueueSkip, nil - } - - if oldPodScheduling == nil /* create */ || - len(oldPodScheduling.Status.ResourceClaims) < len(podScheduling.Status.ResourceClaims) /* new information and not incomplete (checked above) */ { - // This definitely is new information for the scheduler. Try again immediately. 
- logger.V(5).Info("PodSchedulingContext for pod has all required information, schedule immediately", "pod", klog.KObj(pod)) - return framework.Queue, nil - } - - // The other situation where the scheduler needs to do - // something immediately is when the selected node doesn't - // work: waiting in the backoff queue only helps eventually - // resources on the selected node become available again. It's - // much more likely, in particular when trying to fill up the - // cluster, that the choice simply didn't work out. The risk - // here is that in a situation where the cluster really is - // full, backoff won't be used because the scheduler keeps - // trying different nodes. This should not happen when it has - // full knowledge about resource availability (= - // PodSchedulingContext.*.UnsuitableNodes is complete) but may happen - // when it doesn't (= PodSchedulingContext.*.UnsuitableNodes had to be - // truncated). - // - // Truncation only happens for very large clusters and then may slow - // down scheduling, but should not break it completely. This is - // acceptable while DRA is alpha and will be investigated further - // before moving DRA to beta. - if podScheduling.Spec.SelectedNode != "" { - for _, claimStatus := range podScheduling.Status.ResourceClaims { - if slices.Contains(claimStatus.UnsuitableNodes, podScheduling.Spec.SelectedNode) { - logger.V(5).Info("PodSchedulingContext has unsuitable selected node, schedule immediately", "pod", klog.KObj(pod), "selectedNode", podScheduling.Spec.SelectedNode, "podResourceName", claimStatus.Name) - return framework.Queue, nil - } - } - } - - // Update with only the spec modified? - if oldPodScheduling != nil && - !apiequality.Semantic.DeepEqual(&oldPodScheduling.Spec, &podScheduling.Spec) && - apiequality.Semantic.DeepEqual(&oldPodScheduling.Status, &podScheduling.Status) { - logger.V(5).Info("PodSchedulingContext has only the scheduler spec changes, ignore the update", "pod", klog.KObj(pod)) - return framework.QueueSkip, nil - } - - // Once we get here, all changes which are known to require special responses - // have been checked for. Whatever the change was, we don't know exactly how - // to handle it and thus return Queue. This will cause the - // scheduler to treat the event as if no event hint callback had been provided. - // Developers who want to investigate this can enable a diff at log level 6. - if loggerV := logger.V(6); loggerV.Enabled() { - loggerV.Info("PodSchedulingContext for pod with unknown changes, maybe schedule", "pod", klog.KObj(pod), "podSchedulingDiff", cmp.Diff(oldPodScheduling, podScheduling)) - } else { - logger.V(5).Info("PodSchedulingContext for pod with unknown changes, maybe schedule", "pod", klog.KObj(pod)) - } - return framework.Queue, nil - -} - -func podSchedulingHasClaimInfo(podScheduling *resourceapi.PodSchedulingContext, podResourceName string) bool { - for _, claimStatus := range podScheduling.Status.ResourceClaims { - if claimStatus.Name == podResourceName { - return true - } - } - return false -} - // podResourceClaims returns the ResourceClaims for all pod.Spec.PodResourceClaims. func (pl *dynamicResources) podResourceClaims(pod *v1.Pod) ([]*resourceapi.ResourceClaim, error) { claims := make([]*resourceapi.ResourceClaim, 0, len(pod.Spec.ResourceClaims)) @@ -766,31 +455,11 @@ func (pl *dynamicResources) PreFilter(ctx context.Context, state *framework.Cycl return nil, framework.NewStatus(framework.Skip) } - // Fetch PodSchedulingContext, it's going to be needed when checking claims. 
- // Doesn't do anything when DRAControlPlaneController is disabled. - if err := s.podSchedulingState.init(ctx, pod, pl.podSchedulingContextLister); err != nil { - return nil, statusError(logger, err) - } - // All claims which the scheduler needs to allocate itself. allocateClaims := make([]*resourceapi.ResourceClaim, 0, len(claims)) s.informationsForClaim = make([]informationForClaim, len(claims)) for index, claim := range claims { - if claim.Spec.Controller != "" && - !pl.controlPlaneControllerEnabled { - // This keeps the pod as unschedulable until the - // scheduler gets restarted with "classic DRA" enabled - // or the claim gets replaced with one which doesn't - // need the feature. That is a cluster event that - // re-enqueues the pod. - return nil, statusUnschedulable(logger, "resourceclaim depends on disabled DRAControlPlaneController feature", "pod", klog.KObj(pod), "resourceclaim", klog.KObj(claim)) - } - - if claim.Status.DeallocationRequested { - // This will get resolved by the resource driver. - return nil, statusUnschedulable(logger, "resourceclaim must be reallocated", "pod", klog.KObj(pod), "resourceclaim", klog.KObj(claim)) - } if claim.Status.Allocation != nil && !resourceclaim.CanBeReserved(claim) && !resourceclaim.IsReservedForPod(pod, claim) { @@ -799,28 +468,21 @@ func (pl *dynamicResources) PreFilter(ctx context.Context, state *framework.Cycl } if claim.Status.Allocation != nil { - s.informationsForClaim[index].structuredParameters = claim.Status.Allocation.Controller == "" if claim.Status.Allocation.NodeSelector != nil { nodeSelector, err := nodeaffinity.NewNodeSelector(claim.Status.Allocation.NodeSelector) if err != nil { return nil, statusError(logger, err) } - s.informationsForClaim[index].availableOnNodes = map[string]*nodeaffinity.NodeSelector{"": nodeSelector} + s.informationsForClaim[index].availableOnNodes = nodeSelector } } else { - structuredParameters := claim.Spec.Controller == "" - s.informationsForClaim[index].structuredParameters = structuredParameters - if structuredParameters { - allocateClaims = append(allocateClaims, claim) + allocateClaims = append(allocateClaims, claim) - // Allocation in flight? Better wait for that - // to finish, see inFlightAllocations - // documentation for details. - if _, found := pl.inFlightAllocations.Load(claim.UID); found { - return nil, statusUnschedulable(logger, fmt.Sprintf("resource claim %s is in the process of being allocated", klog.KObj(claim))) - } - } else { - s.informationsForClaim[index].status = statusForClaim(s.podSchedulingState.schedulingCtx, pod.Spec.ResourceClaims[index].Name) + // Allocation in flight? Better wait for that + // to finish, see inFlightAllocations + // documentation for details. + if _, found := pl.inFlightAllocations.Load(claim.UID); found { + return nil, statusUnschedulable(logger, fmt.Sprintf("resource claim %s is in the process of being allocated", klog.KObj(claim))) } // Check all requests and device classes. If a class @@ -836,7 +498,7 @@ func (pl *dynamicResources) PreFilter(ctx context.Context, state *framework.Cycl return nil, statusError(logger, fmt.Errorf("request %s: unsupported request type", request.Name)) } - class, err := pl.classLister.Get(request.DeviceClassName) + _, err := pl.classLister.Get(request.DeviceClassName) if err != nil { // If the class cannot be retrieved, allocation cannot proceed. if apierrors.IsNotFound(err) { @@ -847,16 +509,6 @@ func (pl *dynamicResources) PreFilter(ctx context.Context, state *framework.Cycl // Other error, retry with backoff. 
return nil, statusError(logger, fmt.Errorf("request %s: look up device class: %w", request.Name, err)) } - if class.Spec.SuitableNodes != nil && !structuredParameters { - selector, err := nodeaffinity.NewNodeSelector(class.Spec.SuitableNodes) - if err != nil { - return nil, statusError(logger, err) - } - if s.informationsForClaim[index].availableOnNodes == nil { - s.informationsForClaim[index].availableOnNodes = make(map[string]*nodeaffinity.NodeSelector) - } - s.informationsForClaim[index].availableOnNodes[class.Name] = selector - } } } } @@ -953,35 +605,10 @@ func (pl *dynamicResources) Filter(ctx context.Context, cs *framework.CycleState for index, claim := range state.claims { logger.V(10).Info("filtering based on resource claims of the pod", "pod", klog.KObj(pod), "node", klog.KObj(node), "resourceclaim", klog.KObj(claim)) - if claim.Status.Allocation != nil { - for _, nodeSelector := range state.informationsForClaim[index].availableOnNodes { - if !nodeSelector.Match(node) { - logger.V(5).Info("AvailableOnNodes does not match", "pod", klog.KObj(pod), "node", klog.KObj(node), "resourceclaim", klog.KObj(claim)) - unavailableClaims = append(unavailableClaims, index) - break - } - } - continue - } - - if claim.Status.DeallocationRequested { - // We shouldn't get here. PreFilter already checked this. - return statusUnschedulable(logger, "resourceclaim must be reallocated", "pod", klog.KObj(pod), "node", klog.KObj(node), "resourceclaim", klog.KObj(claim)) - } - - for className, nodeSelector := range state.informationsForClaim[index].availableOnNodes { - if !nodeSelector.Match(node) { - return statusUnschedulable(logger, "excluded by device class node filter", "pod", klog.KObj(pod), "node", klog.KObj(node), "deviceclass", klog.KRef("", className)) - } - } - - // Use information from control plane controller? - if status := state.informationsForClaim[index].status; status != nil { - for _, unsuitableNode := range status.UnsuitableNodes { - if node.Name == unsuitableNode { - return statusUnschedulable(logger, "resourceclaim cannot be allocated for the node (unsuitable)", "pod", klog.KObj(pod), "node", klog.KObj(node), "resourceclaim", klog.KObj(claim), "unsuitablenodes", status.UnsuitableNodes) - } - } + // This node selector only gets set if the claim is allocated. + if nodeSelector := state.informationsForClaim[index].availableOnNodes; nodeSelector != nil && !nodeSelector.Match(node) { + logger.V(5).Info("allocation's node selector does not match", "pod", klog.KObj(pod), "node", klog.KObj(node), "resourceclaim", klog.KObj(claim)) + unavailableClaims = append(unavailableClaims, index) } } @@ -1065,34 +692,10 @@ func (pl *dynamicResources) PostFilter(ctx context.Context, cs *framework.CycleS claim := state.claims[index] if len(claim.Status.ReservedFor) == 0 || len(claim.Status.ReservedFor) == 1 && claim.Status.ReservedFor[0].UID == pod.UID { - // Is the claim is handled by the builtin controller? - // Then we can simply clear the allocation. Once the - // claim informer catches up, the controllers will - // be notified about this change. - clearAllocation := state.informationsForClaim[index].structuredParameters - - // Before we tell a driver to deallocate a claim, we - // have to stop telling it to allocate. Otherwise, - // depending on timing, it will deallocate the claim, - // see a PodSchedulingContext with selected node, and - // allocate again for that same node. 
- if !clearAllocation && - state.podSchedulingState.schedulingCtx != nil && - state.podSchedulingState.schedulingCtx.Spec.SelectedNode != "" { - state.podSchedulingState.selectedNode = ptr.To("") - if err := state.podSchedulingState.publish(ctx, pod, pl.clientset); err != nil { - return nil, statusError(logger, err) - } - } - claim := claim.DeepCopy() claim.Status.ReservedFor = nil - if clearAllocation { - claim.Status.Allocation = nil - } else { - claim.Status.DeallocationRequested = true - } - logger.V(5).Info("Requesting deallocation of ResourceClaim", "pod", klog.KObj(pod), "resourceclaim", klog.KObj(claim)) + claim.Status.Allocation = nil + logger.V(5).Info("Deallocation of ResourceClaim", "pod", klog.KObj(pod), "resourceclaim", klog.KObj(claim)) if _, err := pl.clientset.ResourceV1alpha3().ResourceClaims(claim.Namespace).UpdateStatus(ctx, claim, metav1.UpdateOptions{}); err != nil { return nil, statusError(logger, err) } @@ -1102,91 +705,6 @@ func (pl *dynamicResources) PostFilter(ctx context.Context, cs *framework.CycleS return nil, framework.NewStatus(framework.Unschedulable, "still not schedulable") } -// PreScore is passed a list of all nodes that would fit the pod. Not all -// claims are necessarily allocated yet, so here we can set the SuitableNodes -// field for those which are pending. -func (pl *dynamicResources) PreScore(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodes []*framework.NodeInfo) *framework.Status { - if !pl.enabled { - return nil - } - state, err := getStateData(cs) - if err != nil { - return statusError(klog.FromContext(ctx), err) - } - defer func() { - state.preScored = true - }() - if len(state.claims) == 0 { - return nil - } - - logger := klog.FromContext(ctx) - pending := false - for index, claim := range state.claims { - if claim.Status.Allocation == nil && - !state.informationsForClaim[index].structuredParameters { - pending = true - break - } - } - if !pending { - logger.V(5).Info("no pending claims with control plane controller", "pod", klog.KObj(pod)) - return nil - } - - if haveAllPotentialNodes(state.podSchedulingState.schedulingCtx, nodes) { - logger.V(5).Info("all potential nodes already set", "pod", klog.KObj(pod), "potentialnodes", klog.KObjSlice(nodes)) - return nil - } - - // Remember the potential nodes. The object will get created or - // updated in Reserve. This is both an optimization and - // covers the case that PreScore doesn't get called when there - // is only a single node. - logger.V(5).Info("remembering potential nodes", "pod", klog.KObj(pod), "potentialnodes", klog.KObjSlice(nodes)) - numNodes := len(nodes) - if numNodes > resourceapi.PodSchedulingNodeListMaxSize { - numNodes = resourceapi.PodSchedulingNodeListMaxSize - } - potentialNodes := make([]string, 0, numNodes) - if numNodes == len(nodes) { - // Copy all node names. - for _, node := range nodes { - potentialNodes = append(potentialNodes, node.Node().Name) - } - } else { - // Select a random subset of the nodes to comply with - // the PotentialNodes length limit. Randomization is - // done for us by Go which iterates over map entries - // randomly. 
-		nodeNames := map[string]struct{}{}
-		for _, node := range nodes {
-			nodeNames[node.Node().Name] = struct{}{}
-		}
-		for nodeName := range nodeNames {
-			if len(potentialNodes) >= resourceapi.PodSchedulingNodeListMaxSize {
-				break
-			}
-			potentialNodes = append(potentialNodes, nodeName)
-		}
-	}
-	sort.Strings(potentialNodes)
-	state.podSchedulingState.potentialNodes = &potentialNodes
-	return nil
-}
-
-func haveAllPotentialNodes(schedulingCtx *resourceapi.PodSchedulingContext, nodes []*framework.NodeInfo) bool {
-	if schedulingCtx == nil {
-		return false
-	}
-	for _, node := range nodes {
-		if !slices.Contains(schedulingCtx.Spec.PotentialNodes, node.Node().Name) {
-			return false
-		}
-	}
-	return true
-}
-
 // Reserve reserves claims for the pod.
 func (pl *dynamicResources) Reserve(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodeName string) (status *framework.Status) {
 	if !pl.enabled {
@@ -1202,10 +720,8 @@ func (pl *dynamicResources) Reserve(ctx context.Context, cs *framework.CycleStat
 	logger := klog.FromContext(ctx)
-	numDelayedAllocationPending := 0
-	numClaimsWithStatusInfo := 0
 	numClaimsWithAllocator := 0
-	for index, claim := range state.claims {
+	for _, claim := range state.claims {
 		if claim.Status.Allocation != nil {
 			// Allocated, but perhaps not reserved yet. We checked in PreFilter that
 			// the pod could reserve the claim. Instead of reserving here by
@@ -1215,42 +731,14 @@ func (pl *dynamicResources) Reserve(ctx context.Context, cs *framework.CycleStat
 			continue
 		}
-		// Do we use the allocator for it?
-		if state.informationsForClaim[index].structuredParameters {
-			numClaimsWithAllocator++
-			continue
-		}
-
-		// Must be delayed allocation with control plane controller.
-		numDelayedAllocationPending++
-
-		// Did the driver provide information that steered node
-		// selection towards a node that it can support?
-		if statusForClaim(state.podSchedulingState.schedulingCtx, pod.Spec.ResourceClaims[index].Name) != nil {
-			numClaimsWithStatusInfo++
-		}
+		numClaimsWithAllocator++
 	}
-	if numDelayedAllocationPending == 0 && numClaimsWithAllocator == 0 {
+	if numClaimsWithAllocator == 0 {
 		// Nothing left to do.
 		return nil
 	}
-	if !state.preScored && numDelayedAllocationPending > 0 {
-		// There was only one candidate that passed the Filters and
-		// therefore PreScore was not called.
-		//
-		// We need to ask whether that node is suitable, otherwise the
-		// scheduler will pick it forever even when it cannot satisfy
-		// the claim.
-		if state.podSchedulingState.schedulingCtx == nil ||
-			!slices.Contains(state.podSchedulingState.schedulingCtx.Spec.PotentialNodes, nodeName) {
-			potentialNodes := []string{nodeName}
-			state.podSchedulingState.potentialNodes = &potentialNodes
-			logger.V(5).Info("asking for information about single potential node", "pod", klog.KObj(pod), "node", klog.ObjectRef{Name: nodeName})
-		}
-	}
-
 	// Prepare allocation of claims handled by the scheduler.
 	if state.allocator != nil {
 		// Entries in these two slices match each other.
@@ -1289,51 +777,7 @@ func (pl *dynamicResources) Reserve(ctx context.Context, cs *framework.CycleStat
 		}
 	}
-	// When there is only one pending resource, we can go ahead with
-	// requesting allocation even when we don't have the information from
-	// the driver yet. Otherwise we wait for information before blindly
-	// making a decision that might have to be reversed later.
-	//
-	// If all pending claims are handled with the builtin controller,
-	// there is no need for a PodSchedulingContext change.
- if numDelayedAllocationPending == 1 && numClaimsWithAllocator == 0 || - numClaimsWithStatusInfo+numClaimsWithAllocator == numDelayedAllocationPending && numClaimsWithAllocator < numDelayedAllocationPending { - // TODO: can we increase the chance that the scheduler picks - // the same node as before when allocation is on-going, - // assuming that that node still fits the pod? Picking a - // different node may lead to some claims being allocated for - // one node and others for another, which then would have to be - // resolved with deallocation. - if state.podSchedulingState.schedulingCtx == nil || - state.podSchedulingState.schedulingCtx.Spec.SelectedNode != nodeName { - state.podSchedulingState.selectedNode = &nodeName - logger.V(5).Info("start allocation", "pod", klog.KObj(pod), "node", klog.ObjectRef{Name: nodeName}) - // The actual publish happens in PreBind or Unreserve. - return nil - } - } - - // May have been modified earlier in PreScore or above. - if state.podSchedulingState.isDirty() { - // The actual publish happens in PreBind or Unreserve. - return nil - } - - // If all pending claims are handled with the builtin controller, then - // we can allow the pod to proceed. Allocating and reserving the claims - // will be done in PreBind. - if numDelayedAllocationPending == 0 { - return nil - } - - // More than one pending claim and not enough information about all of them. - // - // TODO: can or should we ensure that schedulingCtx gets aborted while - // waiting for resources *before* triggering delayed volume - // provisioning? On the one hand, volume provisioning is currently - // irreversible, so it better should come last. On the other hand, - // triggering both in parallel might be faster. - return statusPending(logger, "waiting for resource driver to provide information", "pod", klog.KObj(pod)) + return nil } // Unreserve clears the ReservedFor field for all claims. @@ -1352,24 +796,11 @@ func (pl *dynamicResources) Unreserve(ctx context.Context, cs *framework.CycleSt logger := klog.FromContext(ctx) - // Was publishing delayed? If yes, do it now. - // - // The most common scenario is that a different set of potential nodes - // was identified. This revised set needs to be published to enable DRA - // drivers to provide better guidance for future scheduling attempts. - if state.podSchedulingState.isDirty() { - if err := state.podSchedulingState.publish(ctx, pod, pl.clientset); err != nil { - logger.Error(err, "publish PodSchedulingContext") - } - } - for index, claim := range state.claims { // If allocation was in-flight, then it's not anymore and we need to revert the // claim object in the assume cache to what it was before. - if state.informationsForClaim[index].structuredParameters { - if _, found := pl.inFlightAllocations.LoadAndDelete(state.claims[index].UID); found { - pl.claimAssumeCache.Restore(claim.Namespace + "/" + claim.Name) - } + if _, found := pl.inFlightAllocations.LoadAndDelete(state.claims[index].UID); found { + pl.claimAssumeCache.Restore(claim.Namespace + "/" + claim.Name) } if claim.Status.Allocation != nil && @@ -1413,15 +844,6 @@ func (pl *dynamicResources) PreBind(ctx context.Context, cs *framework.CycleStat logger := klog.FromContext(ctx) - // Was publishing delayed? If yes, do it now and then cause binding to stop. - // This will not happen if all claims get handled by builtin controllers. 
- if state.podSchedulingState.isDirty() { - if err := state.podSchedulingState.publish(ctx, pod, pl.clientset); err != nil { - return statusError(logger, err) - } - return statusPending(logger, "waiting for resource driver", "pod", klog.KObj(pod), "node", klog.ObjectRef{Name: nodeName}) - } - for index, claim := range state.claims { if !resourceclaim.IsReservedForPod(pod, claim) { claim, err := pl.bindClaim(ctx, state, index, pod, nodeName) @@ -1523,39 +945,6 @@ func (pl *dynamicResources) bindClaim(ctx context.Context, state *stateData, ind return claim, nil } -// PostBind is called after a pod is successfully bound to a node. Now we are -// sure that a PodSchedulingContext object, if it exists, is definitely not going to -// be needed anymore and can delete it. This is a one-shot thing, there won't -// be any retries. This is okay because it should usually work and in those -// cases where it doesn't, the garbage collector will eventually clean up. -func (pl *dynamicResources) PostBind(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodeName string) { - if !pl.enabled { - return - } - state, err := getStateData(cs) - if err != nil { - return - } - if len(state.claims) == 0 { - return - } - - // We cannot know for sure whether the PodSchedulingContext object exists. We - // might have created it in the previous pod schedulingCtx cycle and not - // have it in our informer cache yet. Let's try to delete, just to be - // on the safe side. - logger := klog.FromContext(ctx) - err = pl.clientset.ResourceV1alpha3().PodSchedulingContexts(pod.Namespace).Delete(ctx, pod.Name, metav1.DeleteOptions{}) - switch { - case apierrors.IsNotFound(err): - logger.V(5).Info("no PodSchedulingContext object to delete") - case err != nil: - logger.Error(err, "delete PodSchedulingContext") - default: - logger.V(5).Info("PodSchedulingContext object deleted") - } -} - // statusUnschedulable ensures that there is a log message associated with the // line where the status originated. func statusUnschedulable(logger klog.Logger, reason string, kv ...interface{}) *framework.Status { @@ -1569,21 +958,6 @@ func statusUnschedulable(logger klog.Logger, reason string, kv ...interface{}) * return framework.NewStatus(framework.UnschedulableAndUnresolvable, reason) } -// statusPending ensures that there is a log message associated with the -// line where the status originated. -func statusPending(logger klog.Logger, reason string, kv ...interface{}) *framework.Status { - if loggerV := logger.V(5); loggerV.Enabled() { - helper, loggerV := loggerV.WithCallStackHelper() - helper() - kv = append(kv, "reason", reason) - // nolint: logcheck // warns because it cannot check key/values - loggerV.Info("pod waiting for external component", kv...) - } - - // When we return Pending, we want to block the Pod at the same time. - return framework.NewStatus(framework.Pending, reason) -} - // statusError ensures that there is a log message associated with the // line where the error originated. 
func statusError(logger klog.Logger, err error, kv ...interface{}) *framework.Status { diff --git a/pkg/scheduler/framework/plugins/dynamicresources/dynamicresources_test.go b/pkg/scheduler/framework/plugins/dynamicresources/dynamicresources_test.go index 2b18ed1b88e..1292f1ac680 100644 --- a/pkg/scheduler/framework/plugins/dynamicresources/dynamicresources_test.go +++ b/pkg/scheduler/framework/plugins/dynamicresources/dynamicresources_test.go @@ -38,7 +38,6 @@ import ( "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes/fake" cgotesting "k8s.io/client-go/testing" - "k8s.io/client-go/tools/cache" "k8s.io/kubernetes/pkg/scheduler/framework" "k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature" "k8s.io/kubernetes/pkg/scheduler/framework/runtime" @@ -54,8 +53,7 @@ var ( nodeName = "worker" node2Name = "worker-2" node3Name = "worker-3" - controller = "some-driver" - driver = controller + driver = "some-driver" podName = "my-pod" podUID = "1234" resourceName = "my-resource" @@ -120,7 +118,7 @@ var ( }, } - claim = st.MakeResourceClaim(controller). + claim = st.MakeResourceClaim(). Name(claimName). Namespace(namespace). Request(className). @@ -135,7 +133,6 @@ var ( Name(claimName2). Obj() allocationResult = &resourceapi.AllocationResult{ - Controller: controller, Devices: resourceapi.DeviceAllocationResult{ Results: []resourceapi.DeviceRequestAllocationResult{{ Driver: driver, @@ -148,28 +145,21 @@ var ( return st.MakeNodeSelector().In("metadata.name", []string{nodeName}, st.NodeSelectorTypeMatchFields).Obj() }(), } - deallocatingClaim = st.FromResourceClaim(pendingClaim). - Allocation(allocationResult). - DeallocationRequested(true). - Obj() inUseClaim = st.FromResourceClaim(pendingClaim). Allocation(allocationResult). ReservedForPod(podName, types.UID(podUID)). Obj() - structuredInUseClaim = st.FromResourceClaim(inUseClaim). - Structured(). - Obj() allocatedClaim = st.FromResourceClaim(pendingClaim). Allocation(allocationResult). Obj() allocatedClaimWithWrongTopology = st.FromResourceClaim(allocatedClaim). - Allocation(&resourceapi.AllocationResult{Controller: controller, NodeSelector: st.MakeNodeSelector().In("no-such-label", []string{"no-such-value"}, st.NodeSelectorTypeMatchExpressions).Obj()}). + Allocation(&resourceapi.AllocationResult{NodeSelector: st.MakeNodeSelector().In("no-such-label", []string{"no-such-value"}, st.NodeSelectorTypeMatchExpressions).Obj()}). Obj() allocatedClaimWithGoodTopology = st.FromResourceClaim(allocatedClaim). - Allocation(&resourceapi.AllocationResult{Controller: controller, NodeSelector: st.MakeNodeSelector().In("kubernetes.io/hostname", []string{nodeName}, st.NodeSelectorTypeMatchExpressions).Obj()}). + Allocation(&resourceapi.AllocationResult{NodeSelector: st.MakeNodeSelector().In("kubernetes.io/hostname", []string{nodeName}, st.NodeSelectorTypeMatchExpressions).Obj()}). Obj() - otherClaim = st.MakeResourceClaim(controller). + otherClaim = st.MakeResourceClaim(). Name("not-my-claim"). Namespace(namespace). Request(className). @@ -178,19 +168,6 @@ var ( Allocation(allocationResult). Obj() - scheduling = st.MakePodSchedulingContexts().Name(podName).Namespace(namespace). - OwnerReference(podName, podUID, podKind). - Obj() - schedulingPotential = st.FromPodSchedulingContexts(scheduling). - PotentialNodes(workerNode.Name). - Obj() - schedulingSelectedPotential = st.FromPodSchedulingContexts(schedulingPotential). - SelectedNode(workerNode.Name). - Obj() - schedulingInfo = st.FromPodSchedulingContexts(schedulingPotential). 
- ResourceClaims(resourceapi.ResourceClaimSchedulingStatus{Name: resourceName}, - resourceapi.ResourceClaimSchedulingStatus{Name: resourceName2}). - Obj() resourceSlice = st.MakeResourceSlice(nodeName, driver).Device("instance-1", nil).Obj() resourceSliceUpdated = st.FromResourceSlice(resourceSlice).Device("instance-1", map[resourceapi.QualifiedName]resourceapi.DeviceAttribute{attrName: {BoolValue: ptr.To(true)}}).Obj() ) @@ -201,12 +178,6 @@ func reserve(claim *resourceapi.ResourceClaim, pod *v1.Pod) *resourceapi.Resourc Obj() } -func structuredClaim(claim *resourceapi.ResourceClaim) *resourceapi.ResourceClaim { - return st.FromResourceClaim(claim). - Structured(). - Obj() -} - func breakCELInClaim(claim *resourceapi.ResourceClaim) *resourceapi.ResourceClaim { claim = claim.DeepCopy() for i := range claim.Spec.Devices.Requests { @@ -260,8 +231,7 @@ type result struct { // functions will get called for all objects of that type. If they needs to // make changes only to a particular instance, then it must check the name. type change struct { - scheduling func(*resourceapi.PodSchedulingContext) *resourceapi.PodSchedulingContext - claim func(*resourceapi.ResourceClaim) *resourceapi.ResourceClaim + claim func(*resourceapi.ResourceClaim) *resourceapi.ResourceClaim } type perNodeResult map[string]result @@ -310,11 +280,10 @@ type prepare struct { func TestPlugin(t *testing.T) { testcases := map[string]struct { - nodes []*v1.Node // default if unset is workerNode - pod *v1.Pod - claims []*resourceapi.ResourceClaim - classes []*resourceapi.DeviceClass - schedulings []*resourceapi.PodSchedulingContext + nodes []*v1.Node // default if unset is workerNode + pod *v1.Pod + claims []*resourceapi.ResourceClaim + classes []*resourceapi.DeviceClass // objs get stored directly in the fake client, without passing // through reactors, in contrast to the types above. @@ -325,8 +294,7 @@ func TestPlugin(t *testing.T) { // Feature gates. False is chosen so that the uncommon case // doesn't need to be set. 
- disableDRA bool - disableClassicDRA bool + disableDRA bool }{ "empty": { pod: st.MakePod().Name("foo").Namespace("default").Obj(), @@ -356,23 +324,6 @@ func TestPlugin(t *testing.T) { }, }, }, - "claim-reference-structured": { - pod: podWithClaimName, - claims: []*resourceapi.ResourceClaim{structuredClaim(allocatedClaim), otherClaim}, - want: want{ - prebind: result{ - changes: change{ - claim: func(claim *resourceapi.ResourceClaim) *resourceapi.ResourceClaim { - if claim.Name == claimName { - claim = claim.DeepCopy() - claim.Status.ReservedFor = inUseClaim.Status.ReservedFor - } - return claim - }, - }, - }, - }, - }, "claim-template": { pod: podWithClaimTemplateInStatus, claims: []*resourceapi.ResourceClaim{allocatedClaim, otherClaim}, @@ -390,23 +341,6 @@ func TestPlugin(t *testing.T) { }, }, }, - "claim-template-structured": { - pod: podWithClaimTemplateInStatus, - claims: []*resourceapi.ResourceClaim{structuredClaim(allocatedClaim), otherClaim}, - want: want{ - prebind: result{ - changes: change{ - claim: func(claim *resourceapi.ResourceClaim) *resourceapi.ResourceClaim { - if claim.Name == claimName { - claim = claim.DeepCopy() - claim.Status.ReservedFor = inUseClaim.Status.ReservedFor - } - return claim - }, - }, - }, - }, - }, "missing-claim": { pod: podWithClaimTemplate, // status not set claims: []*resourceapi.ResourceClaim{allocatedClaim, otherClaim}, @@ -442,9 +376,9 @@ func TestPlugin(t *testing.T) { }, }, }, - "structured-no-resources": { + "no-resources": { pod: podWithClaimName, - claims: []*resourceapi.ResourceClaim{structuredClaim(pendingClaim)}, + claims: []*resourceapi.ResourceClaim{pendingClaim}, classes: []*resourceapi.DeviceClass{deviceClass}, want: want{ filter: perNodeResult{ @@ -457,72 +391,72 @@ func TestPlugin(t *testing.T) { }, }, }, - "structured-with-resources": { + "with-resources": { pod: podWithClaimName, - claims: []*resourceapi.ResourceClaim{structuredClaim(pendingClaim)}, + claims: []*resourceapi.ResourceClaim{pendingClaim}, classes: []*resourceapi.DeviceClass{deviceClass}, objs: []apiruntime.Object{workerNodeSlice}, want: want{ reserve: result{ - inFlightClaim: structuredClaim(allocatedClaim), + inFlightClaim: allocatedClaim, }, prebind: result{ - assumedClaim: reserve(structuredClaim(allocatedClaim), podWithClaimName), + assumedClaim: reserve(allocatedClaim, podWithClaimName), changes: change{ claim: func(claim *resourceapi.ResourceClaim) *resourceapi.ResourceClaim { if claim.Name == claimName { claim = claim.DeepCopy() - claim.Finalizers = structuredClaim(allocatedClaim).Finalizers - claim.Status = structuredClaim(inUseClaim).Status + claim.Finalizers = allocatedClaim.Finalizers + claim.Status = inUseClaim.Status } return claim }, }, }, postbind: result{ - assumedClaim: reserve(structuredClaim(allocatedClaim), podWithClaimName), + assumedClaim: reserve(allocatedClaim, podWithClaimName), }, }, }, - "structured-with-resources-has-finalizer": { + "with-resources-has-finalizer": { // As before. but the finalizer is already set. Could happen if // the scheduler got interrupted. 
pod: podWithClaimName, claims: func() []*resourceapi.ResourceClaim { - claim := structuredClaim(pendingClaim) - claim.Finalizers = structuredClaim(allocatedClaim).Finalizers + claim := pendingClaim + claim.Finalizers = allocatedClaim.Finalizers return []*resourceapi.ResourceClaim{claim} }(), classes: []*resourceapi.DeviceClass{deviceClass}, objs: []apiruntime.Object{workerNodeSlice}, want: want{ reserve: result{ - inFlightClaim: structuredClaim(allocatedClaim), + inFlightClaim: allocatedClaim, }, prebind: result{ - assumedClaim: reserve(structuredClaim(allocatedClaim), podWithClaimName), + assumedClaim: reserve(allocatedClaim, podWithClaimName), changes: change{ claim: func(claim *resourceapi.ResourceClaim) *resourceapi.ResourceClaim { if claim.Name == claimName { claim = claim.DeepCopy() - claim.Status = structuredInUseClaim.Status + claim.Status = inUseClaim.Status } return claim }, }, }, postbind: result{ - assumedClaim: reserve(structuredClaim(allocatedClaim), podWithClaimName), + assumedClaim: reserve(allocatedClaim, podWithClaimName), }, }, }, - "structured-with-resources-finalizer-gets-removed": { + "with-resources-finalizer-gets-removed": { // As before. but the finalizer is already set. Then it gets // removed before the scheduler reaches PreBind. pod: podWithClaimName, claims: func() []*resourceapi.ResourceClaim { - claim := structuredClaim(pendingClaim) - claim.Finalizers = structuredClaim(allocatedClaim).Finalizers + claim := pendingClaim + claim.Finalizers = allocatedClaim.Finalizers return []*resourceapi.ResourceClaim{claim} }(), classes: []*resourceapi.DeviceClass{deviceClass}, @@ -537,77 +471,77 @@ func TestPlugin(t *testing.T) { }, want: want{ reserve: result{ - inFlightClaim: structuredClaim(allocatedClaim), + inFlightClaim: allocatedClaim, }, prebind: result{ - assumedClaim: reserve(structuredClaim(allocatedClaim), podWithClaimName), + assumedClaim: reserve(allocatedClaim, podWithClaimName), changes: change{ claim: func(claim *resourceapi.ResourceClaim) *resourceapi.ResourceClaim { if claim.Name == claimName { claim = claim.DeepCopy() - claim.Finalizers = structuredClaim(allocatedClaim).Finalizers - claim.Status = structuredInUseClaim.Status + claim.Finalizers = allocatedClaim.Finalizers + claim.Status = inUseClaim.Status } return claim }, }, }, postbind: result{ - assumedClaim: reserve(structuredClaim(allocatedClaim), podWithClaimName), + assumedClaim: reserve(allocatedClaim, podWithClaimName), }, }, }, - "structured-with-resources-finalizer-gets-added": { + "with-resources-finalizer-gets-added": { // No finalizer initially, then it gets added before // the scheduler reaches PreBind. Shouldn't happen? 
pod: podWithClaimName, - claims: []*resourceapi.ResourceClaim{structuredClaim(pendingClaim)}, + claims: []*resourceapi.ResourceClaim{pendingClaim}, classes: []*resourceapi.DeviceClass{deviceClass}, objs: []apiruntime.Object{workerNodeSlice}, prepare: prepare{ prebind: change{ claim: func(claim *resourceapi.ResourceClaim) *resourceapi.ResourceClaim { - claim.Finalizers = structuredClaim(allocatedClaim).Finalizers + claim.Finalizers = allocatedClaim.Finalizers return claim }, }, }, want: want{ reserve: result{ - inFlightClaim: structuredClaim(allocatedClaim), + inFlightClaim: allocatedClaim, }, prebind: result{ - assumedClaim: reserve(structuredClaim(allocatedClaim), podWithClaimName), + assumedClaim: reserve(allocatedClaim, podWithClaimName), changes: change{ claim: func(claim *resourceapi.ResourceClaim) *resourceapi.ResourceClaim { if claim.Name == claimName { claim = claim.DeepCopy() - claim.Status = structuredInUseClaim.Status + claim.Status = inUseClaim.Status } return claim }, }, }, postbind: result{ - assumedClaim: reserve(structuredClaim(allocatedClaim), podWithClaimName), + assumedClaim: reserve(allocatedClaim, podWithClaimName), }, }, }, - "structured-skip-bind": { + "skip-bind": { pod: podWithClaimName, - claims: []*resourceapi.ResourceClaim{structuredClaim(pendingClaim)}, + claims: []*resourceapi.ResourceClaim{pendingClaim}, classes: []*resourceapi.DeviceClass{deviceClass}, objs: []apiruntime.Object{workerNodeSlice}, want: want{ reserve: result{ - inFlightClaim: structuredClaim(allocatedClaim), + inFlightClaim: allocatedClaim, }, unreserveBeforePreBind: &result{}, }, }, - "structured-exhausted-resources": { + "exhausted-resources": { pod: podWithClaimName, - claims: []*resourceapi.ResourceClaim{structuredClaim(pendingClaim), structuredClaim(otherAllocatedClaim)}, + claims: []*resourceapi.ResourceClaim{pendingClaim, otherAllocatedClaim}, classes: []*resourceapi.DeviceClass{deviceClass}, objs: []apiruntime.Object{workerNodeSlice}, want: want{ @@ -624,7 +558,7 @@ func TestPlugin(t *testing.T) { "claim-parameters-CEL-runtime-error": { pod: podWithClaimName, - claims: []*resourceapi.ResourceClaim{breakCELInClaim(structuredClaim(pendingClaim))}, + claims: []*resourceapi.ResourceClaim{breakCELInClaim(pendingClaim)}, classes: []*resourceapi.DeviceClass{deviceClass}, objs: []apiruntime.Object{workerNodeSlice}, want: want{ @@ -638,7 +572,7 @@ func TestPlugin(t *testing.T) { "class-parameters-CEL-runtime-error": { pod: podWithClaimName, - claims: []*resourceapi.ResourceClaim{structuredClaim(pendingClaim)}, + claims: []*resourceapi.ResourceClaim{pendingClaim}, classes: []*resourceapi.DeviceClass{breakCELInClass(deviceClass)}, objs: []apiruntime.Object{workerNodeSlice}, want: want{ @@ -658,7 +592,7 @@ func TestPlugin(t *testing.T) { "CEL-runtime-error-for-one-of-two-nodes": { nodes: []*v1.Node{workerNode, workerNode2}, pod: podWithClaimName, - claims: []*resourceapi.ResourceClaim{breakCELInClaim(structuredClaim(pendingClaim))}, + claims: []*resourceapi.ResourceClaim{breakCELInClaim(pendingClaim)}, classes: []*resourceapi.DeviceClass{deviceClass}, objs: []apiruntime.Object{workerNodeSlice, workerNode2Slice}, want: want{ @@ -674,7 +608,7 @@ func TestPlugin(t *testing.T) { "CEL-runtime-error-for-one-of-three-nodes": { nodes: []*v1.Node{workerNode, workerNode2, workerNode3}, pod: podWithClaimName, - claims: []*resourceapi.ResourceClaim{breakCELInClaim(structuredClaim(pendingClaim))}, + claims: []*resourceapi.ResourceClaim{breakCELInClaim(pendingClaim)}, classes: 
[]*resourceapi.DeviceClass{deviceClass}, objs: []apiruntime.Object{workerNodeSlice, workerNode2Slice, workerNode3Slice}, want: want{ @@ -690,18 +624,6 @@ func TestPlugin(t *testing.T) { }, }, - "waiting-for-deallocation": { - pod: podWithClaimName, - claims: []*resourceapi.ResourceClaim{deallocatingClaim}, - want: want{ - prefilter: result{ - status: framework.NewStatus(framework.UnschedulableAndUnresolvable, `resourceclaim must be reallocated`), - }, - postfilter: result{ - status: framework.NewStatus(framework.Unschedulable, `no new claims to deallocate`), - }, - }, - }, "missing-class": { pod: podWithClaimName, claims: []*resourceapi.ResourceClaim{pendingClaim}, @@ -714,99 +636,6 @@ func TestPlugin(t *testing.T) { }, }, }, - "scheduling-select-immediately": { - // Create the PodSchedulingContext object, ask for information - // and select a node. - pod: podWithClaimName, - claims: []*resourceapi.ResourceClaim{pendingClaim}, - classes: []*resourceapi.DeviceClass{deviceClass}, - want: want{ - prebind: result{ - status: framework.NewStatus(framework.Pending, `waiting for resource driver`), - added: []metav1.Object{schedulingSelectedPotential}, - }, - }, - }, - "scheduling-ask": { - // Create the PodSchedulingContext object, ask for - // information, but do not select a node because - // there are multiple claims. - pod: podWithTwoClaimNames, - claims: []*resourceapi.ResourceClaim{pendingClaim, pendingClaim2}, - classes: []*resourceapi.DeviceClass{deviceClass}, - want: want{ - prebind: result{ - status: framework.NewStatus(framework.Pending, `waiting for resource driver`), - added: []metav1.Object{schedulingPotential}, - }, - }, - }, - "scheduling-finish": { - // Use the populated PodSchedulingContext object to select a - // node. - pod: podWithClaimName, - claims: []*resourceapi.ResourceClaim{pendingClaim}, - schedulings: []*resourceapi.PodSchedulingContext{schedulingInfo}, - classes: []*resourceapi.DeviceClass{deviceClass}, - want: want{ - prebind: result{ - status: framework.NewStatus(framework.Pending, `waiting for resource driver`), - changes: change{ - scheduling: func(in *resourceapi.PodSchedulingContext) *resourceapi.PodSchedulingContext { - return st.FromPodSchedulingContexts(in). - SelectedNode(workerNode.Name). - Obj() - }, - }, - }, - }, - }, - "scheduling-finish-concurrent-label-update": { - // Use the populated PodSchedulingContext object to select a - // node. - pod: podWithClaimName, - claims: []*resourceapi.ResourceClaim{pendingClaim}, - schedulings: []*resourceapi.PodSchedulingContext{schedulingInfo}, - classes: []*resourceapi.DeviceClass{deviceClass}, - prepare: prepare{ - prebind: change{ - scheduling: func(in *resourceapi.PodSchedulingContext) *resourceapi.PodSchedulingContext { - // This does not actually conflict with setting the - // selected node, but because the plugin is not using - // patching yet, Update nonetheless fails. - return st.FromPodSchedulingContexts(in). - Label("hello", "world"). - Obj() - }, - }, - }, - want: want{ - prebind: result{ - status: framework.AsStatus(errors.New(`ResourceVersion must match the object that gets updated`)), - }, - }, - }, - "scheduling-completed": { - // Remove PodSchedulingContext object once the pod is scheduled. 
-			pod:         podWithClaimName,
-			claims:      []*resourceapi.ResourceClaim{allocatedClaim},
-			schedulings: []*resourceapi.PodSchedulingContext{schedulingInfo},
-			classes:     []*resourceapi.DeviceClass{deviceClass},
-			want: want{
-				prebind: result{
-					changes: change{
-						claim: func(in *resourceapi.ResourceClaim) *resourceapi.ResourceClaim {
-							return st.FromResourceClaim(in).
-								ReservedFor(resourceapi.ResourceClaimConsumerReference{Resource: "pods", Name: podName, UID: types.UID(podUID)}).
-								Obj()
-						},
-					},
-				},
-				postbind: result{
-					removed: []metav1.Object{schedulingInfo},
-				},
-			},
-		},
 		"wrong-topology": {
 			// PostFilter tries to get the pod schedulable by
 			// deallocating the claim.
 			pod:    podWithClaimName,
 			claims: []*resourceapi.ResourceClaim{allocatedClaimWithWrongTopology},
 			want: want{
 				filter: perNodeResult{
 					workerNode.Name: {
 						status: framework.NewStatus(framework.UnschedulableAndUnresolvable, `resourceclaim not available on the node`),
 					},
 				},
 				postfilter: result{
-					// Claims with delayed allocation get deallocated.
-					changes: change{
-						claim: func(in *resourceapi.ResourceClaim) *resourceapi.ResourceClaim {
-							return st.FromResourceClaim(in).
-								DeallocationRequested(true).
-								Obj()
-						},
-					},
-					status: framework.NewStatus(framework.Unschedulable, `deallocation of ResourceClaim completed`),
-				},
-			},
-		},
-		"wrong-topology-structured": {
-			// PostFilter tries to get the pod schedulable by
-			// deallocating the claim.
-			pod:    podWithClaimName,
-			claims: []*resourceapi.ResourceClaim{structuredClaim(allocatedClaimWithWrongTopology)},
-			want: want{
-				filter: perNodeResult{
-					workerNode.Name: {
-						status: framework.NewStatus(framework.UnschedulableAndUnresolvable, `resourceclaim not available on the node`),
-					},
-				},
-				postfilter: result{
-					// Claims with delayed allocation and structured parameters get deallocated immediately.
+					// Claims get deallocated immediately.
 					changes: change{
 						claim: func(in *resourceapi.ResourceClaim) *resourceapi.ResourceClaim {
 							return st.FromResourceClaim(in).
@@ -894,30 +699,6 @@ func TestPlugin(t *testing.T) {
 				},
 			},
 		},
-		"bind-failure-structured": {
-			pod:    podWithClaimName,
-			claims: []*resourceapi.ResourceClaim{structuredClaim(allocatedClaimWithGoodTopology)},
-			want: want{
-				prebind: result{
-					changes: change{
-						claim: func(in *resourceapi.ResourceClaim) *resourceapi.ResourceClaim {
-							return st.FromResourceClaim(in).
-								ReservedFor(resourceapi.ResourceClaimConsumerReference{Resource: "pods", Name: podName, UID: types.UID(podUID)}).
- Obj() - }, - }, - }, - unreserveAfterBindFailure: &result{ - changes: change{ - claim: func(in *resourceapi.ResourceClaim) *resourceapi.ResourceClaim { - out := in.DeepCopy() - out.Status.ReservedFor = []resourceapi.ResourceClaimConsumerReference{} - return out - }, - }, - }, - }, - }, "reserved-okay": { pod: podWithClaimName, claims: []*resourceapi.ResourceClaim{inUseClaim}, @@ -945,9 +726,8 @@ func TestPlugin(t *testing.T) { } features := feature.Features{ EnableDynamicResourceAllocation: !tc.disableDRA, - EnableDRAControlPlaneController: !tc.disableClassicDRA, } - testCtx := setup(t, nodes, tc.claims, tc.classes, tc.schedulings, tc.objs, features) + testCtx := setup(t, nodes, tc.claims, tc.classes, tc.objs, features) initialObjects := testCtx.listAll(t) status := testCtx.p.PreEnqueue(testCtx.ctx, tc.pod) @@ -996,13 +776,6 @@ func TestPlugin(t *testing.T) { if !unschedulable && len(potentialNodes) > 1 { initialObjects = testCtx.listAll(t) initialObjects = testCtx.updateAPIServer(t, initialObjects, tc.prepare.prescore) - status := testCtx.p.PreScore(testCtx.ctx, testCtx.state, tc.pod, potentialNodes) - t.Run("prescore", func(t *testing.T) { - testCtx.verify(t, tc.want.prescore, initialObjects, nil, status) - }) - if status.Code() != framework.Success { - unschedulable = true - } } var selectedNode *framework.NodeInfo @@ -1054,10 +827,6 @@ func TestPlugin(t *testing.T) { } else if status.IsSuccess() { initialObjects = testCtx.listAll(t) initialObjects = testCtx.updateAPIServer(t, initialObjects, tc.prepare.postbind) - testCtx.p.PostBind(testCtx.ctx, testCtx.state, tc.pod, selectedNode.Node().Name) - t.Run("postbind", func(t *testing.T) { - testCtx.verify(t, tc.want.postbind, initialObjects, nil, nil) - }) } } } else if len(potentialNodes) == 0 { @@ -1137,13 +906,6 @@ func (tc *testContext) listAll(t *testing.T) (objects []metav1.Object) { claim := claim objects = append(objects, &claim) } - schedulings, err := tc.client.ResourceV1alpha3().PodSchedulingContexts("").List(tc.ctx, metav1.ListOptions{}) - require.NoError(t, err, "list pod scheduling") - for _, scheduling := range schedulings.Items { - scheduling := scheduling - objects = append(objects, &scheduling) - } - sortObjects(objects) return } @@ -1189,12 +951,6 @@ func (tc *testContext) updateAPIServer(t *testing.T, objects []metav1.Object, up t.Fatalf("unexpected error during prepare update: %v", err) } modified[i] = obj - case *resourceapi.PodSchedulingContext: - obj, err := tc.client.ResourceV1alpha3().PodSchedulingContexts(obj.Namespace).Update(tc.ctx, obj, metav1.UpdateOptions{}) - if err != nil { - t.Fatalf("unexpected error during prepare update: %v", err) - } - modified[i] = obj default: t.Fatalf("unsupported object type %T", obj) } @@ -1224,10 +980,6 @@ func update(t *testing.T, objects []metav1.Object, updates change) []metav1.Obje if updates.claim != nil { obj = updates.claim(in) } - case *resourceapi.PodSchedulingContext: - if updates.scheduling != nil { - obj = updates.scheduling(in) - } } updated = append(updated, obj) } @@ -1235,7 +987,7 @@ func update(t *testing.T, objects []metav1.Object, updates change) []metav1.Obje return updated } -func setup(t *testing.T, nodes []*v1.Node, claims []*resourceapi.ResourceClaim, classes []*resourceapi.DeviceClass, schedulings []*resourceapi.PodSchedulingContext, objs []apiruntime.Object, features feature.Features) (result *testContext) { +func setup(t *testing.T, nodes []*v1.Node, claims []*resourceapi.ResourceClaim, classes []*resourceapi.DeviceClass, objs []apiruntime.Object, 
features feature.Features) (result *testContext) { t.Helper() tc := &testContext{} @@ -1274,10 +1026,6 @@ func setup(t *testing.T, nodes []*v1.Node, claims []*resourceapi.ResourceClaim, _, err := tc.client.ResourceV1alpha3().DeviceClasses().Create(tc.ctx, class, metav1.CreateOptions{}) require.NoError(t, err, "create resource class") } - for _, scheduling := range schedulings { - _, err := tc.client.ResourceV1alpha3().PodSchedulingContexts(scheduling.Namespace).Create(tc.ctx, scheduling, metav1.CreateOptions{}) - require.NoError(t, err, "create pod scheduling") - } tc.informerFactory.Start(tc.ctx.Done()) t.Cleanup(func() { @@ -1434,17 +1182,15 @@ func Test_isSchedulableAfterClaimChange(t *testing.T) { }(), wantHint: framework.Queue, }, - "structured-claim-deallocate": { + "claim-deallocate": { pod: podWithClaimName, - claims: []*resourceapi.ResourceClaim{pendingClaim, structuredClaim(otherAllocatedClaim)}, - oldObj: structuredClaim(otherAllocatedClaim), + claims: []*resourceapi.ResourceClaim{pendingClaim, otherAllocatedClaim}, + oldObj: otherAllocatedClaim, newObj: func() *resourceapi.ResourceClaim { - claim := structuredClaim(otherAllocatedClaim).DeepCopy() + claim := otherAllocatedClaim.DeepCopy() claim.Status.Allocation = nil return claim }(), - // TODO (https://github.com/kubernetes/kubernetes/issues/123697): don't wake up - // claims not using structured parameters. wantHint: framework.Queue, }, } @@ -1455,7 +1201,7 @@ func Test_isSchedulableAfterClaimChange(t *testing.T) { features := feature.Features{ EnableDynamicResourceAllocation: true, } - testCtx := setup(t, nil, tc.claims, nil, nil, nil, features) + testCtx := setup(t, nil, tc.claims, nil, nil, features) oldObj := tc.oldObj newObj := tc.newObj if claim, ok := tc.newObj.(*resourceapi.ResourceClaim); ok { @@ -1578,7 +1324,7 @@ func Test_isSchedulableAfterPodChange(t *testing.T) { features := feature.Features{ EnableDynamicResourceAllocation: true, } - testCtx := setup(t, nil, tc.claims, nil, nil, tc.objs, features) + testCtx := setup(t, nil, tc.claims, nil, tc.objs, features) gotHint, err := testCtx.p.isSchedulableAfterPodChange(logger, tc.pod, nil, tc.obj) if tc.wantErr { if err == nil { @@ -1597,145 +1343,6 @@ func Test_isSchedulableAfterPodChange(t *testing.T) { } } -func Test_isSchedulableAfterPodSchedulingContextChange(t *testing.T) { - testcases := map[string]struct { - pod *v1.Pod - schedulings []*resourceapi.PodSchedulingContext - claims []*resourceapi.ResourceClaim - oldObj, newObj interface{} - wantHint framework.QueueingHint - wantErr bool - }{ - "skip-deleted": { - pod: podWithClaimTemplate, - oldObj: scheduling, - wantHint: framework.QueueSkip, - }, - "skip-missed-deleted": { - pod: podWithClaimTemplate, - oldObj: cache.DeletedFinalStateUnknown{ - Obj: scheduling, - }, - wantHint: framework.QueueSkip, - }, - "backoff-wrong-old-object": { - pod: podWithClaimTemplate, - oldObj: "not-a-scheduling-context", - newObj: scheduling, - wantErr: true, - }, - "backoff-missed-wrong-old-object": { - pod: podWithClaimTemplate, - oldObj: cache.DeletedFinalStateUnknown{ - Obj: "not-a-scheduling-context", - }, - newObj: scheduling, - wantErr: true, - }, - "skip-unrelated-object": { - pod: podWithClaimTemplate, - claims: []*resourceapi.ResourceClaim{pendingClaim}, - newObj: func() *resourceapi.PodSchedulingContext { - scheduling := scheduling.DeepCopy() - scheduling.Name += "-foo" - return scheduling - }(), - wantHint: framework.QueueSkip, - }, - "backoff-wrong-new-object": { - pod: podWithClaimTemplate, - oldObj: scheduling, - 
newObj: "not-a-scheduling-context", - wantErr: true, - }, - "skip-missing-claim": { - pod: podWithClaimTemplate, - oldObj: scheduling, - newObj: schedulingInfo, - wantHint: framework.QueueSkip, - }, - "skip-missing-infos": { - pod: podWithClaimTemplateInStatus, - claims: []*resourceapi.ResourceClaim{pendingClaim}, - oldObj: scheduling, - newObj: scheduling, - wantHint: framework.QueueSkip, - }, - "queue-new-infos": { - pod: podWithClaimTemplateInStatus, - claims: []*resourceapi.ResourceClaim{pendingClaim}, - oldObj: scheduling, - newObj: schedulingInfo, - wantHint: framework.Queue, - }, - "queue-bad-selected-node": { - pod: podWithClaimTemplateInStatus, - claims: []*resourceapi.ResourceClaim{pendingClaim}, - oldObj: func() *resourceapi.PodSchedulingContext { - scheduling := schedulingInfo.DeepCopy() - scheduling.Spec.SelectedNode = workerNode.Name - return scheduling - }(), - newObj: func() *resourceapi.PodSchedulingContext { - scheduling := schedulingInfo.DeepCopy() - scheduling.Spec.SelectedNode = workerNode.Name - scheduling.Status.ResourceClaims[0].UnsuitableNodes = append(scheduling.Status.ResourceClaims[0].UnsuitableNodes, scheduling.Spec.SelectedNode) - return scheduling - }(), - wantHint: framework.Queue, - }, - "skip-spec-changes": { - pod: podWithClaimTemplateInStatus, - claims: []*resourceapi.ResourceClaim{pendingClaim}, - oldObj: schedulingInfo, - newObj: func() *resourceapi.PodSchedulingContext { - scheduling := schedulingInfo.DeepCopy() - scheduling.Spec.SelectedNode = workerNode.Name - return scheduling - }(), - wantHint: framework.QueueSkip, - }, - "backoff-other-changes": { - pod: podWithClaimTemplateInStatus, - claims: []*resourceapi.ResourceClaim{pendingClaim}, - oldObj: schedulingInfo, - newObj: func() *resourceapi.PodSchedulingContext { - scheduling := schedulingInfo.DeepCopy() - scheduling.Finalizers = append(scheduling.Finalizers, "foo") - return scheduling - }(), - wantHint: framework.Queue, - }, - } - - for name, tc := range testcases { - tc := tc - t.Run(name, func(t *testing.T) { - t.Parallel() - logger, _ := ktesting.NewTestContext(t) - features := feature.Features{ - EnableDynamicResourceAllocation: true, - EnableDRAControlPlaneController: true, - } - testCtx := setup(t, nil, tc.claims, nil, tc.schedulings, nil, features) - gotHint, err := testCtx.p.isSchedulableAfterPodSchedulingContextChange(logger, tc.pod, tc.oldObj, tc.newObj) - if tc.wantErr { - if err == nil { - t.Fatal("want an error, got none") - } - return - } - - if err != nil { - t.Fatalf("want no error, got: %v", err) - } - if tc.wantHint != gotHint { - t.Fatalf("want %#v, got %#v", tc.wantHint.String(), gotHint.String()) - } - }) - } -} - func Test_isSchedulableAfterResourceSliceChange(t *testing.T) { testcases := map[string]struct { pod *v1.Pod @@ -1795,14 +1402,14 @@ func Test_isSchedulableAfterResourceSliceChange(t *testing.T) { "backoff-unexpected-object-with-oldObj-newObj": { pod: podWithClaimName, claims: []*resourceapi.ResourceClaim{pendingClaim}, - oldObj: scheduling, - newObj: scheduling, + oldObj: pendingClaim, + newObj: pendingClaim, wantErr: true, }, "backoff-unexpected-object-with-oldObj": { pod: podWithClaimName, claims: []*resourceapi.ResourceClaim{pendingClaim}, - oldObj: scheduling, + oldObj: pendingClaim, newObj: resourceSlice, wantErr: true, }, @@ -1810,7 +1417,7 @@ func Test_isSchedulableAfterResourceSliceChange(t *testing.T) { pod: podWithClaimName, claims: []*resourceapi.ResourceClaim{pendingClaim}, oldObj: resourceSlice, - newObj: scheduling, + newObj: pendingClaim, wantErr: 
true, }, } @@ -1822,7 +1429,7 @@ func Test_isSchedulableAfterResourceSliceChange(t *testing.T) { features := feature.Features{ EnableDynamicResourceAllocation: true, } - testCtx := setup(t, nil, tc.claims, nil, nil, nil, features) + testCtx := setup(t, nil, tc.claims, nil, nil, features) gotHint, err := testCtx.p.isSchedulableAfterResourceSliceChange(logger, tc.pod, tc.oldObj, tc.newObj) if tc.wantErr { if err == nil { diff --git a/pkg/scheduler/framework/plugins/feature/feature.go b/pkg/scheduler/framework/plugins/feature/feature.go index 6fd2bb82c63..af28abfc901 100644 --- a/pkg/scheduler/framework/plugins/feature/feature.go +++ b/pkg/scheduler/framework/plugins/feature/feature.go @@ -20,7 +20,6 @@ package feature // This struct allows us to break the dependency of the plugins on // the internal k8s features pkg. type Features struct { - EnableDRAControlPlaneController bool EnableDynamicResourceAllocation bool EnableVolumeCapacityPriority bool EnableNodeInclusionPolicyInPodTopologySpread bool diff --git a/pkg/scheduler/framework/plugins/registry.go b/pkg/scheduler/framework/plugins/registry.go index 2903a65c87c..f63ef0bbd4a 100644 --- a/pkg/scheduler/framework/plugins/registry.go +++ b/pkg/scheduler/framework/plugins/registry.go @@ -46,7 +46,6 @@ import ( // through the WithFrameworkOutOfTreeRegistry option. func NewInTreeRegistry() runtime.Registry { fts := plfeature.Features{ - EnableDRAControlPlaneController: feature.DefaultFeatureGate.Enabled(features.DRAControlPlaneController), EnableDynamicResourceAllocation: feature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation), EnableVolumeCapacityPriority: feature.DefaultFeatureGate.Enabled(features.VolumeCapacityPriority), EnableNodeInclusionPolicyInPodTopologySpread: feature.DefaultFeatureGate.Enabled(features.NodeInclusionPolicyInPodTopologySpread), diff --git a/pkg/scheduler/framework/types.go b/pkg/scheduler/framework/types.go index bc39253627f..18142ea6faa 100644 --- a/pkg/scheduler/framework/types.go +++ b/pkg/scheduler/framework/types.go @@ -132,7 +132,6 @@ const ( CSIDriver GVK = "storage.k8s.io/CSIDriver" CSIStorageCapacity GVK = "storage.k8s.io/CSIStorageCapacity" StorageClass GVK = "storage.k8s.io/StorageClass" - PodSchedulingContext GVK = "PodSchedulingContext" ResourceClaim GVK = "ResourceClaim" ResourceSlice GVK = "ResourceSlice" DeviceClass GVK = "DeviceClass" @@ -227,7 +226,6 @@ func UnrollWildCardResource() []ClusterEventWithHint { {Event: ClusterEvent{Resource: CSIDriver, ActionType: All}}, {Event: ClusterEvent{Resource: CSIStorageCapacity, ActionType: All}}, {Event: ClusterEvent{Resource: StorageClass, ActionType: All}}, - {Event: ClusterEvent{Resource: PodSchedulingContext, ActionType: All}}, {Event: ClusterEvent{Resource: ResourceClaim, ActionType: All}}, {Event: ClusterEvent{Resource: DeviceClass, ActionType: All}}, } diff --git a/pkg/scheduler/scheduler_test.go b/pkg/scheduler/scheduler_test.go index e8e2c833892..0d313e3b364 100644 --- a/pkg/scheduler/scheduler_test.go +++ b/pkg/scheduler/scheduler_test.go @@ -639,9 +639,6 @@ func Test_buildQueueingHintMap(t *testing.T) { {Resource: framework.PersistentVolumeClaim, ActionType: framework.All}: { {PluginName: filterWithoutEnqueueExtensions, QueueingHintFn: defaultQueueingHintFn}, }, - {Resource: framework.PodSchedulingContext, ActionType: framework.All}: { - {PluginName: filterWithoutEnqueueExtensions, QueueingHintFn: defaultQueueingHintFn}, - }, {Resource: framework.ResourceClaim, ActionType: framework.All}: { {PluginName: filterWithoutEnqueueExtensions, 
QueueingHintFn: defaultQueueingHintFn}, }, @@ -806,7 +803,6 @@ func Test_UnionedGVKs(t *testing.T) { framework.PersistentVolume: framework.All, framework.PersistentVolumeClaim: framework.All, framework.StorageClass: framework.All, - framework.PodSchedulingContext: framework.All, framework.ResourceClaim: framework.All, framework.DeviceClass: framework.All, }, diff --git a/pkg/scheduler/testing/wrappers.go b/pkg/scheduler/testing/wrappers.go index c0ab9b491ec..43c87ab2667 100644 --- a/pkg/scheduler/testing/wrappers.go +++ b/pkg/scheduler/testing/wrappers.go @@ -18,6 +18,7 @@ package testing import ( "fmt" + "slices" "time" v1 "k8s.io/api/core/v1" @@ -952,8 +953,8 @@ func (p *PersistentVolumeWrapper) Label(k, v string) *PersistentVolumeWrapper { type ResourceClaimWrapper struct{ resourceapi.ResourceClaim } // MakeResourceClaim creates a ResourceClaim wrapper. -func MakeResourceClaim(controller string) *ResourceClaimWrapper { - return &ResourceClaimWrapper{resourceapi.ResourceClaim{Spec: resourceapi.ResourceClaimSpec{Controller: controller}}} +func MakeResourceClaim() *ResourceClaimWrapper { + return &ResourceClaimWrapper{} } // FromResourceClaim creates a ResourceClaim wrapper from some existing object. @@ -1014,6 +1015,9 @@ func (wrapper *ResourceClaimWrapper) Request(deviceClassName string) *ResourceCl // Allocation sets the allocation of the inner object. func (wrapper *ResourceClaimWrapper) Allocation(allocation *resourceapi.AllocationResult) *ResourceClaimWrapper { + if !slices.Contains(wrapper.ResourceClaim.Finalizers, resourceapi.Finalizer) { + wrapper.ResourceClaim.Finalizers = append(wrapper.ResourceClaim.Finalizers, resourceapi.Finalizer) + } wrapper.ResourceClaim.Status.Allocation = allocation return wrapper } @@ -1024,24 +1028,6 @@ func (wrapper *ResourceClaimWrapper) Deleting(time metav1.Time) *ResourceClaimWr return wrapper } -// Structured turns a "normal" claim into one which was allocated via structured parameters. -// The only difference is that there is no controller name and the special finalizer -// gets added. -func (wrapper *ResourceClaimWrapper) Structured() *ResourceClaimWrapper { - wrapper.Spec.Controller = "" - if wrapper.ResourceClaim.Status.Allocation != nil { - wrapper.ResourceClaim.Finalizers = append(wrapper.ResourceClaim.Finalizers, resourceapi.Finalizer) - wrapper.ResourceClaim.Status.Allocation.Controller = "" - } - return wrapper -} - -// DeallocationRequested sets that field of the inner object. -func (wrapper *ResourceClaimWrapper) DeallocationRequested(deallocationRequested bool) *ResourceClaimWrapper { - wrapper.ResourceClaim.Status.DeallocationRequested = deallocationRequested - return wrapper -} - // ReservedFor sets that field of the inner object. func (wrapper *ResourceClaimWrapper) ReservedFor(consumers ...resourceapi.ResourceClaimConsumerReference) *ResourceClaimWrapper { wrapper.ResourceClaim.Status.ReservedFor = consumers @@ -1053,86 +1039,6 @@ func (wrapper *ResourceClaimWrapper) ReservedForPod(podName string, podUID types return wrapper.ReservedFor(resourceapi.ResourceClaimConsumerReference{Resource: "pods", Name: podName, UID: podUID}) } -// PodSchedulingWrapper wraps a PodSchedulingContext inside. -type PodSchedulingWrapper struct { - resourceapi.PodSchedulingContext -} - -// MakePodSchedulingContexts creates a PodSchedulingContext wrapper. 
-func MakePodSchedulingContexts() *PodSchedulingWrapper { - return &PodSchedulingWrapper{resourceapi.PodSchedulingContext{}} -} - -// FromPodSchedulingContexts creates a PodSchedulingContext wrapper from an existing object. -func FromPodSchedulingContexts(other *resourceapi.PodSchedulingContext) *PodSchedulingWrapper { - return &PodSchedulingWrapper{*other.DeepCopy()} -} - -// Obj returns the inner object. -func (wrapper *PodSchedulingWrapper) Obj() *resourceapi.PodSchedulingContext { - return &wrapper.PodSchedulingContext -} - -// Name sets `s` as the name of the inner object. -func (wrapper *PodSchedulingWrapper) Name(s string) *PodSchedulingWrapper { - wrapper.SetName(s) - return wrapper -} - -// UID sets `s` as the UID of the inner object. -func (wrapper *PodSchedulingWrapper) UID(s string) *PodSchedulingWrapper { - wrapper.SetUID(types.UID(s)) - return wrapper -} - -// Namespace sets `s` as the namespace of the inner object. -func (wrapper *PodSchedulingWrapper) Namespace(s string) *PodSchedulingWrapper { - wrapper.SetNamespace(s) - return wrapper -} - -// OwnerReference updates the owning controller of the inner object. -func (wrapper *PodSchedulingWrapper) OwnerReference(name, uid string, gvk schema.GroupVersionKind) *PodSchedulingWrapper { - wrapper.OwnerReferences = []metav1.OwnerReference{ - { - APIVersion: gvk.GroupVersion().String(), - Kind: gvk.Kind, - Name: name, - UID: types.UID(uid), - Controller: ptr.To(true), - BlockOwnerDeletion: ptr.To(true), - }, - } - return wrapper -} - -// Label applies a {k,v} label pair to the inner object -func (wrapper *PodSchedulingWrapper) Label(k, v string) *PodSchedulingWrapper { - if wrapper.Labels == nil { - wrapper.Labels = make(map[string]string) - } - wrapper.Labels[k] = v - return wrapper -} - -// SelectedNode sets that field of the inner object. -func (wrapper *PodSchedulingWrapper) SelectedNode(s string) *PodSchedulingWrapper { - wrapper.Spec.SelectedNode = s - return wrapper -} - -// PotentialNodes sets that field of the inner object. -func (wrapper *PodSchedulingWrapper) PotentialNodes(nodes ...string) *PodSchedulingWrapper { - wrapper.Spec.PotentialNodes = nodes - return wrapper -} - -// ResourceClaims sets that field of the inner object. 
-func (wrapper *PodSchedulingWrapper) ResourceClaims(statuses ...resourceapi.ResourceClaimSchedulingStatus) *PodSchedulingWrapper { - wrapper.Status.ResourceClaims = statuses - return wrapper -} - type ResourceSliceWrapper struct { resourceapi.ResourceSlice } diff --git a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/controller_policy.go b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/controller_policy.go index a4a03243ee5..16b0a93f3c5 100644 --- a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/controller_policy.go +++ b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/controller_policy.go @@ -210,7 +210,6 @@ func buildControllerRoles() ([]rbacv1.ClusterRole, []rbacv1.ClusterRoleBinding) rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("pods").RuleOrDie(), rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("pods/finalizers").RuleOrDie(), rbacv1helpers.NewRule("get", "list", "watch", "create", "delete").Groups(resourceGroup).Resources("resourceclaims").RuleOrDie(), - rbacv1helpers.NewRule("get", "list", "watch", "create", "update", "patch").Groups(resourceGroup).Resources("podschedulingcontexts").RuleOrDie(), rbacv1helpers.NewRule("update", "patch").Groups(resourceGroup).Resources("resourceclaims", "resourceclaims/status").RuleOrDie(), rbacv1helpers.NewRule("update", "patch").Groups(legacyGroup).Resources("pods/status").RuleOrDie(), eventsRule(), diff --git a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go index 4c203379e1f..58333c3ec02 100644 --- a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go +++ b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go @@ -583,8 +583,6 @@ func ClusterRoles() []rbacv1.ClusterRole { rbacv1helpers.NewRule(Read...).Groups(resourceGroup).Resources("deviceclasses").RuleOrDie(), rbacv1helpers.NewRule(ReadUpdate...).Groups(resourceGroup).Resources("resourceclaims").RuleOrDie(), rbacv1helpers.NewRule(ReadUpdate...).Groups(resourceGroup).Resources("resourceclaims/status").RuleOrDie(), - rbacv1helpers.NewRule(ReadWrite...).Groups(resourceGroup).Resources("podschedulingcontexts").RuleOrDie(), - rbacv1helpers.NewRule(Read...).Groups(resourceGroup).Resources("podschedulingcontexts/status").RuleOrDie(), rbacv1helpers.NewRule(ReadUpdate...).Groups(legacyGroup).Resources("pods/finalizers").RuleOrDie(), rbacv1helpers.NewRule(Read...).Groups(resourceGroup).Resources("resourceslices").RuleOrDie(), ) diff --git a/staging/src/k8s.io/api/resource/v1alpha3/generated.pb.go b/staging/src/k8s.io/api/resource/v1alpha3/generated.pb.go index 4ac01cc6f3b..d60033a89c0 100644 --- a/staging/src/k8s.io/api/resource/v1alpha3/generated.pb.go +++ b/staging/src/k8s.io/api/resource/v1alpha3/generated.pb.go @@ -580,122 +580,10 @@ func (m *OpaqueDeviceConfiguration) XXX_DiscardUnknown() { var xxx_messageInfo_OpaqueDeviceConfiguration proto.InternalMessageInfo -func (m *PodSchedulingContext) Reset() { *m = PodSchedulingContext{} } -func (*PodSchedulingContext) ProtoMessage() {} -func (*PodSchedulingContext) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{19} -} -func (m *PodSchedulingContext) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodSchedulingContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodSchedulingContext) XXX_Merge(src proto.Message) { - 
xxx_messageInfo_PodSchedulingContext.Merge(m, src) -} -func (m *PodSchedulingContext) XXX_Size() int { - return m.Size() -} -func (m *PodSchedulingContext) XXX_DiscardUnknown() { - xxx_messageInfo_PodSchedulingContext.DiscardUnknown(m) -} - -var xxx_messageInfo_PodSchedulingContext proto.InternalMessageInfo - -func (m *PodSchedulingContextList) Reset() { *m = PodSchedulingContextList{} } -func (*PodSchedulingContextList) ProtoMessage() {} -func (*PodSchedulingContextList) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{20} -} -func (m *PodSchedulingContextList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodSchedulingContextList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodSchedulingContextList) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodSchedulingContextList.Merge(m, src) -} -func (m *PodSchedulingContextList) XXX_Size() int { - return m.Size() -} -func (m *PodSchedulingContextList) XXX_DiscardUnknown() { - xxx_messageInfo_PodSchedulingContextList.DiscardUnknown(m) -} - -var xxx_messageInfo_PodSchedulingContextList proto.InternalMessageInfo - -func (m *PodSchedulingContextSpec) Reset() { *m = PodSchedulingContextSpec{} } -func (*PodSchedulingContextSpec) ProtoMessage() {} -func (*PodSchedulingContextSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{21} -} -func (m *PodSchedulingContextSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodSchedulingContextSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodSchedulingContextSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodSchedulingContextSpec.Merge(m, src) -} -func (m *PodSchedulingContextSpec) XXX_Size() int { - return m.Size() -} -func (m *PodSchedulingContextSpec) XXX_DiscardUnknown() { - xxx_messageInfo_PodSchedulingContextSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_PodSchedulingContextSpec proto.InternalMessageInfo - -func (m *PodSchedulingContextStatus) Reset() { *m = PodSchedulingContextStatus{} } -func (*PodSchedulingContextStatus) ProtoMessage() {} -func (*PodSchedulingContextStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{22} -} -func (m *PodSchedulingContextStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodSchedulingContextStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodSchedulingContextStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodSchedulingContextStatus.Merge(m, src) -} -func (m *PodSchedulingContextStatus) XXX_Size() int { - return m.Size() -} -func (m *PodSchedulingContextStatus) XXX_DiscardUnknown() { - xxx_messageInfo_PodSchedulingContextStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_PodSchedulingContextStatus proto.InternalMessageInfo - func (m *ResourceClaim) Reset() { *m = ResourceClaim{} } func (*ResourceClaim) ProtoMessage() {} func (*ResourceClaim) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{23} + return fileDescriptor_66649ee9bbcd89d2, []int{19} } func (m *ResourceClaim) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ 
-723,7 +611,7 @@ var xxx_messageInfo_ResourceClaim proto.InternalMessageInfo func (m *ResourceClaimConsumerReference) Reset() { *m = ResourceClaimConsumerReference{} } func (*ResourceClaimConsumerReference) ProtoMessage() {} func (*ResourceClaimConsumerReference) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{24} + return fileDescriptor_66649ee9bbcd89d2, []int{20} } func (m *ResourceClaimConsumerReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -751,7 +639,7 @@ var xxx_messageInfo_ResourceClaimConsumerReference proto.InternalMessageInfo func (m *ResourceClaimList) Reset() { *m = ResourceClaimList{} } func (*ResourceClaimList) ProtoMessage() {} func (*ResourceClaimList) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{25} + return fileDescriptor_66649ee9bbcd89d2, []int{21} } func (m *ResourceClaimList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -776,38 +664,10 @@ func (m *ResourceClaimList) XXX_DiscardUnknown() { var xxx_messageInfo_ResourceClaimList proto.InternalMessageInfo -func (m *ResourceClaimSchedulingStatus) Reset() { *m = ResourceClaimSchedulingStatus{} } -func (*ResourceClaimSchedulingStatus) ProtoMessage() {} -func (*ResourceClaimSchedulingStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{26} -} -func (m *ResourceClaimSchedulingStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceClaimSchedulingStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceClaimSchedulingStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceClaimSchedulingStatus.Merge(m, src) -} -func (m *ResourceClaimSchedulingStatus) XXX_Size() int { - return m.Size() -} -func (m *ResourceClaimSchedulingStatus) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceClaimSchedulingStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceClaimSchedulingStatus proto.InternalMessageInfo - func (m *ResourceClaimSpec) Reset() { *m = ResourceClaimSpec{} } func (*ResourceClaimSpec) ProtoMessage() {} func (*ResourceClaimSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{27} + return fileDescriptor_66649ee9bbcd89d2, []int{22} } func (m *ResourceClaimSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -835,7 +695,7 @@ var xxx_messageInfo_ResourceClaimSpec proto.InternalMessageInfo func (m *ResourceClaimStatus) Reset() { *m = ResourceClaimStatus{} } func (*ResourceClaimStatus) ProtoMessage() {} func (*ResourceClaimStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{28} + return fileDescriptor_66649ee9bbcd89d2, []int{23} } func (m *ResourceClaimStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -863,7 +723,7 @@ var xxx_messageInfo_ResourceClaimStatus proto.InternalMessageInfo func (m *ResourceClaimTemplate) Reset() { *m = ResourceClaimTemplate{} } func (*ResourceClaimTemplate) ProtoMessage() {} func (*ResourceClaimTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{29} + return fileDescriptor_66649ee9bbcd89d2, []int{24} } func (m *ResourceClaimTemplate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -891,7 +751,7 @@ var xxx_messageInfo_ResourceClaimTemplate proto.InternalMessageInfo func (m *ResourceClaimTemplateList) Reset() { *m = ResourceClaimTemplateList{} } func 
(*ResourceClaimTemplateList) ProtoMessage() {} func (*ResourceClaimTemplateList) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{30} + return fileDescriptor_66649ee9bbcd89d2, []int{25} } func (m *ResourceClaimTemplateList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -919,7 +779,7 @@ var xxx_messageInfo_ResourceClaimTemplateList proto.InternalMessageInfo func (m *ResourceClaimTemplateSpec) Reset() { *m = ResourceClaimTemplateSpec{} } func (*ResourceClaimTemplateSpec) ProtoMessage() {} func (*ResourceClaimTemplateSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{31} + return fileDescriptor_66649ee9bbcd89d2, []int{26} } func (m *ResourceClaimTemplateSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -947,7 +807,7 @@ var xxx_messageInfo_ResourceClaimTemplateSpec proto.InternalMessageInfo func (m *ResourcePool) Reset() { *m = ResourcePool{} } func (*ResourcePool) ProtoMessage() {} func (*ResourcePool) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{32} + return fileDescriptor_66649ee9bbcd89d2, []int{27} } func (m *ResourcePool) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -975,7 +835,7 @@ var xxx_messageInfo_ResourcePool proto.InternalMessageInfo func (m *ResourceSlice) Reset() { *m = ResourceSlice{} } func (*ResourceSlice) ProtoMessage() {} func (*ResourceSlice) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{33} + return fileDescriptor_66649ee9bbcd89d2, []int{28} } func (m *ResourceSlice) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1003,7 +863,7 @@ var xxx_messageInfo_ResourceSlice proto.InternalMessageInfo func (m *ResourceSliceList) Reset() { *m = ResourceSliceList{} } func (*ResourceSliceList) ProtoMessage() {} func (*ResourceSliceList) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{34} + return fileDescriptor_66649ee9bbcd89d2, []int{29} } func (m *ResourceSliceList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1031,7 +891,7 @@ var xxx_messageInfo_ResourceSliceList proto.InternalMessageInfo func (m *ResourceSliceSpec) Reset() { *m = ResourceSliceSpec{} } func (*ResourceSliceSpec) ProtoMessage() {} func (*ResourceSliceSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_66649ee9bbcd89d2, []int{35} + return fileDescriptor_66649ee9bbcd89d2, []int{30} } func (m *ResourceSliceSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1078,14 +938,9 @@ func init() { proto.RegisterType((*DeviceRequestAllocationResult)(nil), "k8s.io.api.resource.v1alpha3.DeviceRequestAllocationResult") proto.RegisterType((*DeviceSelector)(nil), "k8s.io.api.resource.v1alpha3.DeviceSelector") proto.RegisterType((*OpaqueDeviceConfiguration)(nil), "k8s.io.api.resource.v1alpha3.OpaqueDeviceConfiguration") - proto.RegisterType((*PodSchedulingContext)(nil), "k8s.io.api.resource.v1alpha3.PodSchedulingContext") - proto.RegisterType((*PodSchedulingContextList)(nil), "k8s.io.api.resource.v1alpha3.PodSchedulingContextList") - proto.RegisterType((*PodSchedulingContextSpec)(nil), "k8s.io.api.resource.v1alpha3.PodSchedulingContextSpec") - proto.RegisterType((*PodSchedulingContextStatus)(nil), "k8s.io.api.resource.v1alpha3.PodSchedulingContextStatus") proto.RegisterType((*ResourceClaim)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaim") proto.RegisterType((*ResourceClaimConsumerReference)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimConsumerReference") 
proto.RegisterType((*ResourceClaimList)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimList") - proto.RegisterType((*ResourceClaimSchedulingStatus)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimSchedulingStatus") proto.RegisterType((*ResourceClaimSpec)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimSpec") proto.RegisterType((*ResourceClaimStatus)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimStatus") proto.RegisterType((*ResourceClaimTemplate)(nil), "k8s.io.api.resource.v1alpha3.ResourceClaimTemplate") @@ -1102,138 +957,122 @@ func init() { } var fileDescriptor_66649ee9bbcd89d2 = []byte{ - // 2085 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x1a, 0xcd, 0x6f, 0x1c, 0x57, - 0x3d, 0xb3, 0xeb, 0xcf, 0x9f, 0xbf, 0x9f, 0x93, 0xe0, 0x9a, 0x66, 0x37, 0x99, 0x22, 0x70, 0xda, - 0x74, 0xb6, 0x71, 0x4b, 0x1b, 0x5a, 0x90, 0xf0, 0xd8, 0x6e, 0xe4, 0x28, 0x1f, 0xce, 0xdb, 0x26, - 0x34, 0x50, 0x4a, 0x9f, 0x67, 0x9f, 0xed, 0xc1, 0xb3, 0x33, 0xd3, 0x99, 0x37, 0x26, 0x16, 0x12, - 0x8a, 0xb8, 0x70, 0x8b, 0x7a, 0xe5, 0x80, 0xb8, 0x21, 0x21, 0x0e, 0x70, 0xe0, 0x88, 0x54, 0x24, - 0x90, 0xc8, 0x31, 0x08, 0x0e, 0x3d, 0x2d, 0xcd, 0x22, 0xfe, 0x89, 0x5c, 0x40, 0xef, 0xcd, 0x9b, - 0x4f, 0xcf, 0x6c, 0x66, 0xa3, 0xca, 0x2a, 0x37, 0xcf, 0xef, 0xfb, 0xfd, 0xbe, 0xdf, 0x5b, 0xc3, - 0xa5, 0x83, 0x2b, 0xbe, 0x66, 0x3a, 0x2d, 0xe2, 0x9a, 0x2d, 0x8f, 0xfa, 0x4e, 0xe0, 0x19, 0xb4, - 0x75, 0x78, 0x99, 0x58, 0xee, 0x3e, 0x79, 0xbd, 0xb5, 0x47, 0x6d, 0xea, 0x11, 0x46, 0x3b, 0x9a, - 0xeb, 0x39, 0xcc, 0x41, 0x2f, 0x86, 0xd4, 0x1a, 0x71, 0x4d, 0x2d, 0xa2, 0xd6, 0x22, 0xea, 0xe5, - 0x57, 0xf7, 0x4c, 0xb6, 0x1f, 0xec, 0x68, 0x86, 0xd3, 0x6d, 0xed, 0x39, 0x7b, 0x4e, 0x4b, 0x30, - 0xed, 0x04, 0xbb, 0xe2, 0x4b, 0x7c, 0x88, 0xbf, 0x42, 0x61, 0xcb, 0x6a, 0x4a, 0xb5, 0xe1, 0x78, - 0x5c, 0x6d, 0x5e, 0xe1, 0xf2, 0x1b, 0x09, 0x4d, 0x97, 0x18, 0xfb, 0xa6, 0x4d, 0xbd, 0xa3, 0x96, - 0x7b, 0xb0, 0x97, 0xb5, 0x77, 0x18, 0x2e, 0xbf, 0xd5, 0xa5, 0x8c, 0x14, 0xe9, 0x6a, 0x95, 0x71, - 0x79, 0x81, 0xcd, 0xcc, 0xee, 0x71, 0x35, 0x6f, 0x3e, 0x8b, 0xc1, 0x37, 0xf6, 0x69, 0x97, 0xe4, - 0xf9, 0xd4, 0xff, 0x2a, 0x30, 0xbf, 0x66, 0x59, 0x8e, 0x41, 0x98, 0xe9, 0xd8, 0x98, 0xfa, 0x81, - 0xc5, 0xd0, 0x8f, 0x60, 0xbc, 0x43, 0x0f, 0x4d, 0x83, 0xfa, 0x4b, 0xca, 0x79, 0x65, 0x65, 0x6a, - 0xf5, 0x0d, 0x6d, 0x90, 0xb3, 0xb5, 0x0d, 0x41, 0x9c, 0x17, 0xa3, 0xcf, 0x3d, 0xea, 0x35, 0x4f, - 0xf5, 0x7b, 0xcd, 0xf1, 0x10, 0xef, 0xe3, 0x48, 0x2a, 0xba, 0x0b, 0xd3, 0xb6, 0xd3, 0xa1, 0x6d, - 0x6a, 0x51, 0x83, 0x39, 0xde, 0x52, 0x5d, 0x68, 0x39, 0x9f, 0xd6, 0xc2, 0xa3, 0xa0, 0x1d, 0x5e, - 0xd6, 0x6e, 0xa6, 0xe8, 0xf4, 0xf9, 0x7e, 0xaf, 0x39, 0x9d, 0x86, 0xe0, 0x8c, 0x1c, 0xb4, 0x0a, - 0x60, 0x38, 0x36, 0xf3, 0x1c, 0xcb, 0xa2, 0xde, 0xd2, 0xc8, 0x79, 0x65, 0x65, 0x52, 0x47, 0xd2, - 0x0a, 0x58, 0x8f, 0x31, 0x38, 0x45, 0xa5, 0x7e, 0x5e, 0x87, 0x29, 0x9d, 0xf8, 0xa6, 0x11, 0x5a, - 0x89, 0x7e, 0x06, 0x40, 0x18, 0xf3, 0xcc, 0x9d, 0x80, 0x89, 0xf3, 0xd7, 0x57, 0xa6, 0x56, 0xbf, - 0x35, 0xf8, 0xfc, 0x29, 0x76, 0x6d, 0x2d, 0xe6, 0xdd, 0xb4, 0x99, 0x77, 0xa4, 0xbf, 0x14, 0xa9, - 0x4f, 0x10, 0x3f, 0xff, 0x57, 0x73, 0xe6, 0x76, 0x40, 0x2c, 0x73, 0xd7, 0xa4, 0x9d, 0x9b, 0xa4, - 0x4b, 0x71, 0x4a, 0x23, 0x3a, 0x84, 0x09, 0x83, 0xb8, 0xc4, 0x30, 0xd9, 0xd1, 0x52, 0x4d, 0x68, - 0x7f, 0xab, 0xba, 0xf6, 0x75, 0xc9, 0x19, 0xea, 0xbe, 0x20, 0x75, 0x4f, 0x44, 0xe0, 0xe3, 0x9a, - 0x63, 0x5d, 0xcb, 0x16, 0xcc, 0xe5, 0x6c, 0x47, 0xf3, 0x50, 0x3f, 0xa0, 0x47, 0x22, 0x07, 0x26, - 0x31, 0xff, 0x13, 0xad, 0xc3, 0xe8, 0x21, 0xb1, 0x02, 
0xba, 0x54, 0x13, 0x11, 0x7b, 0xb5, 0x52, - 0x5e, 0x44, 0x52, 0x71, 0xc8, 0xfb, 0x76, 0xed, 0x8a, 0xb2, 0x7c, 0x00, 0x33, 0x19, 0x5b, 0x0b, - 0x74, 0x6d, 0x64, 0x75, 0x69, 0x29, 0x5d, 0x71, 0x8a, 0x6b, 0xee, 0xc1, 0x5e, 0x56, 0xf9, 0xed, - 0x80, 0xd8, 0xcc, 0x64, 0x47, 0x29, 0x65, 0xea, 0x55, 0x58, 0x58, 0xdf, 0xbc, 0x1e, 0x5a, 0x93, - 0xce, 0x15, 0x7a, 0xdf, 0xf5, 0xa8, 0xef, 0x9b, 0x8e, 0x1d, 0xea, 0x4d, 0x72, 0x65, 0x33, 0xc6, - 0xe0, 0x14, 0x95, 0x7a, 0x08, 0x63, 0x32, 0x4b, 0xce, 0xc3, 0x88, 0x4d, 0xba, 0x54, 0xf2, 0x4d, - 0x4b, 0xbe, 0x11, 0xe1, 0x53, 0x81, 0x41, 0xd7, 0x60, 0x74, 0x87, 0x47, 0x46, 0x9a, 0x7f, 0xb1, - 0x72, 0x10, 0xf5, 0xc9, 0x7e, 0xaf, 0x39, 0x2a, 0x00, 0x38, 0x14, 0xa1, 0x3e, 0xac, 0xc1, 0xb9, - 0x7c, 0x91, 0xad, 0x3b, 0xf6, 0xae, 0xb9, 0x17, 0x78, 0xe2, 0x03, 0x7d, 0x17, 0xc6, 0x42, 0x91, - 0xd2, 0xa2, 0x15, 0x69, 0xd1, 0x58, 0x5b, 0x40, 0x9f, 0xf6, 0x9a, 0x67, 0xf3, 0xac, 0x21, 0x06, - 0x4b, 0x3e, 0xb4, 0x02, 0x13, 0x1e, 0xfd, 0x38, 0xa0, 0x3e, 0xf3, 0x45, 0xde, 0x4d, 0xea, 0xd3, - 0x3c, 0x75, 0xb0, 0x84, 0xe1, 0x18, 0x8b, 0x1e, 0x28, 0xb0, 0x18, 0x56, 0x72, 0xc6, 0x06, 0x59, - 0xc5, 0x97, 0xab, 0xe4, 0x44, 0x86, 0x51, 0xff, 0xaa, 0x34, 0x76, 0xb1, 0x00, 0x89, 0x8b, 0x54, - 0xa9, 0xff, 0x51, 0xe0, 0x6c, 0x71, 0xd7, 0x41, 0xbb, 0x30, 0xee, 0x89, 0xbf, 0xa2, 0xe2, 0x7d, - 0xa7, 0x8a, 0x41, 0xf2, 0x98, 0xe5, 0x3d, 0x2c, 0xfc, 0xf6, 0x71, 0x24, 0x1c, 0x19, 0x30, 0x66, - 0x08, 0x9b, 0x64, 0x95, 0xbe, 0x33, 0x5c, 0x8f, 0xcc, 0x7a, 0x60, 0x36, 0x0a, 0x57, 0x08, 0xc6, - 0x52, 0xb4, 0xfa, 0x5b, 0x05, 0xe6, 0x72, 0x55, 0x84, 0x1a, 0x50, 0x37, 0x6d, 0x26, 0xd2, 0xaa, - 0x1e, 0xc6, 0x68, 0xcb, 0x66, 0x77, 0x79, 0xb2, 0x63, 0x8e, 0x40, 0x17, 0x60, 0x64, 0xc7, 0x71, - 0x2c, 0x11, 0x8e, 0x09, 0x7d, 0xa6, 0xdf, 0x6b, 0x4e, 0xea, 0x8e, 0x63, 0x85, 0x14, 0x02, 0x85, - 0xbe, 0x01, 0x63, 0x3e, 0xf3, 0x4c, 0x7b, 0x4f, 0xf6, 0xc8, 0xb9, 0x7e, 0xaf, 0x39, 0xd5, 0x16, - 0x90, 0x90, 0x4c, 0xa2, 0xd1, 0xcb, 0x30, 0x7e, 0x48, 0x3d, 0x51, 0x21, 0xa3, 0x82, 0x52, 0x74, - 0xe0, 0xbb, 0x21, 0x28, 0x24, 0x8d, 0x08, 0xd4, 0xdf, 0xd7, 0x60, 0x4a, 0x06, 0xd0, 0x22, 0x66, - 0x17, 0xdd, 0x4b, 0x25, 0x54, 0x18, 0x89, 0x57, 0x86, 0x88, 0x84, 0x3e, 0x1f, 0x35, 0xaf, 0x82, - 0x0c, 0xa4, 0x30, 0x65, 0x38, 0xb6, 0xcf, 0x3c, 0x62, 0xda, 0x32, 0x5d, 0xb3, 0x0d, 0x62, 0x50, - 0xe2, 0x49, 0x36, 0x7d, 0x51, 0x2a, 0x98, 0x4a, 0x60, 0x3e, 0x4e, 0xcb, 0x45, 0x1f, 0xc6, 0x21, - 0xae, 0x0b, 0x0d, 0x6f, 0x56, 0xd2, 0xc0, 0x0f, 0x5f, 0x2d, 0xba, 0x7f, 0x53, 0x60, 0xa9, 0x8c, - 0x29, 0x53, 0x8f, 0xca, 0x73, 0xd5, 0x63, 0xed, 0xe4, 0xea, 0xf1, 0xcf, 0x4a, 0x2a, 0xf6, 0xbe, - 0x8f, 0x3e, 0x82, 0x09, 0xbe, 0xda, 0x74, 0x08, 0x23, 0x72, 0x85, 0x78, 0x6d, 0x50, 0xfb, 0xf6, - 0x35, 0x4e, 0xcd, 0xc7, 0xfd, 0xad, 0x9d, 0x1f, 0x53, 0x83, 0xdd, 0xa0, 0x8c, 0x24, 0xcd, 0x38, - 0x81, 0xe1, 0x58, 0x2a, 0xba, 0x05, 0x23, 0xbe, 0x4b, 0x8d, 0x61, 0x06, 0x91, 0x30, 0xad, 0xed, - 0x52, 0x23, 0xe9, 0xd7, 0xfc, 0x0b, 0x0b, 0x41, 0xea, 0xaf, 0xd2, 0xc1, 0xf0, 0xfd, 0x6c, 0x30, - 0xca, 0x5c, 0xac, 0x9c, 0x9c, 0x8b, 0x3f, 0x8d, 0x5b, 0x81, 0xb0, 0xef, 0xba, 0xe9, 0x33, 0xf4, - 0xc1, 0x31, 0x37, 0x6b, 0xd5, 0xdc, 0xcc, 0xb9, 0x85, 0x93, 0xe3, 0x2a, 0x8b, 0x20, 0x29, 0x17, - 0xdf, 0x84, 0x51, 0x93, 0xd1, 0x6e, 0x54, 0x5f, 0x17, 0x2b, 0xfb, 0x58, 0x9f, 0x91, 0x52, 0x47, - 0xb7, 0x38, 0x3f, 0x0e, 0xc5, 0xa8, 0xbf, 0xab, 0x65, 0x4e, 0xc0, 0x7d, 0x8f, 0x7e, 0x08, 0x93, - 0xbe, 0x9c, 0xc8, 0x51, 0x97, 0xb8, 0x54, 0x45, 0x4f, 0xbc, 0x12, 0x2e, 0x48, 0x55, 0x93, 0x11, - 0xc4, 0xc7, 0x89, 0xc4, 0x54, 0x05, 0xd7, 0x86, 0xaa, 0xe0, 0x5c, 0xfc, 0xcb, 
0x2a, 0x18, 0xdd, - 0x83, 0x19, 0x3f, 0x30, 0x19, 0xd9, 0xb1, 0x28, 0x5f, 0x4b, 0xfd, 0xca, 0x9b, 0xec, 0x42, 0xbf, - 0xd7, 0x9c, 0x69, 0xa7, 0x59, 0x71, 0x56, 0x92, 0xea, 0x41, 0x51, 0x6e, 0xa0, 0x1f, 0xc0, 0x98, - 0xe3, 0x92, 0x8f, 0x03, 0x2a, 0x03, 0xfe, 0x8c, 0xe5, 0xf0, 0x96, 0xa0, 0x2d, 0xca, 0x40, 0xe0, - 0xc7, 0x09, 0xd1, 0x58, 0x8a, 0x54, 0x1f, 0x2a, 0x30, 0x9f, 0xef, 0x93, 0x43, 0x34, 0xa2, 0x6d, - 0x98, 0xed, 0x12, 0x66, 0xec, 0xc7, 0xb3, 0x4a, 0x54, 0xe7, 0xa4, 0xbe, 0xd2, 0xef, 0x35, 0x67, - 0x6f, 0x64, 0x30, 0x4f, 0x7b, 0x4d, 0xf4, 0x6e, 0x60, 0x59, 0x47, 0xd9, 0x75, 0x34, 0xc7, 0xaf, - 0xfe, 0xa2, 0x0e, 0x33, 0x99, 0xb1, 0x50, 0x61, 0xf1, 0x5a, 0x83, 0xb9, 0x4e, 0x12, 0x47, 0x8e, - 0x90, 0x66, 0x7c, 0x45, 0x12, 0xa7, 0x93, 0x50, 0xf0, 0xe5, 0xe9, 0xb3, 0x59, 0x59, 0xff, 0xc2, - 0xb3, 0xf2, 0x2e, 0xcc, 0x92, 0x78, 0x11, 0xb8, 0xe1, 0x74, 0xa8, 0x1c, 0xc3, 0x9a, 0xe4, 0x9a, - 0x5d, 0xcb, 0x60, 0x9f, 0xf6, 0x9a, 0xa7, 0xf3, 0xeb, 0x03, 0x87, 0xe3, 0x9c, 0x14, 0xf4, 0x12, - 0x8c, 0x1a, 0x4e, 0x60, 0x33, 0x31, 0xab, 0xeb, 0x49, 0x15, 0xae, 0x73, 0x20, 0x0e, 0x71, 0xe8, - 0x9b, 0x30, 0x45, 0x3a, 0x5d, 0xd3, 0x5e, 0x33, 0x0c, 0xea, 0xfb, 0x4b, 0x63, 0x62, 0x4b, 0x88, - 0x67, 0xe1, 0x5a, 0x82, 0xc2, 0x69, 0x3a, 0xf5, 0x4f, 0x4a, 0xb4, 0x82, 0x96, 0xac, 0x4a, 0xe8, - 0x22, 0x5f, 0xbc, 0x04, 0x4a, 0x06, 0x27, 0xb5, 0x3b, 0x09, 0x30, 0x8e, 0xf0, 0xe8, 0xeb, 0x30, - 0xd6, 0xf1, 0xcc, 0x43, 0xea, 0xc9, 0xc8, 0xc4, 0xe5, 0xb5, 0x21, 0xa0, 0x58, 0x62, 0x79, 0xb0, - 0xdd, 0x68, 0x95, 0x49, 0x05, 0x7b, 0xdb, 0x71, 0x2c, 0x2c, 0x30, 0x42, 0x92, 0xb0, 0x4a, 0xba, - 0x30, 0x91, 0x14, 0xda, 0x2a, 0xb1, 0xea, 0x07, 0x30, 0x9b, 0xdb, 0xff, 0xaf, 0x41, 0xdd, 0xa0, - 0x96, 0xac, 0xa2, 0xd6, 0xe0, 0xe8, 0x1e, 0xbb, 0x3d, 0xe8, 0xe3, 0xfd, 0x5e, 0xb3, 0xbe, 0xbe, - 0x79, 0x1d, 0x73, 0x21, 0xea, 0x6f, 0x14, 0x78, 0xa1, 0xb4, 0xd2, 0x52, 0xa7, 0x55, 0x06, 0x9e, - 0x96, 0x00, 0xb8, 0xc4, 0x23, 0x5d, 0xca, 0xa8, 0xe7, 0x17, 0x0c, 0xb6, 0x6c, 0x3f, 0x97, 0x17, - 0x7b, 0x0d, 0x93, 0x9f, 0x6c, 0xde, 0x67, 0xd4, 0xe6, 0x3b, 0x58, 0x32, 0x33, 0xb7, 0x63, 0x41, - 0x38, 0x25, 0x54, 0xfd, 0x63, 0x0d, 0x4e, 0x6f, 0x3b, 0x9d, 0xb6, 0xb1, 0x4f, 0x3b, 0x81, 0x65, - 0xda, 0x7b, 0xfc, 0x52, 0x4c, 0xef, 0xb3, 0x13, 0x18, 0xd8, 0xef, 0x67, 0x06, 0xf6, 0x33, 0x1a, - 0x71, 0x91, 0x8d, 0x65, 0x93, 0x1b, 0x7d, 0xc4, 0xb7, 0x59, 0xc2, 0x82, 0xa8, 0xfb, 0x5e, 0x79, - 0x0e, 0xd9, 0x82, 0x3f, 0x89, 0x4c, 0xf8, 0x8d, 0xa5, 0x5c, 0xf5, 0xef, 0x0a, 0x2c, 0x15, 0xb1, - 0x9d, 0xc0, 0x10, 0xfe, 0x5e, 0x76, 0x08, 0xaf, 0x0e, 0x7f, 0xb6, 0x92, 0x69, 0xfc, 0x49, 0xc9, - 0x99, 0xc4, 0x58, 0xbe, 0x02, 0xd3, 0x61, 0xbb, 0xa2, 0x1d, 0x3e, 0x8d, 0x64, 0xe2, 0x9e, 0x96, - 0x82, 0xa6, 0xdb, 0x29, 0x1c, 0xce, 0x50, 0xa2, 0xb7, 0x61, 0xd6, 0x75, 0x18, 0xb5, 0x99, 0x49, - 0xac, 0x70, 0x24, 0x86, 0x97, 0x49, 0xc4, 0xfb, 0xda, 0x76, 0x06, 0x83, 0x73, 0x94, 0xea, 0x2f, - 0x15, 0x58, 0x2e, 0x8f, 0x0e, 0xfa, 0x29, 0xcc, 0x46, 0x27, 0x16, 0xfb, 0x72, 0xc5, 0x0b, 0x1e, - 0x4e, 0xf3, 0x24, 0xb2, 0x65, 0xc8, 0xcf, 0x46, 0x3d, 0x37, 0x43, 0xe6, 0xe3, 0x9c, 0x2a, 0xf5, - 0xd7, 0x35, 0x98, 0xc9, 0x90, 0x9c, 0x40, 0xc9, 0xdc, 0xce, 0x94, 0x4c, 0x6b, 0x98, 0x63, 0x96, - 0xd5, 0xca, 0xbd, 0x5c, 0xad, 0x5c, 0x1e, 0x46, 0xe8, 0xe0, 0x22, 0xe9, 0x2b, 0xd0, 0xc8, 0xd0, - 0xf3, 0x1d, 0x22, 0xe8, 0x52, 0x0f, 0xd3, 0x5d, 0xea, 0x51, 0xdb, 0xa0, 0xe8, 0x12, 0x4c, 0x10, - 0xd7, 0xbc, 0xea, 0x39, 0x81, 0x2b, 0x53, 0x2a, 0x4e, 0xfd, 0xb5, 0xed, 0x2d, 0x01, 0xc7, 0x31, - 0x05, 0xa7, 0x8e, 0x2c, 0x92, 0x13, 0x20, 0x75, 0x27, 0x0c, 0xe1, 0x38, 0xa6, 0x88, 0x17, 0x83, - 0x91, 
0xd2, 0xc5, 0x40, 0x87, 0x7a, 0x60, 0x76, 0xe4, 0x45, 0xf6, 0x35, 0x49, 0x50, 0xbf, 0xb3, - 0xb5, 0xf1, 0xb4, 0xd7, 0xbc, 0x50, 0xf6, 0x7e, 0xca, 0x8e, 0x5c, 0xea, 0x6b, 0x77, 0xb6, 0x36, - 0x30, 0x67, 0x56, 0xff, 0xa2, 0xc0, 0x42, 0xe6, 0x90, 0x27, 0xd0, 0x02, 0xb6, 0xb3, 0x2d, 0xe0, - 0x95, 0x21, 0x42, 0x56, 0x52, 0xfb, 0x0f, 0x14, 0x38, 0x37, 0xb0, 0x2c, 0x2a, 0xac, 0x59, 0xdf, - 0x81, 0xb9, 0xc0, 0xce, 0x2e, 0xbf, 0x61, 0xa5, 0x2f, 0xf2, 0x15, 0xeb, 0x4e, 0x16, 0x85, 0xf3, - 0xb4, 0xfc, 0xba, 0xb5, 0x70, 0x2c, 0x65, 0xd1, 0x7b, 0xf9, 0x97, 0xe7, 0x8b, 0x95, 0xaf, 0xdc, - 0x03, 0x9e, 0x9b, 0xb3, 0xcf, 0xc2, 0xb5, 0x4a, 0xcf, 0xc2, 0x9f, 0xd6, 0x60, 0xb1, 0x20, 0xfb, - 0xd1, 0x87, 0x00, 0xc9, 0xd6, 0x55, 0x10, 0xec, 0x02, 0x23, 0x8f, 0x3d, 0x2a, 0xcd, 0x8a, 0xf7, - 0xe0, 0x04, 0x9a, 0x92, 0x88, 0x7c, 0x98, 0xf2, 0xa8, 0x4f, 0xbd, 0x43, 0xda, 0x79, 0xd7, 0xf1, - 0x64, 0xc8, 0xbf, 0x3d, 0x44, 0xc8, 0x8f, 0x55, 0x5d, 0xb2, 0xdc, 0xe1, 0x44, 0x30, 0x4e, 0x6b, - 0x41, 0x6d, 0x38, 0xd3, 0xa1, 0x24, 0x65, 0xa6, 0x58, 0xd3, 0x68, 0x47, 0xbe, 0x21, 0x9d, 0x93, - 0x02, 0xce, 0x6c, 0x14, 0x11, 0xe1, 0x62, 0x5e, 0xf5, 0x9f, 0x0a, 0x9c, 0xc9, 0x58, 0xf6, 0x1e, - 0xed, 0xba, 0x16, 0x61, 0xf4, 0x04, 0x3a, 0xe7, 0xbd, 0x4c, 0xe7, 0x7c, 0x6b, 0x08, 0xf7, 0x45, - 0x46, 0x96, 0xbe, 0x13, 0xfc, 0x43, 0x81, 0x17, 0x0a, 0x39, 0x4e, 0xa0, 0x13, 0xbc, 0x9f, 0xed, - 0x04, 0xaf, 0x3f, 0xc7, 0xb9, 0x4a, 0x3a, 0xc2, 0xe3, 0xb2, 0x53, 0xb5, 0xc3, 0x0d, 0xeb, 0xff, - 0x6f, 0xd4, 0xa9, 0x7f, 0x50, 0x60, 0x3a, 0xa2, 0xe4, 0x37, 0x86, 0x0a, 0x3d, 0x6d, 0x15, 0x40, - 0xfe, 0x40, 0x16, 0xbd, 0x9f, 0xd5, 0x13, 0xbb, 0xaf, 0xc6, 0x18, 0x9c, 0xa2, 0x42, 0xd7, 0x00, - 0x45, 0x16, 0xb6, 0x2d, 0xb1, 0xfb, 0xf3, 0x1b, 0x58, 0x5d, 0xf0, 0x2e, 0x4b, 0x5e, 0x84, 0x8f, - 0x51, 0xe0, 0x02, 0x2e, 0xf5, 0xaf, 0x4a, 0xb2, 0x64, 0x08, 0xf0, 0x97, 0xd5, 0xf3, 0xc2, 0xb8, - 0x52, 0xcf, 0xa7, 0x87, 0xa4, 0xa0, 0xfc, 0xd2, 0x0e, 0x49, 0x61, 0x5d, 0x49, 0x49, 0x3c, 0xac, - 0xe7, 0x4e, 0x21, 0x4a, 0xa1, 0xea, 0x65, 0xee, 0xba, 0xbc, 0xba, 0x86, 0x6e, 0x7d, 0xb9, 0x9a, - 0x39, 0x3c, 0x4d, 0x0b, 0xaf, 0xb9, 0x97, 0x60, 0xc2, 0x76, 0x3a, 0x54, 0x3c, 0x66, 0xe4, 0x56, - 0xa1, 0x9b, 0x12, 0x8e, 0x63, 0x8a, 0x63, 0x3f, 0xaf, 0x8e, 0x7c, 0x41, 0x3f, 0xaf, 0xf2, 0xf5, - 0xcd, 0x92, 0x5b, 0xfd, 0xa8, 0x98, 0x0c, 0xc9, 0xfa, 0x26, 0xe1, 0x38, 0xa6, 0x40, 0xb7, 0x92, - 0x59, 0x3e, 0x26, 0x62, 0xf2, 0xb5, 0x2a, 0xb3, 0xbc, 0x7c, 0x8c, 0xeb, 0xfa, 0xa3, 0x27, 0x8d, - 0x53, 0x8f, 0x9f, 0x34, 0x4e, 0x7d, 0xf6, 0xa4, 0x71, 0xea, 0x41, 0xbf, 0xa1, 0x3c, 0xea, 0x37, - 0x94, 0xc7, 0xfd, 0x86, 0xf2, 0x59, 0xbf, 0xa1, 0x7c, 0xde, 0x6f, 0x28, 0x9f, 0xfc, 0xbb, 0x71, - 0xea, 0xfb, 0x2f, 0x0e, 0xfa, 0x2f, 0x82, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x23, 0x3d, 0xa6, - 0x20, 0x64, 0x20, 0x00, 0x00, + // 1832 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x19, 0xcd, 0x6f, 0xe4, 0x56, + 0x3d, 0x1e, 0x27, 0x93, 0xc9, 0x6f, 0xf2, 0xb5, 0x6f, 0xa1, 0x64, 0x43, 0x99, 0xd9, 0x75, 0x11, + 0x64, 0xdb, 0xad, 0xa7, 0xbb, 0x2d, 0x6d, 0xa1, 0x1c, 0x88, 0x93, 0x74, 0x95, 0xd5, 0x7e, 0x64, + 0x5f, 0xda, 0x15, 0x0b, 0xa5, 0xf0, 0xe2, 0x79, 0x99, 0x98, 0x78, 0x6c, 0xd7, 0xef, 0x79, 0x68, + 0x2e, 0xa8, 0xe2, 0xc2, 0x6d, 0xc5, 0x3f, 0x80, 0xb8, 0x21, 0x71, 0x82, 0x3f, 0x00, 0x09, 0x24, + 0x90, 0x58, 0x89, 0xcb, 0x4a, 0x70, 0xe8, 0x69, 0xe8, 0x0e, 0xe2, 0xc2, 0x9f, 0x90, 0x13, 0xf2, + 0xf3, 0xf3, 0xe7, 0x8c, 0xb3, 0x9e, 0xaa, 0x44, 0xed, 0x6d, 0xfc, 0xfb, 0xfe, 0xfe, 0xfd, 0xec, + 0x81, 0x6b, 0xc7, 0x6f, 0x32, 0xdd, 0x72, 0x3b, 
0xc4, 0xb3, 0x3a, 0x3e, 0x65, 0x6e, 0xe0, 0x9b, + 0xb4, 0x33, 0xb8, 0x4e, 0x6c, 0xef, 0x88, 0xbc, 0xda, 0xe9, 0x51, 0x87, 0xfa, 0x84, 0xd3, 0xae, + 0xee, 0xf9, 0x2e, 0x77, 0xd1, 0xf3, 0x11, 0xb5, 0x4e, 0x3c, 0x4b, 0x8f, 0xa9, 0xf5, 0x98, 0x7a, + 0xfd, 0xe5, 0x9e, 0xc5, 0x8f, 0x82, 0x03, 0xdd, 0x74, 0xfb, 0x9d, 0x9e, 0xdb, 0x73, 0x3b, 0x82, + 0xe9, 0x20, 0x38, 0x14, 0x4f, 0xe2, 0x41, 0xfc, 0x8a, 0x84, 0xad, 0x6b, 0x19, 0xd5, 0xa6, 0xeb, + 0x87, 0x6a, 0x8b, 0x0a, 0xd7, 0x5f, 0x4b, 0x69, 0xfa, 0xc4, 0x3c, 0xb2, 0x1c, 0xea, 0x9f, 0x74, + 0xbc, 0xe3, 0x5e, 0xde, 0xde, 0x69, 0xb8, 0x58, 0xa7, 0x4f, 0x39, 0x99, 0xa4, 0xab, 0x53, 0xc6, + 0xe5, 0x07, 0x0e, 0xb7, 0xfa, 0xe3, 0x6a, 0x5e, 0x7f, 0x16, 0x03, 0x33, 0x8f, 0x68, 0x9f, 0x14, + 0xf9, 0xb4, 0xbf, 0x2b, 0xb0, 0xba, 0x69, 0xdb, 0xae, 0x49, 0xb8, 0xe5, 0x3a, 0x98, 0xb2, 0xc0, + 0xe6, 0xe8, 0xc7, 0x30, 0xdf, 0xa5, 0x03, 0xcb, 0xa4, 0x6c, 0x4d, 0xb9, 0xac, 0x6c, 0x34, 0x6f, + 0xbc, 0xa6, 0x9f, 0x15, 0x6c, 0x7d, 0x5b, 0x10, 0x17, 0xc5, 0x18, 0x2b, 0x8f, 0x87, 0xed, 0x99, + 0xd1, 0xb0, 0x3d, 0x1f, 0xe1, 0x19, 0x8e, 0xa5, 0xa2, 0x07, 0xb0, 0xe8, 0xb8, 0x5d, 0xba, 0x4f, + 0x6d, 0x6a, 0x72, 0xd7, 0x5f, 0x53, 0x85, 0x96, 0xcb, 0x59, 0x2d, 0x61, 0x16, 0xf4, 0xc1, 0x75, + 0xfd, 0x6e, 0x86, 0xce, 0x58, 0x1d, 0x0d, 0xdb, 0x8b, 0x59, 0x08, 0xce, 0xc9, 0xd1, 0x3e, 0x51, + 0xa1, 0x69, 0x10, 0x66, 0x99, 0x91, 0x46, 0xf4, 0x73, 0x00, 0xc2, 0xb9, 0x6f, 0x1d, 0x04, 0x5c, + 0xf8, 0xa2, 0x6e, 0x34, 0x6f, 0x7c, 0xfb, 0x6c, 0x5f, 0x32, 0xec, 0xfa, 0x66, 0xc2, 0xbb, 0xe3, + 0x70, 0xff, 0xc4, 0x78, 0x41, 0x3a, 0x04, 0x29, 0xe2, 0x17, 0xff, 0x6a, 0x2f, 0xdd, 0x0f, 0x88, + 0x6d, 0x1d, 0x5a, 0xb4, 0x7b, 0x97, 0xf4, 0x29, 0xce, 0x68, 0x44, 0x03, 0x68, 0x98, 0xc4, 0x23, + 0xa6, 0xc5, 0x4f, 0xd6, 0x6a, 0x42, 0xfb, 0x1b, 0xd5, 0xb5, 0x6f, 0x49, 0xce, 0x48, 0xf7, 0x15, + 0xa9, 0xbb, 0x11, 0x83, 0xc7, 0x35, 0x27, 0xba, 0xd6, 0x6d, 0x58, 0x29, 0xd8, 0x8e, 0x56, 0x41, + 0x3d, 0xa6, 0x27, 0x22, 0x9f, 0x0b, 0x38, 0xfc, 0x89, 0xb6, 0x60, 0x6e, 0x40, 0xec, 0x80, 0xae, + 0xd5, 0x44, 0xf4, 0x5f, 0xae, 0x94, 0xe3, 0x58, 0x2a, 0x8e, 0x78, 0xbf, 0x53, 0x7b, 0x53, 0x59, + 0x3f, 0x86, 0xa5, 0x9c, 0xad, 0x13, 0x74, 0x6d, 0xe7, 0x75, 0xe9, 0x19, 0x5d, 0x49, 0xb9, 0xea, + 0xde, 0x71, 0x2f, 0xaf, 0xfc, 0x7e, 0x40, 0x1c, 0x6e, 0xf1, 0x93, 0x8c, 0x32, 0xed, 0x26, 0x5c, + 0xd8, 0xda, 0xb9, 0x1d, 0x59, 0x13, 0xe7, 0x1d, 0xdd, 0x00, 0xa0, 0x1f, 0x7a, 0x3e, 0x65, 0xcc, + 0x72, 0x9d, 0x48, 0xaf, 0x81, 0xe2, 0x64, 0xed, 0x24, 0x18, 0x9c, 0xa1, 0xd2, 0x06, 0x50, 0x97, + 0x55, 0x72, 0x19, 0x66, 0x1d, 0xd2, 0xa7, 0x92, 0x6f, 0x51, 0xf2, 0xcd, 0x8a, 0x98, 0x0a, 0x0c, + 0xba, 0x05, 0x73, 0x07, 0x61, 0x66, 0xa4, 0xf9, 0x57, 0x2b, 0x27, 0xd1, 0x58, 0x18, 0x0d, 0xdb, + 0x73, 0x02, 0x80, 0x23, 0x11, 0xda, 0xa3, 0x1a, 0x7c, 0xad, 0xd8, 0x30, 0x5b, 0xae, 0x73, 0x68, + 0xf5, 0x02, 0x5f, 0x3c, 0xa0, 0xef, 0x41, 0x3d, 0x12, 0x29, 0x2d, 0xda, 0x90, 0x16, 0xd5, 0xf7, + 0x05, 0xf4, 0x74, 0xd8, 0x7e, 0xae, 0xc8, 0x1a, 0x61, 0xb0, 0xe4, 0x43, 0x1b, 0xd0, 0xf0, 0xe9, + 0x07, 0x01, 0x65, 0x9c, 0x89, 0xba, 0x5b, 0x30, 0x16, 0xc3, 0xd2, 0xc1, 0x12, 0x86, 0x13, 0x2c, + 0xfa, 0x48, 0x81, 0x8b, 0x51, 0x57, 0xe6, 0x6c, 0x90, 0x1d, 0x79, 0xbd, 0x4a, 0x4d, 0xe4, 0x18, + 0x8d, 0xaf, 0x4a, 0x63, 0x2f, 0x4e, 0x40, 0xe2, 0x49, 0xaa, 0xb4, 0xff, 0x28, 0xf0, 0xdc, 0xe4, + 0x09, 0x82, 0x0e, 0x61, 0xde, 0x17, 0xbf, 0xe2, 0xe6, 0x7d, 0xab, 0x8a, 0x41, 0xd2, 0xcd, 0xf2, + 0x79, 0x14, 0x3d, 0x33, 0x1c, 0x0b, 0x47, 0x26, 0xd4, 0x4d, 0x61, 0x93, 0xec, 0xd2, 0xb7, 0xa6, + 0x9b, 0x77, 0xf9, 0x08, 0x2c, 0xc7, 0xe9, 0x8a, 0xc0, 0x58, 0x8a, 0xd6, 
0x7e, 0xa7, 0xc0, 0x4a, + 0xa1, 0x8b, 0x50, 0x0b, 0x54, 0xcb, 0xe1, 0xa2, 0xac, 0xd4, 0x28, 0x47, 0xbb, 0x0e, 0x7f, 0x10, + 0x16, 0x3b, 0x0e, 0x11, 0xe8, 0x0a, 0xcc, 0x1e, 0xb8, 0xae, 0x2d, 0xd2, 0xd1, 0x30, 0x96, 0x46, + 0xc3, 0xf6, 0x82, 0xe1, 0xba, 0x76, 0x44, 0x21, 0x50, 0xe8, 0x9b, 0x50, 0x67, 0xdc, 0xb7, 0x9c, + 0xde, 0xda, 0xac, 0xa8, 0x96, 0x95, 0xd1, 0xb0, 0xdd, 0xdc, 0x17, 0x90, 0x88, 0x4c, 0xa2, 0xd1, + 0x8b, 0x30, 0x3f, 0xa0, 0xbe, 0xe8, 0x90, 0x39, 0x41, 0x29, 0xa6, 0xe9, 0x83, 0x08, 0x14, 0x91, + 0xc6, 0x04, 0xda, 0xef, 0x6b, 0xd0, 0x94, 0x09, 0xb4, 0x89, 0xd5, 0x47, 0x0f, 0x33, 0x05, 0x15, + 0x65, 0xe2, 0xa5, 0x29, 0x32, 0x61, 0xac, 0xc6, 0xc3, 0x6b, 0x42, 0x05, 0x52, 0x68, 0x9a, 0xae, + 0xc3, 0xb8, 0x4f, 0x2c, 0x47, 0x96, 0x6b, 0x7e, 0x40, 0x9c, 0x55, 0x78, 0x92, 0xcd, 0xb8, 0x28, + 0x15, 0x34, 0x53, 0x18, 0xc3, 0x59, 0xb9, 0xe8, 0xfd, 0x24, 0xc5, 0xaa, 0xd0, 0xf0, 0x7a, 0x25, + 0x0d, 0xa1, 0xf3, 0xd5, 0xb2, 0xfb, 0x37, 0x05, 0xd6, 0xca, 0x98, 0x72, 0xfd, 0xa8, 0x7c, 0xaa, + 0x7e, 0xac, 0x9d, 0x5f, 0x3f, 0xfe, 0x59, 0xc9, 0xe4, 0x9e, 0x31, 0xf4, 0x13, 0x68, 0x84, 0x67, + 0x4a, 0x97, 0x70, 0x22, 0xcf, 0x81, 0x57, 0xce, 0x1a, 0xdf, 0x4c, 0x0f, 0xa9, 0xc3, 0xd5, 0x7d, + 0xef, 0xe0, 0xa7, 0xd4, 0xe4, 0x77, 0x28, 0x27, 0xe9, 0x30, 0x4e, 0x61, 0x38, 0x91, 0x8a, 0xee, + 0xc1, 0x2c, 0xf3, 0xa8, 0x39, 0xcd, 0x22, 0x12, 0xa6, 0xed, 0x7b, 0xd4, 0x4c, 0xe7, 0x75, 0xf8, + 0x84, 0x85, 0x20, 0xed, 0xd7, 0xd9, 0x64, 0x30, 0x96, 0x4f, 0x46, 0x59, 0x88, 0x95, 0xf3, 0x0b, + 0xf1, 0x9f, 0x92, 0x51, 0x20, 0xec, 0xbb, 0x6d, 0x31, 0x8e, 0xde, 0x1b, 0x0b, 0xb3, 0x5e, 0x2d, + 0xcc, 0x21, 0xb7, 0x08, 0x72, 0xd2, 0x65, 0x31, 0x24, 0x13, 0xe2, 0xbb, 0x30, 0x67, 0x71, 0xda, + 0x8f, 0xfb, 0xeb, 0x6a, 0xe5, 0x18, 0x1b, 0x4b, 0x52, 0xea, 0xdc, 0x6e, 0xc8, 0x8f, 0x23, 0x31, + 0xda, 0x93, 0xbc, 0x07, 0x61, 0xec, 0xd1, 0x8f, 0x60, 0x81, 0xc9, 0x8d, 0x1c, 0x4f, 0x89, 0x6b, + 0x55, 0xf4, 0x24, 0xe7, 0xdd, 0x05, 0xa9, 0x6a, 0x21, 0x86, 0x30, 0x9c, 0x4a, 0xcc, 0x74, 0x70, + 0x6d, 0xaa, 0x0e, 0x2e, 0xe4, 0xbf, 0xb4, 0x83, 0x7d, 0x98, 0x94, 0x40, 0xf4, 0x43, 0xa8, 0xbb, + 0x1e, 0xf9, 0x20, 0xa0, 0x32, 0x2b, 0xcf, 0xb8, 0xe0, 0xee, 0x09, 0xda, 0x49, 0x65, 0x02, 0xa1, + 0xce, 0x08, 0x8d, 0xa5, 0x48, 0xed, 0x91, 0x02, 0xab, 0xc5, 0x61, 0x36, 0xc5, 0xb4, 0xd8, 0x83, + 0xe5, 0x3e, 0xe1, 0xe6, 0x51, 0xb2, 0x50, 0x44, 0x0b, 0x2d, 0x18, 0x1b, 0xa3, 0x61, 0x7b, 0xf9, + 0x4e, 0x0e, 0x73, 0x3a, 0x6c, 0xa3, 0xb7, 0x03, 0xdb, 0x3e, 0xc9, 0xdf, 0x8c, 0x05, 0x7e, 0xed, + 0x97, 0x2a, 0x2c, 0xe5, 0x66, 0x77, 0x85, 0xeb, 0x68, 0x13, 0x56, 0xba, 0x69, 0xb0, 0x43, 0x84, + 0x34, 0xe3, 0x2b, 0x92, 0x38, 0x5b, 0x29, 0x82, 0xaf, 0x48, 0x9f, 0x2f, 0x1d, 0xf5, 0x33, 0x2f, + 0x9d, 0x07, 0xb0, 0x4c, 0x92, 0x6d, 0x7d, 0xc7, 0xed, 0x52, 0xb9, 0x2b, 0x75, 0xc9, 0xb5, 0xbc, + 0x99, 0xc3, 0x9e, 0x0e, 0xdb, 0x5f, 0x2a, 0xee, 0xf8, 0x10, 0x8e, 0x0b, 0x52, 0xd0, 0x0b, 0x30, + 0x67, 0xba, 0x81, 0xc3, 0xc5, 0x42, 0x55, 0xd3, 0x56, 0xd9, 0x0a, 0x81, 0x38, 0xc2, 0xa1, 0x6f, + 0x41, 0x93, 0x74, 0xfb, 0x96, 0xb3, 0x69, 0x9a, 0x94, 0xb1, 0xb5, 0xba, 0x58, 0xe5, 0xc9, 0xc2, + 0xda, 0x4c, 0x51, 0x38, 0x4b, 0xa7, 0xfd, 0x51, 0x89, 0xef, 0xc4, 0x92, 0x7b, 0x06, 0x5d, 0x0d, + 0xaf, 0x23, 0x81, 0x92, 0xc9, 0xc9, 0x1c, 0x38, 0x02, 0x8c, 0x63, 0x3c, 0xfa, 0x06, 0xd4, 0xbb, + 0xbe, 0x35, 0xa0, 0xbe, 0xcc, 0x4c, 0xd2, 0x03, 0xdb, 0x02, 0x8a, 0x25, 0x36, 0x4c, 0xb6, 0x17, + 0xdf, 0x1b, 0x99, 0x64, 0xef, 0xb9, 0xae, 0x8d, 0x05, 0x46, 0x48, 0x12, 0x56, 0xc9, 0x10, 0xa6, + 0x92, 0x22, 0x5b, 0x25, 0x56, 0x7b, 0x0f, 0x96, 0x0b, 0x47, 0xfa, 0x2d, 0x50, 0x4d, 0x6a, 0xcb, + 
0x2e, 0xea, 0x9c, 0x9d, 0xdd, 0xb1, 0x13, 0xdf, 0x98, 0x1f, 0x0d, 0xdb, 0xea, 0xd6, 0xce, 0x6d, + 0x1c, 0x0a, 0xd1, 0x7e, 0xab, 0xc0, 0xa5, 0xd2, 0x4e, 0xcb, 0x78, 0xab, 0x9c, 0xe9, 0x2d, 0x01, + 0xf0, 0x88, 0x4f, 0xfa, 0x94, 0x53, 0x9f, 0x4d, 0xd8, 0x3e, 0xf9, 0xa1, 0x2b, 0xdf, 0xa4, 0x75, + 0x4c, 0x7e, 0xb6, 0xf3, 0x21, 0xa7, 0x4e, 0x78, 0x28, 0xa5, 0x8b, 0x6d, 0x2f, 0x11, 0x84, 0x33, + 0x42, 0xb5, 0xdf, 0xd4, 0x60, 0x09, 0x4b, 0xf7, 0xa2, 0x53, 0xea, 0xff, 0xbf, 0x4e, 0xef, 0xe7, + 0xd6, 0xe9, 0x33, 0x22, 0x9d, 0x33, 0xae, 0x6c, 0xa1, 0xa2, 0x87, 0xe1, 0x91, 0x49, 0x78, 0xc0, + 0xaa, 0xbd, 0x18, 0xe4, 0x85, 0x0a, 0xc6, 0x34, 0x09, 0xd1, 0x33, 0x96, 0x02, 0xb5, 0x91, 0x02, + 0xad, 0x1c, 0x7d, 0x38, 0x09, 0x83, 0x3e, 0xf5, 0x31, 0x3d, 0xa4, 0x3e, 0x75, 0x4c, 0x8a, 0xae, + 0x41, 0x83, 0x78, 0xd6, 0x4d, 0xdf, 0x0d, 0x3c, 0x99, 0xd1, 0x64, 0xd5, 0x6d, 0xee, 0xed, 0x0a, + 0x38, 0x4e, 0x28, 0x42, 0xea, 0xd8, 0x22, 0x59, 0xc7, 0x99, 0xf3, 0x33, 0x82, 0xe3, 0x84, 0x22, + 0x19, 0x6f, 0xb3, 0xa5, 0xe3, 0xcd, 0x00, 0x35, 0xb0, 0xba, 0xf2, 0x66, 0x7e, 0x45, 0x12, 0xa8, + 0xef, 0xee, 0x6e, 0x9f, 0x0e, 0xdb, 0x57, 0xca, 0x3e, 0xbb, 0xf0, 0x13, 0x8f, 0x32, 0xfd, 0xdd, + 0xdd, 0x6d, 0x1c, 0x32, 0x6b, 0x7f, 0x51, 0xe0, 0x42, 0xce, 0xc9, 0x73, 0x58, 0xf9, 0x7b, 0xf9, + 0x95, 0xff, 0xd2, 0x14, 0x29, 0x2b, 0x59, 0xfa, 0x56, 0xc1, 0x09, 0xb1, 0xf5, 0xdf, 0x29, 0x7e, + 0x2c, 0xba, 0x5a, 0xf9, 0xb2, 0x2e, 0xff, 0x42, 0xa4, 0xfd, 0x57, 0x81, 0x8b, 0x13, 0xaa, 0x08, + 0xbd, 0x0f, 0x90, 0xce, 0xe0, 0x09, 0x41, 0x9b, 0xa0, 0x70, 0xec, 0x3d, 0x70, 0x59, 0x7c, 0xc2, + 0x49, 0xa1, 0x19, 0x89, 0x88, 0x41, 0xd3, 0xa7, 0x8c, 0xfa, 0x03, 0xda, 0x7d, 0xdb, 0xf5, 0x65, + 0xe8, 0xbe, 0x3b, 0x45, 0xe8, 0xc6, 0xaa, 0x37, 0x1d, 0xf5, 0x38, 0x15, 0x8c, 0xb3, 0x5a, 0xb4, + 0x7f, 0x2a, 0xf0, 0xe5, 0x9c, 0x90, 0x77, 0x68, 0xdf, 0xb3, 0x09, 0xa7, 0xe7, 0x30, 0x2c, 0x1e, + 0xe6, 0x86, 0xc5, 0x1b, 0x53, 0x78, 0x1a, 0x1b, 0x59, 0x7a, 0x85, 0xff, 0x43, 0x81, 0x4b, 0x13, + 0x39, 0xce, 0xa1, 0xf8, 0xbf, 0x9f, 0x2f, 0xfe, 0x57, 0x3f, 0x85, 0x5f, 0xe5, 0x97, 0xef, 0xa5, + 0xd2, 0x38, 0x7c, 0x21, 0xa7, 0xbb, 0xf6, 0x07, 0x05, 0x16, 0x63, 0xca, 0x70, 0xd5, 0x57, 0xb8, + 0xf9, 0x6e, 0x00, 0xc8, 0x4f, 0xc9, 0xf1, 0xdb, 0xa9, 0x9a, 0xda, 0x7d, 0x33, 0xc1, 0xe0, 0x0c, + 0x15, 0xba, 0x05, 0x28, 0xb6, 0x70, 0xdf, 0x16, 0x4b, 0x3b, 0x3c, 0x9d, 0x54, 0xc1, 0xbb, 0x2e, + 0x79, 0x11, 0x1e, 0xa3, 0xc0, 0x13, 0xb8, 0xb4, 0xbf, 0x2a, 0xe9, 0x5e, 0x15, 0xe0, 0xcf, 0x6b, + 0xe4, 0x85, 0x71, 0xa5, 0x91, 0xcf, 0xee, 0x05, 0x41, 0xf9, 0xb9, 0xdd, 0x0b, 0xc2, 0xba, 0x92, + 0x96, 0x78, 0xa4, 0x16, 0xbc, 0x10, 0xad, 0x50, 0xf5, 0x0a, 0xbb, 0x2d, 0x6f, 0xce, 0x28, 0xac, + 0x2f, 0x56, 0x33, 0x27, 0x2c, 0xd3, 0x89, 0xf7, 0xe9, 0x35, 0x68, 0x38, 0x6e, 0x97, 0x8a, 0xb7, + 0x90, 0xc2, 0xf6, 0xbf, 0x2b, 0xe1, 0x38, 0xa1, 0x18, 0xfb, 0x23, 0x62, 0xf6, 0xb3, 0xf9, 0x23, + 0x42, 0x5c, 0x2c, 0xb6, 0x1d, 0x12, 0x30, 0x71, 0x38, 0x34, 0x32, 0x17, 0x8b, 0x84, 0xe3, 0x84, + 0x02, 0xdd, 0x4b, 0x57, 0x68, 0x5d, 0xe4, 0xe4, 0xeb, 0x55, 0x56, 0x68, 0xf9, 0xf6, 0x34, 0x8c, + 0xc7, 0x4f, 0x5b, 0x33, 0x4f, 0x9e, 0xb6, 0x66, 0x3e, 0x7e, 0xda, 0x9a, 0xf9, 0x68, 0xd4, 0x52, + 0x1e, 0x8f, 0x5a, 0xca, 0x93, 0x51, 0x4b, 0xf9, 0x78, 0xd4, 0x52, 0x3e, 0x19, 0xb5, 0x94, 0x5f, + 0xfd, 0xbb, 0x35, 0xf3, 0x83, 0xe7, 0xcf, 0xfa, 0xbf, 0xed, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, + 0xaa, 0x59, 0xd2, 0xe3, 0x8e, 0x1b, 0x00, 0x00, } func (m *AllocationResult) Marshal() (dAtA []byte, err error) { @@ -1256,11 +1095,6 @@ func (m *AllocationResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var 
l int _ = l - i -= len(m.Controller) - copy(dAtA[i:], m.Controller) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Controller))) - i-- - dAtA[i] = 0x22 if m.NodeSelector != nil { { size, err := m.NodeSelector.MarshalToSizedBuffer(dAtA[:i]) @@ -1835,18 +1669,6 @@ func (m *DeviceClassSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.SuitableNodes != nil { - { - size, err := m.SuitableNodes.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } if len(m.Config) > 0 { for iNdEx := len(m.Config) - 1; iNdEx >= 0; iNdEx-- { { @@ -2131,180 +1953,6 @@ func (m *OpaqueDeviceConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, erro return len(dAtA) - i, nil } -func (m *PodSchedulingContext) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodSchedulingContext) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodSchedulingContext) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *PodSchedulingContextList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodSchedulingContextList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodSchedulingContextList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *PodSchedulingContextSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodSchedulingContextSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodSchedulingContextSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.PotentialNodes) > 0 { - for iNdEx := len(m.PotentialNodes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.PotentialNodes[iNdEx]) - 
copy(dAtA[i:], m.PotentialNodes[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PotentialNodes[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - i -= len(m.SelectedNode) - copy(dAtA[i:], m.SelectedNode) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SelectedNode))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *PodSchedulingContextStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodSchedulingContextStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodSchedulingContextStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ResourceClaims) > 0 { - for iNdEx := len(m.ResourceClaims) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ResourceClaims[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - func (m *ResourceClaim) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -2448,43 +2096,6 @@ func (m *ResourceClaimList) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ResourceClaimSchedulingStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceClaimSchedulingStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceClaimSchedulingStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.UnsuitableNodes) > 0 { - for iNdEx := len(m.UnsuitableNodes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.UnsuitableNodes[iNdEx]) - copy(dAtA[i:], m.UnsuitableNodes[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UnsuitableNodes[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - func (m *ResourceClaimSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -2505,11 +2116,6 @@ func (m *ResourceClaimSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - i -= len(m.Controller) - copy(dAtA[i:], m.Controller) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Controller))) - i-- - dAtA[i] = 0x12 { size, err := m.Devices.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -2543,14 +2149,6 @@ func (m *ResourceClaimStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - i-- - if m.DeallocationRequested { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 if len(m.ReservedFor) > 0 { for iNdEx := len(m.ReservedFor) - 1; iNdEx >= 0; iNdEx-- { { @@ -2937,8 +2535,6 @@ func (m *AllocationResult) Size() (n int) { l = m.NodeSelector.Size() n += 1 + l + sovGenerated(uint64(l)) } - l = len(m.Controller) - n += 1 + l + sovGenerated(uint64(l)) return n } @@ -3161,10 +2757,6 @@ func (m *DeviceClassSpec) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } - if m.SuitableNodes != nil { - l = m.SuitableNodes.Size() - n += 1 + l + sovGenerated(uint64(l)) - } 
return n } @@ -3266,70 +2858,6 @@ func (m *OpaqueDeviceConfiguration) Size() (n int) { return n } -func (m *PodSchedulingContext) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *PodSchedulingContextList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *PodSchedulingContextSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.SelectedNode) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.PotentialNodes) > 0 { - for _, s := range m.PotentialNodes { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *PodSchedulingContextStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.ResourceClaims) > 0 { - for _, e := range m.ResourceClaims { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - func (m *ResourceClaim) Size() (n int) { if m == nil { return 0 @@ -3379,23 +2907,6 @@ func (m *ResourceClaimList) Size() (n int) { return n } -func (m *ResourceClaimSchedulingStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.UnsuitableNodes) > 0 { - for _, s := range m.UnsuitableNodes { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - func (m *ResourceClaimSpec) Size() (n int) { if m == nil { return 0 @@ -3404,8 +2915,6 @@ func (m *ResourceClaimSpec) Size() (n int) { _ = l l = m.Devices.Size() n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Controller) - n += 1 + l + sovGenerated(uint64(l)) return n } @@ -3425,7 +2934,6 @@ func (m *ResourceClaimStatus) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } - n += 2 return n } @@ -3554,7 +3062,6 @@ func (this *AllocationResult) String() string { s := strings.Join([]string{`&AllocationResult{`, `Devices:` + strings.Replace(strings.Replace(this.Devices.String(), "DeviceAllocationResult", "DeviceAllocationResult", 1), `&`, ``, 1) + `,`, `NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "NodeSelector", "v1.NodeSelector", 1) + `,`, - `Controller:` + fmt.Sprintf("%v", this.Controller) + `,`, `}`, }, "") return s @@ -3749,7 +3256,6 @@ func (this *DeviceClassSpec) String() string { s := strings.Join([]string{`&DeviceClassSpec{`, `Selectors:` + repeatedStringForSelectors + `,`, `Config:` + repeatedStringForConfig + `,`, - `SuitableNodes:` + strings.Replace(fmt.Sprintf("%v", this.SuitableNodes), "NodeSelector", "v1.NodeSelector", 1) + `,`, `}`, }, "") return s @@ -3829,60 +3335,6 @@ func (this *OpaqueDeviceConfiguration) String() string { }, "") return s } -func (this *PodSchedulingContext) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodSchedulingContext{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSchedulingContextSpec", "PodSchedulingContextSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), 
"PodSchedulingContextStatus", "PodSchedulingContextStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodSchedulingContextList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]PodSchedulingContext{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodSchedulingContext", "PodSchedulingContext", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&PodSchedulingContextList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *PodSchedulingContextSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodSchedulingContextSpec{`, - `SelectedNode:` + fmt.Sprintf("%v", this.SelectedNode) + `,`, - `PotentialNodes:` + fmt.Sprintf("%v", this.PotentialNodes) + `,`, - `}`, - }, "") - return s -} -func (this *PodSchedulingContextStatus) String() string { - if this == nil { - return "nil" - } - repeatedStringForResourceClaims := "[]ResourceClaimSchedulingStatus{" - for _, f := range this.ResourceClaims { - repeatedStringForResourceClaims += strings.Replace(strings.Replace(f.String(), "ResourceClaimSchedulingStatus", "ResourceClaimSchedulingStatus", 1), `&`, ``, 1) + "," - } - repeatedStringForResourceClaims += "}" - s := strings.Join([]string{`&PodSchedulingContextStatus{`, - `ResourceClaims:` + repeatedStringForResourceClaims + `,`, - `}`, - }, "") - return s -} func (this *ResourceClaim) String() string { if this == nil { return "nil" @@ -3924,24 +3376,12 @@ func (this *ResourceClaimList) String() string { }, "") return s } -func (this *ResourceClaimSchedulingStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceClaimSchedulingStatus{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `UnsuitableNodes:` + fmt.Sprintf("%v", this.UnsuitableNodes) + `,`, - `}`, - }, "") - return s -} func (this *ResourceClaimSpec) String() string { if this == nil { return "nil" } s := strings.Join([]string{`&ResourceClaimSpec{`, `Devices:` + strings.Replace(strings.Replace(this.Devices.String(), "DeviceClaim", "DeviceClaim", 1), `&`, ``, 1) + `,`, - `Controller:` + fmt.Sprintf("%v", this.Controller) + `,`, `}`, }, "") return s @@ -3958,7 +3398,6 @@ func (this *ResourceClaimStatus) String() string { s := strings.Join([]string{`&ResourceClaimStatus{`, `Allocation:` + strings.Replace(this.Allocation.String(), "AllocationResult", "AllocationResult", 1) + `,`, `ReservedFor:` + repeatedStringForReservedFor + `,`, - `DeallocationRequested:` + fmt.Sprintf("%v", this.DeallocationRequested) + `,`, `}`, }, "") return s @@ -4166,38 +3605,6 @@ func (m *AllocationResult) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - m.Controller = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -5829,42 +5236,6 @@ func (m *DeviceClassSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SuitableNodes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SuitableNodes == nil { - m.SuitableNodes = &v1.NodeSelector{} - } - if err := m.SuitableNodes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -6685,470 +6056,6 @@ func (m *OpaqueDeviceConfiguration) Unmarshal(dAtA []byte) error { } return nil } -func (m *PodSchedulingContext) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodSchedulingContext: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodSchedulingContext: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodSchedulingContextList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodSchedulingContextList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodSchedulingContextList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, PodSchedulingContext{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodSchedulingContextSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - 
wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodSchedulingContextSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodSchedulingContextSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SelectedNode", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SelectedNode = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PotentialNodes", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PotentialNodes = append(m.PotentialNodes, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodSchedulingContextStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodSchedulingContextStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodSchedulingContextStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceClaims", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ResourceClaims = append(m.ResourceClaims, ResourceClaimSchedulingStatus{}) - if err 
:= m.ResourceClaims[len(m.ResourceClaims)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *ResourceClaim) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -7593,120 +6500,6 @@ func (m *ResourceClaimList) Unmarshal(dAtA []byte) error { } return nil } -func (m *ResourceClaimSchedulingStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceClaimSchedulingStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceClaimSchedulingStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UnsuitableNodes", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.UnsuitableNodes = append(m.UnsuitableNodes, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *ResourceClaimSpec) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -7769,38 +6562,6 @@ func (m *ResourceClaimSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - 
return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Controller = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -7921,26 +6682,6 @@ func (m *ResourceClaimStatus) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DeallocationRequested", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.DeallocationRequested = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) diff --git a/staging/src/k8s.io/api/resource/v1alpha3/generated.proto b/staging/src/k8s.io/api/resource/v1alpha3/generated.proto index f77d6059527..ce2a297e4cd 100644 --- a/staging/src/k8s.io/api/resource/v1alpha3/generated.proto +++ b/staging/src/k8s.io/api/resource/v1alpha3/generated.proto @@ -42,22 +42,6 @@ message AllocationResult { // // +optional optional .k8s.io.api.core.v1.NodeSelector nodeSelector = 3; - - // Controller is the name of the DRA driver which handled the - // allocation. That driver is also responsible for deallocating the - // claim. It is empty when the claim can be deallocated without - // involving a driver. - // - // A driver may allocate devices provided by other drivers, so this - // driver name here can be different from the driver names listed for - // the results. - // - // This is an alpha field and requires enabling the DRAControlPlaneController - // feature gate. - // - // +optional - // +featureGate=DRAControlPlaneController - optional string controller = 4; } // BasicDevice defines one device instance. @@ -309,22 +293,6 @@ message DeviceClassSpec { // +optional // +listType=atomic repeated DeviceClassConfiguration config = 2; - - // Only nodes matching the selector will be considered by the scheduler - // when trying to find a Node that fits a Pod when that Pod uses - // a claim that has not been allocated yet *and* that claim - // gets allocated through a control plane controller. It is ignored - // when the claim does not use a control plane controller - // for allocation. - // - // Setting this field is optional. If unset, all Nodes are candidates. - // - // This is an alpha field and requires enabling the DRAControlPlaneController - // feature gate. - // - // +optional - // +featureGate=DRAControlPlaneController - optional .k8s.io.api.core.v1.NodeSelector suitableNodes = 3; } // DeviceConfiguration must have exactly one field set. It gets embedded @@ -516,69 +484,6 @@ message OpaqueDeviceConfiguration { optional .k8s.io.apimachinery.pkg.runtime.RawExtension parameters = 2; } -// PodSchedulingContext objects hold information that is needed to schedule -// a Pod with ResourceClaims that use "WaitForFirstConsumer" allocation -// mode. -// -// This is an alpha type and requires enabling the DRAControlPlaneController -// feature gate. 
-message PodSchedulingContext { - // Standard object metadata - // +optional - optional .k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Spec describes where resources for the Pod are needed. - optional PodSchedulingContextSpec spec = 2; - - // Status describes where resources for the Pod can be allocated. - // - // +optional - optional PodSchedulingContextStatus status = 3; -} - -// PodSchedulingContextList is a collection of Pod scheduling objects. -message PodSchedulingContextList { - // Standard list metadata - // +optional - optional .k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is the list of PodSchedulingContext objects. - repeated PodSchedulingContext items = 2; -} - -// PodSchedulingContextSpec describes where resources for the Pod are needed. -message PodSchedulingContextSpec { - // SelectedNode is the node for which allocation of ResourceClaims that - // are referenced by the Pod and that use "WaitForFirstConsumer" - // allocation is to be attempted. - // - // +optional - optional string selectedNode = 1; - - // PotentialNodes lists nodes where the Pod might be able to run. - // - // The size of this field is limited to 128. This is large enough for - // many clusters. Larger clusters may need more attempts to find a node - // that suits all pending resources. This may get increased in the - // future, but not reduced. - // - // +optional - // +listType=atomic - repeated string potentialNodes = 2; -} - -// PodSchedulingContextStatus describes where resources for the Pod can be allocated. -message PodSchedulingContextStatus { - // ResourceClaims describes resource availability for each - // pod.spec.resourceClaim entry where the corresponding ResourceClaim - // uses "WaitForFirstConsumer" allocation mode. - // - // +listType=map - // +listMapKey=name - // +optional - repeated ResourceClaimSchedulingStatus resourceClaims = 1; -} - // ResourceClaim describes a request for access to resources in the cluster, // for use by workloads. For example, if a workload needs an accelerator device // with specific properties, this is how that request is expressed. The status @@ -634,46 +539,12 @@ message ResourceClaimList { repeated ResourceClaim items = 2; } -// ResourceClaimSchedulingStatus contains information about one particular -// ResourceClaim with "WaitForFirstConsumer" allocation mode. -message ResourceClaimSchedulingStatus { - // Name matches the pod.spec.resourceClaims[*].Name field. - // - // +required - optional string name = 1; - - // UnsuitableNodes lists nodes that the ResourceClaim cannot be - // allocated for. - // - // The size of this field is limited to 128, the same as for - // PodSchedulingSpec.PotentialNodes. This may get increased in the - // future, but not reduced. - // - // +optional - // +listType=atomic - repeated string unsuitableNodes = 2; -} - // ResourceClaimSpec defines what is being requested in a ResourceClaim and how to configure it. message ResourceClaimSpec { // Devices defines how to request devices. // // +optional optional DeviceClaim devices = 1; - - // Controller is the name of the DRA driver that is meant - // to handle allocation of this claim. If empty, allocation is handled - // by the scheduler while scheduling a pod. - // - // Must be a DNS subdomain and should end with a DNS domain owned by the - // vendor of the driver. - // - // This is an alpha field and requires enabling the DRAControlPlaneController - // feature gate. 
- // - // +optional - // +featureGate=DRAControlPlaneController - optional string controller = 2; } // ResourceClaimStatus tracks whether the resource has been allocated and what @@ -710,20 +581,6 @@ message ResourceClaimStatus { // +patchStrategy=merge // +patchMergeKey=uid repeated ResourceClaimConsumerReference reservedFor = 2; - - // Indicates that a claim is to be deallocated. While this is set, - // no new consumers may be added to ReservedFor. - // - // This is only used if the claim needs to be deallocated by a DRA driver. - // That driver then must deallocate this claim and reset the field - // together with clearing the Allocation field. - // - // This is an alpha field and requires enabling the DRAControlPlaneController - // feature gate. - // - // +optional - // +featureGate=DRAControlPlaneController - optional bool deallocationRequested = 3; } // ResourceClaimTemplate is used to produce ResourceClaim objects. diff --git a/staging/src/k8s.io/api/resource/v1alpha3/register.go b/staging/src/k8s.io/api/resource/v1alpha3/register.go index 74044e8cf0e..8573758e319 100644 --- a/staging/src/k8s.io/api/resource/v1alpha3/register.go +++ b/staging/src/k8s.io/api/resource/v1alpha3/register.go @@ -50,8 +50,6 @@ func addKnownTypes(scheme *runtime.Scheme) error { &ResourceClaimList{}, &ResourceClaimTemplate{}, &ResourceClaimTemplateList{}, - &PodSchedulingContext{}, - &PodSchedulingContextList{}, &ResourceSlice{}, &ResourceSliceList{}, ) diff --git a/staging/src/k8s.io/api/resource/v1alpha3/types.go b/staging/src/k8s.io/api/resource/v1alpha3/types.go index cc4de1d6302..f4947e0786e 100644 --- a/staging/src/k8s.io/api/resource/v1alpha3/types.go +++ b/staging/src/k8s.io/api/resource/v1alpha3/types.go @@ -330,19 +330,10 @@ type ResourceClaimSpec struct { // +optional Devices DeviceClaim `json:"devices" protobuf:"bytes,1,name=devices"` - // Controller is the name of the DRA driver that is meant - // to handle allocation of this claim. If empty, allocation is handled - // by the scheduler while scheduling a pod. - // - // Must be a DNS subdomain and should end with a DNS domain owned by the - // vendor of the driver. - // - // This is an alpha field and requires enabling the DRAControlPlaneController - // feature gate. - // - // +optional - // +featureGate=DRAControlPlaneController - Controller string `json:"controller,omitempty" protobuf:"bytes,2,opt,name=controller"` + // Controller is tombstoned since Kubernetes 1.32 where + // it got removed. May be reused once decoding v1alpha3 is no longer + // supported. + // Controller string `json:"controller,omitempty" protobuf:"bytes,2,opt,name=controller"` } // DeviceClaim defines how to request devices with a ResourceClaim. @@ -658,19 +649,10 @@ type ResourceClaimStatus struct { // +patchMergeKey=uid ReservedFor []ResourceClaimConsumerReference `json:"reservedFor,omitempty" protobuf:"bytes,2,opt,name=reservedFor" patchStrategy:"merge" patchMergeKey:"uid"` - // Indicates that a claim is to be deallocated. While this is set, - // no new consumers may be added to ReservedFor. - // - // This is only used if the claim needs to be deallocated by a DRA driver. - // That driver then must deallocate this claim and reset the field - // together with clearing the Allocation field. - // - // This is an alpha field and requires enabling the DRAControlPlaneController - // feature gate. 
- // - // +optional - // +featureGate=DRAControlPlaneController - DeallocationRequested bool `json:"deallocationRequested,omitempty" protobuf:"bytes,3,opt,name=deallocationRequested"` + // DeallocationRequested is tombstoned since Kubernetes 1.32 where + // it got removed. May be reused once decoding v1alpha3 is no longer + // supported. + // DeallocationRequested bool `json:"deallocationRequested,omitempty" protobuf:"bytes,3,opt,name=deallocationRequested"` } // ReservedForMaxSize is the maximum number of entries in @@ -710,21 +692,10 @@ type AllocationResult struct { // +optional NodeSelector *v1.NodeSelector `json:"nodeSelector,omitempty" protobuf:"bytes,3,opt,name=nodeSelector"` - // Controller is the name of the DRA driver which handled the - // allocation. That driver is also responsible for deallocating the - // claim. It is empty when the claim can be deallocated without - // involving a driver. - // - // A driver may allocate devices provided by other drivers, so this - // driver name here can be different from the driver names listed for - // the results. - // - // This is an alpha field and requires enabling the DRAControlPlaneController - // feature gate. - // - // +optional - // +featureGate=DRAControlPlaneController - Controller string `json:"controller,omitempty" protobuf:"bytes,4,opt,name=controller"` + // Controller is tombstoned since Kubernetes 1.32 where + // it got removed. May be reused once decoding v1alpha3 is no longer + // supported. + // Controller string `json:"controller,omitempty" protobuf:"bytes,4,opt,name=controller"` } // DeviceAllocationResult is the result of allocating devices. @@ -828,107 +799,6 @@ type ResourceClaimList struct { Items []ResourceClaim `json:"items" protobuf:"bytes,2,rep,name=items"` } -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.31 - -// PodSchedulingContext objects hold information that is needed to schedule -// a Pod with ResourceClaims that use "WaitForFirstConsumer" allocation -// mode. -// -// This is an alpha type and requires enabling the DRAControlPlaneController -// feature gate. -type PodSchedulingContext struct { - metav1.TypeMeta `json:",inline"` - // Standard object metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec describes where resources for the Pod are needed. - Spec PodSchedulingContextSpec `json:"spec" protobuf:"bytes,2,name=spec"` - - // Status describes where resources for the Pod can be allocated. - // - // +optional - Status PodSchedulingContextStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// PodSchedulingContextSpec describes where resources for the Pod are needed. -type PodSchedulingContextSpec struct { - // SelectedNode is the node for which allocation of ResourceClaims that - // are referenced by the Pod and that use "WaitForFirstConsumer" - // allocation is to be attempted. - // - // +optional - SelectedNode string `json:"selectedNode,omitempty" protobuf:"bytes,1,opt,name=selectedNode"` - - // PotentialNodes lists nodes where the Pod might be able to run. - // - // The size of this field is limited to 128. This is large enough for - // many clusters. Larger clusters may need more attempts to find a node - // that suits all pending resources. This may get increased in the - // future, but not reduced. 
- // - // +optional - // +listType=atomic - PotentialNodes []string `json:"potentialNodes,omitempty" protobuf:"bytes,2,opt,name=potentialNodes"` -} - -// PodSchedulingContextStatus describes where resources for the Pod can be allocated. -type PodSchedulingContextStatus struct { - // ResourceClaims describes resource availability for each - // pod.spec.resourceClaim entry where the corresponding ResourceClaim - // uses "WaitForFirstConsumer" allocation mode. - // - // +listType=map - // +listMapKey=name - // +optional - ResourceClaims []ResourceClaimSchedulingStatus `json:"resourceClaims,omitempty" protobuf:"bytes,1,opt,name=resourceClaims"` - - // If there ever is a need to support other kinds of resources - // than ResourceClaim, then new fields could get added here - // for those other resources. -} - -// ResourceClaimSchedulingStatus contains information about one particular -// ResourceClaim with "WaitForFirstConsumer" allocation mode. -type ResourceClaimSchedulingStatus struct { - // Name matches the pod.spec.resourceClaims[*].Name field. - // - // +required - Name string `json:"name" protobuf:"bytes,1,name=name"` - - // UnsuitableNodes lists nodes that the ResourceClaim cannot be - // allocated for. - // - // The size of this field is limited to 128, the same as for - // PodSchedulingSpec.PotentialNodes. This may get increased in the - // future, but not reduced. - // - // +optional - // +listType=atomic - UnsuitableNodes []string `json:"unsuitableNodes,omitempty" protobuf:"bytes,2,opt,name=unsuitableNodes"` -} - -// PodSchedulingNodeListMaxSize defines the maximum number of entries in the -// node lists that are stored in PodSchedulingContext objects. This limit is part -// of the API. -const PodSchedulingNodeListMaxSize = 128 - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.31 - -// PodSchedulingContextList is a collection of Pod scheduling objects. -type PodSchedulingContextList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of PodSchedulingContext objects. - Items []PodSchedulingContext `json:"items" protobuf:"bytes,2,rep,name=items"` -} - // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -977,21 +847,10 @@ type DeviceClassSpec struct { // +listType=atomic Config []DeviceClassConfiguration `json:"config,omitempty" protobuf:"bytes,2,opt,name=config"` - // Only nodes matching the selector will be considered by the scheduler - // when trying to find a Node that fits a Pod when that Pod uses - // a claim that has not been allocated yet *and* that claim - // gets allocated through a control plane controller. It is ignored - // when the claim does not use a control plane controller - // for allocation. - // - // Setting this field is optional. If unset, all Nodes are candidates. - // - // This is an alpha field and requires enabling the DRAControlPlaneController - // feature gate. - // - // +optional - // +featureGate=DRAControlPlaneController - SuitableNodes *v1.NodeSelector `json:"suitableNodes,omitempty" protobuf:"bytes,3,opt,name=suitableNodes"` + // SuitableNodes is tombstoned since Kubernetes 1.32 where + // it got removed. May be reused once decoding v1alpha3 is no longer + // supported. 
+ // SuitableNodes *v1.NodeSelector `json:"suitableNodes,omitempty" protobuf:"bytes,3,opt,name=suitableNodes"` } // DeviceClassConfiguration is used in DeviceClass. diff --git a/staging/src/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go b/staging/src/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go index 5fa01514f21..ee5d1e1f682 100644 --- a/staging/src/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go +++ b/staging/src/k8s.io/api/resource/v1alpha3/types_swagger_doc_generated.go @@ -31,7 +31,6 @@ var map_AllocationResult = map[string]string{ "": "AllocationResult contains attributes of an allocated resource.", "devices": "Devices is the result of allocating devices.", "nodeSelector": "NodeSelector defines where the allocated resources are available. If unset, they are available everywhere.", - "controller": "Controller is the name of the DRA driver which handled the allocation. That driver is also responsible for deallocating the claim. It is empty when the claim can be deallocated without involving a driver.\n\nA driver may allocate devices provided by other drivers, so this driver name here can be different from the driver names listed for the results.\n\nThis is an alpha field and requires enabling the DRAControlPlaneController feature gate.", } func (AllocationResult) SwaggerDoc() map[string]string { @@ -148,10 +147,9 @@ func (DeviceClassList) SwaggerDoc() map[string]string { } var map_DeviceClassSpec = map[string]string{ - "": "DeviceClassSpec is used in a [DeviceClass] to define what can be allocated and how to configure it.", - "selectors": "Each selector must be satisfied by a device which is claimed via this class.", - "config": "Config defines configuration parameters that apply to each device that is claimed via this class. Some classses may potentially be satisfied by multiple drivers, so each instance of a vendor configuration applies to exactly one driver.\n\nThey are passed to the driver, but are not considered while allocating the claim.", - "suitableNodes": "Only nodes matching the selector will be considered by the scheduler when trying to find a Node that fits a Pod when that Pod uses a claim that has not been allocated yet *and* that claim gets allocated through a control plane controller. It is ignored when the claim does not use a control plane controller for allocation.\n\nSetting this field is optional. If unset, all Nodes are candidates.\n\nThis is an alpha field and requires enabling the DRAControlPlaneController feature gate.", + "": "DeviceClassSpec is used in a [DeviceClass] to define what can be allocated and how to configure it.", + "selectors": "Each selector must be satisfied by a device which is claimed via this class.", + "config": "Config defines configuration parameters that apply to each device that is claimed via this class. 
Some classses may potentially be satisfied by multiple drivers, so each instance of a vendor configuration applies to exactly one driver.\n\nThey are passed to the driver, but are not considered while allocating the claim.", } func (DeviceClassSpec) SwaggerDoc() map[string]string { @@ -222,46 +220,6 @@ func (OpaqueDeviceConfiguration) SwaggerDoc() map[string]string { return map_OpaqueDeviceConfiguration } -var map_PodSchedulingContext = map[string]string{ - "": "PodSchedulingContext objects hold information that is needed to schedule a Pod with ResourceClaims that use \"WaitForFirstConsumer\" allocation mode.\n\nThis is an alpha type and requires enabling the DRAControlPlaneController feature gate.", - "metadata": "Standard object metadata", - "spec": "Spec describes where resources for the Pod are needed.", - "status": "Status describes where resources for the Pod can be allocated.", -} - -func (PodSchedulingContext) SwaggerDoc() map[string]string { - return map_PodSchedulingContext -} - -var map_PodSchedulingContextList = map[string]string{ - "": "PodSchedulingContextList is a collection of Pod scheduling objects.", - "metadata": "Standard list metadata", - "items": "Items is the list of PodSchedulingContext objects.", -} - -func (PodSchedulingContextList) SwaggerDoc() map[string]string { - return map_PodSchedulingContextList -} - -var map_PodSchedulingContextSpec = map[string]string{ - "": "PodSchedulingContextSpec describes where resources for the Pod are needed.", - "selectedNode": "SelectedNode is the node for which allocation of ResourceClaims that are referenced by the Pod and that use \"WaitForFirstConsumer\" allocation is to be attempted.", - "potentialNodes": "PotentialNodes lists nodes where the Pod might be able to run.\n\nThe size of this field is limited to 128. This is large enough for many clusters. Larger clusters may need more attempts to find a node that suits all pending resources. This may get increased in the future, but not reduced.", -} - -func (PodSchedulingContextSpec) SwaggerDoc() map[string]string { - return map_PodSchedulingContextSpec -} - -var map_PodSchedulingContextStatus = map[string]string{ - "": "PodSchedulingContextStatus describes where resources for the Pod can be allocated.", - "resourceClaims": "ResourceClaims describes resource availability for each pod.spec.resourceClaim entry where the corresponding ResourceClaim uses \"WaitForFirstConsumer\" allocation mode.", -} - -func (PodSchedulingContextStatus) SwaggerDoc() map[string]string { - return map_PodSchedulingContextStatus -} - var map_ResourceClaim = map[string]string{ "": "ResourceClaim describes a request for access to resources in the cluster, for use by workloads. For example, if a workload needs an accelerator device with specific properties, this is how that request is expressed. 
The status stanza tracks whether this claim has been satisfied and what specific resources have been allocated.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.", "metadata": "Standard object metadata", @@ -295,20 +253,9 @@ func (ResourceClaimList) SwaggerDoc() map[string]string { return map_ResourceClaimList } -var map_ResourceClaimSchedulingStatus = map[string]string{ - "": "ResourceClaimSchedulingStatus contains information about one particular ResourceClaim with \"WaitForFirstConsumer\" allocation mode.", - "name": "Name matches the pod.spec.resourceClaims[*].Name field.", - "unsuitableNodes": "UnsuitableNodes lists nodes that the ResourceClaim cannot be allocated for.\n\nThe size of this field is limited to 128, the same as for PodSchedulingSpec.PotentialNodes. This may get increased in the future, but not reduced.", -} - -func (ResourceClaimSchedulingStatus) SwaggerDoc() map[string]string { - return map_ResourceClaimSchedulingStatus -} - var map_ResourceClaimSpec = map[string]string{ - "": "ResourceClaimSpec defines what is being requested in a ResourceClaim and how to configure it.", - "devices": "Devices defines how to request devices.", - "controller": "Controller is the name of the DRA driver that is meant to handle allocation of this claim. If empty, allocation is handled by the scheduler while scheduling a pod.\n\nMust be a DNS subdomain and should end with a DNS domain owned by the vendor of the driver.\n\nThis is an alpha field and requires enabling the DRAControlPlaneController feature gate.", + "": "ResourceClaimSpec defines what is being requested in a ResourceClaim and how to configure it.", + "devices": "Devices defines how to request devices.", } func (ResourceClaimSpec) SwaggerDoc() map[string]string { @@ -316,10 +263,9 @@ func (ResourceClaimSpec) SwaggerDoc() map[string]string { } var map_ResourceClaimStatus = map[string]string{ - "": "ResourceClaimStatus tracks whether the resource has been allocated and what the result of that was.", - "allocation": "Allocation is set once the claim has been allocated successfully.", - "reservedFor": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated.\n\nIn a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled.\n\nBoth schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again.\n\nThere can be at most 32 such reservations. This may get increased in the future, but not reduced.", - "deallocationRequested": "Indicates that a claim is to be deallocated. While this is set, no new consumers may be added to ReservedFor.\n\nThis is only used if the claim needs to be deallocated by a DRA driver. 
That driver then must deallocate this claim and reset the field together with clearing the Allocation field.\n\nThis is an alpha field and requires enabling the DRAControlPlaneController feature gate.", + "": "ResourceClaimStatus tracks whether the resource has been allocated and what the result of that was.", + "allocation": "Allocation is set once the claim has been allocated successfully.", + "reservedFor": "ReservedFor indicates which entities are currently allowed to use the claim. A Pod which references a ResourceClaim which is not reserved for that Pod will not be started. A claim that is in use or might be in use because it has been reserved must not get deallocated.\n\nIn a cluster with multiple scheduler instances, two pods might get scheduled concurrently by different schedulers. When they reference the same ResourceClaim which already has reached its maximum number of consumers, only one pod can be scheduled.\n\nBoth schedulers try to add their pod to the claim.status.reservedFor field, but only the update that reaches the API server first gets stored. The other one fails with an error and the scheduler which issued it knows that it must put the pod back into the queue, waiting for the ResourceClaim to become usable again.\n\nThere can be at most 32 such reservations. This may get increased in the future, but not reduced.", } func (ResourceClaimStatus) SwaggerDoc() map[string]string { diff --git a/staging/src/k8s.io/api/resource/v1alpha3/zz_generated.deepcopy.go b/staging/src/k8s.io/api/resource/v1alpha3/zz_generated.deepcopy.go index 58171df1f2d..b951d76aa04 100644 --- a/staging/src/k8s.io/api/resource/v1alpha3/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/api/resource/v1alpha3/zz_generated.deepcopy.go @@ -355,11 +355,6 @@ func (in *DeviceClassSpec) DeepCopyInto(out *DeviceClassSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - if in.SuitableNodes != nil { - in, out := &in.SuitableNodes, &out.SuitableNodes - *out = new(v1.NodeSelector) - (*in).DeepCopyInto(*out) - } return } @@ -497,111 +492,6 @@ func (in *OpaqueDeviceConfiguration) DeepCopy() *OpaqueDeviceConfiguration { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodSchedulingContext) DeepCopyInto(out *PodSchedulingContext) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContext. -func (in *PodSchedulingContext) DeepCopy() *PodSchedulingContext { - if in == nil { - return nil - } - out := new(PodSchedulingContext) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodSchedulingContext) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PodSchedulingContextList) DeepCopyInto(out *PodSchedulingContextList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PodSchedulingContext, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContextList. -func (in *PodSchedulingContextList) DeepCopy() *PodSchedulingContextList { - if in == nil { - return nil - } - out := new(PodSchedulingContextList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodSchedulingContextList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodSchedulingContextSpec) DeepCopyInto(out *PodSchedulingContextSpec) { - *out = *in - if in.PotentialNodes != nil { - in, out := &in.PotentialNodes, &out.PotentialNodes - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContextSpec. -func (in *PodSchedulingContextSpec) DeepCopy() *PodSchedulingContextSpec { - if in == nil { - return nil - } - out := new(PodSchedulingContextSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodSchedulingContextStatus) DeepCopyInto(out *PodSchedulingContextStatus) { - *out = *in - if in.ResourceClaims != nil { - in, out := &in.ResourceClaims, &out.ResourceClaims - *out = make([]ResourceClaimSchedulingStatus, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSchedulingContextStatus. -func (in *PodSchedulingContextStatus) DeepCopy() *PodSchedulingContextStatus { - if in == nil { - return nil - } - out := new(PodSchedulingContextStatus) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ResourceClaim) DeepCopyInto(out *ResourceClaim) { *out = *in @@ -679,27 +569,6 @@ func (in *ResourceClaimList) DeepCopyObject() runtime.Object { return nil } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceClaimSchedulingStatus) DeepCopyInto(out *ResourceClaimSchedulingStatus) { - *out = *in - if in.UnsuitableNodes != nil { - in, out := &in.UnsuitableNodes, &out.UnsuitableNodes - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimSchedulingStatus. -func (in *ResourceClaimSchedulingStatus) DeepCopy() *ResourceClaimSchedulingStatus { - if in == nil { - return nil - } - out := new(ResourceClaimSchedulingStatus) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
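(Editor's note, for readers skimming the generated deletions above: the removed functions all follow the standard deepcopy-gen pattern, in which value fields are copied by plain assignment while slices and pointers get fresh backing storage so the copy cannot alias the original. A minimal hand-written sketch of that pattern for a hypothetical type follows; the struct and field names are invented for illustration and are not part of this patch.)

    package example

    // Example is a hypothetical struct used only to illustrate the
    // deepcopy-gen output style seen in the removed code above.
    type Example struct {
        Name  string
        Nodes []string
    }

    // DeepCopyInto copies the receiver into out, giving the slice its own
    // backing array so that mutating the copy cannot affect the original.
    func (in *Example) DeepCopyInto(out *Example) {
        *out = *in
        if in.Nodes != nil {
            in, out := &in.Nodes, &out.Nodes
            *out = make([]string, len(*in))
            copy(*out, *in)
        }
    }

    // DeepCopy allocates a new Example and delegates to DeepCopyInto,
    // mirroring the generated DeepCopy wrappers being deleted here.
    func (in *Example) DeepCopy() *Example {
        if in == nil {
            return nil
        }
        out := new(Example)
        in.DeepCopyInto(out)
        return out
    }
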
func (in *ResourceClaimSpec) DeepCopyInto(out *ResourceClaimSpec) { *out = *in diff --git a/staging/src/k8s.io/api/resource/v1alpha3/zz_generated.prerelease-lifecycle.go b/staging/src/k8s.io/api/resource/v1alpha3/zz_generated.prerelease-lifecycle.go index f309342be71..e49e054caba 100644 --- a/staging/src/k8s.io/api/resource/v1alpha3/zz_generated.prerelease-lifecycle.go +++ b/staging/src/k8s.io/api/resource/v1alpha3/zz_generated.prerelease-lifecycle.go @@ -57,42 +57,6 @@ func (in *DeviceClassList) APILifecycleRemoved() (major, minor int) { return 1, 37 } -// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *PodSchedulingContext) APILifecycleIntroduced() (major, minor int) { - return 1, 31 -} - -// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. -func (in *PodSchedulingContext) APILifecycleDeprecated() (major, minor int) { - return 1, 34 -} - -// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. -func (in *PodSchedulingContext) APILifecycleRemoved() (major, minor int) { - return 1, 37 -} - -// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *PodSchedulingContextList) APILifecycleIntroduced() (major, minor int) { - return 1, 31 -} - -// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. -func (in *PodSchedulingContextList) APILifecycleDeprecated() (major, minor int) { - return 1, 34 -} - -// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. -func (in *PodSchedulingContextList) APILifecycleRemoved() (major, minor int) { - return 1, 37 -} - // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
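(Editor's note on the lifecycle hunk above: the deleted methods encode the alpha API lifecycle convention that prerelease-lifecycle-gen derives from the +k8s:prerelease-lifecycle-gen tags, namely introduced in 1.31, reported deprecated three minors later in 1.34, and no longer served another three minors later in 1.37. The sketch below restates that relationship with a hypothetical type and hand-written methods rather than generated code; the names are made up for illustration only.)

    package example

    // Widget is a hypothetical alpha type used only to illustrate the
    // prerelease-lifecycle pattern shown in the generated code above.
    type Widget struct{}

    const (
        widgetIntroducedMajor = 1
        widgetIntroducedMinor = 31
    )

    // APILifecycleIntroduced reports the release that added the type.
    func (in *Widget) APILifecycleIntroduced() (major, minor int) {
        return widgetIntroducedMajor, widgetIntroducedMinor
    }

    // APILifecycleDeprecated is the introduction release plus three minor
    // versions, matching the convention in the generated comments.
    func (in *Widget) APILifecycleDeprecated() (major, minor int) {
        return widgetIntroducedMajor, widgetIntroducedMinor + 3
    }

    // APILifecycleRemoved is the deprecation release plus another three
    // minor versions (1.37 for a type introduced in 1.31).
    func (in *Widget) APILifecycleRemoved() (major, minor int) {
        return widgetIntroducedMajor, widgetIntroducedMinor + 6
    }
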
func (in *ResourceClaim) APILifecycleIntroduced() (major, minor int) { diff --git a/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha3.DeviceClass.json b/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha3.DeviceClass.json index 1b683f8015d..cc837baa173 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha3.DeviceClass.json +++ b/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha3.DeviceClass.json @@ -67,30 +67,6 @@ } } } - ], - "suitableNodes": { - "nodeSelectorTerms": [ - { - "matchExpressions": [ - { - "key": "keyValue", - "operator": "operatorValue", - "values": [ - "valuesValue" - ] - } - ], - "matchFields": [ - { - "key": "keyValue", - "operator": "operatorValue", - "values": [ - "valuesValue" - ] - } - ] - } - ] - } + ] } } \ No newline at end of file diff --git a/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha3.DeviceClass.pb b/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha3.DeviceClass.pb index 3952de283d2c1604a464c64ab35b938bcfcc6882..1ed35bed01c1d546665efe8befae054f32c19619 100644 GIT binary patch delta 23 fcmeyvvVvuT9@7`*jRq-J7HS{as@Q<^Hon_rMxlvt8q1m;R{ Rmw`l!A#y~jlVVU}0062uBjx}A diff --git a/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha3.DeviceClass.yaml b/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha3.DeviceClass.yaml index 634f2e0407a..9f786dc430e 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha3.DeviceClass.yaml +++ b/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha3.DeviceClass.yaml @@ -46,15 +46,3 @@ spec: selectors: - cel: expression: expressionValue - suitableNodes: - nodeSelectorTerms: - - matchExpressions: - - key: keyValue - operator: operatorValue - values: - - valuesValue - matchFields: - - key: keyValue - operator: operatorValue - values: - - valuesValue diff --git a/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha3.ResourceClaim.json b/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha3.ResourceClaim.json index 8f838eb64a0..131328e42d4 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha3.ResourceClaim.json +++ b/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha3.ResourceClaim.json @@ -89,8 +89,7 @@ } } ] - }, - "controller": "controllerValue" + } }, "status": { "allocation": { @@ -148,8 +147,7 @@ ] } ] - }, - "controller": "controllerValue" + } }, "reservedFor": [ { @@ -158,7 +156,6 @@ "name": "nameValue", "uid": "uidValue" } - ], - "deallocationRequested": true + ] } } \ No newline at end of file diff --git a/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha3.ResourceClaim.pb b/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha3.ResourceClaim.pb index dc1cc2a907aa46a6376b6286b41bbd80e4a950b3..7bac0d15d7acc8ea9ed29a1f510c7d8e88349303 100644 GIT binary patch delta 39 vcmZ3$@rQka0pqcahN+B<&6}q)vM{nr-D2Wew0R?wGUMc@%%&_-3`z_D1s@A= delta 71 zcmeyvzJOzb0pqKUhN+BwR!UbCS^t* SB_z4Y`n8z`}g6RSK<^_x?jEuiF&u2_!WSp{@m6?T6RER%0Kd+=HKPM-(C@e9jG*ya0 Gi2(qdHxFU} diff --git a/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha3.ResourceClaimTemplate.yaml b/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha3.ResourceClaimTemplate.yaml index 79f9482f3e5..b0e5939dd5f 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha3.ResourceClaimTemplate.yaml +++ b/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha3.ResourceClaimTemplate.yaml @@ -66,7 +66,6 @@ spec: selfLink: selfLinkValue uid: uidValue spec: - controller: 
controllerValue devices: config: - opaque: diff --git a/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha3.PodSchedulingContext.json b/staging/src/k8s.io/api/testdata/v1.31.0/resource.k8s.io.v1alpha3.DeviceClass.after_roundtrip.json similarity index 71% rename from staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha3.PodSchedulingContext.json rename to staging/src/k8s.io/api/testdata/v1.31.0/resource.k8s.io.v1alpha3.DeviceClass.after_roundtrip.json index 2b46b32ab5a..cc837baa173 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha3.PodSchedulingContext.json +++ b/staging/src/k8s.io/api/testdata/v1.31.0/resource.k8s.io.v1alpha3.DeviceClass.after_roundtrip.json @@ -1,5 +1,5 @@ { - "kind": "PodSchedulingContext", + "kind": "DeviceClass", "apiVersion": "resource.k8s.io/v1alpha3", "metadata": { "name": "nameValue", @@ -44,18 +44,28 @@ ] }, "spec": { - "selectedNode": "selectedNodeValue", - "potentialNodes": [ - "potentialNodesValue" - ] - }, - "status": { - "resourceClaims": [ + "selectors": [ { - "name": "nameValue", - "unsuitableNodes": [ - "unsuitableNodesValue" - ] + "cel": { + "expression": "expressionValue" + } + } + ], + "config": [ + { + "opaque": { + "driver": "driverValue", + "parameters": { + "apiVersion": "example.com/v1", + "kind": "CustomType", + "spec": { + "replicas": 1 + }, + "status": { + "available": 1 + } + } + } } ] } diff --git a/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha3.PodSchedulingContext.pb b/staging/src/k8s.io/api/testdata/v1.31.0/resource.k8s.io.v1alpha3.DeviceClass.after_roundtrip.pb similarity index 61% rename from staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha3.PodSchedulingContext.pb rename to staging/src/k8s.io/api/testdata/v1.31.0/resource.k8s.io.v1alpha3.DeviceClass.after_roundtrip.pb index 745c7d5f6397a0ded2cdb4a77be90af0efadffa0..1ed35bed01c1d546665efe8befae054f32c19619 100644 GIT binary patch delta 206 zcmXBNF$%&!5CzZ#t;Rx(2x2*njYPyIq}3yc{kR!$apNvK*{Fzk1J7aW$!t7;E4J_d z&wqEG-T9%A1g0?dEH-$>d4ycsdAlIv_O#4;Yi_mTl_8Pr^C~#KKT;UN0Q!7(aVFXx zw`2lmCkP`(F=B_bri delta 148 zcmZ3%@}Aiu+oG6(%YaLwD784hv?w`M4=A9QnXg}Fn3z+Lk!UO=5|Ez~oSc!GQks*Q zm+qXOSCU#$BD86vekx;-2A5!QYEEi$NotB;eoAUsVoqtQkZ?hMNorn6W?~LVtQagP frOc(s#hI6w3soahnpa$!S(2EPlZvE>L5TqXp(8Ru diff --git a/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha3.PodSchedulingContext.yaml b/staging/src/k8s.io/api/testdata/v1.31.0/resource.k8s.io.v1alpha3.DeviceClass.after_roundtrip.yaml similarity index 75% rename from staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha3.PodSchedulingContext.yaml rename to staging/src/k8s.io/api/testdata/v1.31.0/resource.k8s.io.v1alpha3.DeviceClass.after_roundtrip.yaml index ae8c1aa8a63..9f786dc430e 100644 --- a/staging/src/k8s.io/api/testdata/HEAD/resource.k8s.io.v1alpha3.PodSchedulingContext.yaml +++ b/staging/src/k8s.io/api/testdata/v1.31.0/resource.k8s.io.v1alpha3.DeviceClass.after_roundtrip.yaml @@ -1,5 +1,5 @@ apiVersion: resource.k8s.io/v1alpha3 -kind: PodSchedulingContext +kind: DeviceClass metadata: annotations: annotationsKey: annotationsValue @@ -33,11 +33,16 @@ metadata: selfLink: selfLinkValue uid: uidValue spec: - potentialNodes: - - potentialNodesValue - selectedNode: selectedNodeValue -status: - resourceClaims: - - name: nameValue - unsuitableNodes: - - unsuitableNodesValue + config: + - opaque: + driver: driverValue + parameters: + apiVersion: example.com/v1 + kind: CustomType + spec: + replicas: 1 + status: + available: 1 + selectors: + - cel: + expression: expressionValue diff 
--git a/staging/src/k8s.io/api/testdata/v1.31.0/resource.k8s.io.v1alpha3.ResourceClaim.after_roundtrip.json b/staging/src/k8s.io/api/testdata/v1.31.0/resource.k8s.io.v1alpha3.ResourceClaim.after_roundtrip.json new file mode 100644 index 00000000000..131328e42d4 --- /dev/null +++ b/staging/src/k8s.io/api/testdata/v1.31.0/resource.k8s.io.v1alpha3.ResourceClaim.after_roundtrip.json @@ -0,0 +1,161 @@ +{ + "kind": "ResourceClaim", + "apiVersion": "resource.k8s.io/v1alpha3", + "metadata": { + "name": "nameValue", + "generateName": "generateNameValue", + "namespace": "namespaceValue", + "selfLink": "selfLinkValue", + "uid": "uidValue", + "resourceVersion": "resourceVersionValue", + "generation": 7, + "creationTimestamp": "2008-01-01T01:01:01Z", + "deletionTimestamp": "2009-01-01T01:01:01Z", + "deletionGracePeriodSeconds": 10, + "labels": { + "labelsKey": "labelsValue" + }, + "annotations": { + "annotationsKey": "annotationsValue" + }, + "ownerReferences": [ + { + "apiVersion": "apiVersionValue", + "kind": "kindValue", + "name": "nameValue", + "uid": "uidValue", + "controller": true, + "blockOwnerDeletion": true + } + ], + "finalizers": [ + "finalizersValue" + ], + "managedFields": [ + { + "manager": "managerValue", + "operation": "operationValue", + "apiVersion": "apiVersionValue", + "time": "2004-01-01T01:01:01Z", + "fieldsType": "fieldsTypeValue", + "fieldsV1": {}, + "subresource": "subresourceValue" + } + ] + }, + "spec": { + "devices": { + "requests": [ + { + "name": "nameValue", + "deviceClassName": "deviceClassNameValue", + "selectors": [ + { + "cel": { + "expression": "expressionValue" + } + } + ], + "allocationMode": "allocationModeValue", + "count": 5, + "adminAccess": true + } + ], + "constraints": [ + { + "requests": [ + "requestsValue" + ], + "matchAttribute": "matchAttributeValue" + } + ], + "config": [ + { + "requests": [ + "requestsValue" + ], + "opaque": { + "driver": "driverValue", + "parameters": { + "apiVersion": "example.com/v1", + "kind": "CustomType", + "spec": { + "replicas": 1 + }, + "status": { + "available": 1 + } + } + } + } + ] + } + }, + "status": { + "allocation": { + "devices": { + "results": [ + { + "request": "requestValue", + "driver": "driverValue", + "pool": "poolValue", + "device": "deviceValue" + } + ], + "config": [ + { + "source": "sourceValue", + "requests": [ + "requestsValue" + ], + "opaque": { + "driver": "driverValue", + "parameters": { + "apiVersion": "example.com/v1", + "kind": "CustomType", + "spec": { + "replicas": 1 + }, + "status": { + "available": 1 + } + } + } + } + ] + }, + "nodeSelector": { + "nodeSelectorTerms": [ + { + "matchExpressions": [ + { + "key": "keyValue", + "operator": "operatorValue", + "values": [ + "valuesValue" + ] + } + ], + "matchFields": [ + { + "key": "keyValue", + "operator": "operatorValue", + "values": [ + "valuesValue" + ] + } + ] + } + ] + } + }, + "reservedFor": [ + { + "apiGroup": "apiGroupValue", + "resource": "resourceValue", + "name": "nameValue", + "uid": "uidValue" + } + ] + } +} \ No newline at end of file diff --git a/staging/src/k8s.io/api/testdata/v1.31.0/resource.k8s.io.v1alpha3.ResourceClaim.after_roundtrip.pb b/staging/src/k8s.io/api/testdata/v1.31.0/resource.k8s.io.v1alpha3.ResourceClaim.after_roundtrip.pb new file mode 100644 index 0000000000000000000000000000000000000000..7bac0d15d7acc8ea9ed29a1f510c7d8e88349303 GIT binary patch literal 1020 zcmc&zJ#Q2-5WQW3ayOUz$X<$QOA8^0(2?YXbh?hDK;kosPzpNl?j$kiT^oD%l;ac$ z35kN1UjQvXft~`CKR|^jsQCxrweO2mbadl42j@z)jdxs70Y-><;_7%f#)` 
z4(0Nc_JYRYG_%hsOM}m=@ZE=6MpHbZyue`n1T$1L$Hy}$Y?fIqY3xc!LnA+a$g-im zu~jWtVx!%S*{esWG!vOc`>T7^>c#t?pH|wH`zHGM>x+pF;MpeBIE|3&2RIJuF0?ly z0!^A_BB!}&V$xrf6Ae==vY)uNIu6WR9qo@yPljB@tU5VU>XdJ(Lp zG@~b|ED$t>EcMUmhnL$Bqv6NJ?P|ihcDLoR2d+Ns3Z7;Y64TUcX)VUKAbW)nf zr+4#Qv8c#hW%$OsTvmXFdZO59ra6IVLKb$B0l_nxN{-!Fq-J->EmE3o@clyPA}tjg zOiPT(U_um1&SI*`VCUSRW^RRvO~(;sX09B|#CiD5hYvn{^`Hl9Q{K+dqrq^v;92+i7?HTYC7PG#$bLT!-oq$5U2rdxTr2us$m1Kb_9M*7cxaW^qr6LfWRG Rk-DUuEVg~|_reOP`~isET`T|q literal 0 HcmV?d00001 diff --git a/staging/src/k8s.io/api/testdata/v1.31.0/resource.k8s.io.v1alpha3.ResourceClaim.after_roundtrip.yaml b/staging/src/k8s.io/api/testdata/v1.31.0/resource.k8s.io.v1alpha3.ResourceClaim.after_roundtrip.yaml new file mode 100644 index 00000000000..26f136171bb --- /dev/null +++ b/staging/src/k8s.io/api/testdata/v1.31.0/resource.k8s.io.v1alpha3.ResourceClaim.after_roundtrip.yaml @@ -0,0 +1,99 @@ +apiVersion: resource.k8s.io/v1alpha3 +kind: ResourceClaim +metadata: + annotations: + annotationsKey: annotationsValue + creationTimestamp: "2008-01-01T01:01:01Z" + deletionGracePeriodSeconds: 10 + deletionTimestamp: "2009-01-01T01:01:01Z" + finalizers: + - finalizersValue + generateName: generateNameValue + generation: 7 + labels: + labelsKey: labelsValue + managedFields: + - apiVersion: apiVersionValue + fieldsType: fieldsTypeValue + fieldsV1: {} + manager: managerValue + operation: operationValue + subresource: subresourceValue + time: "2004-01-01T01:01:01Z" + name: nameValue + namespace: namespaceValue + ownerReferences: + - apiVersion: apiVersionValue + blockOwnerDeletion: true + controller: true + kind: kindValue + name: nameValue + uid: uidValue + resourceVersion: resourceVersionValue + selfLink: selfLinkValue + uid: uidValue +spec: + devices: + config: + - opaque: + driver: driverValue + parameters: + apiVersion: example.com/v1 + kind: CustomType + spec: + replicas: 1 + status: + available: 1 + requests: + - requestsValue + constraints: + - matchAttribute: matchAttributeValue + requests: + - requestsValue + requests: + - adminAccess: true + allocationMode: allocationModeValue + count: 5 + deviceClassName: deviceClassNameValue + name: nameValue + selectors: + - cel: + expression: expressionValue +status: + allocation: + devices: + config: + - opaque: + driver: driverValue + parameters: + apiVersion: example.com/v1 + kind: CustomType + spec: + replicas: 1 + status: + available: 1 + requests: + - requestsValue + source: sourceValue + results: + - device: deviceValue + driver: driverValue + pool: poolValue + request: requestValue + nodeSelector: + nodeSelectorTerms: + - matchExpressions: + - key: keyValue + operator: operatorValue + values: + - valuesValue + matchFields: + - key: keyValue + operator: operatorValue + values: + - valuesValue + reservedFor: + - apiGroup: apiGroupValue + name: nameValue + resource: resourceValue + uid: uidValue diff --git a/staging/src/k8s.io/api/testdata/v1.31.0/resource.k8s.io.v1alpha3.ResourceClaimTemplate.after_roundtrip.json b/staging/src/k8s.io/api/testdata/v1.31.0/resource.k8s.io.v1alpha3.ResourceClaimTemplate.after_roundtrip.json new file mode 100644 index 00000000000..0669af6df46 --- /dev/null +++ b/staging/src/k8s.io/api/testdata/v1.31.0/resource.k8s.io.v1alpha3.ResourceClaimTemplate.after_roundtrip.json @@ -0,0 +1,138 @@ +{ + "kind": "ResourceClaimTemplate", + "apiVersion": "resource.k8s.io/v1alpha3", + "metadata": { + "name": "nameValue", + "generateName": "generateNameValue", + "namespace": "namespaceValue", + "selfLink": "selfLinkValue", + "uid": "uidValue", + 
"resourceVersion": "resourceVersionValue", + "generation": 7, + "creationTimestamp": "2008-01-01T01:01:01Z", + "deletionTimestamp": "2009-01-01T01:01:01Z", + "deletionGracePeriodSeconds": 10, + "labels": { + "labelsKey": "labelsValue" + }, + "annotations": { + "annotationsKey": "annotationsValue" + }, + "ownerReferences": [ + { + "apiVersion": "apiVersionValue", + "kind": "kindValue", + "name": "nameValue", + "uid": "uidValue", + "controller": true, + "blockOwnerDeletion": true + } + ], + "finalizers": [ + "finalizersValue" + ], + "managedFields": [ + { + "manager": "managerValue", + "operation": "operationValue", + "apiVersion": "apiVersionValue", + "time": "2004-01-01T01:01:01Z", + "fieldsType": "fieldsTypeValue", + "fieldsV1": {}, + "subresource": "subresourceValue" + } + ] + }, + "spec": { + "metadata": { + "name": "nameValue", + "generateName": "generateNameValue", + "namespace": "namespaceValue", + "selfLink": "selfLinkValue", + "uid": "uidValue", + "resourceVersion": "resourceVersionValue", + "generation": 7, + "creationTimestamp": "2008-01-01T01:01:01Z", + "deletionTimestamp": "2009-01-01T01:01:01Z", + "deletionGracePeriodSeconds": 10, + "labels": { + "labelsKey": "labelsValue" + }, + "annotations": { + "annotationsKey": "annotationsValue" + }, + "ownerReferences": [ + { + "apiVersion": "apiVersionValue", + "kind": "kindValue", + "name": "nameValue", + "uid": "uidValue", + "controller": true, + "blockOwnerDeletion": true + } + ], + "finalizers": [ + "finalizersValue" + ], + "managedFields": [ + { + "manager": "managerValue", + "operation": "operationValue", + "apiVersion": "apiVersionValue", + "time": "2004-01-01T01:01:01Z", + "fieldsType": "fieldsTypeValue", + "fieldsV1": {}, + "subresource": "subresourceValue" + } + ] + }, + "spec": { + "devices": { + "requests": [ + { + "name": "nameValue", + "deviceClassName": "deviceClassNameValue", + "selectors": [ + { + "cel": { + "expression": "expressionValue" + } + } + ], + "allocationMode": "allocationModeValue", + "count": 5, + "adminAccess": true + } + ], + "constraints": [ + { + "requests": [ + "requestsValue" + ], + "matchAttribute": "matchAttributeValue" + } + ], + "config": [ + { + "requests": [ + "requestsValue" + ], + "opaque": { + "driver": "driverValue", + "parameters": { + "apiVersion": "example.com/v1", + "kind": "CustomType", + "spec": { + "replicas": 1 + }, + "status": { + "available": 1 + } + } + } + } + ] + } + } + } +} \ No newline at end of file diff --git a/staging/src/k8s.io/api/testdata/v1.31.0/resource.k8s.io.v1alpha3.ResourceClaimTemplate.after_roundtrip.pb b/staging/src/k8s.io/api/testdata/v1.31.0/resource.k8s.io.v1alpha3.ResourceClaimTemplate.after_roundtrip.pb new file mode 100644 index 0000000000000000000000000000000000000000..fab18a8e9e5f8cabc3e1b71334ff69803a07d138 GIT binary patch literal 1037 zcmeHG&2AGh5Z+CLnz%GcH$|%UXmLOl3QZAGlv~>y|3Xy{+-7&Dt%=ub{1dfAkt$W< zz*}(cBlHcBDqaC`K;p(5fY(cY)H^qBpT8N8$KQN28t8!Z$(BN0Se0ULG|)X(JR0|b z%Tst9-Fx#la=?M*hnPza2BWX5|@H++wtGf&!Hby_`{TtBh(1X_Z3PHS*)< zOpN@E?S^HUN4uM|*N;$XRtV1yR`(l?o6kS4>z&}KLs!4PIP{vl-6Bm633B}m&!e>p z`Wp!)EfAtGU|bO&?kt^s&bue1Ibx!EI9~Km=iO|#L9*pWcEUlZ+Q&=)XCGXHzRssG zA$M~SaDvJcQLB*U|7Y#Tx64rXhwnGPS31G2D~3GNhv%|VW>_b+$#lP?Z8BHFi~6(v zU*&(V@@0)&)X1x4H*aP*W+^()YrQaZmu!$Wo=G>PxlL&oI4@E^_!mW14fMgCoiMsj z))l_Ds7+;=sGEaHPxp*bEU~7_j6a6A(=5?sEmLefb5=%@30;_y4k@02+e7T7Mea70 xKBT3k9e-f8De}@+j?)rTI-F32lCu;v9rmY=b=zSbb`{6K-1cxR1JgL5!5?0EXo&y- literal 0 HcmV?d00001 diff --git 
a/staging/src/k8s.io/api/testdata/v1.31.0/resource.k8s.io.v1alpha3.ResourceClaimTemplate.after_roundtrip.yaml b/staging/src/k8s.io/api/testdata/v1.31.0/resource.k8s.io.v1alpha3.ResourceClaimTemplate.after_roundtrip.yaml new file mode 100644 index 00000000000..b0e5939dd5f --- /dev/null +++ b/staging/src/k8s.io/api/testdata/v1.31.0/resource.k8s.io.v1alpha3.ResourceClaimTemplate.after_roundtrip.yaml @@ -0,0 +1,94 @@ +apiVersion: resource.k8s.io/v1alpha3 +kind: ResourceClaimTemplate +metadata: + annotations: + annotationsKey: annotationsValue + creationTimestamp: "2008-01-01T01:01:01Z" + deletionGracePeriodSeconds: 10 + deletionTimestamp: "2009-01-01T01:01:01Z" + finalizers: + - finalizersValue + generateName: generateNameValue + generation: 7 + labels: + labelsKey: labelsValue + managedFields: + - apiVersion: apiVersionValue + fieldsType: fieldsTypeValue + fieldsV1: {} + manager: managerValue + operation: operationValue + subresource: subresourceValue + time: "2004-01-01T01:01:01Z" + name: nameValue + namespace: namespaceValue + ownerReferences: + - apiVersion: apiVersionValue + blockOwnerDeletion: true + controller: true + kind: kindValue + name: nameValue + uid: uidValue + resourceVersion: resourceVersionValue + selfLink: selfLinkValue + uid: uidValue +spec: + metadata: + annotations: + annotationsKey: annotationsValue + creationTimestamp: "2008-01-01T01:01:01Z" + deletionGracePeriodSeconds: 10 + deletionTimestamp: "2009-01-01T01:01:01Z" + finalizers: + - finalizersValue + generateName: generateNameValue + generation: 7 + labels: + labelsKey: labelsValue + managedFields: + - apiVersion: apiVersionValue + fieldsType: fieldsTypeValue + fieldsV1: {} + manager: managerValue + operation: operationValue + subresource: subresourceValue + time: "2004-01-01T01:01:01Z" + name: nameValue + namespace: namespaceValue + ownerReferences: + - apiVersion: apiVersionValue + blockOwnerDeletion: true + controller: true + kind: kindValue + name: nameValue + uid: uidValue + resourceVersion: resourceVersionValue + selfLink: selfLinkValue + uid: uidValue + spec: + devices: + config: + - opaque: + driver: driverValue + parameters: + apiVersion: example.com/v1 + kind: CustomType + spec: + replicas: 1 + status: + available: 1 + requests: + - requestsValue + constraints: + - matchAttribute: matchAttributeValue + requests: + - requestsValue + requests: + - adminAccess: true + allocationMode: allocationModeValue + count: 5 + deviceClassName: deviceClassNameValue + name: nameValue + selectors: + - cel: + expression: expressionValue diff --git a/staging/src/k8s.io/client-go/applyconfigurations/internal/internal.go b/staging/src/k8s.io/client-go/applyconfigurations/internal/internal.go index a6976924e55..c5c674fcd53 100644 --- a/staging/src/k8s.io/client-go/applyconfigurations/internal/internal.go +++ b/staging/src/k8s.io/client-go/applyconfigurations/internal/internal.go @@ -12247,9 +12247,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: io.k8s.api.resource.v1alpha3.AllocationResult map: fields: - - name: controller - type: - scalar: string - name: devices type: namedType: io.k8s.api.resource.v1alpha3.DeviceAllocationResult @@ -12404,9 +12401,6 @@ var schemaYAML = typed.YAMLObject(`types: elementType: namedType: io.k8s.api.resource.v1alpha3.DeviceSelector elementRelationship: atomic - - name: suitableNodes - type: - namedType: io.k8s.api.core.v1.NodeSelector - name: io.k8s.api.resource.v1alpha3.DeviceConstraint map: fields: @@ -12481,50 +12475,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: parameters type: 
namedType: __untyped_atomic_ -- name: io.k8s.api.resource.v1alpha3.PodSchedulingContext - map: - fields: - - name: apiVersion - type: - scalar: string - - name: kind - type: - scalar: string - - name: metadata - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta - default: {} - - name: spec - type: - namedType: io.k8s.api.resource.v1alpha3.PodSchedulingContextSpec - default: {} - - name: status - type: - namedType: io.k8s.api.resource.v1alpha3.PodSchedulingContextStatus - default: {} -- name: io.k8s.api.resource.v1alpha3.PodSchedulingContextSpec - map: - fields: - - name: potentialNodes - type: - list: - elementType: - scalar: string - elementRelationship: atomic - - name: selectedNode - type: - scalar: string -- name: io.k8s.api.resource.v1alpha3.PodSchedulingContextStatus - map: - fields: - - name: resourceClaims - type: - list: - elementType: - namedType: io.k8s.api.resource.v1alpha3.ResourceClaimSchedulingStatus - elementRelationship: associative - keys: - - name - name: io.k8s.api.resource.v1alpha3.ResourceClaim map: fields: @@ -12564,25 +12514,9 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" -- name: io.k8s.api.resource.v1alpha3.ResourceClaimSchedulingStatus - map: - fields: - - name: name - type: - scalar: string - default: "" - - name: unsuitableNodes - type: - list: - elementType: - scalar: string - elementRelationship: atomic - name: io.k8s.api.resource.v1alpha3.ResourceClaimSpec map: fields: - - name: controller - type: - scalar: string - name: devices type: namedType: io.k8s.api.resource.v1alpha3.DeviceClaim @@ -12593,9 +12527,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: allocation type: namedType: io.k8s.api.resource.v1alpha3.AllocationResult - - name: deallocationRequested - type: - scalar: boolean - name: reservedFor type: list: diff --git a/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha3/allocationresult.go b/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha3/allocationresult.go index 3090b2f9d35..7c7427ee970 100644 --- a/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha3/allocationresult.go +++ b/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha3/allocationresult.go @@ -27,7 +27,6 @@ import ( type AllocationResultApplyConfiguration struct { Devices *DeviceAllocationResultApplyConfiguration `json:"devices,omitempty"` NodeSelector *v1.NodeSelectorApplyConfiguration `json:"nodeSelector,omitempty"` - Controller *string `json:"controller,omitempty"` } // AllocationResultApplyConfiguration constructs a declarative configuration of the AllocationResult type for use with @@ -51,11 +50,3 @@ func (b *AllocationResultApplyConfiguration) WithNodeSelector(value *v1.NodeSele b.NodeSelector = value return b } - -// WithController sets the Controller field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Controller field is set to the value of the last call. 
-func (b *AllocationResultApplyConfiguration) WithController(value string) *AllocationResultApplyConfiguration { - b.Controller = &value - return b -} diff --git a/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclassspec.go b/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclassspec.go index d40a43de66f..37db6a1cc93 100644 --- a/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclassspec.go +++ b/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha3/deviceclassspec.go @@ -18,16 +18,11 @@ limitations under the License. package v1alpha3 -import ( - v1 "k8s.io/client-go/applyconfigurations/core/v1" -) - // DeviceClassSpecApplyConfiguration represents a declarative configuration of the DeviceClassSpec type for use // with apply. type DeviceClassSpecApplyConfiguration struct { - Selectors []DeviceSelectorApplyConfiguration `json:"selectors,omitempty"` - Config []DeviceClassConfigurationApplyConfiguration `json:"config,omitempty"` - SuitableNodes *v1.NodeSelectorApplyConfiguration `json:"suitableNodes,omitempty"` + Selectors []DeviceSelectorApplyConfiguration `json:"selectors,omitempty"` + Config []DeviceClassConfigurationApplyConfiguration `json:"config,omitempty"` } // DeviceClassSpecApplyConfiguration constructs a declarative configuration of the DeviceClassSpec type for use with @@ -61,11 +56,3 @@ func (b *DeviceClassSpecApplyConfiguration) WithConfig(values ...*DeviceClassCon } return b } - -// WithSuitableNodes sets the SuitableNodes field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SuitableNodes field is set to the value of the last call. -func (b *DeviceClassSpecApplyConfiguration) WithSuitableNodes(value *v1.NodeSelectorApplyConfiguration) *DeviceClassSpecApplyConfiguration { - b.SuitableNodes = value - return b -} diff --git a/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontext.go b/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontext.go deleted file mode 100644 index 8b68e06df3b..00000000000 --- a/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontext.go +++ /dev/null @@ -1,264 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha3 - -import ( - resourcev1alpha3 "k8s.io/api/resource/v1alpha3" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - managedfields "k8s.io/apimachinery/pkg/util/managedfields" - internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" -) - -// PodSchedulingContextApplyConfiguration represents a declarative configuration of the PodSchedulingContext type for use -// with apply. 
-type PodSchedulingContextApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *PodSchedulingContextSpecApplyConfiguration `json:"spec,omitempty"` - Status *PodSchedulingContextStatusApplyConfiguration `json:"status,omitempty"` -} - -// PodSchedulingContext constructs a declarative configuration of the PodSchedulingContext type for use with -// apply. -func PodSchedulingContext(name, namespace string) *PodSchedulingContextApplyConfiguration { - b := &PodSchedulingContextApplyConfiguration{} - b.WithName(name) - b.WithNamespace(namespace) - b.WithKind("PodSchedulingContext") - b.WithAPIVersion("resource.k8s.io/v1alpha3") - return b -} - -// ExtractPodSchedulingContext extracts the applied configuration owned by fieldManager from -// podSchedulingContext. If no managedFields are found in podSchedulingContext for fieldManager, a -// PodSchedulingContextApplyConfiguration is returned with only the Name, Namespace (if applicable), -// APIVersion and Kind populated. It is possible that no managed fields were found for because other -// field managers have taken ownership of all the fields previously owned by fieldManager, or because -// the fieldManager never owned fields any fields. -// podSchedulingContext must be a unmodified PodSchedulingContext API object that was retrieved from the Kubernetes API. -// ExtractPodSchedulingContext provides a way to perform a extract/modify-in-place/apply workflow. -// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously -// applied if another fieldManager has updated or force applied any of the previously applied fields. -// Experimental! -func ExtractPodSchedulingContext(podSchedulingContext *resourcev1alpha3.PodSchedulingContext, fieldManager string) (*PodSchedulingContextApplyConfiguration, error) { - return extractPodSchedulingContext(podSchedulingContext, fieldManager, "") -} - -// ExtractPodSchedulingContextStatus is the same as ExtractPodSchedulingContext except -// that it extracts the status subresource applied configuration. -// Experimental! -func ExtractPodSchedulingContextStatus(podSchedulingContext *resourcev1alpha3.PodSchedulingContext, fieldManager string) (*PodSchedulingContextApplyConfiguration, error) { - return extractPodSchedulingContext(podSchedulingContext, fieldManager, "status") -} - -func extractPodSchedulingContext(podSchedulingContext *resourcev1alpha3.PodSchedulingContext, fieldManager string, subresource string) (*PodSchedulingContextApplyConfiguration, error) { - b := &PodSchedulingContextApplyConfiguration{} - err := managedfields.ExtractInto(podSchedulingContext, internal.Parser().Type("io.k8s.api.resource.v1alpha3.PodSchedulingContext"), fieldManager, b, subresource) - if err != nil { - return nil, err - } - b.WithName(podSchedulingContext.Name) - b.WithNamespace(podSchedulingContext.Namespace) - - b.WithKind("PodSchedulingContext") - b.WithAPIVersion("resource.k8s.io/v1alpha3") - return b, nil -} - -// WithKind sets the Kind field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Kind field is set to the value of the last call. 
-func (b *PodSchedulingContextApplyConfiguration) WithKind(value string) *PodSchedulingContextApplyConfiguration { - b.TypeMetaApplyConfiguration.Kind = &value - return b -} - -// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the APIVersion field is set to the value of the last call. -func (b *PodSchedulingContextApplyConfiguration) WithAPIVersion(value string) *PodSchedulingContextApplyConfiguration { - b.TypeMetaApplyConfiguration.APIVersion = &value - return b -} - -// WithName sets the Name field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. -func (b *PodSchedulingContextApplyConfiguration) WithName(value string) *PodSchedulingContextApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ObjectMetaApplyConfiguration.Name = &value - return b -} - -// WithGenerateName sets the GenerateName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the GenerateName field is set to the value of the last call. -func (b *PodSchedulingContextApplyConfiguration) WithGenerateName(value string) *PodSchedulingContextApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ObjectMetaApplyConfiguration.GenerateName = &value - return b -} - -// WithNamespace sets the Namespace field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Namespace field is set to the value of the last call. -func (b *PodSchedulingContextApplyConfiguration) WithNamespace(value string) *PodSchedulingContextApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ObjectMetaApplyConfiguration.Namespace = &value - return b -} - -// WithUID sets the UID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the UID field is set to the value of the last call. -func (b *PodSchedulingContextApplyConfiguration) WithUID(value types.UID) *PodSchedulingContextApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ObjectMetaApplyConfiguration.UID = &value - return b -} - -// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ResourceVersion field is set to the value of the last call. -func (b *PodSchedulingContextApplyConfiguration) WithResourceVersion(value string) *PodSchedulingContextApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ObjectMetaApplyConfiguration.ResourceVersion = &value - return b -} - -// WithGeneration sets the Generation field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Generation field is set to the value of the last call. 
-func (b *PodSchedulingContextApplyConfiguration) WithGeneration(value int64) *PodSchedulingContextApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ObjectMetaApplyConfiguration.Generation = &value - return b -} - -// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *PodSchedulingContextApplyConfiguration) WithCreationTimestamp(value metav1.Time) *PodSchedulingContextApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ObjectMetaApplyConfiguration.CreationTimestamp = &value - return b -} - -// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DeletionTimestamp field is set to the value of the last call. -func (b *PodSchedulingContextApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *PodSchedulingContextApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ObjectMetaApplyConfiguration.DeletionTimestamp = &value - return b -} - -// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. -func (b *PodSchedulingContextApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *PodSchedulingContextApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ObjectMetaApplyConfiguration.DeletionGracePeriodSeconds = &value - return b -} - -// WithLabels puts the entries into the Labels field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, the entries provided by each call will be put on the Labels field, -// overwriting an existing map entries in Labels field with the same key. -func (b *PodSchedulingContextApplyConfiguration) WithLabels(entries map[string]string) *PodSchedulingContextApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - if b.ObjectMetaApplyConfiguration.Labels == nil && len(entries) > 0 { - b.ObjectMetaApplyConfiguration.Labels = make(map[string]string, len(entries)) - } - for k, v := range entries { - b.ObjectMetaApplyConfiguration.Labels[k] = v - } - return b -} - -// WithAnnotations puts the entries into the Annotations field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, the entries provided by each call will be put on the Annotations field, -// overwriting an existing map entries in Annotations field with the same key. 
-func (b *PodSchedulingContextApplyConfiguration) WithAnnotations(entries map[string]string) *PodSchedulingContextApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - if b.ObjectMetaApplyConfiguration.Annotations == nil && len(entries) > 0 { - b.ObjectMetaApplyConfiguration.Annotations = make(map[string]string, len(entries)) - } - for k, v := range entries { - b.ObjectMetaApplyConfiguration.Annotations[k] = v - } - return b -} - -// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *PodSchedulingContextApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *PodSchedulingContextApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - for i := range values { - if values[i] == nil { - panic("nil value passed to WithOwnerReferences") - } - b.ObjectMetaApplyConfiguration.OwnerReferences = append(b.ObjectMetaApplyConfiguration.OwnerReferences, *values[i]) - } - return b -} - -// WithFinalizers adds the given value to the Finalizers field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Finalizers field. -func (b *PodSchedulingContextApplyConfiguration) WithFinalizers(values ...string) *PodSchedulingContextApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - for i := range values { - b.ObjectMetaApplyConfiguration.Finalizers = append(b.ObjectMetaApplyConfiguration.Finalizers, values[i]) - } - return b -} - -func (b *PodSchedulingContextApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { - if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} - } -} - -// WithSpec sets the Spec field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Spec field is set to the value of the last call. -func (b *PodSchedulingContextApplyConfiguration) WithSpec(value *PodSchedulingContextSpecApplyConfiguration) *PodSchedulingContextApplyConfiguration { - b.Spec = value - return b -} - -// WithStatus sets the Status field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Status field is set to the value of the last call. -func (b *PodSchedulingContextApplyConfiguration) WithStatus(value *PodSchedulingContextStatusApplyConfiguration) *PodSchedulingContextApplyConfiguration { - b.Status = value - return b -} - -// GetName retrieves the value of the Name field in the declarative configuration. 
-func (b *PodSchedulingContextApplyConfiguration) GetName() *string { - b.ensureObjectMetaApplyConfigurationExists() - return b.ObjectMetaApplyConfiguration.Name -} diff --git a/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontextspec.go b/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontextspec.go deleted file mode 100644 index fd25df7a531..00000000000 --- a/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontextspec.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha3 - -// PodSchedulingContextSpecApplyConfiguration represents a declarative configuration of the PodSchedulingContextSpec type for use -// with apply. -type PodSchedulingContextSpecApplyConfiguration struct { - SelectedNode *string `json:"selectedNode,omitempty"` - PotentialNodes []string `json:"potentialNodes,omitempty"` -} - -// PodSchedulingContextSpecApplyConfiguration constructs a declarative configuration of the PodSchedulingContextSpec type for use with -// apply. -func PodSchedulingContextSpec() *PodSchedulingContextSpecApplyConfiguration { - return &PodSchedulingContextSpecApplyConfiguration{} -} - -// WithSelectedNode sets the SelectedNode field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the SelectedNode field is set to the value of the last call. -func (b *PodSchedulingContextSpecApplyConfiguration) WithSelectedNode(value string) *PodSchedulingContextSpecApplyConfiguration { - b.SelectedNode = &value - return b -} - -// WithPotentialNodes adds the given value to the PotentialNodes field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the PotentialNodes field. -func (b *PodSchedulingContextSpecApplyConfiguration) WithPotentialNodes(values ...string) *PodSchedulingContextSpecApplyConfiguration { - for i := range values { - b.PotentialNodes = append(b.PotentialNodes, values[i]) - } - return b -} diff --git a/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontextstatus.go b/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontextstatus.go deleted file mode 100644 index a06e370cc3c..00000000000 --- a/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha3/podschedulingcontextstatus.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha3 - -// PodSchedulingContextStatusApplyConfiguration represents a declarative configuration of the PodSchedulingContextStatus type for use -// with apply. -type PodSchedulingContextStatusApplyConfiguration struct { - ResourceClaims []ResourceClaimSchedulingStatusApplyConfiguration `json:"resourceClaims,omitempty"` -} - -// PodSchedulingContextStatusApplyConfiguration constructs a declarative configuration of the PodSchedulingContextStatus type for use with -// apply. -func PodSchedulingContextStatus() *PodSchedulingContextStatusApplyConfiguration { - return &PodSchedulingContextStatusApplyConfiguration{} -} - -// WithResourceClaims adds the given value to the ResourceClaims field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the ResourceClaims field. -func (b *PodSchedulingContextStatusApplyConfiguration) WithResourceClaims(values ...*ResourceClaimSchedulingStatusApplyConfiguration) *PodSchedulingContextStatusApplyConfiguration { - for i := range values { - if values[i] == nil { - panic("nil value passed to WithResourceClaims") - } - b.ResourceClaims = append(b.ResourceClaims, *values[i]) - } - return b -} diff --git a/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimschedulingstatus.go b/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimschedulingstatus.go deleted file mode 100644 index caab89acdb4..00000000000 --- a/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimschedulingstatus.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha3 - -// ResourceClaimSchedulingStatusApplyConfiguration represents a declarative configuration of the ResourceClaimSchedulingStatus type for use -// with apply. -type ResourceClaimSchedulingStatusApplyConfiguration struct { - Name *string `json:"name,omitempty"` - UnsuitableNodes []string `json:"unsuitableNodes,omitempty"` -} - -// ResourceClaimSchedulingStatusApplyConfiguration constructs a declarative configuration of the ResourceClaimSchedulingStatus type for use with -// apply. 
-func ResourceClaimSchedulingStatus() *ResourceClaimSchedulingStatusApplyConfiguration { - return &ResourceClaimSchedulingStatusApplyConfiguration{} -} - -// WithName sets the Name field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Name field is set to the value of the last call. -func (b *ResourceClaimSchedulingStatusApplyConfiguration) WithName(value string) *ResourceClaimSchedulingStatusApplyConfiguration { - b.Name = &value - return b -} - -// WithUnsuitableNodes adds the given value to the UnsuitableNodes field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the UnsuitableNodes field. -func (b *ResourceClaimSchedulingStatusApplyConfiguration) WithUnsuitableNodes(values ...string) *ResourceClaimSchedulingStatusApplyConfiguration { - for i := range values { - b.UnsuitableNodes = append(b.UnsuitableNodes, values[i]) - } - return b -} diff --git a/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimspec.go b/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimspec.go index 7c5b65681df..dfe8bdb1490 100644 --- a/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimspec.go +++ b/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimspec.go @@ -21,8 +21,7 @@ package v1alpha3 // ResourceClaimSpecApplyConfiguration represents a declarative configuration of the ResourceClaimSpec type for use // with apply. type ResourceClaimSpecApplyConfiguration struct { - Devices *DeviceClaimApplyConfiguration `json:"devices,omitempty"` - Controller *string `json:"controller,omitempty"` + Devices *DeviceClaimApplyConfiguration `json:"devices,omitempty"` } // ResourceClaimSpecApplyConfiguration constructs a declarative configuration of the ResourceClaimSpec type for use with @@ -38,11 +37,3 @@ func (b *ResourceClaimSpecApplyConfiguration) WithDevices(value *DeviceClaimAppl b.Devices = value return b } - -// WithController sets the Controller field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Controller field is set to the value of the last call. -func (b *ResourceClaimSpecApplyConfiguration) WithController(value string) *ResourceClaimSpecApplyConfiguration { - b.Controller = &value - return b -} diff --git a/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimstatus.go b/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimstatus.go index a52af3ec366..31f92c36ca1 100644 --- a/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimstatus.go +++ b/staging/src/k8s.io/client-go/applyconfigurations/resource/v1alpha3/resourceclaimstatus.go @@ -21,9 +21,8 @@ package v1alpha3 // ResourceClaimStatusApplyConfiguration represents a declarative configuration of the ResourceClaimStatus type for use // with apply. 
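For context on the apply-configuration changes above: with spec.controller (and, just below, status.deallocationRequested) gone, a ResourceClaim is built and server-side applied using only the devices builder. This is a minimal sketch, assuming the ResourceClaimSpec(), DeviceClaim() and DeviceRequest() constructors follow the same generated builder pattern shown elsewhere in this diff; the claim name, device class name and field manager are placeholders.

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3"
	"k8s.io/client-go/kubernetes"
)

// applyClaim builds a ResourceClaim apply configuration without the removed
// spec.controller field and server-side applies it.
func applyClaim(ctx context.Context, client kubernetes.Interface) error {
	claim := resourcev1alpha3.ResourceClaim("example-claim", "default").
		WithSpec(resourcev1alpha3.ResourceClaimSpec().
			// Only devices remains in the spec; there is no WithController anymore.
			WithDevices(resourcev1alpha3.DeviceClaim().
				WithRequests(resourcev1alpha3.DeviceRequest().
					WithName("gpu").
					WithDeviceClassName("example-device-class"))))
	_, err := client.ResourceV1alpha3().ResourceClaims("default").
		Apply(ctx, claim, metav1.ApplyOptions{FieldManager: "example-manager"})
	return err
}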
type ResourceClaimStatusApplyConfiguration struct { - Allocation *AllocationResultApplyConfiguration `json:"allocation,omitempty"` - ReservedFor []ResourceClaimConsumerReferenceApplyConfiguration `json:"reservedFor,omitempty"` - DeallocationRequested *bool `json:"deallocationRequested,omitempty"` + Allocation *AllocationResultApplyConfiguration `json:"allocation,omitempty"` + ReservedFor []ResourceClaimConsumerReferenceApplyConfiguration `json:"reservedFor,omitempty"` } // ResourceClaimStatusApplyConfiguration constructs a declarative configuration of the ResourceClaimStatus type for use with @@ -52,11 +51,3 @@ func (b *ResourceClaimStatusApplyConfiguration) WithReservedFor(values ...*Resou } return b } - -// WithDeallocationRequested sets the DeallocationRequested field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DeallocationRequested field is set to the value of the last call. -func (b *ResourceClaimStatusApplyConfiguration) WithDeallocationRequested(value bool) *ResourceClaimStatusApplyConfiguration { - b.DeallocationRequested = &value - return b -} diff --git a/staging/src/k8s.io/client-go/applyconfigurations/utils.go b/staging/src/k8s.io/client-go/applyconfigurations/utils.go index 0955b8f44f7..857f5bad8d2 100644 --- a/staging/src/k8s.io/client-go/applyconfigurations/utils.go +++ b/staging/src/k8s.io/client-go/applyconfigurations/utils.go @@ -1602,18 +1602,10 @@ func ForKind(kind schema.GroupVersionKind) interface{} { return &resourcev1alpha3.DeviceSelectorApplyConfiguration{} case v1alpha3.SchemeGroupVersion.WithKind("OpaqueDeviceConfiguration"): return &resourcev1alpha3.OpaqueDeviceConfigurationApplyConfiguration{} - case v1alpha3.SchemeGroupVersion.WithKind("PodSchedulingContext"): - return &resourcev1alpha3.PodSchedulingContextApplyConfiguration{} - case v1alpha3.SchemeGroupVersion.WithKind("PodSchedulingContextSpec"): - return &resourcev1alpha3.PodSchedulingContextSpecApplyConfiguration{} - case v1alpha3.SchemeGroupVersion.WithKind("PodSchedulingContextStatus"): - return &resourcev1alpha3.PodSchedulingContextStatusApplyConfiguration{} case v1alpha3.SchemeGroupVersion.WithKind("ResourceClaim"): return &resourcev1alpha3.ResourceClaimApplyConfiguration{} case v1alpha3.SchemeGroupVersion.WithKind("ResourceClaimConsumerReference"): return &resourcev1alpha3.ResourceClaimConsumerReferenceApplyConfiguration{} - case v1alpha3.SchemeGroupVersion.WithKind("ResourceClaimSchedulingStatus"): - return &resourcev1alpha3.ResourceClaimSchedulingStatusApplyConfiguration{} case v1alpha3.SchemeGroupVersion.WithKind("ResourceClaimSpec"): return &resourcev1alpha3.ResourceClaimSpecApplyConfiguration{} case v1alpha3.SchemeGroupVersion.WithKind("ResourceClaimStatus"): diff --git a/staging/src/k8s.io/client-go/informers/generic.go b/staging/src/k8s.io/client-go/informers/generic.go index 1593e988209..d548adac5a0 100644 --- a/staging/src/k8s.io/client-go/informers/generic.go +++ b/staging/src/k8s.io/client-go/informers/generic.go @@ -374,8 +374,6 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource // Group=resource.k8s.io, Version=v1alpha3 case v1alpha3.SchemeGroupVersion.WithResource("deviceclasses"): return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha3().DeviceClasses().Informer()}, nil - case v1alpha3.SchemeGroupVersion.WithResource("podschedulingcontexts"): - return &genericInformer{resource: 
resource.GroupResource(), informer: f.Resource().V1alpha3().PodSchedulingContexts().Informer()}, nil case v1alpha3.SchemeGroupVersion.WithResource("resourceclaims"): return &genericInformer{resource: resource.GroupResource(), informer: f.Resource().V1alpha3().ResourceClaims().Informer()}, nil case v1alpha3.SchemeGroupVersion.WithResource("resourceclaimtemplates"): diff --git a/staging/src/k8s.io/client-go/informers/resource/v1alpha3/interface.go b/staging/src/k8s.io/client-go/informers/resource/v1alpha3/interface.go index 481a7de4518..356c46179df 100644 --- a/staging/src/k8s.io/client-go/informers/resource/v1alpha3/interface.go +++ b/staging/src/k8s.io/client-go/informers/resource/v1alpha3/interface.go @@ -26,8 +26,6 @@ import ( type Interface interface { // DeviceClasses returns a DeviceClassInformer. DeviceClasses() DeviceClassInformer - // PodSchedulingContexts returns a PodSchedulingContextInformer. - PodSchedulingContexts() PodSchedulingContextInformer // ResourceClaims returns a ResourceClaimInformer. ResourceClaims() ResourceClaimInformer // ResourceClaimTemplates returns a ResourceClaimTemplateInformer. @@ -52,11 +50,6 @@ func (v *version) DeviceClasses() DeviceClassInformer { return &deviceClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } -// PodSchedulingContexts returns a PodSchedulingContextInformer. -func (v *version) PodSchedulingContexts() PodSchedulingContextInformer { - return &podSchedulingContextInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} -} - // ResourceClaims returns a ResourceClaimInformer. func (v *version) ResourceClaims() ResourceClaimInformer { return &resourceClaimInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} diff --git a/staging/src/k8s.io/client-go/informers/resource/v1alpha3/podschedulingcontext.go b/staging/src/k8s.io/client-go/informers/resource/v1alpha3/podschedulingcontext.go deleted file mode 100644 index d127cc608e4..00000000000 --- a/staging/src/k8s.io/client-go/informers/resource/v1alpha3/podschedulingcontext.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1alpha3 - -import ( - context "context" - time "time" - - apiresourcev1alpha3 "k8s.io/api/resource/v1alpha3" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - internalinterfaces "k8s.io/client-go/informers/internalinterfaces" - kubernetes "k8s.io/client-go/kubernetes" - resourcev1alpha3 "k8s.io/client-go/listers/resource/v1alpha3" - cache "k8s.io/client-go/tools/cache" -) - -// PodSchedulingContextInformer provides access to a shared informer and lister for -// PodSchedulingContexts. 
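With the PodSchedulingContexts informer deleted, the shared informer factory for resource.k8s.io/v1alpha3 exposes only DeviceClasses, ResourceClaims and ResourceClaimTemplates. A short sketch of consuming the remaining ResourceClaim informer through the factory; the resync period and the event-handler body are illustrative placeholders.

package example

import (
	"time"

	resourceapi "k8s.io/api/resource/v1alpha3"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// watchClaims wires up the ResourceClaim informer that remains after this change;
// there is no Resource().V1alpha3().PodSchedulingContexts() accessor anymore.
func watchClaims(client kubernetes.Interface, stopCh <-chan struct{}) error {
	factory := informers.NewSharedInformerFactory(client, 10*time.Minute)
	claimInformer := factory.Resource().V1alpha3().ResourceClaims().Informer()
	_, err := claimInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			claim := obj.(*resourceapi.ResourceClaim)
			_ = claim // e.g. enqueue claim.Namespace + "/" + claim.Name
		},
	})
	if err != nil {
		return err
	}
	factory.Start(stopCh)
	factory.WaitForCacheSync(stopCh)
	return nil
}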
-type PodSchedulingContextInformer interface { - Informer() cache.SharedIndexInformer - Lister() resourcev1alpha3.PodSchedulingContextLister -} - -type podSchedulingContextInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc - namespace string -} - -// NewPodSchedulingContextInformer constructs a new informer for PodSchedulingContext type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewPodSchedulingContextInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredPodSchedulingContextInformer(client, namespace, resyncPeriod, indexers, nil) -} - -// NewFilteredPodSchedulingContextInformer constructs a new informer for PodSchedulingContext type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewFilteredPodSchedulingContextInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.ResourceV1alpha3().PodSchedulingContexts(namespace).List(context.TODO(), options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.ResourceV1alpha3().PodSchedulingContexts(namespace).Watch(context.TODO(), options) - }, - }, - &apiresourcev1alpha3.PodSchedulingContext{}, - resyncPeriod, - indexers, - ) -} - -func (f *podSchedulingContextInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredPodSchedulingContextInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *podSchedulingContextInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&apiresourcev1alpha3.PodSchedulingContext{}, f.defaultInformer) -} - -func (f *podSchedulingContextInformer) Lister() resourcev1alpha3.PodSchedulingContextLister { - return resourcev1alpha3.NewPodSchedulingContextLister(f.Informer().GetIndexer()) -} diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_podschedulingcontext.go b/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_podschedulingcontext.go deleted file mode 100644 index 9cc72342147..00000000000 --- a/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_podschedulingcontext.go +++ /dev/null @@ -1,197 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - context "context" - json "encoding/json" - fmt "fmt" - - v1alpha3 "k8s.io/api/resource/v1alpha3" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - resourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3" - testing "k8s.io/client-go/testing" -) - -// FakePodSchedulingContexts implements PodSchedulingContextInterface -type FakePodSchedulingContexts struct { - Fake *FakeResourceV1alpha3 - ns string -} - -var podschedulingcontextsResource = v1alpha3.SchemeGroupVersion.WithResource("podschedulingcontexts") - -var podschedulingcontextsKind = v1alpha3.SchemeGroupVersion.WithKind("PodSchedulingContext") - -// Get takes name of the podSchedulingContext, and returns the corresponding podSchedulingContext object, and an error if there is any. -func (c *FakePodSchedulingContexts) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha3.PodSchedulingContext, err error) { - emptyResult := &v1alpha3.PodSchedulingContext{} - obj, err := c.Fake. - Invokes(testing.NewGetActionWithOptions(podschedulingcontextsResource, c.ns, name, options), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha3.PodSchedulingContext), err -} - -// List takes label and field selectors, and returns the list of PodSchedulingContexts that match those selectors. -func (c *FakePodSchedulingContexts) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha3.PodSchedulingContextList, err error) { - emptyResult := &v1alpha3.PodSchedulingContextList{} - obj, err := c.Fake. - Invokes(testing.NewListActionWithOptions(podschedulingcontextsResource, podschedulingcontextsKind, c.ns, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &v1alpha3.PodSchedulingContextList{ListMeta: obj.(*v1alpha3.PodSchedulingContextList).ListMeta} - for _, item := range obj.(*v1alpha3.PodSchedulingContextList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested podSchedulingContexts. -func (c *FakePodSchedulingContexts) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewWatchActionWithOptions(podschedulingcontextsResource, c.ns, opts)) - -} - -// Create takes the representation of a podSchedulingContext and creates it. Returns the server's representation of the podSchedulingContext, and an error, if there is any. -func (c *FakePodSchedulingContexts) Create(ctx context.Context, podSchedulingContext *v1alpha3.PodSchedulingContext, opts v1.CreateOptions) (result *v1alpha3.PodSchedulingContext, err error) { - emptyResult := &v1alpha3.PodSchedulingContext{} - obj, err := c.Fake. 
- Invokes(testing.NewCreateActionWithOptions(podschedulingcontextsResource, c.ns, podSchedulingContext, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha3.PodSchedulingContext), err -} - -// Update takes the representation of a podSchedulingContext and updates it. Returns the server's representation of the podSchedulingContext, and an error, if there is any. -func (c *FakePodSchedulingContexts) Update(ctx context.Context, podSchedulingContext *v1alpha3.PodSchedulingContext, opts v1.UpdateOptions) (result *v1alpha3.PodSchedulingContext, err error) { - emptyResult := &v1alpha3.PodSchedulingContext{} - obj, err := c.Fake. - Invokes(testing.NewUpdateActionWithOptions(podschedulingcontextsResource, c.ns, podSchedulingContext, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha3.PodSchedulingContext), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakePodSchedulingContexts) UpdateStatus(ctx context.Context, podSchedulingContext *v1alpha3.PodSchedulingContext, opts v1.UpdateOptions) (result *v1alpha3.PodSchedulingContext, err error) { - emptyResult := &v1alpha3.PodSchedulingContext{} - obj, err := c.Fake. - Invokes(testing.NewUpdateSubresourceActionWithOptions(podschedulingcontextsResource, "status", c.ns, podSchedulingContext, opts), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha3.PodSchedulingContext), err -} - -// Delete takes name of the podSchedulingContext and deletes it. Returns an error if one occurs. -func (c *FakePodSchedulingContexts) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewDeleteActionWithOptions(podschedulingcontextsResource, c.ns, name, opts), &v1alpha3.PodSchedulingContext{}) - - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakePodSchedulingContexts) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionActionWithOptions(podschedulingcontextsResource, c.ns, opts, listOpts) - - _, err := c.Fake.Invokes(action, &v1alpha3.PodSchedulingContextList{}) - return err -} - -// Patch applies the patch and returns the patched podSchedulingContext. -func (c *FakePodSchedulingContexts) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha3.PodSchedulingContext, err error) { - emptyResult := &v1alpha3.PodSchedulingContext{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(podschedulingcontextsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha3.PodSchedulingContext), err -} - -// Apply takes the given apply declarative configuration, applies it and returns the applied podSchedulingContext. 
-func (c *FakePodSchedulingContexts) Apply(ctx context.Context, podSchedulingContext *resourcev1alpha3.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.PodSchedulingContext, err error) { - if podSchedulingContext == nil { - return nil, fmt.Errorf("podSchedulingContext provided to Apply must not be nil") - } - data, err := json.Marshal(podSchedulingContext) - if err != nil { - return nil, err - } - name := podSchedulingContext.Name - if name == nil { - return nil, fmt.Errorf("podSchedulingContext.Name must be provided to Apply") - } - emptyResult := &v1alpha3.PodSchedulingContext{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(podschedulingcontextsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions()), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha3.PodSchedulingContext), err -} - -// ApplyStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). -func (c *FakePodSchedulingContexts) ApplyStatus(ctx context.Context, podSchedulingContext *resourcev1alpha3.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha3.PodSchedulingContext, err error) { - if podSchedulingContext == nil { - return nil, fmt.Errorf("podSchedulingContext provided to Apply must not be nil") - } - data, err := json.Marshal(podSchedulingContext) - if err != nil { - return nil, err - } - name := podSchedulingContext.Name - if name == nil { - return nil, fmt.Errorf("podSchedulingContext.Name must be provided to Apply") - } - emptyResult := &v1alpha3.PodSchedulingContext{} - obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceActionWithOptions(podschedulingcontextsResource, c.ns, *name, types.ApplyPatchType, data, opts.ToPatchOptions(), "status"), emptyResult) - - if obj == nil { - return emptyResult, err - } - return obj.(*v1alpha3.PodSchedulingContext), err -} diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resource_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resource_client.go index 4523d9f09cd..172266eabb1 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resource_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/fake/fake_resource_client.go @@ -32,10 +32,6 @@ func (c *FakeResourceV1alpha3) DeviceClasses() v1alpha3.DeviceClassInterface { return &FakeDeviceClasses{c} } -func (c *FakeResourceV1alpha3) PodSchedulingContexts(namespace string) v1alpha3.PodSchedulingContextInterface { - return &FakePodSchedulingContexts{c, namespace} -} - func (c *FakeResourceV1alpha3) ResourceClaims(namespace string) v1alpha3.ResourceClaimInterface { return &FakeResourceClaims{c, namespace} } diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/generated_expansion.go b/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/generated_expansion.go index 747e564b769..cd8862ea842 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/generated_expansion.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/generated_expansion.go @@ -20,8 +20,6 @@ package v1alpha3 type DeviceClassExpansion interface{} -type PodSchedulingContextExpansion interface{} - type ResourceClaimExpansion interface{} type ResourceClaimTemplateExpansion interface{} diff --git 
a/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/podschedulingcontext.go b/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/podschedulingcontext.go deleted file mode 100644 index 7bfb1f934ff..00000000000 --- a/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/podschedulingcontext.go +++ /dev/null @@ -1,73 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha3 - -import ( - context "context" - - resourcev1alpha3 "k8s.io/api/resource/v1alpha3" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - applyconfigurationsresourcev1alpha3 "k8s.io/client-go/applyconfigurations/resource/v1alpha3" - gentype "k8s.io/client-go/gentype" - scheme "k8s.io/client-go/kubernetes/scheme" -) - -// PodSchedulingContextsGetter has a method to return a PodSchedulingContextInterface. -// A group's client should implement this interface. -type PodSchedulingContextsGetter interface { - PodSchedulingContexts(namespace string) PodSchedulingContextInterface -} - -// PodSchedulingContextInterface has methods to work with PodSchedulingContext resources. -type PodSchedulingContextInterface interface { - Create(ctx context.Context, podSchedulingContext *resourcev1alpha3.PodSchedulingContext, opts v1.CreateOptions) (*resourcev1alpha3.PodSchedulingContext, error) - Update(ctx context.Context, podSchedulingContext *resourcev1alpha3.PodSchedulingContext, opts v1.UpdateOptions) (*resourcev1alpha3.PodSchedulingContext, error) - // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - UpdateStatus(ctx context.Context, podSchedulingContext *resourcev1alpha3.PodSchedulingContext, opts v1.UpdateOptions) (*resourcev1alpha3.PodSchedulingContext, error) - Delete(ctx context.Context, name string, opts v1.DeleteOptions) error - DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error - Get(ctx context.Context, name string, opts v1.GetOptions) (*resourcev1alpha3.PodSchedulingContext, error) - List(ctx context.Context, opts v1.ListOptions) (*resourcev1alpha3.PodSchedulingContextList, error) - Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) - Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *resourcev1alpha3.PodSchedulingContext, err error) - Apply(ctx context.Context, podSchedulingContext *applyconfigurationsresourcev1alpha3.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *resourcev1alpha3.PodSchedulingContext, err error) - // Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). 
- ApplyStatus(ctx context.Context, podSchedulingContext *applyconfigurationsresourcev1alpha3.PodSchedulingContextApplyConfiguration, opts v1.ApplyOptions) (result *resourcev1alpha3.PodSchedulingContext, err error) - PodSchedulingContextExpansion -} - -// podSchedulingContexts implements PodSchedulingContextInterface -type podSchedulingContexts struct { - *gentype.ClientWithListAndApply[*resourcev1alpha3.PodSchedulingContext, *resourcev1alpha3.PodSchedulingContextList, *applyconfigurationsresourcev1alpha3.PodSchedulingContextApplyConfiguration] -} - -// newPodSchedulingContexts returns a PodSchedulingContexts -func newPodSchedulingContexts(c *ResourceV1alpha3Client, namespace string) *podSchedulingContexts { - return &podSchedulingContexts{ - gentype.NewClientWithListAndApply[*resourcev1alpha3.PodSchedulingContext, *resourcev1alpha3.PodSchedulingContextList, *applyconfigurationsresourcev1alpha3.PodSchedulingContextApplyConfiguration]( - "podschedulingcontexts", - c.RESTClient(), - scheme.ParameterCodec, - namespace, - func() *resourcev1alpha3.PodSchedulingContext { return &resourcev1alpha3.PodSchedulingContext{} }, - func() *resourcev1alpha3.PodSchedulingContextList { return &resourcev1alpha3.PodSchedulingContextList{} }), - } -} diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resource_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resource_client.go index 389911a6afe..46e54f9c1b8 100644 --- a/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resource_client.go +++ b/staging/src/k8s.io/client-go/kubernetes/typed/resource/v1alpha3/resource_client.go @@ -29,7 +29,6 @@ import ( type ResourceV1alpha3Interface interface { RESTClient() rest.Interface DeviceClassesGetter - PodSchedulingContextsGetter ResourceClaimsGetter ResourceClaimTemplatesGetter ResourceSlicesGetter @@ -44,10 +43,6 @@ func (c *ResourceV1alpha3Client) DeviceClasses() DeviceClassInterface { return newDeviceClasses(c) } -func (c *ResourceV1alpha3Client) PodSchedulingContexts(namespace string) PodSchedulingContextInterface { - return newPodSchedulingContexts(c, namespace) -} - func (c *ResourceV1alpha3Client) ResourceClaims(namespace string) ResourceClaimInterface { return newResourceClaims(c, namespace) } diff --git a/staging/src/k8s.io/client-go/listers/resource/v1alpha3/expansion_generated.go b/staging/src/k8s.io/client-go/listers/resource/v1alpha3/expansion_generated.go index b6642f635f9..f626c92837e 100644 --- a/staging/src/k8s.io/client-go/listers/resource/v1alpha3/expansion_generated.go +++ b/staging/src/k8s.io/client-go/listers/resource/v1alpha3/expansion_generated.go @@ -22,14 +22,6 @@ package v1alpha3 // DeviceClassLister. type DeviceClassListerExpansion interface{} -// PodSchedulingContextListerExpansion allows custom methods to be added to -// PodSchedulingContextLister. -type PodSchedulingContextListerExpansion interface{} - -// PodSchedulingContextNamespaceListerExpansion allows custom methods to be added to -// PodSchedulingContextNamespaceLister. -type PodSchedulingContextNamespaceListerExpansion interface{} - // ResourceClaimListerExpansion allows custom methods to be added to // ResourceClaimLister. 
type ResourceClaimListerExpansion interface{} diff --git a/staging/src/k8s.io/client-go/listers/resource/v1alpha3/podschedulingcontext.go b/staging/src/k8s.io/client-go/listers/resource/v1alpha3/podschedulingcontext.go deleted file mode 100644 index 237b85fde08..00000000000 --- a/staging/src/k8s.io/client-go/listers/resource/v1alpha3/podschedulingcontext.go +++ /dev/null @@ -1,70 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by lister-gen. DO NOT EDIT. - -package v1alpha3 - -import ( - resourcev1alpha3 "k8s.io/api/resource/v1alpha3" - labels "k8s.io/apimachinery/pkg/labels" - listers "k8s.io/client-go/listers" - cache "k8s.io/client-go/tools/cache" -) - -// PodSchedulingContextLister helps list PodSchedulingContexts. -// All objects returned here must be treated as read-only. -type PodSchedulingContextLister interface { - // List lists all PodSchedulingContexts in the indexer. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*resourcev1alpha3.PodSchedulingContext, err error) - // PodSchedulingContexts returns an object that can list and get PodSchedulingContexts. - PodSchedulingContexts(namespace string) PodSchedulingContextNamespaceLister - PodSchedulingContextListerExpansion -} - -// podSchedulingContextLister implements the PodSchedulingContextLister interface. -type podSchedulingContextLister struct { - listers.ResourceIndexer[*resourcev1alpha3.PodSchedulingContext] -} - -// NewPodSchedulingContextLister returns a new PodSchedulingContextLister. -func NewPodSchedulingContextLister(indexer cache.Indexer) PodSchedulingContextLister { - return &podSchedulingContextLister{listers.New[*resourcev1alpha3.PodSchedulingContext](indexer, resourcev1alpha3.Resource("podschedulingcontext"))} -} - -// PodSchedulingContexts returns an object that can list and get PodSchedulingContexts. -func (s *podSchedulingContextLister) PodSchedulingContexts(namespace string) PodSchedulingContextNamespaceLister { - return podSchedulingContextNamespaceLister{listers.NewNamespaced[*resourcev1alpha3.PodSchedulingContext](s.ResourceIndexer, namespace)} -} - -// PodSchedulingContextNamespaceLister helps list and get PodSchedulingContexts. -// All objects returned here must be treated as read-only. -type PodSchedulingContextNamespaceLister interface { - // List lists all PodSchedulingContexts in the indexer for a given namespace. - // Objects returned here must be treated as read-only. - List(selector labels.Selector) (ret []*resourcev1alpha3.PodSchedulingContext, err error) - // Get retrieves the PodSchedulingContext from the indexer for a given namespace and name. - // Objects returned here must be treated as read-only. - Get(name string) (*resourcev1alpha3.PodSchedulingContext, error) - PodSchedulingContextNamespaceListerExpansion -} - -// podSchedulingContextNamespaceLister implements the PodSchedulingContextNamespaceLister -// interface. 
-type podSchedulingContextNamespaceLister struct { - listers.ResourceIndexer[*resourcev1alpha3.PodSchedulingContext] -} diff --git a/staging/src/k8s.io/dynamic-resource-allocation/controller/controller.go b/staging/src/k8s.io/dynamic-resource-allocation/controller/controller.go deleted file mode 100644 index 04719959fd8..00000000000 --- a/staging/src/k8s.io/dynamic-resource-allocation/controller/controller.go +++ /dev/null @@ -1,880 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "strings" - "time" - - "github.com/go-logr/logr" - "github.com/google/go-cmp/cmp" - - v1 "k8s.io/api/core/v1" - resourceapi "k8s.io/api/resource/v1alpha3" - k8serrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/informers" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/kubernetes/scheme" - corev1types "k8s.io/client-go/kubernetes/typed/core/v1" - resourcelisters "k8s.io/client-go/listers/resource/v1alpha3" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/record" - "k8s.io/client-go/util/workqueue" - "k8s.io/dynamic-resource-allocation/resourceclaim" - "k8s.io/klog/v2" -) - -// Controller watches ResourceClaims and triggers allocation and deallocation -// as needed. -type Controller interface { - // Run starts the controller. - Run(workers int) - - // SetReservedFor can be used to disable adding the Pod which - // triggered allocation to the status.reservedFor. Normally, - // DRA drivers should always do that, so it's the default. - // But nothing in the protocol between the scheduler and - // a driver requires it, so at least for testing the control - // plane components it is useful to disable it. - SetReservedFor(enabled bool) -} - -// Driver provides the actual allocation and deallocation operations. -type Driver interface { - // Allocate is called when all same-driver ResourceClaims for Pod are ready - // to be allocated. The selectedNode is empty for ResourceClaims with immediate - // allocation, in which case the resource driver decides itself where - // to allocate. If there is already an on-going allocation, the driver - // may finish it and ignore the new parameters or abort the on-going - // allocation and try again with the new parameters. - // - // Parameters have been retrieved earlier. - // - // Driver must set the result of allocation for every claim in "claims" - // parameter items. If there is no error and allocation - // is successful - claims[i].Allocation field should be set. In case of - // particular claim allocation failure - respective item's claims[i].Error field - // should be set and claims[i].Allocation will be ignored. - // - // If selectedNode is set, the driver must attempt to allocate for that - // node. If that is not possible, it must return an error. 
The - // controller will call UnsuitableNodes and pass the new information to - // the scheduler, which will then lead to selecting a different node - // if the current one is not suitable. - // - // The Claim, ClaimParameters, Class, ClassParameters fields of "claims" parameter - // items are read-only and must not be modified. This call must be idempotent. - Allocate(ctx context.Context, claims []*ClaimAllocation, selectedNode string) - - // Deallocate gets called when a ResourceClaim is ready to be - // freed. - // - // The claim is read-only and must not be modified. This call must be - // idempotent. In particular it must not return an error when the claim - // is currently not allocated. - // - // Deallocate may be called when a previous allocation got - // interrupted. Deallocate must then stop any on-going allocation - // activity and free resources before returning without an error. - Deallocate(ctx context.Context, claim *resourceapi.ResourceClaim) error - - // UnsuitableNodes checks all pending claims with delayed allocation - // for a pod. All claims are ready for allocation by the driver - // and parameters have been retrieved. - // - // The driver may consider each claim in isolation, but it's better - // to mark nodes as unsuitable for all claims, if all claims - // cannot be allocated for it (for example, two GPUs requested but - // the node only has one). - // - // The potentialNodes slice contains all potential nodes selected - // by the scheduler plus the selected node. The response must - // not contain any other nodes. Implementations do not have to - // care about size limits in the PodSchedulingContext status, the - // caller will handle that. - // - // The result of the check is in ClaimAllocation.UnsuitableNodes. - // An error indicates that the entire check must be repeated. - UnsuitableNodes(ctx context.Context, pod *v1.Pod, claims []*ClaimAllocation, potentialNodes []string) error -} - -// ClaimAllocation represents information about one particular -// pod.Spec.ResourceClaim entry. -type ClaimAllocation struct { - PodClaimName string - Claim *resourceapi.ResourceClaim - DeviceClasses map[string]*resourceapi.DeviceClass - - // UnsuitableNodes needs to be filled in by the driver when - // Driver.UnsuitableNodes gets called. - UnsuitableNodes []string - - // Driver must populate this field with resources that were - // allocated for the claim in case of successful allocation. - Allocation *resourceapi.AllocationResult - // In case of error allocating particular claim, driver must - // populate this field. - Error error -} - -type controller struct { - ctx context.Context - logger klog.Logger - name string - finalizer string - driver Driver - setReservedFor bool - kubeClient kubernetes.Interface - queue workqueue.TypedRateLimitingInterface[string] - eventRecorder record.EventRecorder - dcLister resourcelisters.DeviceClassLister - claimCache cache.MutationCache - schedulingCtxLister resourcelisters.PodSchedulingContextLister - synced []cache.InformerSynced -} - -// TODO: make it configurable -var recheckDelay = 30 * time.Second - -// New creates a new controller. 
-func New( - ctx context.Context, - name string, - driver Driver, - kubeClient kubernetes.Interface, - informerFactory informers.SharedInformerFactory) Controller { - logger := klog.LoggerWithName(klog.FromContext(ctx), "resource controller") - dcInformer := informerFactory.Resource().V1alpha3().DeviceClasses() - claimInformer := informerFactory.Resource().V1alpha3().ResourceClaims() - schedulingCtxInformer := informerFactory.Resource().V1alpha3().PodSchedulingContexts() - - eventBroadcaster := record.NewBroadcaster(record.WithContext(ctx)) - go func() { - <-ctx.Done() - eventBroadcaster.Shutdown() - }() - // TODO: use contextual logging in eventBroadcaster once it - // supports it. There is a StartStructuredLogging API, but it - // uses the global klog, which is worse than redirecting an unstructured - // string into our logger, in particular during testing. - eventBroadcaster.StartLogging(func(format string, args ...interface{}) { - helper, logger := logger.WithCallStackHelper() - helper() - logger.V(2).Info(fmt.Sprintf(format, args...)) - }) - eventBroadcaster.StartRecordingToSink(&corev1types.EventSinkImpl{Interface: kubeClient.CoreV1().Events(v1.NamespaceAll)}) - eventRecorder := eventBroadcaster.NewRecorder(scheme.Scheme, - v1.EventSource{Component: fmt.Sprintf("resource driver %s", name)}) - - // The work queue contains either keys for claims or PodSchedulingContext objects. - queue := workqueue.NewTypedRateLimitingQueueWithConfig( - workqueue.DefaultTypedControllerRateLimiter[string](), - workqueue.TypedRateLimitingQueueConfig[string]{Name: fmt.Sprintf("%s-queue", name)}, - ) - - // The mutation cache acts as an additional layer for the informer - // cache and after an update made by the controller returns a more - // recent copy until the informer catches up. 
- claimInformerCache := claimInformer.Informer().GetIndexer() - claimCache := cache.NewIntegerResourceVersionMutationCache(claimInformerCache, claimInformerCache, 60*time.Second, - false /* only cache updated claims that exist in the informer cache */) - - ctrl := &controller{ - ctx: ctx, - logger: logger, - name: name, - finalizer: name + "/deletion-protection", - driver: driver, - setReservedFor: true, - kubeClient: kubeClient, - dcLister: dcInformer.Lister(), - claimCache: claimCache, - schedulingCtxLister: schedulingCtxInformer.Lister(), - queue: queue, - eventRecorder: eventRecorder, - synced: []cache.InformerSynced{ - dcInformer.Informer().HasSynced, - claimInformer.Informer().HasSynced, - schedulingCtxInformer.Informer().HasSynced, - }, - } - - loggerV6 := logger.V(6) - if loggerV6.Enabled() { - resourceClaimLogger := klog.LoggerWithValues(loggerV6, "type", "ResourceClaim") - _, _ = claimInformer.Informer().AddEventHandler(resourceEventHandlerFuncs(&resourceClaimLogger, ctrl)) - schedulingCtxLogger := klog.LoggerWithValues(loggerV6, "type", "PodSchedulingContext") - _, _ = schedulingCtxInformer.Informer().AddEventHandler(resourceEventHandlerFuncs(&schedulingCtxLogger, ctrl)) - } else { - _, _ = claimInformer.Informer().AddEventHandler(resourceEventHandlerFuncs(nil, ctrl)) - _, _ = schedulingCtxInformer.Informer().AddEventHandler(resourceEventHandlerFuncs(nil, ctrl)) - } - - return ctrl -} - -func (ctrl *controller) SetReservedFor(enabled bool) { - ctrl.setReservedFor = enabled -} - -func resourceEventHandlerFuncs(logger *klog.Logger, ctrl *controller) cache.ResourceEventHandlerFuncs { - return cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { - ctrl.add(logger, obj) - }, - UpdateFunc: func(oldObj, newObj interface{}) { - ctrl.update(logger, oldObj, newObj) - }, - DeleteFunc: ctrl.delete, - } -} - -const ( - claimKeyPrefix = "claim:" - schedulingCtxKeyPrefix = "schedulingCtx:" -) - -func (ctrl *controller) add(loggerV6 *klog.Logger, obj interface{}) { - var logger klog.Logger - if loggerV6 != nil { - logger = loggerV6.WithValues("object", prettyPrint(obj)) - } else { - logger = ctrl.logger.V(5) - } - ctrl.addNewOrUpdated(logger, "Adding new work item", obj) -} - -func (ctrl *controller) update(loggerV6 *klog.Logger, oldObj, newObj interface{}) { - var logger klog.Logger - if loggerV6 != nil { - diff := cmp.Diff(oldObj, newObj) - logger = loggerV6.WithValues("object", prettyPrint(newObj), "diff", diff) - } else { - logger = ctrl.logger.V(5) - } - ctrl.addNewOrUpdated(logger, "Adding updated work item", newObj) -} - -func (ctrl *controller) addNewOrUpdated(loggerV klog.Logger, msg string, obj interface{}) { - objKey, err := getKey(obj) - if err != nil { - loggerV.Error(err, "Failed to get key", "obj", obj) - return - } - loggerV.Info(msg, "key", objKey) - ctrl.queue.Add(objKey) -} - -func (ctrl *controller) delete(obj interface{}) { - objKey, err := getKey(obj) - if err != nil { - return - } - ctrl.logger.V(5).Info("Removing deleted work item", "key", objKey) - ctrl.queue.Forget(objKey) -} - -func getKey(obj interface{}) (string, error) { - objKey, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) - if err != nil { - return "", err - } - prefix := "" - switch obj.(type) { - case *resourceapi.ResourceClaim: - prefix = claimKeyPrefix - case *resourceapi.PodSchedulingContext: - prefix = schedulingCtxKeyPrefix - default: - return "", fmt.Errorf("unexpected object: %T", obj) - } - - return prefix + objKey, nil -} - -// Run starts the controller. 
-func (ctrl *controller) Run(workers int) { - defer ctrl.queue.ShutDown() - - ctrl.logger.Info("Starting", "driver", ctrl.name) - defer ctrl.logger.Info("Shutting down", "driver", ctrl.name) - - stopCh := ctrl.ctx.Done() - - if !cache.WaitForCacheSync(stopCh, ctrl.synced...) { - ctrl.logger.Error(nil, "Cannot sync caches") - return - } - - for i := 0; i < workers; i++ { - go wait.Until(func() { ctrl.sync(ctrl.queue) }, 0, stopCh) - } - - <-stopCh -} - -// errRequeue is a special error instance that functions can return -// to request silent requeueing (not logged as error, no event). -// Uses exponential backoff. -var errRequeue = errors.New("requeue") - -// errPeriodic is a special error instance that functions can return -// to request silent retrying at a fixed rate. -var errPeriodic = errors.New("periodic") - -// sync is the main worker. -func (ctrl *controller) sync(queue workqueue.TypedRateLimitingInterface[string]) { - key, quit := queue.Get() - if quit { - return - } - defer queue.Done(key) - - logger := klog.LoggerWithValues(ctrl.logger, "key", key) - ctx := klog.NewContext(ctrl.ctx, logger) - logger.V(4).Info("processing") - obj, err := ctrl.syncKey(ctx, key) - switch err { - case nil: - logger.V(5).Info("completed") - queue.Forget(key) - case errRequeue: - logger.V(5).Info("requeue") - queue.AddRateLimited(key) - case errPeriodic: - logger.V(5).Info("recheck periodically") - queue.AddAfter(key, recheckDelay) - default: - logger.Error(err, "processing failed") - if obj != nil { - // TODO: We don't know here *what* failed. Determine based on error? - ctrl.eventRecorder.Event(obj, v1.EventTypeWarning, "Failed", err.Error()) - } - queue.AddRateLimited(key) - } -} - -// syncKey looks up a ResourceClaim by its key and processes it. -func (ctrl *controller) syncKey(ctx context.Context, key string) (obj runtime.Object, finalErr error) { - sep := strings.Index(key, ":") - if sep < 0 { - return nil, fmt.Errorf("unexpected key: %s", key) - } - prefix, object := key[0:sep+1], key[sep+1:] - namespace, name, err := cache.SplitMetaNamespaceKey(object) - if err != nil { - return nil, err - } - - switch prefix { - case claimKeyPrefix: - claim, err := ctrl.getCachedClaim(ctx, object) - if claim == nil || err != nil { - return nil, err - } - obj, finalErr = claim, ctrl.syncClaim(ctx, claim) - case schedulingCtxKeyPrefix: - schedulingCtx, err := ctrl.schedulingCtxLister.PodSchedulingContexts(namespace).Get(name) - if err != nil { - if k8serrors.IsNotFound(err) { - klog.FromContext(ctx).V(5).Info("PodSchedulingContext was deleted, no need to process it") - return nil, nil - } - return nil, err - } - obj, finalErr = schedulingCtx, ctrl.syncPodSchedulingContexts(ctx, schedulingCtx) - } - return -} - -func (ctrl *controller) getCachedClaim(ctx context.Context, key string) (*resourceapi.ResourceClaim, error) { - claimObj, exists, err := ctrl.claimCache.GetByKey(key) - if !exists || k8serrors.IsNotFound(err) { - klog.FromContext(ctx).V(5).Info("ResourceClaim not found, no need to process it") - return nil, nil - } - if err != nil { - return nil, err - } - claim, ok := claimObj.(*resourceapi.ResourceClaim) - if !ok { - return nil, fmt.Errorf("internal error: got %T instead of *resourceapi.ResourceClaim from claim cache", claimObj) - } - return claim, nil -} - -// syncClaim determines which next action may be needed for a ResourceClaim -// and does it. 
-func (ctrl *controller) syncClaim(ctx context.Context, claim *resourceapi.ResourceClaim) error { - var err error - logger := klog.FromContext(ctx) - - if len(claim.Status.ReservedFor) > 0 { - // In use. Nothing that we can do for it now. - if loggerV6 := logger.V(6); loggerV6.Enabled() { - loggerV6.Info("ResourceClaim in use", "reservedFor", claim.Status.ReservedFor) - } else { - logger.V(5).Info("ResourceClaim in use") - } - return nil - } - - if claim.DeletionTimestamp != nil || - claim.Status.DeallocationRequested { - // Ready for deallocation. We might have our finalizer set. The - // finalizer is specific to the driver, therefore we know that - // this claim is "ours" when the finalizer is set. - hasFinalizer := ctrl.hasFinalizer(claim) - logger.V(5).Info("ResourceClaim ready for deallocation", "deallocationRequested", claim.Status.DeallocationRequested, "deletionTimestamp", claim.DeletionTimestamp, "allocated", claim.Status.Allocation != nil, "hasFinalizer", hasFinalizer) - if hasFinalizer { - claim = claim.DeepCopy() - if claim.Status.Allocation != nil { - // Allocation was completed. Deallocate before proceeding. - if err := ctrl.driver.Deallocate(ctx, claim); err != nil { - return fmt.Errorf("deallocate: %w", err) - } - claim.Status.Allocation = nil - claim.Status.DeallocationRequested = false - claim, err = ctrl.kubeClient.ResourceV1alpha3().ResourceClaims(claim.Namespace).UpdateStatus(ctx, claim, metav1.UpdateOptions{}) - if err != nil { - return fmt.Errorf("remove allocation: %w", err) - } - ctrl.claimCache.Mutation(claim) - } else { - // Ensure that there is no on-going allocation. - if err := ctrl.driver.Deallocate(ctx, claim); err != nil { - return fmt.Errorf("stop allocation: %w", err) - } - } - - if claim.Status.DeallocationRequested { - // Still need to remove it. - claim.Status.DeallocationRequested = false - claim, err = ctrl.kubeClient.ResourceV1alpha3().ResourceClaims(claim.Namespace).UpdateStatus(ctx, claim, metav1.UpdateOptions{}) - if err != nil { - return fmt.Errorf("remove deallocation: %w", err) - } - ctrl.claimCache.Mutation(claim) - } - - claim.Finalizers = ctrl.removeFinalizer(claim.Finalizers) - claim, err = ctrl.kubeClient.ResourceV1alpha3().ResourceClaims(claim.Namespace).Update(ctx, claim, metav1.UpdateOptions{}) - if err != nil { - return fmt.Errorf("remove finalizer: %w", err) - } - ctrl.claimCache.Mutation(claim) - } - - // Nothing further to do. The apiserver should remove it shortly. - return nil - - } - - if claim.Status.Allocation != nil { - logger.V(5).Info("ResourceClaim is allocated") - return nil - } - logger.V(5).Info("ResourceClaim waiting for first consumer") - return nil -} - -// allocateClaims filters list of claims, keeps those needing allocation and asks driver to do the allocations. -// Driver is supposed to write the AllocationResult and Error field into argument claims slice. -func (ctrl *controller) allocateClaims(ctx context.Context, claims []*ClaimAllocation, selectedNode string, selectedUser *resourceapi.ResourceClaimConsumerReference) { - logger := klog.FromContext(ctx) - - needAllocation := make([]*ClaimAllocation, 0, len(claims)) - for _, claim := range claims { - if claim.Claim.Status.Allocation != nil { - // This can happen when two PodSchedulingContext objects trigger - // allocation attempts (first one wins) or when we see the - // update of the PodSchedulingContext object. 
- logger.V(5).Info("Claim is already allocated, skipping allocation", "claim", claim.PodClaimName) - continue - } - needAllocation = append(needAllocation, claim) - } - - if len(needAllocation) == 0 { - logger.V(5).Info("No claims need allocation, nothing to do") - return - } - - // Keep separately claims that succeeded adding finalizers, - // they will be sent for Allocate to the driver. - claimsWithFinalizers := make([]*ClaimAllocation, 0, len(needAllocation)) - for _, claimAllocation := range needAllocation { - if !ctrl.hasFinalizer(claimAllocation.Claim) { - claim := claimAllocation.Claim.DeepCopy() - // Set finalizer before doing anything. We continue with the updated claim. - logger.V(5).Info("Adding finalizer", "claim", claim.Name) - claim.Finalizers = append(claim.Finalizers, ctrl.finalizer) - var err error - claim, err = ctrl.kubeClient.ResourceV1alpha3().ResourceClaims(claim.Namespace).Update(ctx, claim, metav1.UpdateOptions{}) - if err != nil { - logger.Error(err, "add finalizer", "claim", claim.Name) - claimAllocation.Error = fmt.Errorf("add finalizer: %w", err) - // Do not save claim to ask for Allocate from Driver. - continue - } - ctrl.claimCache.Mutation(claim) - claimAllocation.Claim = claim - } - claimsWithFinalizers = append(claimsWithFinalizers, claimAllocation) - } - - // Beyond here we only operate with claimsWithFinalizers because those are ready for allocation. - - logger.V(5).Info("Allocating") - ctrl.driver.Allocate(ctx, claimsWithFinalizers, selectedNode) - - // Update successfully allocated claims' status with allocation info. - for _, claimAllocation := range claimsWithFinalizers { - if claimAllocation.Error != nil { - logger.Error(claimAllocation.Error, "allocating claim", "claim", claimAllocation.Claim.Name) - continue - } - if claimAllocation.Allocation == nil { - logger.Error(nil, "allocating claim: missing allocation from driver", "claim", claimAllocation.Claim.Name) - claimAllocation.Error = fmt.Errorf("allocating claim: missing allocation from driver") - // Do not update this claim with allocation, it might succeed next time. - continue - } - logger.V(5).Info("successfully allocated", "claim", klog.KObj(claimAllocation.Claim)) - claim := claimAllocation.Claim.DeepCopy() - claim.Status.Allocation = claimAllocation.Allocation - claim.Status.Allocation.Controller = ctrl.name - if selectedUser != nil && ctrl.setReservedFor { - claim.Status.ReservedFor = append(claim.Status.ReservedFor, *selectedUser) - } - logger.V(6).Info("Updating claim after allocation", "claim", claim) - claim, err := ctrl.kubeClient.ResourceV1alpha3().ResourceClaims(claim.Namespace).UpdateStatus(ctx, claim, metav1.UpdateOptions{}) - if err != nil { - claimAllocation.Error = fmt.Errorf("add allocation: %w", err) - continue - } - - ctrl.claimCache.Mutation(claim) - } - return -} - -func (ctrl *controller) checkPodClaim(ctx context.Context, pod *v1.Pod, podClaim v1.PodResourceClaim) (*ClaimAllocation, error) { - claimName, mustCheckOwner, err := resourceclaim.Name(pod, &podClaim) - if err != nil { - return nil, err - } - if claimName == nil { - // Nothing to do. - return nil, nil - } - key := pod.Namespace + "/" + *claimName - claim, err := ctrl.getCachedClaim(ctx, key) - if claim == nil || err != nil { - return nil, err - } - if mustCheckOwner { - if err := resourceclaim.IsForPod(pod, claim); err != nil { - return nil, err - } - } - if claim.Status.Allocation != nil { - // Already allocated, class and parameter are not needed and nothing - // need to be done for the claim either. 
- return nil, nil - } - if claim.Spec.Controller != ctrl.name { - return nil, nil - } - - // Sanity checks and preparations... - ca := &ClaimAllocation{ - PodClaimName: podClaim.Name, - Claim: claim, - DeviceClasses: make(map[string]*resourceapi.DeviceClass), - } - for _, request := range claim.Spec.Devices.Requests { - if request.DeviceClassName == "" { - // Some unknown request. Abort! - return nil, fmt.Errorf("claim %s: unknown request type in request %s", klog.KObj(claim), request.Name) - } - deviceClassName := request.DeviceClassName - class, err := ctrl.dcLister.Get(deviceClassName) - if err != nil { - return nil, fmt.Errorf("claim %s: request %s: class %s: %w", klog.KObj(claim), request.Name, deviceClassName, err) - } - ca.DeviceClasses[deviceClassName] = class - } - - return ca, nil -} - -// syncPodSchedulingContext determines which next action may be needed for a PodSchedulingContext object -// and does it. -func (ctrl *controller) syncPodSchedulingContexts(ctx context.Context, schedulingCtx *resourceapi.PodSchedulingContext) error { - logger := klog.FromContext(ctx) - - // Ignore deleted objects. - if schedulingCtx.DeletionTimestamp != nil { - logger.V(5).Info("PodSchedulingContext marked for deletion") - return nil - } - - if schedulingCtx.Spec.SelectedNode == "" && - len(schedulingCtx.Spec.PotentialNodes) == 0 { - // Nothing to do? Shouldn't occur. - logger.V(5).Info("Waiting for scheduler to set fields") - return nil - } - - // Check pod. - // TODO (?): use an informer - only useful when many (most?) pods have claims - // TODO (?): let the scheduler copy all claim names + UIDs into PodSchedulingContext - then we don't need the pod - pod, err := ctrl.kubeClient.CoreV1().Pods(schedulingCtx.Namespace).Get(ctx, schedulingCtx.Name, metav1.GetOptions{}) - if err != nil { - return err - } - if pod.DeletionTimestamp != nil { - logger.V(5).Info("Pod marked for deletion") - return nil - } - - // Still the owner? - if !metav1.IsControlledBy(schedulingCtx, pod) { - // Must be obsolete object, do nothing for it. - logger.V(5).Info("Pod not owner, PodSchedulingContext is obsolete") - return nil - } - - // Find all pending claims that are owned by us. We bail out if any of the pre-requisites - // for pod scheduling (claims exist, classes exist, parameters exist) are not met. - // The scheduler will do the same, except for checking parameters, so usually - // everything should be ready once the PodSchedulingContext object exists. - var claims claimAllocations - for _, podClaim := range pod.Spec.ResourceClaims { - delayed, err := ctrl.checkPodClaim(ctx, pod, podClaim) - if err != nil { - return fmt.Errorf("pod claim %s: %w", podClaim.Name, err) - } - if delayed == nil { - // Nothing to do for it. This can change, so keep checking. - continue - } - claims = append(claims, delayed) - } - if len(claims) == 0 { - logger.V(5).Info("Found no pending pod claims") - return errPeriodic - } - - // Check current resource availability *before* triggering the - // allocations. If we find that any of the claims cannot be allocated - // for the selected node, we don't need to try for the others either - // and shouldn't, because those allocations might have to be undone to - // pick a better node. If we don't need to allocate now, then we'll - // simply report back the gather information. - // - // We shouldn't assume that the scheduler has included the selected node - // in the list of potential nodes. Usually it does, but let's make sure - // that we check it. 
- selectedNode := schedulingCtx.Spec.SelectedNode - potentialNodes := schedulingCtx.Spec.PotentialNodes - if selectedNode != "" && !hasString(potentialNodes, selectedNode) { - potentialNodes = append(potentialNodes, selectedNode) - } - if len(schedulingCtx.Spec.PotentialNodes) > 0 { - if err := ctrl.driver.UnsuitableNodes(ctx, pod, claims, potentialNodes); err != nil { - return fmt.Errorf("checking potential nodes: %w", err) - } - } - logger.V(5).Info("pending pod claims", "claims", claims, "selectedNode", selectedNode) - if selectedNode != "" { - unsuitable := false - for _, delayed := range claims { - if hasString(delayed.UnsuitableNodes, selectedNode) { - unsuitable = true - break - } - } - - if unsuitable { - logger.V(2).Info("skipping allocation for unsuitable selected node", "node", selectedNode) - } else { - logger.V(2).Info("allocation for selected node", "node", selectedNode) - selectedUser := &resourceapi.ResourceClaimConsumerReference{ - Resource: "pods", - Name: pod.Name, - UID: pod.UID, - } - - ctrl.allocateClaims(ctx, claims, selectedNode, selectedUser) - - var allErrors []error - for _, delayed := range claims { - if delayed.Error != nil { - if strings.Contains(delayed.Error.Error(), delayed.Claim.Name) { - // Avoid adding redundant information. - allErrors = append(allErrors, delayed.Error) - } else { - // Include claim name, it's not in the underlying error. - allErrors = append(allErrors, fmt.Errorf("claim %s: %w", delayed.Claim.Name, delayed.Error)) - } - } - } - if len(allErrors) > 0 { - return errors.Join(allErrors...) - } - } - } - - // Now update unsuitable nodes. This is useful information for the scheduler even if - // we managed to allocate because we might have to undo that. - // TODO: replace with patching the array. We can do that without race conditions - // because each driver is responsible for its own entries. - modified := false - schedulingCtx = schedulingCtx.DeepCopy() - for _, delayed := range claims { - i := findClaim(schedulingCtx.Status.ResourceClaims, delayed.PodClaimName) - if i < 0 { - // Add new entry. - schedulingCtx.Status.ResourceClaims = append(schedulingCtx.Status.ResourceClaims, - resourceapi.ResourceClaimSchedulingStatus{ - Name: delayed.PodClaimName, - UnsuitableNodes: truncateNodes(delayed.UnsuitableNodes, selectedNode), - }) - modified = true - } else if stringsDiffer(schedulingCtx.Status.ResourceClaims[i].UnsuitableNodes, delayed.UnsuitableNodes) { - // Update existing entry. - schedulingCtx.Status.ResourceClaims[i].UnsuitableNodes = truncateNodes(delayed.UnsuitableNodes, selectedNode) - modified = true - } - } - if modified { - logger.V(6).Info("Updating pod scheduling with modified unsuitable nodes", "podSchedulingCtx", schedulingCtx) - if _, err := ctrl.kubeClient.ResourceV1alpha3().PodSchedulingContexts(schedulingCtx.Namespace).UpdateStatus(ctx, schedulingCtx, metav1.UpdateOptions{}); err != nil { - return fmt.Errorf("update unsuitable node status: %w", err) - } - } - - // We must keep the object in our queue and keep updating the - // UnsuitableNodes fields. - return errPeriodic -} - -func truncateNodes(nodes []string, selectedNode string) []string { - // We might have checked "potential nodes + selected node" above, so - // this list might be too long by one element. When truncating it, make - // sure that the selected node is listed. - lenUnsuitable := len(nodes) - if lenUnsuitable > resourceapi.PodSchedulingNodeListMaxSize { - if nodes[0] == selectedNode { - // Truncate at the end and keep selected node in the first element. 
- nodes = nodes[0 : lenUnsuitable-1] - } else { - // Truncate at the front, it's not the selected node. - nodes = nodes[1:lenUnsuitable] - } - } - return nodes -} - -type claimAllocations []*ClaimAllocation - -// MarshalLog replaces the pointers with the actual structs because -// we care about the content, not the pointer values. -func (claims claimAllocations) MarshalLog() interface{} { - content := make([]ClaimAllocation, 0, len(claims)) - for _, claim := range claims { - content = append(content, *claim) - } - return content -} - -var _ logr.Marshaler = claimAllocations{} - -// findClaim returns the index of the specified pod claim, -1 if not found. -func findClaim(claims []resourceapi.ResourceClaimSchedulingStatus, podClaimName string) int { - for i := range claims { - if claims[i].Name == podClaimName { - return i - } - } - return -1 -} - -// hasString checks for a string in a slice. -func hasString(strings []string, str string) bool { - for _, s := range strings { - if s == str { - return true - } - } - return false -} - -// stringsDiffer does a strict comparison of two string arrays, order of entries matters. -func stringsDiffer(a, b []string) bool { - if len(a) != len(b) { - return true - } - for i := range a { - if a[i] != b[i] { - return true - } - } - return false -} - -// hasFinalizer checks if the claim has the finalizer of the driver. -func (ctrl *controller) hasFinalizer(claim *resourceapi.ResourceClaim) bool { - for _, finalizer := range claim.Finalizers { - if finalizer == ctrl.finalizer { - return true - } - } - return false -} - -// removeFinalizer creates a new slice without the finalizer of the driver. -func (ctrl *controller) removeFinalizer(in []string) []string { - out := make([]string, 0, len(in)) - for _, finalizer := range in { - if finalizer != ctrl.finalizer { - out = append(out, finalizer) - } - } - if len(out) == 0 { - return nil - } - return out -} - -// prettyPrint formats arbitrary objects as JSON or, if that fails, with Sprintf. -func prettyPrint(obj interface{}) string { - buffer, err := json.Marshal(obj) - if err != nil { - return fmt.Sprintf("%s", obj) - } - return string(buffer) -} diff --git a/staging/src/k8s.io/dynamic-resource-allocation/controller/controller_test.go b/staging/src/k8s.io/dynamic-resource-allocation/controller/controller_test.go deleted file mode 100644 index 5adccaa300a..00000000000 --- a/staging/src/k8s.io/dynamic-resource-allocation/controller/controller_test.go +++ /dev/null @@ -1,617 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package controller - -import ( - "context" - "errors" - "fmt" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - corev1 "k8s.io/api/core/v1" - resourceapi "k8s.io/api/resource/v1alpha3" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/informers" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/kubernetes/fake" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/record" - "k8s.io/klog/v2/ktesting" - _ "k8s.io/klog/v2/ktesting/init" -) - -func TestController(t *testing.T) { - claimKey := "claim:default/claim" - claimName := "claim" - claimNamespace := "default" - driverName := "mock-driver" - className := "mock-class" - otherDriverName := "other-driver" - ourFinalizer := driverName + "/deletion-protection" - otherFinalizer := otherDriverName + "/deletion-protection" - claim := createClaim(claimName, claimNamespace, driverName) - otherClaim := createClaim(claimName, claimNamespace, otherDriverName) - podName := "pod" - podSchedulingCtxKey := "schedulingCtx:default/pod" - pod := createPod(podName, claimNamespace, nil) - podClaimName := "my-pod-claim" - podSchedulingCtx := createPodSchedulingContexts(pod) - podWithClaim := createPod(podName, claimNamespace, map[string]string{podClaimName: claimName}) - nodeName := "worker" - otherNodeName := "worker-2" - unsuitableNodes := []string{otherNodeName} - potentialNodes := []string{nodeName, otherNodeName} - maxNodes := make([]string, resourceapi.PodSchedulingNodeListMaxSize) - for i := range maxNodes { - maxNodes[i] = fmt.Sprintf("node-%d", i) - } - withDeletionTimestamp := func(claim *resourceapi.ResourceClaim) *resourceapi.ResourceClaim { - var deleted metav1.Time - claim = claim.DeepCopy() - claim.DeletionTimestamp = &deleted - return claim - } - withReservedFor := func(claim *resourceapi.ResourceClaim, pod *corev1.Pod) *resourceapi.ResourceClaim { - claim = claim.DeepCopy() - claim.Status.ReservedFor = append(claim.Status.ReservedFor, resourceapi.ResourceClaimConsumerReference{ - Resource: "pods", - Name: pod.Name, - UID: pod.UID, - }) - return claim - } - withFinalizer := func(claim *resourceapi.ResourceClaim, finalizer string) *resourceapi.ResourceClaim { - claim = claim.DeepCopy() - claim.Finalizers = append(claim.Finalizers, finalizer) - return claim - } - allocation := resourceapi.AllocationResult{Controller: driverName} - withAllocate := func(claim *resourceapi.ResourceClaim) *resourceapi.ResourceClaim { - // Any allocated claim must have our finalizer. 
- claim = withFinalizer(claim, ourFinalizer) - claim.Status.Allocation = &allocation - return claim - } - withDeallocate := func(claim *resourceapi.ResourceClaim) *resourceapi.ResourceClaim { - claim.Status.DeallocationRequested = true - return claim - } - withSelectedNode := func(podSchedulingCtx *resourceapi.PodSchedulingContext) *resourceapi.PodSchedulingContext { - podSchedulingCtx = podSchedulingCtx.DeepCopy() - podSchedulingCtx.Spec.SelectedNode = nodeName - return podSchedulingCtx - } - withSpecificUnsuitableNodes := func(podSchedulingCtx *resourceapi.PodSchedulingContext, unsuitableNodes []string) *resourceapi.PodSchedulingContext { - podSchedulingCtx = podSchedulingCtx.DeepCopy() - podSchedulingCtx.Status.ResourceClaims = append(podSchedulingCtx.Status.ResourceClaims, - resourceapi.ResourceClaimSchedulingStatus{Name: podClaimName, UnsuitableNodes: unsuitableNodes}, - ) - return podSchedulingCtx - } - withUnsuitableNodes := func(podSchedulingCtx *resourceapi.PodSchedulingContext) *resourceapi.PodSchedulingContext { - return withSpecificUnsuitableNodes(podSchedulingCtx, unsuitableNodes) - } - withSpecificPotentialNodes := func(podSchedulingCtx *resourceapi.PodSchedulingContext, potentialNodes []string) *resourceapi.PodSchedulingContext { - podSchedulingCtx = podSchedulingCtx.DeepCopy() - podSchedulingCtx.Spec.PotentialNodes = potentialNodes - return podSchedulingCtx - } - withPotentialNodes := func(podSchedulingCtx *resourceapi.PodSchedulingContext) *resourceapi.PodSchedulingContext { - return withSpecificPotentialNodes(podSchedulingCtx, potentialNodes) - } - - var m mockDriver - - for name, test := range map[string]struct { - key string - driver mockDriver - pod *corev1.Pod - schedulingCtx, expectedSchedulingCtx *resourceapi.PodSchedulingContext - claim, expectedClaim *resourceapi.ResourceClaim - expectedWorkQueueState Mock[string] - expectedError string - // expectedEvent is a slice of strings representing expected events. - // Each string in the slice should follow the format: "EventType Reason Message". 
- // - "Warning Failed processing failed" - expectedEvent []string - }{ - "invalid-key": { - key: "claim:x/y/z", - expectedWorkQueueState: Mock[string]{ - Failures: map[string]int{ - "claim:x/y/z": 1, - }, - }, - }, - "not-found": { - key: "claim:default/claim", - }, - "wrong-driver": { - key: claimKey, - claim: otherClaim, - expectedClaim: otherClaim, - }, - - // not deleted, reallocate -> deallocate - "immediate-allocated-reallocate": { - key: claimKey, - claim: withDeallocate(withAllocate(claim)), - driver: m.expectDeallocate(map[string]error{claimName: nil}), - expectedClaim: claim, - }, - - // not deleted, reallocate, deallocate failure -> requeue - "immediate-allocated-fail-deallocation-during-reallocate": { - key: claimKey, - claim: withDeallocate(withAllocate(claim)), - driver: m.expectDeallocate(map[string]error{claimName: errors.New("fake error")}), - expectedClaim: withDeallocate(withAllocate(claim)), - expectedWorkQueueState: Mock[string]{ - Failures: map[string]int{ - claimKey: 1, - }, - }, - expectedEvent: []string{"Warning Failed deallocate: fake error"}, - }, - - // deletion time stamp set, our finalizer set, not allocated -> remove finalizer - "deleted-finalizer-removal": { - key: claimKey, - claim: withFinalizer(withDeletionTimestamp(claim), ourFinalizer), - driver: m.expectDeallocate(map[string]error{claimName: nil}), - expectedClaim: withDeletionTimestamp(claim), - }, - // deletion time stamp set, our finalizer set, not allocated, stopping fails -> requeue - "deleted-finalizer-stop-failure": { - key: claimKey, - claim: withFinalizer(withDeletionTimestamp(claim), ourFinalizer), - driver: m.expectDeallocate(map[string]error{claimName: errors.New("fake error")}), - expectedClaim: withFinalizer(withDeletionTimestamp(claim), ourFinalizer), - expectedWorkQueueState: Mock[string]{ - Failures: map[string]int{ - claimKey: 1, - }, - }, - expectedEvent: []string{"Warning Failed stop allocation: fake error"}, - }, - // deletion time stamp set, other finalizer set, not allocated -> do nothing - "deleted-finalizer-no-removal": { - key: claimKey, - claim: withFinalizer(withDeletionTimestamp(claim), otherFinalizer), - expectedClaim: withFinalizer(withDeletionTimestamp(claim), otherFinalizer), - }, - // deletion time stamp set, finalizer set, allocated -> deallocate - "deleted-allocated": { - key: claimKey, - claim: withAllocate(withDeletionTimestamp(claim)), - driver: m.expectDeallocate(map[string]error{claimName: nil}), - expectedClaim: withDeletionTimestamp(claim), - }, - // deletion time stamp set, finalizer set, allocated, deallocation fails -> requeue - "deleted-deallocate-failure": { - key: claimKey, - claim: withAllocate(withDeletionTimestamp(claim)), - driver: m.expectDeallocate(map[string]error{claimName: errors.New("fake error")}), - expectedClaim: withAllocate(withDeletionTimestamp(claim)), - expectedWorkQueueState: Mock[string]{ - Failures: map[string]int{ - claimKey: 1, - }, - }, - expectedEvent: []string{"Warning Failed deallocate: fake error"}, - }, - // deletion time stamp set, finalizer not set -> do nothing - "deleted-no-finalizer": { - key: claimKey, - claim: withDeletionTimestamp(claim), - expectedClaim: withDeletionTimestamp(claim), - }, - // waiting for first consumer -> do nothing - "pending": { - key: claimKey, - claim: claim, - expectedClaim: claim, - }, - - // pod with no claims -> shouldn't occur, check again anyway - "pod-nop": { - key: podSchedulingCtxKey, - pod: pod, - schedulingCtx: withSelectedNode(podSchedulingCtx), - expectedSchedulingCtx: 
withSelectedNode(podSchedulingCtx), - expectedWorkQueueState: Mock[string]{ - Later: []MockDelayedItem[string]{ - { - Item: podSchedulingCtxKey, - Duration: time.Second * 30, - }, - }, - }, - }, - - // no potential nodes -> shouldn't occur - "no-nodes": { - key: podSchedulingCtxKey, - claim: claim, - expectedClaim: claim, - pod: podWithClaim, - schedulingCtx: podSchedulingCtx, - expectedSchedulingCtx: podSchedulingCtx, - }, - - // potential nodes -> provide unsuitable nodes - "info": { - key: podSchedulingCtxKey, - claim: claim, - expectedClaim: claim, - pod: podWithClaim, - schedulingCtx: withPotentialNodes(podSchedulingCtx), - driver: m.expectClassParameters(map[string]interface{}{className: 1}). - expectClaimParameters(map[string]interface{}{claimName: 2}). - expectUnsuitableNodes(map[string][]string{podClaimName: unsuitableNodes}, nil), - expectedSchedulingCtx: withUnsuitableNodes(withPotentialNodes(podSchedulingCtx)), - expectedWorkQueueState: Mock[string]{ - Later: []MockDelayedItem[string]{ - { - Item: podSchedulingCtxKey, - Duration: time.Second * 30, - }, - }, - }, - }, - - // potential nodes, selected node -> allocate - "allocate": { - key: podSchedulingCtxKey, - claim: claim, - expectedClaim: withReservedFor(withAllocate(claim), pod), - pod: podWithClaim, - schedulingCtx: withSelectedNode(withPotentialNodes(podSchedulingCtx)), - driver: m.expectClassParameters(map[string]interface{}{className: 1}). - expectClaimParameters(map[string]interface{}{claimName: 2}). - expectUnsuitableNodes(map[string][]string{podClaimName: unsuitableNodes}, nil). - expectAllocate(map[string]allocate{claimName: {allocResult: &allocation, selectedNode: nodeName, allocErr: nil}}), - expectedSchedulingCtx: withUnsuitableNodes(withSelectedNode(withPotentialNodes(podSchedulingCtx))), - expectedWorkQueueState: Mock[string]{ - Later: []MockDelayedItem[string]{ - { - Item: "schedulingCtx:default/pod", - Duration: time.Second * 30, - }, - }, - }, - }, - // potential nodes, selected node, all unsuitable -> update unsuitable nodes - "is-potential-node": { - key: podSchedulingCtxKey, - claim: claim, - expectedClaim: claim, - pod: podWithClaim, - schedulingCtx: withPotentialNodes(withSelectedNode(withPotentialNodes(podSchedulingCtx))), - driver: m.expectClassParameters(map[string]interface{}{className: 1}). - expectClaimParameters(map[string]interface{}{claimName: 2}). - expectUnsuitableNodes(map[string][]string{podClaimName: potentialNodes}, nil), - expectedSchedulingCtx: withSpecificUnsuitableNodes(withSelectedNode(withPotentialNodes(podSchedulingCtx)), potentialNodes), - expectedWorkQueueState: Mock[string]{ - Later: []MockDelayedItem[string]{ - { - Item: podSchedulingCtxKey, - Duration: time.Second * 30, - }, - }, - }, - }, - // max potential nodes, other selected node, all unsuitable -> update unsuitable nodes with truncation at start - "is-potential-node-truncate-first": { - key: podSchedulingCtxKey, - claim: claim, - expectedClaim: claim, - pod: podWithClaim, - schedulingCtx: withSpecificPotentialNodes(withSelectedNode(withSpecificPotentialNodes(podSchedulingCtx, maxNodes)), maxNodes), - driver: m.expectClassParameters(map[string]interface{}{className: 1}). - expectClaimParameters(map[string]interface{}{claimName: 2}). 
- expectUnsuitableNodes(map[string][]string{podClaimName: append(maxNodes, nodeName)}, nil), - expectedSchedulingCtx: withSpecificUnsuitableNodes(withSelectedNode(withSpecificPotentialNodes(podSchedulingCtx, maxNodes)), append(maxNodes[1:], nodeName)), - expectedWorkQueueState: Mock[string]{ - Later: []MockDelayedItem[string]{ - { - Item: podSchedulingCtxKey, - Duration: time.Second * 30, - }, - }, - }, - }, - // max potential nodes, other selected node, all unsuitable (but in reverse order) -> update unsuitable nodes with truncation at end - "pod-selected-is-potential-node-truncate-last": { - key: podSchedulingCtxKey, - claim: claim, - expectedClaim: claim, - pod: podWithClaim, - schedulingCtx: withSpecificPotentialNodes(withSelectedNode(withSpecificPotentialNodes(podSchedulingCtx, maxNodes)), maxNodes), - driver: m.expectClassParameters(map[string]interface{}{className: 1}). - expectClaimParameters(map[string]interface{}{claimName: 2}). - expectUnsuitableNodes(map[string][]string{podClaimName: append([]string{nodeName}, maxNodes...)}, nil), - expectedSchedulingCtx: withSpecificUnsuitableNodes(withSelectedNode(withSpecificPotentialNodes(podSchedulingCtx, maxNodes)), append([]string{nodeName}, maxNodes[:len(maxNodes)-1]...)), - expectedWorkQueueState: Mock[string]{ - Later: []MockDelayedItem[string]{ - { - Item: podSchedulingCtxKey, - Duration: time.Second * 30, - }, - }, - }, - }, - } { - t.Run(name, func(t *testing.T) { - _, ctx := ktesting.NewTestContext(t) - ctx, cancel := context.WithCancel(ctx) - - initialObjects := []runtime.Object{} - if test.pod != nil { - initialObjects = append(initialObjects, test.pod) - } - if test.schedulingCtx != nil { - initialObjects = append(initialObjects, test.schedulingCtx) - } - if test.claim != nil { - initialObjects = append(initialObjects, test.claim) - } - kubeClient, informerFactory := fakeK8s(initialObjects) - claimInformer := informerFactory.Resource().V1alpha3().ResourceClaims() - podInformer := informerFactory.Core().V1().Pods() - podSchedulingInformer := informerFactory.Resource().V1alpha3().PodSchedulingContexts() - // Order is important: on function exit, we first must - // cancel, then wait (last-in-first-out). - defer informerFactory.Shutdown() - defer cancel() - - for _, obj := range initialObjects { - switch obj.(type) { - case *resourceapi.ResourceClaim: - require.NoError(t, claimInformer.Informer().GetStore().Add(obj), "add resource claim") - case *corev1.Pod: - require.NoError(t, podInformer.Informer().GetStore().Add(obj), "add pod") - case *resourceapi.PodSchedulingContext: - require.NoError(t, podSchedulingInformer.Informer().GetStore().Add(obj), "add pod scheduling") - default: - t.Fatalf("unknown initialObject type: %+v", obj) - } - } - - driver := test.driver - driver.t = t - - ctrl := New(ctx, driverName, driver, kubeClient, informerFactory) - informerFactory.Start(ctx.Done()) - if !cache.WaitForCacheSync(ctx.Done(), - informerFactory.Resource().V1alpha3().ResourceClaims().Informer().HasSynced, - informerFactory.Resource().V1alpha3().PodSchedulingContexts().Informer().HasSynced, - ) { - t.Fatal("could not sync caches") - } - var workQueueState Mock[string] - c := ctrl.(*controller) - // We need to mock the event recorder to test the controller's event. 
- fakeRecorder := record.NewFakeRecorder(100) - c.eventRecorder = fakeRecorder - workQueueState.SyncOne(test.key, c.sync) - assert.Equal(t, test.expectedWorkQueueState, workQueueState) - - claims, err := kubeClient.ResourceV1alpha3().ResourceClaims("").List(ctx, metav1.ListOptions{}) - require.NoError(t, err, "list claims") - var expectedClaims []resourceapi.ResourceClaim - if test.expectedClaim != nil { - expectedClaims = append(expectedClaims, *test.expectedClaim) - } - assert.Equal(t, expectedClaims, claims.Items) - - podSchedulings, err := kubeClient.ResourceV1alpha3().PodSchedulingContexts("").List(ctx, metav1.ListOptions{}) - require.NoError(t, err, "list pod schedulings") - var expectedPodSchedulings []resourceapi.PodSchedulingContext - if test.expectedSchedulingCtx != nil { - expectedPodSchedulings = append(expectedPodSchedulings, *test.expectedSchedulingCtx) - } - assert.Equal(t, expectedPodSchedulings, podSchedulings.Items) - // Assert that the events are correct. - assertEqualEvents(t, test.expectedEvent, fakeRecorder.Events) - }) - } -} - -type mockDriver struct { - t *testing.T - - // TODO: change this so that the mock driver expects calls in a certain order - // and fails when the next call isn't the expected one or calls didn't happen - classParameters map[string]interface{} - claimParameters map[string]interface{} - allocate map[string]allocate - deallocate map[string]error - unsuitableNodes map[string][]string - unsuitableNodesError error -} - -type allocate struct { - selectedNode string - allocResult *resourceapi.AllocationResult - allocErr error -} - -func (m mockDriver) expectClassParameters(expected map[string]interface{}) mockDriver { - m.classParameters = expected - return m -} - -func (m mockDriver) expectClaimParameters(expected map[string]interface{}) mockDriver { - m.claimParameters = expected - return m -} - -func (m mockDriver) expectAllocate(expected map[string]allocate) mockDriver { - m.allocate = expected - return m -} - -func (m mockDriver) expectDeallocate(expected map[string]error) mockDriver { - m.deallocate = expected - return m -} - -func (m mockDriver) expectUnsuitableNodes(expected map[string][]string, err error) mockDriver { - m.unsuitableNodes = expected - m.unsuitableNodesError = err - return m -} - -func (m mockDriver) Allocate(ctx context.Context, claims []*ClaimAllocation, selectedNode string) { - m.t.Logf("Allocate(number of claims %d)", len(claims)) - for _, claimAllocation := range claims { - m.t.Logf("Allocate(%s)", claimAllocation.Claim.Name) - allocate, ok := m.allocate[claimAllocation.Claim.Name] - if !ok { - m.t.Fatalf("unexpected Allocate call for claim %s", claimAllocation.Claim.Name) - } - assert.Equal(m.t, allocate.selectedNode, selectedNode, "selected node") - claimAllocation.Error = allocate.allocErr - claimAllocation.Allocation = allocate.allocResult - } - return -} - -func (m mockDriver) Deallocate(ctx context.Context, claim *resourceapi.ResourceClaim) error { - m.t.Logf("Deallocate(%s)", claim) - err, ok := m.deallocate[claim.Name] - if !ok { - m.t.Fatal("unexpected Deallocate call") - } - return err -} - -func (m mockDriver) UnsuitableNodes(ctx context.Context, pod *corev1.Pod, claims []*ClaimAllocation, potentialNodes []string) error { - m.t.Logf("UnsuitableNodes(%s, %v, %v)", pod, claims, potentialNodes) - if len(m.unsuitableNodes) == 0 { - m.t.Fatal("unexpected UnsuitableNodes call") - } - if m.unsuitableNodesError != nil { - return m.unsuitableNodesError - } - found := map[string]bool{} - for _, delayed := range claims { 
- unsuitableNodes, ok := m.unsuitableNodes[delayed.PodClaimName] - if !ok { - m.t.Errorf("unexpected pod claim: %s", delayed.PodClaimName) - } - delayed.UnsuitableNodes = unsuitableNodes - found[delayed.PodClaimName] = true - } - for expectedName := range m.unsuitableNodes { - if !found[expectedName] { - m.t.Errorf("pod claim %s not in actual claims list", expectedName) - } - } - return nil -} - -func createClaim(claimName, claimNamespace, driverName string) *resourceapi.ResourceClaim { - return &resourceapi.ResourceClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: claimName, - Namespace: claimNamespace, - }, - Spec: resourceapi.ResourceClaimSpec{ - Controller: driverName, - }, - } -} - -func createPod(podName, podNamespace string, claims map[string]string) *corev1.Pod { - pod := &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: podName, - Namespace: podNamespace, - UID: "1234", - }, - } - for podClaimName, claimName := range claims { - pod.Spec.ResourceClaims = append(pod.Spec.ResourceClaims, - corev1.PodResourceClaim{ - Name: podClaimName, - ResourceClaimName: &claimName, - }, - ) - } - return pod -} - -func createPodSchedulingContexts(pod *corev1.Pod) *resourceapi.PodSchedulingContext { - controller := true - return &resourceapi.PodSchedulingContext{ - ObjectMeta: metav1.ObjectMeta{ - Name: pod.Name, - Namespace: pod.Namespace, - OwnerReferences: []metav1.OwnerReference{ - { - Name: pod.Name, - Controller: &controller, - UID: pod.UID, - }, - }, - }, - } -} - -func fakeK8s(objs []runtime.Object) (kubernetes.Interface, informers.SharedInformerFactory) { - // This is a very simple replacement for a real apiserver. For example, - // it doesn't do defaulting and accepts updates to the status in normal - // Update calls. Therefore this test does not catch when we use Update - // instead of UpdateStatus. Reactors could be used to catch that, but - // that seems overkill because E2E tests will find that. - // - // Interactions with the fake apiserver also never fail. TODO: - // simulate update errors. - client := fake.NewSimpleClientset(objs...) - informerFactory := informers.NewSharedInformerFactory(client, 0) - return client, informerFactory -} - -func assertEqualEvents(t *testing.T, expected []string, actual <-chan string) { - t.Logf("Assert for events: %v", expected) - c := time.After(wait.ForeverTestTimeout) - for _, e := range expected { - select { - case a := <-actual: - assert.Equal(t, a, e) - case <-c: - t.Errorf("Expected event %q, got nothing", e) - // continue iterating to print all expected events - } - } - for { - select { - case a := <-actual: - t.Errorf("Unexpected event: %q", a) - default: - return // No more events, as expected. - } - } -} diff --git a/staging/src/k8s.io/dynamic-resource-allocation/controller/mock_queue_test.go b/staging/src/k8s.io/dynamic-resource-allocation/controller/mock_queue_test.go deleted file mode 100644 index aa9e013ba18..00000000000 --- a/staging/src/k8s.io/dynamic-resource-allocation/controller/mock_queue_test.go +++ /dev/null @@ -1,165 +0,0 @@ -/* -Copyright 2024 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "slices" - "time" - - "k8s.io/client-go/util/workqueue" -) - -// TODO (pohly): move this to k8s.io/client-go/util/workqueue/workqueue.go -// if it turns out to be generally useful. Doc comments are already written -// as if the code was there. - -// MockQueue is an implementation of [TypedRateLimitingInterface] which -// can be used to test a function which pulls work items out of a queue -// and processes them. -// -// A null instance is directly usable. The usual usage is: -// -// var m workqueue.Mock[string] -// m.SyncOne("some-item", func(queue workqueue.TypedRateLimitingInterface[string]) { ... } ) -// if diff := cmp.Diff(workqueue.Mock[string]{}, m); diff != "" { -// t.Errorf("unexpected state of mock work queue after sync (-want, +got):\n%s", diff) -// } -// -// All slices get reset to nil when they become empty, so there are no spurious -// differences because of the nil vs. empty slice. -type Mock[T comparable] struct { - // Ready contains the items which are ready for processing. - Ready []T - - // InFlight contains the items which are currently being processed (= Get - // was called, Done not yet). - InFlight []T - - // MismatchedDone contains the items for which Done was called without - // a matching Get. - MismatchedDone []T - - // Later contains the items which are meant to be added to the queue after - // a certain delay (= AddAfter was called for them). - Later []MockDelayedItem[T] - - // Failures contains the items and their retry count which failed to be - // processed (AddRateLimited called at least once, Forget not yet). - // The retry count is always larger than zero. - Failures map[T]int - - // ShutDownCalled tracks how often ShutDown got called. - ShutDownCalled int - - // ShutDownWithDrainCalled tracks how often ShutDownWithDrain got called. - ShutDownWithDrainCalled int -} - -// MockDelayedItem is an item which was queue for later processing. -type MockDelayedItem[T comparable] struct { - Item T - Duration time.Duration -} - -// SyncOne adds the item to the work queue and calls sync. -// That sync function can pull one or more items from the work -// queue until the queue is empty. Then it is told that the queue -// is shutting down, which must cause it to return. -// -// The test can then retrieve the state of the queue to check the result. -func (m *Mock[T]) SyncOne(item T, sync func(workqueue.TypedRateLimitingInterface[T])) { - m.Ready = append(m.Ready, item) - sync(m) -} - -// Add implements [TypedInterface]. -func (m *Mock[T]) Add(item T) { - m.Ready = append(m.Ready, item) -} - -// Len implements [TypedInterface]. -func (m *Mock[T]) Len() int { - return len(m.Ready) -} - -// Get implements [TypedInterface]. -func (m *Mock[T]) Get() (item T, shutdown bool) { - if len(m.Ready) == 0 { - shutdown = true - return - } - item = m.Ready[0] - m.Ready = m.Ready[1:] - if len(m.Ready) == 0 { - m.Ready = nil - } - m.InFlight = append(m.InFlight, item) - return item, false -} - -// Done implements [TypedInterface]. -func (m *Mock[T]) Done(item T) { - index := slices.Index(m.InFlight, item) - if index < 0 { - m.MismatchedDone = append(m.MismatchedDone, item) - } - m.InFlight = slices.Delete(m.InFlight, index, index+1) - if len(m.InFlight) == 0 { - m.InFlight = nil - } -} - -// ShutDown implements [TypedInterface]. -func (m *Mock[T]) ShutDown() { - m.ShutDownCalled++ -} - -// ShutDownWithDrain implements [TypedInterface]. 
-func (m *Mock[T]) ShutDownWithDrain() { - m.ShutDownWithDrainCalled++ -} - -// ShuttingDown implements [TypedInterface]. -func (m *Mock[T]) ShuttingDown() bool { - return m.ShutDownCalled > 0 || m.ShutDownWithDrainCalled > 0 -} - -// AddAfter implements [TypedDelayingInterface.AddAfter] -func (m *Mock[T]) AddAfter(item T, duration time.Duration) { - m.Later = append(m.Later, MockDelayedItem[T]{Item: item, Duration: duration}) -} - -// AddRateLimited implements [TypedRateLimitingInterface.AddRateLimited]. -func (m *Mock[T]) AddRateLimited(item T) { - if m.Failures == nil { - m.Failures = make(map[T]int) - } - m.Failures[item]++ -} - -// Forget implements [TypedRateLimitingInterface.Forget]. -func (m *Mock[T]) Forget(item T) { - if m.Failures == nil { - return - } - delete(m.Failures, item) -} - -// NumRequeues implements [TypedRateLimitingInterface.NumRequeues]. -func (m *Mock[T]) NumRequeues(item T) int { - return m.Failures[item] -} diff --git a/staging/src/k8s.io/dynamic-resource-allocation/go.mod b/staging/src/k8s.io/dynamic-resource-allocation/go.mod index f0de94c223a..b793d5151d7 100644 --- a/staging/src/k8s.io/dynamic-resource-allocation/go.mod +++ b/staging/src/k8s.io/dynamic-resource-allocation/go.mod @@ -8,7 +8,6 @@ godebug default=go1.23 require ( github.com/blang/semver/v4 v4.0.0 - github.com/go-logr/logr v1.4.2 github.com/google/cel-go v0.21.0 github.com/google/go-cmp v0.6.0 github.com/onsi/gomega v1.33.1 @@ -32,6 +31,7 @@ require ( github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect + github.com/go-logr/logr v1.4.2 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/swag v0.23.0 // indirect diff --git a/staging/src/k8s.io/dynamic-resource-allocation/structured/allocator_test.go b/staging/src/k8s.io/dynamic-resource-allocation/structured/allocator_test.go index 92c37cc9552..68b300d7c57 100644 --- a/staging/src/k8s.io/dynamic-resource-allocation/structured/allocator_test.go +++ b/staging/src/k8s.io/dynamic-resource-allocation/structured/allocator_test.go @@ -277,7 +277,6 @@ func allocationResult(selector *v1.NodeSelector, results ...resourceapi.DeviceRe "Config": gomega.BeNil(), }), "NodeSelector": matchNodeSelector(selector), - "Controller": gomega.BeEmpty(), })) } diff --git a/test/e2e/dra/deploy.go b/test/e2e/dra/deploy.go index a3fd78ba2de..5e65a3e377e 100644 --- a/test/e2e/dra/deploy.go +++ b/test/e2e/dra/deploy.go @@ -73,6 +73,17 @@ type Nodes struct { NodeNames []string } +type Resources struct { + NodeLocal bool + + // Nodes is a fixed list of node names on which resources are + // available. Mutually exclusive with NodeLabels. + Nodes []string + + // Number of devices called "device-000", "device-001", ... on each node or in the cluster. + MaxAllocations int +} + //go:embed test-driver/deploy/example/plugin-permissions.yaml var pluginPermissions string @@ -167,7 +178,7 @@ func validateClaim(claim *resourceapi.ResourceClaim) { // up after the test. // // Call this outside of ginkgo.It, then use the instance inside ginkgo.It. 
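As an aside on the trimmed-down Resources struct added above: without the classic-DRA controller it only captures whether test resources are node-local, which nodes have them, and how many devices exist. A minimal, hypothetical sketch of the two shapes (the type is re-declared locally for illustration and the node names are made up; this is not the e2e package itself):

    package main

    import "fmt"

    // Local copy of the e2e test's Resources struct, for illustration only.
    type Resources struct {
        NodeLocal      bool     // true: one device pool per node; false: one network-attached pool
        Nodes          []string // fixed list of node names on which resources are available
        MaxAllocations int      // devices per node (node-local) or per cluster (network-attached)
    }

    func main() {
        // Network-attached: a single pool usable from every test node.
        // MaxAllocations <= 0 is bumped to 10 by the driver setup code.
        shared := Resources{MaxAllocations: 10}

        // Node-local: each listed node gets its own pool with one device.
        local := Resources{
            NodeLocal:      true,
            Nodes:          []string{"worker-1", "worker-2"},
            MaxAllocations: 1,
        }

        fmt.Println(shared, local)
    }

In the e2e suite itself these two shapes are produced by networkResources() and perNode() further down in dra.go.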
-func NewDriver(f *framework.Framework, nodes *Nodes, configureResources func() app.Resources, devicesPerNode ...map[string]map[resourceapi.QualifiedName]resourceapi.DeviceAttribute) *Driver { +func NewDriver(f *framework.Framework, nodes *Nodes, configureResources func() Resources, devicesPerNode ...map[string]map[resourceapi.QualifiedName]resourceapi.DeviceAttribute) *Driver { d := NewDriverInstance(f) ginkgo.BeforeEach(func() { @@ -180,17 +191,16 @@ func NewDriver(f *framework.Framework, nodes *Nodes, configureResources func() a // be started explicitly with Run. May be used inside ginkgo.It. func NewDriverInstance(f *framework.Framework) *Driver { d := &Driver{ - f: f, - fail: map[MethodInstance]bool{}, - callCounts: map[MethodInstance]int64{}, - NodeV1alpha3: true, - parameterMode: parameterModeStructured, + f: f, + fail: map[MethodInstance]bool{}, + callCounts: map[MethodInstance]int64{}, + NodeV1alpha3: true, } d.initName() return d } -func (d *Driver) Run(nodes *Nodes, configureResources func() app.Resources, devicesPerNode ...map[string]map[resourceapi.QualifiedName]resourceapi.DeviceAttribute) { +func (d *Driver) Run(nodes *Nodes, configureResources func() Resources, devicesPerNode ...map[string]map[resourceapi.QualifiedName]resourceapi.DeviceAttribute) { resources := configureResources() if len(resources.Nodes) == 0 { // This always has to be set because the driver might @@ -215,15 +225,13 @@ type Driver struct { serviceAccountName string NameSuffix string - Controller *app.ExampleController Name string // Nodes contains entries for each node selected for a test when the test runs. // In addition, there is one entry for a fictional node. Nodes map[string]KubeletPlugin - parameterMode parameterMode // empty == parameterModeStructured - NodeV1alpha3 bool + NodeV1alpha3 bool mutex sync.Mutex fail map[MethodInstance]bool @@ -235,22 +243,14 @@ type KubeletPlugin struct { ClientSet kubernetes.Interface } -type parameterMode string - -const ( - parameterModeClassicDRA parameterMode = "classic" // control plane controller - parameterModeStructured parameterMode = "structured" // allocation through scheduler -) - func (d *Driver) initName() { d.Name = d.f.UniqueName + d.NameSuffix + ".k8s.io" } -func (d *Driver) SetUp(nodes *Nodes, resources app.Resources, devicesPerNode ...map[string]map[resourceapi.QualifiedName]resourceapi.DeviceAttribute) { +func (d *Driver) SetUp(nodes *Nodes, resources Resources, devicesPerNode ...map[string]map[resourceapi.QualifiedName]resourceapi.DeviceAttribute) { d.initName() ginkgo.By(fmt.Sprintf("deploying driver %s on nodes %v", d.Name, nodes.NodeNames)) d.Nodes = make(map[string]KubeletPlugin) - resources.DriverName = d.Name ctx, cancel := context.WithCancel(context.Background()) logger := klog.FromContext(ctx) @@ -259,58 +259,47 @@ func (d *Driver) SetUp(nodes *Nodes, resources app.Resources, devicesPerNode ... d.ctx = ctx d.cleanup = append(d.cleanup, cancel) - switch d.parameterMode { - case parameterModeClassicDRA: - // The controller is easy: we simply connect to the API server. - d.Controller = app.NewController(d.f.ClientSet, resources) - d.wg.Add(1) - go func() { - defer d.wg.Done() - d.Controller.Run(d.ctx, 5 /* workers */) - }() - case parameterModeStructured: - if !resources.NodeLocal { - // Publish one resource pool with "network-attached" devices. - slice := &resourceapi.ResourceSlice{ - ObjectMeta: metav1.ObjectMeta{ - Name: d.Name, // globally unique + if !resources.NodeLocal { + // Publish one resource pool with "network-attached" devices. 
+ slice := &resourceapi.ResourceSlice{ + ObjectMeta: metav1.ObjectMeta{ + Name: d.Name, // globally unique + }, + Spec: resourceapi.ResourceSliceSpec{ + Driver: d.Name, + Pool: resourceapi.ResourcePool{ + Name: "network", + Generation: 1, + ResourceSliceCount: 1, }, - Spec: resourceapi.ResourceSliceSpec{ - Driver: d.Name, - Pool: resourceapi.ResourcePool{ - Name: "network", - Generation: 1, - ResourceSliceCount: 1, - }, - NodeSelector: &v1.NodeSelector{ - NodeSelectorTerms: []v1.NodeSelectorTerm{{ - MatchFields: []v1.NodeSelectorRequirement{{ - Key: "metadata.name", - Operator: v1.NodeSelectorOpIn, - Values: nodes.NodeNames, - }}, + NodeSelector: &v1.NodeSelector{ + NodeSelectorTerms: []v1.NodeSelectorTerm{{ + MatchFields: []v1.NodeSelectorRequirement{{ + Key: "metadata.name", + Operator: v1.NodeSelectorOpIn, + Values: nodes.NodeNames, }}, - }, + }}, }, - } - maxAllocations := resources.MaxAllocations - if maxAllocations <= 0 { - // Cannot be empty, otherwise nothing runs. - maxAllocations = 10 - } - for i := 0; i < maxAllocations; i++ { - slice.Spec.Devices = append(slice.Spec.Devices, resourceapi.Device{ - Name: fmt.Sprintf("device-%d", i), - Basic: &resourceapi.BasicDevice{}, - }) - } - - _, err := d.f.ClientSet.ResourceV1alpha3().ResourceSlices().Create(ctx, slice, metav1.CreateOptions{}) - framework.ExpectNoError(err) - ginkgo.DeferCleanup(func(ctx context.Context) { - framework.ExpectNoError(d.f.ClientSet.ResourceV1alpha3().ResourceSlices().Delete(ctx, slice.Name, metav1.DeleteOptions{})) + }, + } + maxAllocations := resources.MaxAllocations + if maxAllocations <= 0 { + // Cannot be empty, otherwise nothing runs. + maxAllocations = 10 + } + for i := 0; i < maxAllocations; i++ { + slice.Spec.Devices = append(slice.Spec.Devices, resourceapi.Device{ + Name: fmt.Sprintf("device-%d", i), + Basic: &resourceapi.BasicDevice{}, }) } + + _, err := d.f.ClientSet.ResourceV1alpha3().ResourceSlices().Create(ctx, slice, metav1.CreateOptions{}) + framework.ExpectNoError(err) + ginkgo.DeferCleanup(func(ctx context.Context) { + framework.ExpectNoError(d.f.ClientSet.ResourceV1alpha3().ResourceSlices().Delete(ctx, slice.Name, metav1.DeleteOptions{})) + }) } manifests := []string{ @@ -319,14 +308,9 @@ func (d *Driver) SetUp(nodes *Nodes, resources app.Resources, devicesPerNode ... "test/e2e/testing-manifests/dra/dra-test-driver-proxy.yaml", } var numDevices = -1 // disabled - if d.parameterMode != parameterModeClassicDRA && resources.NodeLocal { + if resources.NodeLocal { numDevices = resources.MaxAllocations } - switch d.parameterMode { - case parameterModeClassicDRA, parameterModeStructured: - default: - framework.Failf("unknown test driver parameter mode: %s", d.parameterMode) - } // Create service account and corresponding RBAC rules. 
d.serviceAccountName = "dra-kubelet-plugin-" + d.Name + "-service-account" diff --git a/test/e2e/dra/dra.go b/test/e2e/dra/dra.go index e8c176b6c91..d03193a3f4c 100644 --- a/test/e2e/dra/dra.go +++ b/test/e2e/dra/dra.go @@ -38,16 +38,12 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" applyv1 "k8s.io/client-go/applyconfigurations/core/v1" "k8s.io/client-go/kubernetes" - "k8s.io/dynamic-resource-allocation/controller" "k8s.io/klog/v2" - "k8s.io/kubernetes/test/e2e/dra/test-driver/app" "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" - e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" admissionapi "k8s.io/pod-security-admission/api" "k8s.io/utils/ptr" @@ -62,16 +58,16 @@ const ( var adminAccessPolicyYAML string // networkResources can be passed to NewDriver directly. -func networkResources() app.Resources { - return app.Resources{} +func networkResources() Resources { + return Resources{} } // perNode returns a function which can be passed to NewDriver. The nodes // parameter has be instantiated, but not initialized yet, so the returned // function has to capture it and use it when being called. -func perNode(maxAllocations int, nodes *Nodes) func() app.Resources { - return func() app.Resources { - return app.Resources{ +func perNode(maxAllocations int, nodes *Nodes) func() Resources { + return func() Resources { + return Resources{ NodeLocal: true, MaxAllocations: maxAllocations, Nodes: nodes.NodeNames, @@ -382,9 +378,6 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation, gomega.Eventually(ctx, func(ctx context.Context) (*v1.Pod, error) { return f.ClientSet.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{}) }).WithTimeout(f.Timeouts.PodStartSlow).Should(gomega.HaveField("Status.ContainerStatuses", gomega.ContainElements(gomega.HaveField("RestartCount", gomega.BeNumerically(">=", 2))))) - if driver.Controller != nil { - gomega.Expect(driver.Controller.GetNumAllocations()).To(gomega.Equal(int64(1)), "number of allocations") - } }) ginkgo.It("must deallocate after use", func(ctx context.Context) { @@ -408,16 +401,15 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation, }) } - singleNodeTests := func(parameterMode parameterMode) { + singleNodeTests := func() { nodes := NewNodes(f, 1, 1) maxAllocations := 1 numPods := 10 - generateResources := func() app.Resources { + generateResources := func() Resources { resources := perNode(maxAllocations, nodes)() return resources } driver := NewDriver(f, nodes, generateResources) // All tests get their own driver instance. - driver.parameterMode = parameterMode b := newBuilder(f, driver) // We have to set the parameters *before* creating the class. 
b.classParameters = `{"x":"y"}` @@ -553,30 +545,11 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation, class, err := f.ClientSet.ResourceV1alpha3().DeviceClasses().Get(ctx, deviceClassName, metav1.GetOptions{}) framework.ExpectNoError(err) originalClass := class.DeepCopy() - switch driver.parameterMode { - case parameterModeClassicDRA: - class.Spec.SuitableNodes = &v1.NodeSelector{ - NodeSelectorTerms: []v1.NodeSelectorTerm{ - { - MatchExpressions: []v1.NodeSelectorRequirement{ - { - Key: "no-such-label", - Operator: v1.NodeSelectorOpIn, - Values: []string{"no-such-value"}, - }, - }, - }, - }, - } - case parameterModeStructured: - class.Spec.Selectors = []resourceapi.DeviceSelector{{ - CEL: &resourceapi.CELDeviceSelector{ - Expression: "false", - }, - }} - default: - framework.Failf("unexpected mode: %s", driver.parameterMode) - } + class.Spec.Selectors = []resourceapi.DeviceSelector{{ + CEL: &resourceapi.CELDeviceSelector{ + Expression: "false", + }, + }} class, err = f.ClientSet.ResourceV1alpha3().DeviceClasses().Update(ctx, class, metav1.UpdateOptions{}) framework.ExpectNoError(err) @@ -587,7 +560,6 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation, framework.ExpectNoError(e2epod.WaitForPodNameUnschedulableInNamespace(ctx, f.ClientSet, pod.Name, pod.Namespace)) // Unblock the pod. - class.Spec.SuitableNodes = originalClass.Spec.SuitableNodes class.Spec.Selectors = originalClass.Spec.Selectors _, err = f.ClientSet.ResourceV1alpha3().DeviceClasses().Update(ctx, class, metav1.UpdateOptions{}) framework.ExpectNoError(err) @@ -616,313 +588,93 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation, claimTests(b, driver) } - // These tests depend on having more than one node and a DRA driver controller. - multiNodeDRAControllerTests := func(nodes *Nodes) { - driver := NewDriver(f, nodes, networkResources) - driver.parameterMode = parameterModeClassicDRA - b := newBuilder(f, driver) + // The following tests only make sense when there is more than one node. + // They get skipped when there's only one node. + multiNodeTests := func() { + nodes := NewNodes(f, 2, 8) - ginkgo.It("schedules onto different nodes", func(ctx context.Context) { - label := "app.kubernetes.io/instance" - instance := f.UniqueName + "-test-app" - antiAffinity := &v1.Affinity{ - PodAntiAffinity: &v1.PodAntiAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{ - { - TopologyKey: "kubernetes.io/hostname", - LabelSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - label: instance, - }, - }, - }, + ginkgo.Context("with different ResourceSlices", func() { + firstDevice := "pre-defined-device-01" + secondDevice := "pre-defined-device-02" + devicesPerNode := []map[string]map[resourceapi.QualifiedName]resourceapi.DeviceAttribute{ + // First node: + { + firstDevice: { + "healthy": {BoolValue: ptr.To(true)}, + "exists": {BoolValue: ptr.To(true)}, + }, + }, + // Second node: + { + secondDevice: { + "healthy": {BoolValue: ptr.To(false)}, + // Has no "exists" attribute! }, }, } - createPod := func() *v1.Pod { - pod := b.podExternal() - pod.Labels[label] = instance - pod.Spec.Affinity = antiAffinity - return pod - } - pod1 := createPod() - pod2 := createPod() - claim := b.externalClaim() - b.create(ctx, claim, pod1, pod2) + driver := NewDriver(f, nodes, perNode(-1, nodes), devicesPerNode...) 
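For orientation, a hedged sketch of how the two devicesPerNode entries above surface as Device entries in the published ResourceSlices, and why the selector expression device.attributes["<driver>"].exists used by the test below errors at runtime only for the second node (the "<driver>" placeholder and the standalone program form are illustrative):

    package main

    import (
        "fmt"

        resourceapi "k8s.io/api/resource/v1alpha3"
        "k8s.io/utils/ptr"
    )

    func main() {
        // First node: both attributes are present, so evaluating
        // device.attributes["<driver>"].exists succeeds and returns true.
        first := resourceapi.Device{
            Name: "pre-defined-device-01",
            Basic: &resourceapi.BasicDevice{
                Attributes: map[resourceapi.QualifiedName]resourceapi.DeviceAttribute{
                    "healthy": {BoolValue: ptr.To(true)},
                    "exists":  {BoolValue: ptr.To(true)},
                },
            },
        }

        // Second node: the "exists" attribute is missing, so evaluating the
        // same expression is a CEL runtime error for this device. The test
        // expects the scheduler to keep the pod pending rather than silently
        // skipping the node.
        second := resourceapi.Device{
            Name: "pre-defined-device-02",
            Basic: &resourceapi.BasicDevice{
                Attributes: map[resourceapi.QualifiedName]resourceapi.DeviceAttribute{
                    "healthy": {BoolValue: ptr.To(false)},
                },
            },
        }

        fmt.Println(first.Name, second.Name)
    }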
+ b := newBuilder(f, driver) - for _, pod := range []*v1.Pod{pod1, pod2} { - err := e2epod.WaitForPodRunningInNamespace(ctx, f.ClientSet, pod) - framework.ExpectNoError(err, "start pod") - } - }) + ginkgo.It("keeps pod pending because of CEL runtime errors", func(ctx context.Context) { + // When pod scheduling encounters CEL runtime errors for some nodes, but not all, + // it should still not schedule the pod because there is something wrong with it. + // Scheduling it would make it harder to detect that there is a problem. + // + // This matches the "CEL-runtime-error-for-subset-of-nodes" unit test, except that + // here we try it in combination with the actual scheduler and can extend it with + // other checks, like event handling (future extension). - // This test covers aspects of non graceful node shutdown by DRA controller - // More details about this can be found in the KEP: - // https://github.com/kubernetes/enhancements/tree/master/keps/sig-storage/2268-non-graceful-shutdown - // NOTE: this test depends on kind. It will only work with kind cluster as it shuts down one of the - // nodes by running `docker stop `, which is very kind-specific. - f.It(f.WithSerial(), f.WithDisruptive(), f.WithSlow(), "must deallocate on non graceful node shutdown", func(ctx context.Context) { - ginkgo.By("create test pod") - label := "app.kubernetes.io/instance" - instance := f.UniqueName + "-test-app" - pod := b.podExternal() - pod.Labels[label] = instance - claim := b.externalClaim() - b.create(ctx, claim, pod) - - ginkgo.By("wait for test pod " + pod.Name + " to run") - labelSelector := labels.SelectorFromSet(labels.Set(pod.Labels)) - pods, err := e2epod.WaitForPodsWithLabelRunningReady(ctx, f.ClientSet, pod.Namespace, labelSelector, 1, framework.PodStartTimeout) - framework.ExpectNoError(err, "start pod") - runningPod := &pods.Items[0] - - nodeName := runningPod.Spec.NodeName - // Prevent builder tearDown to fail waiting for unprepared resources - delete(b.driver.Nodes, nodeName) - ginkgo.By("stop node " + nodeName + " non gracefully") - _, stderr, err := framework.RunCmd("docker", "stop", nodeName) - gomega.Expect(stderr).To(gomega.BeEmpty()) - framework.ExpectNoError(err) - ginkgo.DeferCleanup(framework.RunCmd, "docker", "start", nodeName) - if ok := e2enode.WaitForNodeToBeNotReady(ctx, f.ClientSet, nodeName, f.Timeouts.NodeNotReady); !ok { - framework.Failf("Node %s failed to enter NotReady state", nodeName) - } - - ginkgo.By("apply out-of-service taint on node " + nodeName) - taint := v1.Taint{ - Key: v1.TaintNodeOutOfService, - Effect: v1.TaintEffectNoExecute, - } - e2enode.AddOrUpdateTaintOnNode(ctx, f.ClientSet, nodeName, taint) - e2enode.ExpectNodeHasTaint(ctx, f.ClientSet, nodeName, &taint) - ginkgo.DeferCleanup(e2enode.RemoveTaintOffNode, f.ClientSet, nodeName, taint) - - ginkgo.By("waiting for claim to get deallocated") - gomega.Eventually(ctx, framework.GetObject(b.f.ClientSet.ResourceV1alpha3().ResourceClaims(b.f.Namespace.Name).Get, claim.Name, metav1.GetOptions{})).WithTimeout(f.Timeouts.PodDelete).Should(gomega.HaveField("Status.Allocation", gomega.BeNil())) - }) - } - - // The following tests only make sense when there is more than one node. - // They get skipped when there's only one node. 
- multiNodeTests := func(parameterMode parameterMode) { - nodes := NewNodes(f, 2, 8) - - switch parameterMode { - case parameterModeStructured: - ginkgo.Context("with different ResourceSlices", func() { - firstDevice := "pre-defined-device-01" - secondDevice := "pre-defined-device-02" - devicesPerNode := []map[string]map[resourceapi.QualifiedName]resourceapi.DeviceAttribute{ - // First node: - { - firstDevice: { - "healthy": {BoolValue: ptr.To(true)}, - "exists": {BoolValue: ptr.To(true)}, - }, + gomega.Eventually(ctx, framework.ListObjects(f.ClientSet.ResourceV1alpha3().ResourceSlices().List, + metav1.ListOptions{ + FieldSelector: resourceapi.ResourceSliceSelectorDriver + "=" + driver.Name, }, - // Second node: - { - secondDevice: { - "healthy": {BoolValue: ptr.To(false)}, - // Has no "exists" attribute! - }, - }, - } - driver := NewDriver(f, nodes, perNode(-1, nodes), devicesPerNode...) - b := newBuilder(f, driver) - - ginkgo.It("keeps pod pending because of CEL runtime errors", func(ctx context.Context) { - // When pod scheduling encounters CEL runtime errors for some nodes, but not all, - // it should still not schedule the pod because there is something wrong with it. - // Scheduling it would make it harder to detect that there is a problem. - // - // This matches the "CEL-runtime-error-for-subset-of-nodes" unit test, except that - // here we try it in combination with the actual scheduler and can extend it with - // other checks, like event handling (future extension). - - gomega.Eventually(ctx, framework.ListObjects(f.ClientSet.ResourceV1alpha3().ResourceSlices().List, - metav1.ListOptions{ - FieldSelector: resourceapi.ResourceSliceSelectorDriver + "=" + driver.Name, - }, - )).Should(gomega.HaveField("Items", gomega.ConsistOf( - gomega.HaveField("Spec.Devices", gomega.ConsistOf( - gomega.Equal(resourceapi.Device{ - Name: firstDevice, - Basic: &resourceapi.BasicDevice{ - Attributes: devicesPerNode[0][firstDevice], - }, - }))), - gomega.HaveField("Spec.Devices", gomega.ConsistOf( - gomega.Equal(resourceapi.Device{ - Name: secondDevice, - Basic: &resourceapi.BasicDevice{ - Attributes: devicesPerNode[1][secondDevice], - }, - }))), - ))) - - pod, template := b.podInline() - template.Spec.Spec.Devices.Requests[0].Selectors = append(template.Spec.Spec.Devices.Requests[0].Selectors, - resourceapi.DeviceSelector{ - CEL: &resourceapi.CELDeviceSelector{ - // Runtime error on one node, but not all. - Expression: fmt.Sprintf(`device.attributes["%s"].exists`, driver.Name), + )).Should(gomega.HaveField("Items", gomega.ConsistOf( + gomega.HaveField("Spec.Devices", gomega.ConsistOf( + gomega.Equal(resourceapi.Device{ + Name: firstDevice, + Basic: &resourceapi.BasicDevice{ + Attributes: devicesPerNode[0][firstDevice], }, - }, - ) - b.create(ctx, pod, template) + }))), + gomega.HaveField("Spec.Devices", gomega.ConsistOf( + gomega.Equal(resourceapi.Device{ + Name: secondDevice, + Basic: &resourceapi.BasicDevice{ + Attributes: devicesPerNode[1][secondDevice], + }, + }))), + ))) - framework.ExpectNoError(e2epod.WaitForPodCondition(ctx, f.ClientSet, pod.Namespace, pod.Name, "scheduling failure", f.Timeouts.PodStartShort, func(pod *v1.Pod) (bool, error) { - for _, condition := range pod.Status.Conditions { - if condition.Type == "PodScheduled" { - if condition.Status != "False" { - gomega.StopTrying("pod got scheduled unexpectedly").Now() - } - if strings.Contains(condition.Message, "CEL runtime error") { - // This is what we are waiting for. 
- return true, nil - } + pod, template := b.podInline() + template.Spec.Spec.Devices.Requests[0].Selectors = append(template.Spec.Spec.Devices.Requests[0].Selectors, + resourceapi.DeviceSelector{ + CEL: &resourceapi.CELDeviceSelector{ + // Runtime error on one node, but not all. + Expression: fmt.Sprintf(`device.attributes["%s"].exists`, driver.Name), + }, + }, + ) + b.create(ctx, pod, template) + + framework.ExpectNoError(e2epod.WaitForPodCondition(ctx, f.ClientSet, pod.Namespace, pod.Name, "scheduling failure", f.Timeouts.PodStartShort, func(pod *v1.Pod) (bool, error) { + for _, condition := range pod.Status.Conditions { + if condition.Type == "PodScheduled" { + if condition.Status != "False" { + gomega.StopTrying("pod got scheduled unexpectedly").Now() + } + if strings.Contains(condition.Message, "CEL runtime error") { + // This is what we are waiting for. + return true, nil } } - return false, nil - }), "pod must not get scheduled because of a CEL runtime error") - }) - }) - case parameterModeClassicDRA: - ginkgo.Context("with network-attached resources", func() { - multiNodeDRAControllerTests(nodes) - }) - - ginkgo.Context("reallocation", func() { - var allocateWrapper2 app.AllocateWrapperType - driver := NewDriver(f, nodes, perNode(1, nodes)) - driver.parameterMode = parameterModeClassicDRA - driver2 := NewDriver(f, nodes, func() app.Resources { - return app.Resources{ - NodeLocal: true, - MaxAllocations: 1, - Nodes: nodes.NodeNames, - - AllocateWrapper: func( - ctx context.Context, - claimAllocations []*controller.ClaimAllocation, - selectedNode string, - handler func( - ctx context.Context, - claimAllocations []*controller.ClaimAllocation, - selectedNode string), - ) { - allocateWrapper2(ctx, claimAllocations, selectedNode, handler) - }, } - }) - driver2.NameSuffix = "-other" - driver2.parameterMode = parameterModeClassicDRA - - b := newBuilder(f, driver) - b2 := newBuilder(f, driver2) - - ginkgo.It("works", func(ctx context.Context) { - // A pod with multiple claims can run on a node, but - // only if allocation of all succeeds. This - // test simulates the scenario where one claim - // gets allocated from one driver, but the claims - // from second driver fail allocation because of a - // race with some other pod. - // - // To ensure the right timing, allocation of the - // claims from second driver are delayed while - // creating another pod that gets the remaining - // resource on the node from second driver. - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - // Order is relevant here: each pod must be matched with its own claim. - pod1claim1 := b.externalClaim() - pod1 := b.podExternal() - pod2claim1 := b2.externalClaim() - pod2 := b2.podExternal() - - // Add another claim to pod1. - pod1claim2 := b2.externalClaim() - pod1.Spec.ResourceClaims = append(pod1.Spec.ResourceClaims, - v1.PodResourceClaim{ - Name: "claim-other", - ResourceClaimName: &pod1claim2.Name, - }, - ) - - // Allocating the second claim in pod1 has to wait until pod2 has - // consumed the available resources on the node. 
- blockClaim, cancelBlockClaim := context.WithCancel(ctx) - defer cancelBlockClaim() - allocateWrapper2 = func(ctx context.Context, - claimAllocations []*controller.ClaimAllocation, - selectedNode string, - handler func(ctx context.Context, - claimAllocations []*controller.ClaimAllocation, - selectedNode string), - ) { - if claimAllocations[0].Claim.Name == pod1claim2.Name { - <-blockClaim.Done() - } - handler(ctx, claimAllocations, selectedNode) - } - - b.create(ctx, pod1claim1, pod1claim2, pod1) - - ginkgo.By("waiting for one claim from driver1 to be allocated") - var nodeSelector *v1.NodeSelector - gomega.Eventually(ctx, func(ctx context.Context) (int, error) { - claims, err := f.ClientSet.ResourceV1alpha3().ResourceClaims(f.Namespace.Name).List(ctx, metav1.ListOptions{}) - if err != nil { - return 0, err - } - allocated := 0 - for _, claim := range claims.Items { - if claim.Status.Allocation != nil { - allocated++ - nodeSelector = claim.Status.Allocation.NodeSelector - } - } - return allocated, nil - }).WithTimeout(time.Minute).Should(gomega.Equal(1), "one claim allocated") - - // Now create a second pod which we force to - // run on the same node that is currently being - // considered for the first one. We know what - // the node selector looks like and can - // directly access the key and value from it. - ginkgo.By(fmt.Sprintf("create second pod on the same node %s", nodeSelector)) - - req := nodeSelector.NodeSelectorTerms[0].MatchExpressions[0] - node := req.Values[0] - pod2.Spec.NodeSelector = map[string]string{req.Key: node} - - b2.create(ctx, pod2claim1, pod2) - framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, f.ClientSet, pod2), "start pod 2") - - // Allow allocation of second claim in pod1 to proceed. It should fail now - // and the other node must be used instead, after deallocating - // the first claim. 
- ginkgo.By("move first pod to other node") - cancelBlockClaim() - - framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, f.ClientSet, pod1), "start pod 1") - pod1, err := f.ClientSet.CoreV1().Pods(pod1.Namespace).Get(ctx, pod1.Name, metav1.GetOptions{}) - framework.ExpectNoError(err, "get first pod") - if pod1.Spec.NodeName == "" { - framework.Fail("first pod should be running on node, was not scheduled") - } - gomega.Expect(pod1.Spec.NodeName).ToNot(gomega.Equal(node), "first pod should run on different node than second one") - gomega.Expect(driver.Controller.GetNumDeallocations()).To(gomega.Equal(int64(1)), "number of deallocations") - }) + return false, nil + }), "pod must not get scheduled because of a CEL runtime error") }) - } + }) ginkgo.Context("with node-local resources", func() { driver := NewDriver(f, nodes, perNode(1, nodes)) - driver.parameterMode = parameterMode b := newBuilder(f, driver) ginkgo.It("uses all resources", func(ctx context.Context) { @@ -964,17 +716,13 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation, }) } - tests := func(parameterMode parameterMode) { - ginkgo.Context("on single node", func() { - singleNodeTests(parameterMode) - }) - ginkgo.Context("on multiple nodes", func() { - multiNodeTests(parameterMode) - }) - } + ginkgo.Context("on single node", func() { + singleNodeTests() + }) - framework.Context("with classic DRA", feature.DRAControlPlaneController, func() { tests(parameterModeClassicDRA) }) - framework.Context("with structured parameters", func() { tests(parameterModeStructured) }) + ginkgo.Context("on multiple nodes", func() { + multiNodeTests() + }) // TODO (https://github.com/kubernetes/kubernetes/issues/123699): move most of the test below into `testDriver` so that they get // executed with different parameters. @@ -1098,58 +846,7 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation, }) }) - // The following tests are all about behavior in combination with a - // control-plane DRA driver controller. - framework.Context("cluster with classic DRA", feature.DRAControlPlaneController, func() { - nodes := NewNodes(f, 1, 4) - - // kube-controller-manager can trigger delayed allocation for pods where the - // node name was already selected when creating the pod. For immediate - // allocation, the creator has to ensure that the node matches the claims. - // This does not work for resource claim templates and only isn't - // a problem here because the resource is network-attached and available - // on all nodes. 
- preScheduledTests := func(b *builder, driver *Driver) { - ginkgo.It("supports scheduled pod referencing inline resource claim", func(ctx context.Context) { - pod, template := b.podInline() - pod.Spec.NodeName = nodes.NodeNames[0] - b.create(ctx, pod, template) - - b.testPod(ctx, f.ClientSet, pod) - }) - - ginkgo.It("supports scheduled pod referencing external resource claim", func(ctx context.Context) { - claim := b.externalClaim() - pod := b.podExternal() - pod.Spec.NodeName = nodes.NodeNames[0] - b.create(ctx, claim, pod) - - b.testPod(ctx, f.ClientSet, pod) - }) - } - - ginkgo.Context("with setting ReservedFor", func() { - driver := NewDriver(f, nodes, networkResources) - driver.parameterMode = parameterModeClassicDRA - b := newBuilder(f, driver) - preScheduledTests(b, driver) - claimTests(b, driver) - }) - - ginkgo.Context("without setting ReservedFor", func() { - driver := NewDriver(f, nodes, func() app.Resources { - resources := networkResources() - resources.DontSetReservedFor = true - return resources - }) - driver.parameterMode = parameterModeClassicDRA - b := newBuilder(f, driver) - preScheduledTests(b, driver) - claimTests(b, driver) - }) - }) - - ginkgo.Context("cluster with structured parameters", func() { + ginkgo.Context("cluster", func() { nodes := NewNodes(f, 1, 4) driver := NewDriver(f, nodes, perNode(1, nodes)) @@ -1441,16 +1138,11 @@ func (b *builder) class() *resourceapi.DeviceClass { Name: b.className(), }, } - switch b.driver.parameterMode { - case parameterModeClassicDRA: - class.Spec.SuitableNodes = b.nodeSelector() - case parameterModeStructured: - class.Spec.Selectors = []resourceapi.DeviceSelector{{ - CEL: &resourceapi.CELDeviceSelector{ - Expression: fmt.Sprintf(`device.driver == "%s"`, b.driver.Name), - }, - }} - } + class.Spec.Selectors = []resourceapi.DeviceSelector{{ + CEL: &resourceapi.CELDeviceSelector{ + Expression: fmt.Sprintf(`device.driver == "%s"`, b.driver.Name), + }, + }} if b.classParameters != "" { class.Spec.Config = []resourceapi.DeviceClassConfiguration{{ DeviceConfiguration: resourceapi.DeviceConfiguration{ @@ -1464,24 +1156,6 @@ func (b *builder) class() *resourceapi.DeviceClass { return class } -// nodeSelector returns a node selector that matches all nodes on which the -// kubelet plugin was deployed. 
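With spec.suitableNodes gone, the builder's class() above selects devices purely through a CEL expression on the driver. A small, self-contained sketch of the DeviceClass shape this produces, assuming a hypothetical driver name "example.k8s.io":

    package main

    import (
        "fmt"

        resourceapi "k8s.io/api/resource/v1alpha3"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func main() {
        driverName := "example.k8s.io" // illustrative driver name

        // Devices are matched by driver via CEL instead of restricting the
        // class to suitable nodes.
        class := &resourceapi.DeviceClass{
            ObjectMeta: metav1.ObjectMeta{Name: "example-class"},
            Spec: resourceapi.DeviceClassSpec{
                Selectors: []resourceapi.DeviceSelector{{
                    CEL: &resourceapi.CELDeviceSelector{
                        Expression: fmt.Sprintf(`device.driver == "%s"`, driverName),
                    },
                }},
            },
        }

        fmt.Println(class.Name, class.Spec.Selectors[0].CEL.Expression)
    }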
-func (b *builder) nodeSelector() *v1.NodeSelector { - return &v1.NodeSelector{ - NodeSelectorTerms: []v1.NodeSelectorTerm{ - { - MatchExpressions: []v1.NodeSelectorRequirement{ - { - Key: "kubernetes.io/hostname", - Operator: v1.NodeSelectorOpIn, - Values: b.driver.Nodenames(), - }, - }, - }, - }, - } -} - // externalClaim returns external resource claim // that test pods can reference func (b *builder) externalClaim() *resourceapi.ResourceClaim { @@ -1521,10 +1195,6 @@ func (b *builder) claimSpec() resourceapi.ResourceClaimSpec { }, } - if b.driver.parameterMode == parameterModeClassicDRA { - spec.Controller = b.driver.Name - } - return spec } diff --git a/test/e2e/dra/kind-classic-dra.yaml b/test/e2e/dra/kind-classic-dra.yaml deleted file mode 100644 index 0656acd5a66..00000000000 --- a/test/e2e/dra/kind-classic-dra.yaml +++ /dev/null @@ -1,45 +0,0 @@ -kind: Cluster -apiVersion: kind.x-k8s.io/v1alpha4 -containerdConfigPatches: -# Enable CDI as described in -# https://github.com/container-orchestrated-devices/container-device-interface#containerd-configuration -- |- - [plugins."io.containerd.grpc.v1.cri"] - enable_cdi = true -nodes: -- role: control-plane - kubeadmConfigPatches: - - | - kind: ClusterConfiguration - scheduler: - extraArgs: - v: "5" - vmodule: "allocator=6,dynamicresources=6" # structured/allocator.go, DRA scheduler plugin - controllerManager: - extraArgs: - v: "5" - apiServer: - extraArgs: - runtime-config: "resource.k8s.io/v1alpha3=true" - - | - kind: InitConfiguration - nodeRegistration: - kubeletExtraArgs: - v: "5" -- role: worker - kubeadmConfigPatches: - - | - kind: JoinConfiguration - nodeRegistration: - kubeletExtraArgs: - v: "5" -- role: worker - kubeadmConfigPatches: - - | - kind: JoinConfiguration - nodeRegistration: - kubeletExtraArgs: - v: "5" -featureGates: - DynamicResourceAllocation: true - DRAControlPlaneController: true diff --git a/test/e2e/dra/test-driver/app/controller.go b/test/e2e/dra/test-driver/app/controller.go deleted file mode 100644 index dc276b8cea4..00000000000 --- a/test/e2e/dra/test-driver/app/controller.go +++ /dev/null @@ -1,377 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package app does all of the work necessary to configure and run a -// Kubernetes app process. -package app - -import ( - "context" - "errors" - "fmt" - "slices" - "sync" - - v1 "k8s.io/api/core/v1" - resourceapi "k8s.io/api/resource/v1alpha3" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/informers" - "k8s.io/client-go/kubernetes" - listersv1 "k8s.io/client-go/listers/core/v1" - "k8s.io/dynamic-resource-allocation/controller" - "k8s.io/klog/v2" -) - -type Resources struct { - DriverName string - DontSetReservedFor bool - NodeLocal bool - // Nodes is a fixed list of node names on which resources are - // available. Mutually exclusive with NodeLabels. - Nodes []string - // NodeLabels are labels which determine on which nodes resources are - // available. Mutually exclusive with Nodes. 
- NodeLabels labels.Set - - // Number of devices called "device-000", "device-001", ... on each node or in the cluster. - MaxAllocations int - - // AllocateWrapper, if set, gets called for each Allocate call. - AllocateWrapper AllocateWrapperType -} - -func (r Resources) AllNodes(nodeLister listersv1.NodeLister) []string { - if len(r.NodeLabels) > 0 { - // Determine nodes with resources dynamically. - nodes, _ := nodeLister.List(labels.SelectorFromValidatedSet(r.NodeLabels)) - nodeNames := make([]string, 0, len(nodes)) - for _, node := range nodes { - nodeNames = append(nodeNames, node.Name) - } - return nodeNames - } - return r.Nodes -} - -func (r Resources) newAllocation(requestName, node string, config []resourceapi.DeviceAllocationConfiguration) *resourceapi.AllocationResult { - allocation := &resourceapi.AllocationResult{ - Devices: resourceapi.DeviceAllocationResult{ - Results: []resourceapi.DeviceRequestAllocationResult{{ - Driver: r.DriverName, - Pool: "none", - Request: requestName, - Device: "none", - }}, - Config: config, - }, - } - if node == "" && len(r.NodeLabels) > 0 { - // Available on all nodes matching the labels. - var requirements []v1.NodeSelectorRequirement - for key, value := range r.NodeLabels { - requirements = append(requirements, v1.NodeSelectorRequirement{ - Key: key, - Operator: v1.NodeSelectorOpIn, - Values: []string{value}, - }) - } - allocation.NodeSelector = &v1.NodeSelector{ - NodeSelectorTerms: []v1.NodeSelectorTerm{ - { - MatchExpressions: requirements, - }, - }, - } - } else { - var nodes []string - if node != "" { - // Local to one node. - nodes = append(nodes, node) - } else { - // Available on the fixed set of nodes. - nodes = r.Nodes - } - if len(nodes) > 0 { - allocation.NodeSelector = &v1.NodeSelector{ - NodeSelectorTerms: []v1.NodeSelectorTerm{ - { - MatchExpressions: []v1.NodeSelectorRequirement{ - { - Key: "kubernetes.io/hostname", - Operator: v1.NodeSelectorOpIn, - Values: nodes, - }, - }, - }, - }, - } - } - } - - return allocation -} - -type AllocateWrapperType func(ctx context.Context, claimAllocations []*controller.ClaimAllocation, - selectedNode string, - handler func(ctx context.Context, - claimAllocations []*controller.ClaimAllocation, - selectedNode string), -) - -type ExampleController struct { - clientset kubernetes.Interface - nodeLister listersv1.NodeLister - resources Resources - - mutex sync.Mutex - // allocated maps claim.UID to the node (if network-attached) or empty (if not). - allocated map[types.UID]string - // claimsPerNode counts how many claims are currently allocated for a node (non-empty key) - // or allocated for the entire cluster (empty key). Must be kept in sync with allocated. 
- claimsPerNode map[string]int - - numAllocations, numDeallocations int64 -} - -func NewController(clientset kubernetes.Interface, resources Resources) *ExampleController { - c := &ExampleController{ - clientset: clientset, - resources: resources, - - allocated: make(map[types.UID]string), - claimsPerNode: make(map[string]int), - } - return c -} - -func (c *ExampleController) Run(ctx context.Context, workers int) { - informerFactory := informers.NewSharedInformerFactory(c.clientset, 0 /* resync period */) - ctrl := controller.New(ctx, c.resources.DriverName, c, c.clientset, informerFactory) - c.nodeLister = informerFactory.Core().V1().Nodes().Lister() - ctrl.SetReservedFor(!c.resources.DontSetReservedFor) - informerFactory.Start(ctx.Done()) - ctrl.Run(workers) - // If we get here, the context was canceled and we can wait for informer factory goroutines. - informerFactory.Shutdown() -} - -var _ controller.Driver = &ExampleController{} - -// GetNumAllocations returns the number of times that a claim was allocated. -// Idempotent calls to Allocate that do not need to allocate the claim again do -// not contribute to that counter. -func (c *ExampleController) GetNumAllocations() int64 { - c.mutex.Lock() - defer c.mutex.Unlock() - - return c.numAllocations -} - -// GetNumDeallocations returns the number of times that a claim was allocated. -// Idempotent calls to Allocate that do not need to allocate the claim again do -// not contribute to that counter. -func (c *ExampleController) GetNumDeallocations() int64 { - c.mutex.Lock() - defer c.mutex.Unlock() - - return c.numDeallocations -} - -func (c *ExampleController) Allocate(ctx context.Context, claimAllocations []*controller.ClaimAllocation, selectedNode string) { - - if c.resources.AllocateWrapper != nil { - c.resources.AllocateWrapper(ctx, claimAllocations, selectedNode, c.allocateOneByOne) - } else { - c.allocateOneByOne(ctx, claimAllocations, selectedNode) - } - - return -} - -func (c *ExampleController) allocateOneByOne(ctx context.Context, claimAllocations []*controller.ClaimAllocation, selectedNode string) { - for _, ca := range claimAllocations { - allocationResult, err := c.allocateOne(ctx, ca.Claim, ca.DeviceClasses, selectedNode) - if err != nil { - ca.Error = err - continue - } - ca.Allocation = allocationResult - } -} - -// allocate simply copies parameters as JSON map into a ResourceHandle. -func (c *ExampleController) allocateOne(ctx context.Context, claim *resourceapi.ResourceClaim, deviceClasses map[string]*resourceapi.DeviceClass, selectedNode string) (result *resourceapi.AllocationResult, err error) { - logger := klog.LoggerWithValues(klog.LoggerWithName(klog.FromContext(ctx), "Allocate"), "claim", klog.KObj(claim), "uid", claim.UID) - defer func() { - logger.V(3).Info("done", "result", result, "err", err) - }() - - if len(claim.Spec.Devices.Requests) != 1 || - claim.Spec.Devices.Requests[0].DeviceClassName == "" || - claim.Spec.Devices.Requests[0].AllocationMode != resourceapi.DeviceAllocationModeExactCount || - claim.Spec.Devices.Requests[0].Count != 1 { - return nil, errors.New("only claims requesting exactly one device are supported") - } - request := claim.Spec.Devices.Requests[0] - class := deviceClasses[request.DeviceClassName] - if len(request.Selectors) > 0 || - class != nil && len(class.Spec.Selectors) > 0 { - return nil, errors.New("device selectors are not supported") - } - - c.mutex.Lock() - defer c.mutex.Unlock() - - // Already allocated? Then we don't need to count it again. 
- node, alreadyAllocated := c.allocated[claim.UID] - if alreadyAllocated { - // Idempotent result - kind of. We don't check whether - // the parameters changed in the meantime. A real - // driver would have to do that. - logger.V(3).V(3).Info("already allocated") - } else { - logger.V(3).Info("starting", "selectedNode", selectedNode) - nodes := c.resources.AllNodes(c.nodeLister) - if c.resources.NodeLocal { - node = selectedNode - if !slices.Contains(nodes, node) || - c.resources.MaxAllocations > 0 && - c.claimsPerNode[node] >= c.resources.MaxAllocations { - return nil, fmt.Errorf("resources exhausted on node %q", node) - } - } else { - if c.resources.MaxAllocations > 0 && - len(c.allocated) >= c.resources.MaxAllocations { - return nil, errors.New("resources exhausted in the cluster") - } - } - } - - var configs []resourceapi.DeviceAllocationConfiguration - for i, config := range claim.Spec.Devices.Config { - if len(config.Requests) != 0 && - !slices.Contains(config.Requests, request.Name) { - // Does not apply to request. - continue - } - if config.Opaque == nil { - return nil, fmt.Errorf("claim config #%d: only opaque configuration supported", i) - } - if config.Opaque.Driver != c.resources.DriverName { - // Does not apply to driver. - continue - } - // A normal driver would validate the config here. The test - // driver just passes it through. - configs = append(configs, - resourceapi.DeviceAllocationConfiguration{ - Source: resourceapi.AllocationConfigSourceClaim, - DeviceConfiguration: config.DeviceConfiguration, - }, - ) - } - if class != nil { - for i, config := range class.Spec.Config { - if config.Opaque == nil { - return nil, fmt.Errorf("class config #%d: only opaque configuration supported", i) - } - if config.Opaque.Driver != c.resources.DriverName { - // Does not apply to driver. - continue - } - configs = append(configs, - resourceapi.DeviceAllocationConfiguration{ - Source: resourceapi.AllocationConfigSourceClass, - DeviceConfiguration: config.DeviceConfiguration, - }, - ) - } - } - allocation := c.resources.newAllocation(request.Name, node, configs) - if !alreadyAllocated { - c.numAllocations++ - c.allocated[claim.UID] = node - c.claimsPerNode[node]++ - } - return allocation, nil -} - -func (c *ExampleController) Deallocate(ctx context.Context, claim *resourceapi.ResourceClaim) error { - logger := klog.LoggerWithValues(klog.LoggerWithName(klog.FromContext(ctx), "Deallocate"), "claim", klog.KObj(claim), "uid", claim.UID) - c.mutex.Lock() - defer c.mutex.Unlock() - - node, ok := c.allocated[claim.UID] - if !ok { - logger.V(3).Info("already deallocated") - return nil - } - - logger.V(3).Info("done") - c.numDeallocations++ - delete(c.allocated, claim.UID) - c.claimsPerNode[node]-- - return nil -} - -func (c *ExampleController) UnsuitableNodes(ctx context.Context, pod *v1.Pod, claims []*controller.ClaimAllocation, potentialNodes []string) (finalErr error) { - logger := klog.LoggerWithValues(klog.LoggerWithName(klog.FromContext(ctx), "UnsuitableNodes"), "pod", klog.KObj(pod)) - c.mutex.Lock() - defer c.mutex.Unlock() - - logger.V(3).Info("starting", "claims", claims, "potentialNodes", potentialNodes) - defer func() { - // UnsuitableNodes is the same for all claims. - logger.V(3).Info("done", "unsuitableNodes", claims[0].UnsuitableNodes, "err", finalErr) - }() - - if c.resources.MaxAllocations == 0 { - // All nodes are suitable. 
- return nil - } - nodes := c.resources.AllNodes(c.nodeLister) - if c.resources.NodeLocal { - for _, claim := range claims { - claim.UnsuitableNodes = nil - for _, node := range potentialNodes { - // If we have more than one claim, then a - // single pod wants to use all of them. That - // can only work if a node has capacity left - // for all of them. Also, nodes that the driver - // doesn't run on cannot be used. - if !slices.Contains(nodes, node) || - c.claimsPerNode[node]+len(claims) > c.resources.MaxAllocations { - claim.UnsuitableNodes = append(claim.UnsuitableNodes, node) - } - } - } - return nil - } - - allocations := c.claimsPerNode[""] - for _, claim := range claims { - claim.UnsuitableNodes = nil - for _, node := range potentialNodes { - if !slices.Contains(nodes, node) || - allocations+len(claims) > c.resources.MaxAllocations { - claim.UnsuitableNodes = append(claim.UnsuitableNodes, node) - } - } - } - - return nil -} diff --git a/test/e2e/dra/test-driver/app/server.go b/test/e2e/dra/test-driver/app/server.go index cb0cb4c3696..251156064e3 100644 --- a/test/e2e/dra/test-driver/app/server.go +++ b/test/e2e/dra/test-driver/app/server.go @@ -19,8 +19,6 @@ limitations under the License. package app import ( - "context" - "encoding/json" "errors" "fmt" "net" @@ -32,7 +30,6 @@ import ( "path/filepath" "strings" "syscall" - "time" "github.com/spf13/cobra" "k8s.io/component-base/metrics" @@ -48,7 +45,6 @@ import ( "k8s.io/component-base/metrics/legacyregistry" "k8s.io/component-base/term" "k8s.io/dynamic-resource-allocation/kubeletplugin" - "k8s.io/dynamic-resource-allocation/leaderelection" "k8s.io/klog/v2" ) @@ -57,7 +53,6 @@ func NewCommand() *cobra.Command { o := logsapi.NewLoggingConfiguration() var clientset kubernetes.Interface var config *rest.Config - ctx := context.Background() logger := klog.Background() cmd := &cobra.Command{ @@ -73,7 +68,6 @@ func NewCommand() *cobra.Command { kubeconfig := fs.String("kubeconfig", "", "Absolute path to the kube.config file. Either this or KUBECONFIG need to be set if the driver is being run out of cluster.") kubeAPIQPS := fs.Float32("kube-api-qps", 50, "QPS to use while communicating with the kubernetes apiserver.") kubeAPIBurst := fs.Int("kube-api-burst", 100, "Burst to use while communicating with the kubernetes apiserver.") - workers := fs.Int("workers", 10, "Concurrency to process multiple claims") fs = sharedFlagSets.FlagSet("http server") httpEndpoint := fs.String("http-endpoint", "", @@ -84,7 +78,6 @@ func NewCommand() *cobra.Command { fs = sharedFlagSets.FlagSet("CDI") driverNameFlagName := "drivername" driverName := fs.String(driverNameFlagName, "test-driver.cdi.k8s.io", "Resource driver name.") - driverNameFlag := fs.Lookup(driverNameFlagName) fs = sharedFlagSets.FlagSet("other") featureGate := featuregate.NewFeatureGate() @@ -177,89 +170,6 @@ func NewCommand() *cobra.Command { return nil } - controller := &cobra.Command{ - Use: "controller", - Short: "run as resource controller", - Long: "cdi-test-driver controller runs as a resource driver controller.", - Args: cobra.ExactArgs(0), - } - controllerFlagSets := cliflag.NamedFlagSets{} - fs = controllerFlagSets.FlagSet("leader election") - enableLeaderElection := fs.Bool("leader-election", false, - "Enables leader election. If leader election is enabled, additional RBAC rules are required.") - leaderElectionNamespace := fs.String("leader-election-namespace", "", - "Namespace where the leader election resource lives. 
Defaults to the pod namespace if not set.") - leaderElectionLeaseDuration := fs.Duration("leader-election-lease-duration", 15*time.Second, - "Duration, in seconds, that non-leader candidates will wait to force acquire leadership.") - leaderElectionRenewDeadline := fs.Duration("leader-election-renew-deadline", 10*time.Second, - "Duration, in seconds, that the acting leader will retry refreshing leadership before giving up.") - leaderElectionRetryPeriod := fs.Duration("leader-election-retry-period", 5*time.Second, - "Duration, in seconds, the LeaderElector clients should wait between tries of actions.") - fs = controllerFlagSets.FlagSet("controller") - resourceConfig := fs.String("resource-config", "", "A JSON file containing a Resources struct. Defaults are unshared, network-attached resources.") - fs = controller.Flags() - for _, f := range controllerFlagSets.FlagSets { - fs.AddFlagSet(f) - } - - controller.RunE = func(cmd *cobra.Command, args []string) error { - resources := Resources{} - if *resourceConfig != "" { - file, err := os.Open(*resourceConfig) - if err != nil { - return fmt.Errorf("open resource config: %w", err) - } - decoder := json.NewDecoder(file) - decoder.DisallowUnknownFields() - if err := decoder.Decode(&resources); err != nil { - return fmt.Errorf("parse resource config %q: %w", *resourceConfig, err) - } - } - if resources.DriverName == "" || driverNameFlag.Changed { - resources.DriverName = *driverName - } - - run := func() { - controller := NewController(clientset, resources) - controller.Run(ctx, *workers) - } - - if !*enableLeaderElection { - run() - return nil - } - - // This must not change between releases. - lockName := *driverName - - // Create a new clientset for leader election - // to avoid starving it when the normal traffic - // exceeds the QPS+burst limits. 
- leClientset, err := kubernetes.NewForConfig(config) - if err != nil { - return fmt.Errorf("create leaderelection client: %w", err) - } - - le := leaderelection.New(leClientset, lockName, - func(ctx context.Context) { - run() - }, - leaderelection.LeaseDuration(*leaderElectionLeaseDuration), - leaderelection.RenewDeadline(*leaderElectionRenewDeadline), - leaderelection.RetryPeriod(*leaderElectionRetryPeriod), - leaderelection.Namespace(*leaderElectionNamespace), - ) - if *httpEndpoint != "" { - le.PrepareHealthCheck(mux) - } - if err := le.Run(); err != nil { - return fmt.Errorf("leader election failed: %w", err) - } - - return nil - } - cmd.AddCommand(controller) - kubeletPlugin := &cobra.Command{ Use: "kubelet-plugin", Short: "run as kubelet plugin", @@ -324,7 +234,6 @@ func NewCommand() *cobra.Command { children = append(children, child.Use) } cmd.Use += " [shared flags] " + strings.Join(children, "|") - cliflag.SetUsageAndHelpFunc(controller, controllerFlagSets, cols) cliflag.SetUsageAndHelpFunc(kubeletPlugin, kubeletPluginFlagSets, cols) return cmd diff --git a/test/e2e/feature/feature.go b/test/e2e/feature/feature.go index 2e50189b2f7..edc4e701ee2 100644 --- a/test/e2e/feature/feature.go +++ b/test/e2e/feature/feature.go @@ -91,18 +91,6 @@ var ( // TODO: document the feature (owning SIG, when to use this feature for a test) Downgrade = framework.WithFeature(framework.ValidFeatures.Add("Downgrade")) - // owning-sig: sig-node - // kep: https://kep.k8s.io/3063 - // test-infra jobs: - // - "classic-dra" in https://testgrid.k8s.io/sig-node-dynamic-resource-allocation - // - // This label is used for tests which need: - // - the DynamicResourceAllocation *and* DRAControlPlaneController feature gates - // - the resource.k8s.io API group - // - a container runtime where support for CDI (https://github.com/cncf-tags/container-device-interface) - // is enabled such that passing CDI device IDs through CRI fields is supported - DRAControlPlaneController = framework.WithFeature(framework.ValidFeatures.Add("DRAControlPlaneController")) - // owning-sig: sig-node // kep: https://kep.k8s.io/4381 // test-infra jobs: diff --git a/test/featuregates_linter/test_data/versioned_feature_list.yaml b/test/featuregates_linter/test_data/versioned_feature_list.yaml index c0b8a1d17b0..771001f8101 100644 --- a/test/featuregates_linter/test_data/versioned_feature_list.yaml +++ b/test/featuregates_linter/test_data/versioned_feature_list.yaml @@ -352,12 +352,6 @@ lockToDefault: false preRelease: Deprecated version: "1.31" -- name: DRAControlPlaneController - versionedSpecs: - - default: false - lockToDefault: false - preRelease: Alpha - version: "1.26" - name: DynamicResourceAllocation versionedSpecs: - default: false diff --git a/test/integration/apiserver/apply/reset_fields_test.go b/test/integration/apiserver/apply/reset_fields_test.go index 4847446b741..5ac835dd6ba 100644 --- a/test/integration/apiserver/apply/reset_fields_test.go +++ b/test/integration/apiserver/apply/reset_fields_test.go @@ -59,8 +59,7 @@ var resetFieldsStatusData = map[schema.GroupVersionResource]string{ gvr("storage.k8s.io", "v1", "volumeattachments"): `{"status": {"attached": false}}`, gvr("policy", "v1", "poddisruptionbudgets"): `{"status": {"currentHealthy": 25}}`, gvr("policy", "v1beta1", "poddisruptionbudgets"): `{"status": {"currentHealthy": 25}}`, - gvr("resource.k8s.io", "v1alpha3", "podschedulingcontexts"): `{"status": {"resourceClaims": [{"name": "my-claim", "unsuitableNodes": ["node2"]}]}}`, // Not really a conflict with 
status_test.go: Apply just stores both nodes. Conflict testing therefore gets disabled for podschedulingcontexts. - gvr("resource.k8s.io", "v1alpha3", "resourceclaims"): `{"status": {"allocation": {"controller": "other.example.com"}}}`, + gvr("resource.k8s.io", "v1alpha3", "resourceclaims"): `{"status": {"allocation": {"nodeSelector": {"nodeSelectorTerms": [{"matchExpressions": [{"key": "some-label", "operator": "In", "values": ["some-other-value"]}] }]}}}}`, gvr("internal.apiserver.k8s.io", "v1alpha1", "storageversions"): `{"status": {"commonEncodingVersion":"v1","storageVersions":[{"apiServerID":"1","decodableVersions":["v1","v2"],"encodingVersion":"v1"}],"conditions":[{"type":"AllEncodingVersionsEqual","status":"False","lastTransitionTime":"2020-01-01T00:00:00Z","reason":"allEncodingVersionsEqual","message":"all encoding versions are set to v1"}]}}`, // standard for []metav1.Condition gvr("admissionregistration.k8s.io", "v1alpha1", "validatingadmissionpolicies"): `{"status": {"conditions":[{"type":"Accepted","status":"True","lastTransitionTime":"2020-01-01T00:00:00Z","reason":"RuleApplied","message":"Rule was applied"}]}}`, @@ -90,10 +89,6 @@ var noConflicts = map[string]struct{}{ // namespaces only have a spec.finalizers field which is also skipped, // thus it will never have a conflict. "namespaces": {}, - // podschedulingcontexts.status only has a list which contains items with a list, - // therefore apply works because it simply merges either the outer or - // the inner list. - "podschedulingcontexts": {}, } var image2 = image.GetE2EImage(image.Etcd) @@ -152,7 +147,6 @@ var resetFieldsSpecData = map[schema.GroupVersionResource]string{ gvr("awesome.bears.com", "v3", "pandas"): `{"spec": {"replicas": 302}}`, gvr("apiregistration.k8s.io", "v1beta1", "apiservices"): `{"metadata": {"labels": {"a":"c"}}, "spec": {"group": "foo2.com"}}`, gvr("apiregistration.k8s.io", "v1", "apiservices"): `{"metadata": {"labels": {"a":"c"}}, "spec": {"group": "foo2.com"}}`, - gvr("resource.k8s.io", "v1alpha3", "podschedulingcontexts"): `{"spec": {"selectedNode": "node2name"}}`, gvr("resource.k8s.io", "v1alpha3", "deviceclasses"): `{"metadata": {"labels":{"a":"c"}}}`, gvr("resource.k8s.io", "v1alpha3", "resourceclaims"): `{"spec": {"devices": {"requests": [{"name": "req-0", "deviceClassName": "other-class"}]}}}`, // spec is immutable, but that doesn't matter for the test. 
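The replacement fixture keeps only what structured allocation actually publishes: a node selector instead of a controller name. As a reading aid, here is a rough typed equivalent of that JSON payload, assuming the resource.k8s.io/v1alpha3 Go types at the time of this change; it is illustrative, not code taken from the test.

    // Sketch of the reset-fields status payload above as Go structs; field
    // names mirror the JSON and the v1alpha3 API.
    package main

    import (
        v1 "k8s.io/api/core/v1"
        resourceapi "k8s.io/api/resource/v1alpha3"
    )

    var resetFieldsClaimStatus = resourceapi.ResourceClaimStatus{
        Allocation: &resourceapi.AllocationResult{
            // With the control-plane controller gone, the scheduling-relevant
            // output of an allocation is where the devices are usable.
            NodeSelector: &v1.NodeSelector{
                NodeSelectorTerms: []v1.NodeSelectorTerm{{
                    MatchExpressions: []v1.NodeSelectorRequirement{{
                        Key:      "some-label",
                        Operator: v1.NodeSelectorOpIn,
                        Values:   []string{"some-other-value"},
                    }},
                }},
            },
        },
    }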
gvr("resource.k8s.io", "v1alpha3", "resourceclaimtemplates"): `{"spec": {"spec": {"resourceClassName": "class2name"}}}`, diff --git a/test/integration/apiserver/apply/status_test.go b/test/integration/apiserver/apply/status_test.go index c715a96836d..56d8ec2d54f 100644 --- a/test/integration/apiserver/apply/status_test.go +++ b/test/integration/apiserver/apply/status_test.go @@ -52,8 +52,7 @@ var statusData = map[schema.GroupVersionResource]string{ gvr("storage.k8s.io", "v1", "volumeattachments"): `{"status": {"attached": true}}`, gvr("policy", "v1", "poddisruptionbudgets"): `{"status": {"currentHealthy": 5}}`, gvr("policy", "v1beta1", "poddisruptionbudgets"): `{"status": {"currentHealthy": 5}}`, - gvr("resource.k8s.io", "v1alpha3", "podschedulingcontexts"): `{"status": {"resourceClaims": [{"name": "my-claim", "unsuitableNodes": ["node1"]}]}}`, - gvr("resource.k8s.io", "v1alpha3", "resourceclaims"): `{"status": {"allocation": {"controller": "example.com"}}}`, + gvr("resource.k8s.io", "v1alpha3", "resourceclaims"): `{"status": {"allocation": {"nodeSelector": {"nodeSelectorTerms": [{"matchExpressions": [{"key": "some-label", "operator": "In", "values": ["some-value"]}] }]}}}}`, gvr("internal.apiserver.k8s.io", "v1alpha1", "storageversions"): `{"status": {"commonEncodingVersion":"v1","storageVersions":[{"apiServerID":"1","decodableVersions":["v1","v2"],"encodingVersion":"v1"}],"conditions":[{"type":"AllEncodingVersionsEqual","status":"True","lastTransitionTime":"2020-01-01T00:00:00Z","reason":"allEncodingVersionsEqual","message":"all encoding versions are set to v1"}]}}`, // standard for []metav1.Condition gvr("admissionregistration.k8s.io", "v1alpha1", "validatingadmissionpolicies"): `{"status": {"conditions":[{"type":"Accepted","status":"False","lastTransitionTime":"2020-01-01T00:00:00Z","reason":"RuleApplied","message":"Rule was applied"}]}}`, diff --git a/test/integration/etcd/data.go b/test/integration/etcd/data.go index 81279155cca..ad1f71aabdb 100644 --- a/test/integration/etcd/data.go +++ b/test/integration/etcd/data.go @@ -431,10 +431,6 @@ func GetEtcdStorageDataForNamespace(namespace string) map[schema.GroupVersionRes Stub: `{"metadata": {"name": "claimtemplate1name"}, "spec": {"spec": {"devices": {"requests": [{"name": "req-0", "deviceClassName": "example-class", "allocationMode": "ExactCount", "count": 1}]}}}}`, ExpectedEtcdPath: "/registry/resourceclaimtemplates/" + namespace + "/claimtemplate1name", }, - gvr("resource.k8s.io", "v1alpha3", "podschedulingcontexts"): { - Stub: `{"metadata": {"name": "pod1name"}, "spec": {"selectedNode": "node1name", "potentialNodes": ["node1name", "node2name"]}}`, - ExpectedEtcdPath: "/registry/podschedulingcontexts/" + namespace + "/pod1name", - }, gvr("resource.k8s.io", "v1alpha3", "resourceslices"): { Stub: `{"metadata": {"name": "node1slice"}, "spec": {"nodeName": "worker1", "driver": "dra.example.com", "pool": {"name": "worker1", "resourceSliceCount": 1}}}`, ExpectedEtcdPath: "/registry/resourceslices/node1slice", diff --git a/test/integration/scheduler/scheduler_test.go b/test/integration/scheduler/scheduler_test.go index b038f82eb21..6bf07b477e1 100644 --- a/test/integration/scheduler/scheduler_test.go +++ b/test/integration/scheduler/scheduler_test.go @@ -21,34 +21,25 @@ package scheduler import ( "context" "fmt" - "net/http" - "strings" - "sync/atomic" "testing" "time" "github.com/google/go-cmp/cmp" v1 "k8s.io/api/core/v1" - resourceapi "k8s.io/api/resource/v1alpha3" - apierrors "k8s.io/apimachinery/pkg/api/errors" 
"k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/watch" - utilfeature "k8s.io/apiserver/pkg/util/feature" clientset "k8s.io/client-go/kubernetes" corelisters "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" - featuregatetesting "k8s.io/component-base/featuregate/testing" configv1 "k8s.io/kube-scheduler/config/v1" - "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/scheduler" configtesting "k8s.io/kubernetes/pkg/scheduler/apis/config/testing" st "k8s.io/kubernetes/pkg/scheduler/testing" testutils "k8s.io/kubernetes/test/integration/util" - "k8s.io/kubernetes/test/utils/format" "k8s.io/utils/pointer" ) @@ -647,117 +638,3 @@ func TestNodeEvents(t *testing.T) { } } - -// TestPodSchedulingContextSSA checks that the dynamicresources plugin falls -// back to SSA successfully when the normal Update call encountered -// a conflict. -// -// This is an integration test because: -// - Unit testing does not cover RBAC rules. -// - Triggering this particular race is harder in E2E testing -// and harder to verify (needs apiserver metrics and there's -// no standard API for those). -func TestPodSchedulingContextSSA(t *testing.T) { - featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DynamicResourceAllocation, true) - featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.DRAControlPlaneController, true) - - testCtx := testutils.InitTestAPIServer(t, "podschedulingcontext-ssa", nil) - testCtx.DisableEventSink = true - testCtx = testutils.InitTestSchedulerWithOptions(t, testCtx, 0) - testutils.SyncSchedulerInformerFactory(testCtx) - go testCtx.Scheduler.Run(testCtx.SchedulerCtx) - - // Set up enough objects that the scheduler will start trying to - // schedule the pod and create the PodSchedulingContext. - nodeRes := map[v1.ResourceName]string{ - v1.ResourcePods: "32", - v1.ResourceCPU: "30m", - v1.ResourceMemory: "30", - } - for _, name := range []string{"node-a", "node-b"} { - if _, err := testutils.CreateNode(testCtx.ClientSet, st.MakeNode().Name(name).Capacity(nodeRes).Obj()); err != nil { - t.Fatalf("Failed to create node: %v", err) - } - } - - claim := &resourceapi.ResourceClaim{ - ObjectMeta: metav1.ObjectMeta{ - Name: "my-claim", - Namespace: testCtx.NS.Name, - }, - Spec: resourceapi.ResourceClaimSpec{ - Controller: "dra.example.com", - }, - } - if _, err := testCtx.ClientSet.ResourceV1alpha3().ResourceClaims(claim.Namespace).Create(testCtx.Ctx, claim, metav1.CreateOptions{}); err != nil { - t.Fatalf("Failed to create claim: %v", err) - } - - podConf := testutils.PausePodConfig{ - Name: "testpod", - Namespace: testCtx.NS.Name, - } - pod := testutils.InitPausePod(&podConf) - podClaimName := "myclaim" - pod.Spec.Containers[0].Resources.Claims = []v1.ResourceClaim{{Name: podClaimName}} - pod.Spec.ResourceClaims = []v1.PodResourceClaim{{Name: podClaimName, ResourceClaimName: &claim.Name}} - if _, err := testCtx.ClientSet.CoreV1().Pods(pod.Namespace).Create(testCtx.Ctx, pod, metav1.CreateOptions{}); err != nil { - t.Fatalf("Failed to create pod: %v", err) - } - - // Check that the PodSchedulingContext exists and has a selected node. 
- var schedulingCtx *resourceapi.PodSchedulingContext - if err := wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Microsecond, 30*time.Second, true, - func(context.Context) (bool, error) { - var err error - schedulingCtx, err = testCtx.ClientSet.ResourceV1alpha3().PodSchedulingContexts(pod.Namespace).Get(testCtx.Ctx, pod.Name, metav1.GetOptions{}) - if apierrors.IsNotFound(err) { - return false, nil - } - if err == nil && schedulingCtx.Spec.SelectedNode != "" { - return true, nil - } - return false, err - }); err != nil { - t.Fatalf("Failed while waiting for PodSchedulingContext with selected node: %v\nLast PodSchedulingContext:\n%s", err, format.Object(schedulingCtx, 1)) - } - - // Force the plugin to use SSA. - var podSchedulingContextPatchCounter atomic.Int64 - roundTrip := testutils.RoundTripWrapper(func(transport http.RoundTripper, req *http.Request) (*http.Response, error) { - if strings.HasPrefix(req.URL.Path, "/apis/resource.k8s.io/") && - strings.HasSuffix(req.URL.Path, "/podschedulingcontexts/"+pod.Name) { - switch req.Method { - case http.MethodPut, http.MethodPost: - return &http.Response{ - Status: fmt.Sprintf("%d %s", http.StatusConflict, metav1.StatusReasonConflict), - StatusCode: http.StatusConflict, - }, nil - case http.MethodPatch: - podSchedulingContextPatchCounter.Add(1) - } - } - return transport.RoundTrip(req) - }) - testCtx.RoundTrip.Store(&roundTrip) - - // Now force the scheduler to update the PodSchedulingContext by setting UnsuitableNodes so that - // the selected node is not suitable. - schedulingCtx.Status.ResourceClaims = []resourceapi.ResourceClaimSchedulingStatus{{ - Name: podClaimName, - UnsuitableNodes: []string{schedulingCtx.Spec.SelectedNode}, - }} - - if _, err := testCtx.ClientSet.ResourceV1alpha3().PodSchedulingContexts(pod.Namespace).UpdateStatus(testCtx.Ctx, schedulingCtx, metav1.UpdateOptions{}); err != nil { - t.Fatalf("Unexpected PodSchedulingContext status update error: %v", err) - } - - // We know that the scheduler has to use SSA because above we inject a conflict - // error whenever it tries to use a plain update. We just need to wait for it... 
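The transport-level trick the removed test relied on, rejecting plain updates so that only the PATCH/SSA fallback can succeed, is independent of classic DRA and remains useful for exercising apply fallbacks elsewhere. A standalone sketch, with the type name and path filter invented for illustration:

    // conflictInjector turns every PUT/POST for a matching path into a 409 so
    // that a client under test is forced onto its PATCH/SSA fallback; PATCH
    // calls are counted instead of blocked.
    package main

    import (
        "fmt"
        "net/http"
        "strings"
        "sync/atomic"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    type conflictInjector struct {
        delegate     http.RoundTripper
        pathSuffix   string
        patchCounter atomic.Int64
    }

    func (c *conflictInjector) RoundTrip(req *http.Request) (*http.Response, error) {
        if strings.HasSuffix(req.URL.Path, c.pathSuffix) {
            switch req.Method {
            case http.MethodPut, http.MethodPost:
                return &http.Response{
                    Status:     fmt.Sprintf("%d %s", http.StatusConflict, metav1.StatusReasonConflict),
                    StatusCode: http.StatusConflict,
                    Body:       http.NoBody,
                    Request:    req,
                }, nil
            case http.MethodPatch:
                c.patchCounter.Add(1)
            }
        }
        return c.delegate.RoundTrip(req)
    }

In a test such a wrapper would normally be installed through the rest.Config's WrapTransport hook, which is what the integration test utilities' RoundTripWrapper does under the hood.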
- if err := wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Microsecond, time.Minute, true, - func(context.Context) (bool, error) { - return podSchedulingContextPatchCounter.Load() > 0, nil - }); err != nil { - t.Fatalf("Failed while waiting for PodSchedulingContext Patch: %v", err) - } -} diff --git a/test/integration/scheduler_perf/config/dra/deviceclass-structured.yaml b/test/integration/scheduler_perf/config/dra/deviceclass-structured.yaml deleted file mode 100644 index 873fd282bd2..00000000000 --- a/test/integration/scheduler_perf/config/dra/deviceclass-structured.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: resource.k8s.io/v1alpha3 -kind: DeviceClass -metadata: - name: test-class -spec: - selectors: - - cel: - expression: device.driver == "test-driver.cdi.k8s.io" diff --git a/test/integration/scheduler_perf/config/dra/deviceclass.yaml b/test/integration/scheduler_perf/config/dra/deviceclass.yaml index 62417231b9a..873fd282bd2 100644 --- a/test/integration/scheduler_perf/config/dra/deviceclass.yaml +++ b/test/integration/scheduler_perf/config/dra/deviceclass.yaml @@ -2,3 +2,7 @@ apiVersion: resource.k8s.io/v1alpha3 kind: DeviceClass metadata: name: test-class +spec: + selectors: + - cel: + expression: device.driver == "test-driver.cdi.k8s.io" diff --git a/test/integration/scheduler_perf/config/dra/resourceclaim-structured.yaml b/test/integration/scheduler_perf/config/dra/resourceclaim-structured.yaml deleted file mode 100644 index 0207afc2a51..00000000000 --- a/test/integration/scheduler_perf/config/dra/resourceclaim-structured.yaml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: resource.k8s.io/v1alpha3 -kind: ResourceClaim -metadata: - name: test-claim-{{.Index}} -spec: - devices: - requests: - - name: req-0 - deviceClassName: test-class diff --git a/test/integration/scheduler_perf/config/dra/resourceclaim.yaml b/test/integration/scheduler_perf/config/dra/resourceclaim.yaml index 510a5a1ba2c..0207afc2a51 100644 --- a/test/integration/scheduler_perf/config/dra/resourceclaim.yaml +++ b/test/integration/scheduler_perf/config/dra/resourceclaim.yaml @@ -3,7 +3,6 @@ kind: ResourceClaim metadata: name: test-claim-{{.Index}} spec: - controller: test-driver.cdi.k8s.io devices: requests: - name: req-0 diff --git a/test/integration/scheduler_perf/config/dra/resourceclaimtemplate-structured.yaml b/test/integration/scheduler_perf/config/dra/resourceclaimtemplate-structured.yaml deleted file mode 100644 index 77aa178afa5..00000000000 --- a/test/integration/scheduler_perf/config/dra/resourceclaimtemplate-structured.yaml +++ /dev/null @@ -1,10 +0,0 @@ -apiVersion: resource.k8s.io/v1alpha3 -kind: ResourceClaimTemplate -metadata: - name: test-claim-template -spec: - spec: - devices: - requests: - - name: req-0 - deviceClassName: test-class diff --git a/test/integration/scheduler_perf/config/dra/resourceclaimtemplate.yaml b/test/integration/scheduler_perf/config/dra/resourceclaimtemplate.yaml index 9b94a70b987..77aa178afa5 100644 --- a/test/integration/scheduler_perf/config/dra/resourceclaimtemplate.yaml +++ b/test/integration/scheduler_perf/config/dra/resourceclaimtemplate.yaml @@ -4,7 +4,6 @@ metadata: name: test-claim-template spec: spec: - controller: test-driver.cdi.k8s.io devices: requests: - name: req-0 diff --git a/test/integration/scheduler_perf/config/performance-config.yaml b/test/integration/scheduler_perf/config/performance-config.yaml index bc32fba4664..b727f0f0cc6 100644 --- a/test/integration/scheduler_perf/config/performance-config.yaml +++ 
b/test/integration/scheduler_perf/config/performance-config.yaml @@ -1025,153 +1025,9 @@ measurePods: 4000 # SchedulingWithResourceClaimTemplate uses a ResourceClaimTemplate -# and dynamically created ResourceClaim instances for each pod. -- name: SchedulingWithResourceClaimTemplate - featureGates: - DRAControlPlaneController: true - DynamicResourceAllocation: true - # SchedulerQueueingHints: true - workloadTemplate: - - opcode: createNodes - countParam: $nodesWithoutDRA - - opcode: createNodes - nodeTemplatePath: config/dra/node-with-dra-test-driver.yaml - countParam: $nodesWithDRA - - opcode: createResourceDriver - driverName: test-driver.cdi.k8s.io - nodes: scheduler-perf-dra-* - maxClaimsPerNodeParam: $maxClaimsPerNode - - opcode: createAny - templatePath: config/dra/deviceclass.yaml - - opcode: createAny - templatePath: config/dra/resourceclaimtemplate.yaml - namespace: init - - opcode: createPods - namespace: init - countParam: $initPods - podTemplatePath: config/dra/pod-with-claim-template.yaml - - opcode: createAny - templatePath: config/dra/resourceclaimtemplate.yaml - namespace: test - - opcode: createPods - namespace: test - countParam: $measurePods - podTemplatePath: config/dra/pod-with-claim-template.yaml - collectMetrics: true - workloads: - - name: fast - labels: [integration-test, performance, short] - params: - # This testcase runs through all code paths without - # taking too long overall. - nodesWithDRA: 1 - nodesWithoutDRA: 1 - initPods: 0 - measurePods: 10 - maxClaimsPerNode: 10 - - name: 2000pods_100nodes - params: - # In this testcase, the number of nodes is smaller - # than the limit for the PodScheduling slices. - nodesWithDRA: 100 - nodesWithoutDRA: 0 - initPods: 1000 - measurePods: 1000 - maxClaimsPerNode: 20 - - name: 2000pods_200nodes - params: - # In this testcase, the driver and scheduler must - # truncate the PotentialNodes and UnsuitableNodes - # slices. - nodesWithDRA: 200 - nodesWithoutDRA: 0 - initPods: 1000 - measurePods: 1000 - maxClaimsPerNode: 10 - -# This similar to SchedulingWithResourceClaimTemplate, except -# that it uses four claims per pod, from two different drivers. -# This emphasizes a bit more the complexity of collaborative -# scheduling via PodSchedulingContext. 
-- name: SchedulingWithMultipleResourceClaims - featureGates: - DRAControlPlaneController: true - DynamicResourceAllocation: true - # SchedulerQueueingHints: true - workloadTemplate: - - opcode: createNodes - countParam: $nodesWithoutDRA - - opcode: createNodes - nodeTemplatePath: config/dra/node-with-dra-test-driver.yaml - countParam: $nodesWithDRA - - opcode: createResourceDriver - driverName: test-driver.cdi.k8s.io - nodes: scheduler-perf-dra-* - maxClaimsPerNodeParam: $maxClaimsPerNode - - opcode: createResourceDriver - driverName: another-test-driver.cdi.k8s.io - nodes: scheduler-perf-dra-* - maxClaimsPerNodeParam: $maxClaimsPerNode - - opcode: createAny - templatePath: config/dra/deviceclass.yaml - - opcode: createAny - templatePath: config/dra/resourceclaimtemplate.yaml - namespace: init - - opcode: createAny - templatePath: config/dra/another-resourceclaimtemplate.yaml - namespace: init - - opcode: createPods - namespace: init - countParam: $initPods - podTemplatePath: config/dra/pod-with-many-claim-templates.yaml - - opcode: createAny - templatePath: config/dra/resourceclaimtemplate.yaml - namespace: test - - opcode: createAny - templatePath: config/dra/another-resourceclaimtemplate.yaml - namespace: test - - opcode: createPods - namespace: test - countParam: $measurePods - podTemplatePath: config/dra/pod-with-many-claim-templates.yaml - collectMetrics: true - workloads: - - name: fast - labels: [integration-test, performance, short] - params: - # This testcase runs through all code paths without - # taking too long overall. - nodesWithDRA: 1 - nodesWithoutDRA: 1 - initPods: 0 - measurePods: 1 - maxClaimsPerNode: 20 - - name: 2000pods_100nodes - params: - # In this testcase, the number of nodes is smaller - # than the limit for the PodScheduling slices. - nodesWithDRA: 100 - nodesWithoutDRA: 0 - initPods: 1000 - measurePods: 1000 - maxClaimsPerNode: 40 - - name: 2000pods_200nodes - params: - # In this testcase, the driver and scheduler must - # truncate the PotentialNodes and UnsuitableNodes - # slices. - nodesWithDRA: 200 - nodesWithoutDRA: 0 - initPods: 1000 - measurePods: 1000 - maxClaimsPerNode: 20 - -# SchedulingWithResourceClaimTemplateStructured uses a ResourceClaimTemplate # and dynamically creates ResourceClaim instances for each pod. Node, pod and # device counts are chosen so that the cluster gets filled up completely. -# -# The driver uses structured parameters. 
-- name: SchedulingWithResourceClaimTemplateStructured +- name: SchedulingWithResourceClaimTemplate featureGates: DynamicResourceAllocation: true # SchedulerQueueingHints: true @@ -1185,18 +1041,17 @@ driverName: test-driver.cdi.k8s.io nodes: scheduler-perf-dra-* maxClaimsPerNodeParam: $maxClaimsPerNode - structuredParameters: true - opcode: createAny - templatePath: config/dra/deviceclass-structured.yaml + templatePath: config/dra/deviceclass.yaml - opcode: createAny - templatePath: config/dra/resourceclaimtemplate-structured.yaml + templatePath: config/dra/resourceclaimtemplate.yaml namespace: init - opcode: createPods namespace: init countParam: $initPods podTemplatePath: config/dra/pod-with-claim-template.yaml - opcode: createAny - templatePath: config/dra/resourceclaimtemplate-structured.yaml + templatePath: config/dra/resourceclaimtemplate.yaml namespace: test - opcode: createPods namespace: test @@ -1236,16 +1091,14 @@ measurePods: 2500 maxClaimsPerNode: 10 -# SteadyStateResourceClaimTemplateStructured uses a ResourceClaimTemplate and +# SteadyStateResourceClaimTemplate uses a ResourceClaimTemplate and # dynamically creates ResourceClaim instances for each pod. It creates ten # pods, waits for them to be scheduled, deletes them, and starts again, # so the cluster remains at the same level of utilization. # # The number of already allocated claims can be varied, thus simulating # various degrees of pre-existing resource utilization. -# -# The driver uses structured parameters. -- name: SteadyStateClusterResourceClaimTemplateStructured +- name: SteadyStateClusterResourceClaimTemplate featureGates: DynamicResourceAllocation: true # SchedulerQueueingHints: true @@ -1259,17 +1112,16 @@ driverName: test-driver.cdi.k8s.io nodes: scheduler-perf-dra-* maxClaimsPerNodeParam: $maxClaimsPerNode - structuredParameters: true - opcode: createAny - templatePath: config/dra/deviceclass-structured.yaml + templatePath: config/dra/deviceclass.yaml - opcode: createAny - templatePath: config/dra/resourceclaim-structured.yaml + templatePath: config/dra/resourceclaim.yaml countParam: $initClaims namespace: init - opcode: allocResourceClaims namespace: init - opcode: createAny - templatePath: config/dra/resourceclaimtemplate-structured.yaml + templatePath: config/dra/resourceclaimtemplate.yaml namespace: test - opcode: createPods namespace: test @@ -1358,9 +1210,7 @@ # SchedulingWithResourceClaimTemplate uses ResourceClaims # with deterministic names that are shared between pods. # There is a fixed ratio of 1:5 between claims and pods. -# -# The driver uses structured parameters. 
-- name: SchedulingWithResourceClaimStructured +- name: SchedulingWithResourceClaim featureGates: DynamicResourceAllocation: true # SchedulerQueueingHints: true @@ -1374,11 +1224,10 @@ driverName: test-driver.cdi.k8s.io nodes: scheduler-perf-dra-* maxClaimsPerNodeParam: $maxClaimsPerNode - structuredParameters: true - opcode: createAny - templatePath: config/dra/deviceclass-structured.yaml + templatePath: config/dra/deviceclass.yaml - opcode: createAny - templatePath: config/dra/resourceclaim-structured.yaml + templatePath: config/dra/resourceclaim.yaml namespace: init countParam: $initClaims - opcode: createPods @@ -1386,7 +1235,7 @@ countParam: $initPods podTemplatePath: config/dra/pod-with-claim-ref.yaml - opcode: createAny - templatePath: config/dra/resourceclaim-structured.yaml + templatePath: config/dra/resourceclaim.yaml namespace: test countParam: $measureClaims - opcode: createPods diff --git a/test/integration/scheduler_perf/dra.go b/test/integration/scheduler_perf/dra.go index 8bf0d93e9c6..be884c2c8ee 100644 --- a/test/integration/scheduler_perf/dra.go +++ b/test/integration/scheduler_perf/dra.go @@ -17,7 +17,6 @@ limitations under the License. package benchmark import ( - "context" "fmt" "math/rand/v2" "path/filepath" @@ -34,9 +33,7 @@ import ( "k8s.io/client-go/informers" "k8s.io/client-go/util/workqueue" "k8s.io/dynamic-resource-allocation/structured" - "k8s.io/klog/v2" "k8s.io/kubernetes/pkg/scheduler/util/assumecache" - draapp "k8s.io/kubernetes/test/e2e/dra/test-driver/app" "k8s.io/kubernetes/test/utils/ktesting" "k8s.io/utils/ptr" ) @@ -134,11 +131,6 @@ type createResourceDriverOp struct { MaxClaimsPerNodeParam string // Nodes matching this glob pattern have resources managed by the driver. Nodes string - // StructuredParameters is true if the controller that is built into the scheduler - // is used and the control-plane controller is not needed. - // Because we don't run the kubelet plugin, ResourceSlices must - // get created for all nodes. - StructuredParameters bool } var _ realOp = &createResourceDriverOp{} @@ -176,13 +168,7 @@ func (op *createResourceDriverOp) requiredNamespaces() []string { return nil } func (op *createResourceDriverOp) run(tCtx ktesting.TContext) { tCtx.Logf("creating resource driver %q for nodes matching %q", op.DriverName, op.Nodes) - // Start the controller side of the DRA test driver such that it simulates - // per-node resources. 
- resources := draapp.Resources{ - DriverName: op.DriverName, - NodeLocal: true, - MaxAllocations: op.MaxClaimsPerNode, - } + var driverNodes []string nodes, err := tCtx.Client().CoreV1().Nodes().List(tCtx, metav1.ListOptions{}) if err != nil { @@ -194,42 +180,21 @@ func (op *createResourceDriverOp) run(tCtx ktesting.TContext) { tCtx.Fatalf("matching glob pattern %q against node name %q: %v", op.Nodes, node.Name, err) } if match { - resources.Nodes = append(resources.Nodes, node.Name) + driverNodes = append(driverNodes, node.Name) } } - if op.StructuredParameters { - for _, nodeName := range resources.Nodes { - slice := resourceSlice(op.DriverName, nodeName, op.MaxClaimsPerNode) - _, err := tCtx.Client().ResourceV1alpha3().ResourceSlices().Create(tCtx, slice, metav1.CreateOptions{}) - tCtx.ExpectNoError(err, "create node resource slice") - } - tCtx.CleanupCtx(func(tCtx ktesting.TContext) { - err := tCtx.Client().ResourceV1alpha3().ResourceSlices().DeleteCollection(tCtx, - metav1.DeleteOptions{}, - metav1.ListOptions{FieldSelector: resourceapi.ResourceSliceSelectorDriver + "=" + op.DriverName}, - ) - tCtx.ExpectNoError(err, "delete node resource slices") - }) - // No need for the controller. - return + for _, nodeName := range driverNodes { + slice := resourceSlice(op.DriverName, nodeName, op.MaxClaimsPerNode) + _, err := tCtx.Client().ResourceV1alpha3().ResourceSlices().Create(tCtx, slice, metav1.CreateOptions{}) + tCtx.ExpectNoError(err, "create node resource slice") } - - controller := draapp.NewController(tCtx.Client(), resources) - ctx, cancel := context.WithCancel(tCtx) - var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() - ctx := klog.NewContext(ctx, klog.LoggerWithName(klog.FromContext(ctx), op.DriverName)) - controller.Run(ctx, 5 /* workers */) - }() - tCtx.Cleanup(func() { - tCtx.Logf("stopping resource driver %q", op.DriverName) - // We must cancel before waiting. - cancel() - wg.Wait() - tCtx.Logf("stopped resource driver %q", op.DriverName) + tCtx.CleanupCtx(func(tCtx ktesting.TContext) { + err := tCtx.Client().ResourceV1alpha3().ResourceSlices().DeleteCollection(tCtx, + metav1.DeleteOptions{}, + metav1.ListOptions{FieldSelector: resourceapi.ResourceSliceSelectorDriver + "=" + op.DriverName}, + ) + tCtx.ExpectNoError(err, "delete node resource slices") }) } diff --git a/test/integration/util/util.go b/test/integration/util/util.go index 243ff5b1728..626a1a4191f 100644 --- a/test/integration/util/util.go +++ b/test/integration/util/util.go @@ -130,10 +130,9 @@ func StartScheduler(ctx context.Context, clientSet clientset.Interface, kubeConf func CreateResourceClaimController(ctx context.Context, tb ktesting.TB, clientSet clientset.Interface, informerFactory informers.SharedInformerFactory) func() { podInformer := informerFactory.Core().V1().Pods() - schedulingInformer := informerFactory.Resource().V1alpha3().PodSchedulingContexts() claimInformer := informerFactory.Resource().V1alpha3().ResourceClaims() claimTemplateInformer := informerFactory.Resource().V1alpha3().ResourceClaimTemplates() - claimController, err := resourceclaim.NewController(klog.FromContext(ctx), clientSet, podInformer, schedulingInformer, claimInformer, claimTemplateInformer) + claimController, err := resourceclaim.NewController(klog.FromContext(ctx), clientSet, podInformer, claimInformer, claimTemplateInformer) if err != nil { tb.Fatalf("Error creating claim controller: %v", err) }
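With the control-plane controller gone, the scheduler_perf driver op publishes node-local ResourceSlices up front and cleans them up via the driver field selector. The resourceSlice helper it calls is defined elsewhere in that package; a plausible shape for such a slice, inferred from the v1alpha3 stub used earlier in this patch, might look like this:

    // Hypothetical sketch of a node-local slice advertising `capacity` devices
    // for one driver; Driver, NodeName and Pool follow the v1alpha3 etcd stub
    // above, while the Device entries are illustrative rather than copied from
    // the actual scheduler_perf helper.
    package main

    import (
        "fmt"

        resourceapi "k8s.io/api/resource/v1alpha3"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func exampleResourceSlice(driverName, nodeName string, capacity int) *resourceapi.ResourceSlice {
        slice := &resourceapi.ResourceSlice{
            ObjectMeta: metav1.ObjectMeta{
                // One slice per node; the pool mirrors the node.
                Name: nodeName + "-" + driverName,
            },
            Spec: resourceapi.ResourceSliceSpec{
                Driver:   driverName,
                NodeName: nodeName,
                Pool: resourceapi.ResourcePool{
                    Name:               nodeName,
                    ResourceSliceCount: 1,
                },
            },
        }
        for i := 0; i < capacity; i++ {
            slice.Spec.Devices = append(slice.Spec.Devices, resourceapi.Device{
                Name:  fmt.Sprintf("device-%d", i),
                Basic: &resourceapi.BasicDevice{},
            })
        }
        return slice
    }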