diff --git a/pkg/kubelet/cm/dra/manager.go b/pkg/kubelet/cm/dra/manager.go index 54e5a550539..fcc8b30a2a3 100644 --- a/pkg/kubelet/cm/dra/manager.go +++ b/pkg/kubelet/cm/dra/manager.go @@ -28,6 +28,7 @@ import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/dynamic-resource-allocation/resourceclaim" "k8s.io/klog/v2" + drapb "k8s.io/kubelet/pkg/apis/dra/v1alpha3" dra "k8s.io/kubernetes/pkg/kubelet/cm/dra/plugin" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" ) @@ -62,10 +63,12 @@ func NewManagerImpl(kubeClient clientset.Interface, stateFileDirectory string) ( } // PrepareResources attempts to prepare all of the required resource -// plugin resources for the input container, issue an NodePrepareResource rpc request +// plugin resources for the input container, issue NodePrepareResources rpc requests // for each new resource requirement, process their responses and update the cached // containerResources on success. func (m *ManagerImpl) PrepareResources(pod *v1.Pod) error { + batches := make(map[string][]*drapb.Claim) + claimInfos := make(map[types.UID]*ClaimInfo) for i := range pod.Spec.ResourceClaims { podClaim := &pod.Spec.ResourceClaims[i] klog.V(3).InfoS("Processing resource", "podClaim", podClaim.Name, "pod", pod.Name) @@ -139,7 +142,7 @@ func (m *ManagerImpl) PrepareResources(pod *v1.Pod) error { sets.New(string(pod.UID)), ) - // Walk through each resourceHandle + // Loop through all plugins and prepare for calling NodePrepareResources. 
for _, resourceHandle := range resourceHandles { // If no DriverName is provided in the resourceHandle, we // use the DriverName from the status @@ -147,48 +150,71 @@ func (m *ManagerImpl) PrepareResources(pod *v1.Pod) error { if pluginName == "" { pluginName = resourceClaim.Status.DriverName } - - // Call NodePrepareResource RPC for each resourceHandle - client, err := dra.NewDRAPluginClient(pluginName) - if err != nil { - return fmt.Errorf("failed to get DRA Plugin client for plugin name %s, err=%+v", pluginName, err) + claim := &drapb.Claim{ + Namespace: resourceClaim.Namespace, + Uid: string(resourceClaim.UID), + Name: resourceClaim.Name, + ResourceHandle: resourceHandle.Data, } - response, err := client.NodePrepareResource( - context.Background(), - resourceClaim.Namespace, - resourceClaim.UID, - resourceClaim.Name, - resourceHandle.Data) - if err != nil { - return fmt.Errorf("NodePrepareResource failed, claim UID: %s, claim name: %s, resource handle: %s, err: %+v", - resourceClaim.UID, resourceClaim.Name, resourceHandle.Data, err) - } - klog.V(3).InfoS("NodePrepareResource succeeded", "pluginName", pluginName, "response", response) + batches[pluginName] = append(batches[pluginName], claim) + } + claimInfos[resourceClaim.UID] = claimInfo + } - // Add the CDI Devices returned by NodePrepareResource to + // Call NodePrepareResources for all claims in each batch. + // If there is any error, processing gets aborted. + // We could try to continue, but that would make the code more complex. + for pluginName, claims := range batches { + // Call NodePrepareResources RPC for all resource handles. + client, err := dra.NewDRAPluginClient(pluginName) + if err != nil { + return fmt.Errorf("failed to get DRA Plugin client for plugin name %s: %v", pluginName, err) + } + response, err := client.NodePrepareResources(context.Background(), &drapb.NodePrepareResourcesRequest{Claims: claims}) + if err != nil { + // General error unrelated to any particular claim. 
+ return fmt.Errorf("NodePrepareResources failed: %v", err) + } + for claimUID, result := range response.Claims { + reqClaim := lookupClaimRequest(claims, claimUID) + if reqClaim == nil { + return fmt.Errorf("NodePrepareResources returned result for unknown claim UID %s", claimUID) + } + if result.Error != "" { + return fmt.Errorf("NodePrepareResources failed for claim %s/%s: %s", reqClaim.Namespace, reqClaim.Name, result.Error) + } + + claimInfo := claimInfos[types.UID(claimUID)] + + // Add the CDI Devices returned by NodePrepareResources to // the claimInfo object. - err = claimInfo.addCDIDevices(pluginName, response.CdiDevices) + err = claimInfo.addCDIDevices(pluginName, result.CDIDevices) if err != nil { return fmt.Errorf("failed to add CDIDevices to claimInfo %+v: %+v", claimInfo, err) } // TODO: We (re)add the claimInfo object to the cache and // sync it to the checkpoint *after* the - // NodePrepareResource call has completed. This will cause + // NodePrepareResources call has completed. This will cause // issues if the kubelet gets restarted between - // NodePrepareResource and syncToCheckpoint. It will result - // in not calling NodeUnprepareResource for this claim + // NodePrepareResources and syncToCheckpoint. It will result + // in not calling NodeUnprepareResources for this claim // because no claimInfo will be synced back to the cache // for it after the restart. We need to resolve this issue // before moving to beta. m.cache.add(claimInfo) + } - // Checkpoint to reduce redundant calls to - // NodePrepareResource() after a kubelet restart. - err = m.cache.syncToCheckpoint() - if err != nil { - return fmt.Errorf("failed to checkpoint claimInfo state, err: %+v", err) - } + // Checkpoint to reduce redundant calls to + // NodePrepareResources after a kubelet restart. 
+ err = m.cache.syncToCheckpoint() + if err != nil { + return fmt.Errorf("failed to checkpoint claimInfo state, err: %+v", err) + } + + unfinished := len(claims) - len(response.Claims) + if unfinished != 0 { + return fmt.Errorf("NodePrepareResources left out %d claims", unfinished) } } // Checkpoint to capture all of the previous addPodReference() calls. @@ -199,6 +225,15 @@ func (m *ManagerImpl) PrepareResources(pod *v1.Pod) error { return nil } +func lookupClaimRequest(claims []*drapb.Claim, claimUID string) *drapb.Claim { + for _, claim := range claims { + if claim.Uid == claimUID { + return claim + } + } + return nil +} + func claimIsUsedByPod(podClaim *v1.PodResourceClaim, pod *v1.Pod) bool { if claimIsUsedByContainers(podClaim, pod.Spec.InitContainers) { return true @@ -274,7 +309,8 @@ func (m *ManagerImpl) GetResources(pod *v1.Pod, container *v1.Container) (*Conta // As such, calls to the underlying NodeUnprepareResource API are skipped for claims that have // already been successfully unprepared. func (m *ManagerImpl) UnprepareResources(pod *v1.Pod) error { - // Call NodeUnprepareResource RPC for every resource claim referenced by the pod + batches := make(map[string][]*drapb.Claim) + claimInfos := make(map[types.UID]*ClaimInfo) for i := range pod.Spec.ResourceClaims { claimName, _, err := resourceclaim.Name(pod, &pod.Spec.ResourceClaims[i]) if err != nil { @@ -324,8 +360,7 @@ func (m *ManagerImpl) UnprepareResources(pod *v1.Pod) error { resourceHandles = make([]resourcev1alpha2.ResourceHandle, 1) } - // Loop through all plugins and call NodeUnprepareResource only for the - // last pod that references the claim + // Loop through all plugins and prepare for calling NodeUnprepareResources. 
for _, resourceHandle := range resourceHandles { // If no DriverName is provided in the resourceHandle, we // use the DriverName from the status @@ -334,38 +369,62 @@ func (m *ManagerImpl) UnprepareResources(pod *v1.Pod) error { pluginName = claimInfo.DriverName } - // Call NodeUnprepareResource RPC for each resourceHandle - client, err := dra.NewDRAPluginClient(pluginName) - if err != nil { - return fmt.Errorf("failed to get DRA Plugin client for plugin name %s, err=%+v", pluginName, err) + claim := &drapb.Claim{ + Namespace: resourceClaim.Namespace, + Uid: string(resourceClaim.UID), + Name: resourceClaim.Name, + ResourceHandle: resourceHandle.Data, } - response, err := client.NodeUnprepareResource( - context.Background(), - claimInfo.Namespace, - claimInfo.ClaimUID, - claimInfo.ClaimName, - resourceHandle.Data) - if err != nil { - return fmt.Errorf( - "NodeUnprepareResource failed, pod: %s, claim UID: %s, claim name: %s, resource handle: %s, err: %+v", - pod.Name, claimInfo.ClaimUID, claimInfo.ClaimName, resourceHandle.Data, err) - } - klog.V(3).InfoS("NodeUnprepareResource succeeded", "response", response) + batches[pluginName] = append(batches[pluginName], claim) + } + claimInfos[resourceClaim.UID] = claimInfo + } + + // Call NodeUnprepareResources for all claims in each batch. + // If there is any error, processing gets aborted. + // We could try to continue, but that would make the code more complex. + for pluginName, claims := range batches { + // Call NodeUnprepareResources RPC for all resource handles. + client, err := dra.NewDRAPluginClient(pluginName) + if err != nil { + return fmt.Errorf("failed to get DRA Plugin client for plugin name %s: %v", pluginName, err) + } + response, err := client.NodeUnprepareResources(context.Background(), &drapb.NodeUnprepareResourcesRequest{Claims: claims}) + if err != nil { + // General error unrelated to any particular claim. 
+ return fmt.Errorf("NodeUnprepareResources failed: %v", err) } - // Delete last pod UID only if all NodeUnprepareResource calls succeed. - // This ensures that the status manager doesn't enter termination status - // for the pod. This logic is implemented in - // m.PodMightNeedToUnprepareResources and claimInfo.hasPodReference. - claimInfo.deletePodReference(pod.UID) - m.cache.delete(claimInfo.ClaimName, pod.Namespace) + for claimUID, result := range response.Claims { + reqClaim := lookupClaimRequest(claims, claimUID) + if reqClaim == nil { + return fmt.Errorf("NodeUnprepareResources returned result for unknown claim UID %s", claimUID) + } + if result.Error != "" { + return fmt.Errorf("NodeUnprepareResources failed for claim %s/%s: %s", reqClaim.Namespace, reqClaim.Name, err) + } - // Checkpoint to reduce redundant calls to NodeUnPrepareResource() after a kubelet restart. + // Delete last pod UID only if unprepare succeeds. + // This ensures that the status manager doesn't enter termination status + // for the pod. This logic is implemented in + // m.PodMightNeedToUnprepareResources and claimInfo.hasPodReference. + claimInfo := claimInfos[types.UID(claimUID)] + claimInfo.deletePodReference(pod.UID) + m.cache.delete(claimInfo.ClaimName, pod.Namespace) + } + + // Checkpoint to reduce redundant calls to NodeUnprepareResources after a kubelet restart. err = m.cache.syncToCheckpoint() if err != nil { return fmt.Errorf("failed to checkpoint claimInfo state, err: %+v", err) } + + unfinished := len(claims) - len(response.Claims) + if unfinished != 0 { + return fmt.Errorf("NodeUnprepareResources left out %d claims", unfinished) + } } + // Checkpoint to capture all of the previous deletePodReference() calls. 
err := m.cache.syncToCheckpoint() if err != nil { diff --git a/pkg/kubelet/cm/dra/plugin/client.go b/pkg/kubelet/cm/dra/plugin/client.go index 395ec5b29b6..f926be1f57e 100644 --- a/pkg/kubelet/cm/dra/plugin/client.go +++ b/pkg/kubelet/cm/dra/plugin/client.go @@ -25,68 +25,53 @@ import ( "time" "google.golang.org/grpc" + grpccodes "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials/insecure" - "k8s.io/apimachinery/pkg/types" + grpcstatus "google.golang.org/grpc/status" "k8s.io/klog/v2" - drapbv1 "k8s.io/kubelet/pkg/apis/dra/v1alpha2" + drapbv1alpha2 "k8s.io/kubelet/pkg/apis/dra/v1alpha2" + drapb "k8s.io/kubelet/pkg/apis/dra/v1alpha3" ) const PluginClientTimeout = 10 * time.Second -type Client interface { - NodePrepareResource( - ctx context.Context, - namespace string, - claimUID types.UID, - claimName string, - resourceHandle string, - ) (*drapbv1.NodePrepareResourceResponse, error) - - NodeUnprepareResource( - ctx context.Context, - namespace string, - claimUID types.UID, - claimName string, - resourceHandle string, - ) (*drapbv1.NodeUnprepareResourceResponse, error) -} - // Strongly typed address. type draAddr string // draPluginClient encapsulates all dra plugin methods. type draPluginClient struct { - pluginName string - addr draAddr - nodeV1ClientCreator nodeV1ClientCreator + pluginName string + addr draAddr + nodeClientCreator nodeClientCreator } -var _ Client = &draPluginClient{} +var _ drapb.NodeClient = &draPluginClient{} -type nodeV1ClientCreator func(addr draAddr) ( - nodeClient drapbv1.NodeClient, +type nodeClientCreator func(addr draAddr) ( + nodeClient drapb.NodeClient, + nodeClientOld drapbv1alpha2.NodeClient, closer io.Closer, err error, ) -// newV1NodeClient creates a new NodeClient with the internally used gRPC +// newNodeClient creates a new NodeClient with the internally used gRPC // connection set up. It also returns a closer which must be called to close // the gRPC connection when the NodeClient is not used anymore. 
-// This is the default implementation for the nodeV1ClientCreator, used in +// This is the default implementation for the nodeClientCreator, used in // newDRAPluginClient. -func newV1NodeClient(addr draAddr) (nodeClient drapbv1.NodeClient, closer io.Closer, err error) { +func newNodeClient(addr draAddr) (nodeClient drapb.NodeClient, nodeClientOld drapbv1alpha2.NodeClient, closer io.Closer, err error) { var conn *grpc.ClientConn conn, err = newGrpcConn(addr) if err != nil { - return nil, nil, err + return nil, nil, nil, err } - return drapbv1.NewNodeClient(conn), conn, nil + return drapb.NewNodeClient(conn), drapbv1alpha2.NewNodeClient(conn), conn, nil } -func NewDRAPluginClient(pluginName string) (Client, error) { +func NewDRAPluginClient(pluginName string) (drapb.NodeClient, error) { if pluginName == "" { return nil, fmt.Errorf("plugin name is empty") } @@ -97,84 +82,114 @@ func NewDRAPluginClient(pluginName string) (Client, error) { } return &draPluginClient{ - pluginName: pluginName, - addr: draAddr(existingPlugin.endpoint), - nodeV1ClientCreator: newV1NodeClient, + pluginName: pluginName, + addr: draAddr(existingPlugin.endpoint), + nodeClientCreator: newNodeClient, }, nil } -func (r *draPluginClient) NodePrepareResource( +func (r *draPluginClient) NodePrepareResources( ctx context.Context, - namespace string, - claimUID types.UID, - claimName string, - resourceHandle string, -) (*drapbv1.NodePrepareResourceResponse, error) { - klog.V(4).InfoS( - log("calling NodePrepareResource rpc"), - "namespace", namespace, - "claimUID", claimUID, - "claimName", claimName, - "resourceHandle", resourceHandle) + req *drapb.NodePrepareResourcesRequest, + opts ...grpc.CallOption, +) (resp *drapb.NodePrepareResourcesResponse, err error) { + logger := klog.FromContext(ctx) + logger.V(4).Info(log("calling NodePrepareResources rpc"), "request", req) + defer logger.V(4).Info(log("done calling NodePrepareResources rpc"), "response", resp, "err", err) - if r.nodeV1ClientCreator == nil 
{ - return nil, errors.New("failed to call NodePrepareResource. nodeV1ClientCreator is nil") + if r.nodeClientCreator == nil { + return nil, errors.New("failed to call NodePrepareResources. nodeClientCreator is nil") } - nodeClient, closer, err := r.nodeV1ClientCreator(r.addr) + nodeClient, nodeClientOld, closer, err := r.nodeClientCreator(r.addr) if err != nil { return nil, err } defer closer.Close() - req := &drapbv1.NodePrepareResourceRequest{ - Namespace: namespace, - ClaimUid: string(claimUID), - ClaimName: claimName, - ResourceHandle: resourceHandle, - } - ctx, cancel := context.WithTimeout(ctx, PluginClientTimeout) defer cancel() - return nodeClient.NodePrepareResource(ctx, req) + resp, err = nodeClient.NodePrepareResources(ctx, req) + if err != nil { + status, _ := grpcstatus.FromError(err) + if status.Code() == grpccodes.Unimplemented { + // Fall back to the older gRPC API. + resp = &drapb.NodePrepareResourcesResponse{ + Claims: make(map[string]*drapb.NodePrepareResourceResponse), + } + err = nil + for _, claim := range req.Claims { + respOld, errOld := nodeClientOld.NodePrepareResource(ctx, + &drapbv1alpha2.NodePrepareResourceRequest{ + Namespace: claim.Namespace, + ClaimUid: claim.Uid, + ClaimName: claim.Name, + ResourceHandle: claim.ResourceHandle, + }) + result := &drapb.NodePrepareResourceResponse{} + if errOld != nil { + result.Error = errOld.Error() + } else { + result.CDIDevices = respOld.CdiDevices + } + resp.Claims[claim.Uid] = result + } + } + } + + return } -func (r *draPluginClient) NodeUnprepareResource( +func (r *draPluginClient) NodeUnprepareResources( ctx context.Context, - namespace string, - claimUID types.UID, - claimName string, - resourceHandle string, -) (*drapbv1.NodeUnprepareResourceResponse, error) { - klog.V(4).InfoS( - log("calling NodeUnprepareResource rpc"), - "namespace", namespace, - "claimUID", claimUID, - "claimname", claimName, - "resourceHandle", resourceHandle) + req *drapb.NodeUnprepareResourcesRequest, + opts 
...grpc.CallOption, +) (resp *drapb.NodeUnprepareResourcesResponse, err error) { + logger := klog.FromContext(ctx) + logger.V(4).Info(log("calling NodeUnprepareResource rpc"), "request", req) + defer logger.V(4).Info(log("done calling NodeUnprepareResources rpc"), "response", resp, "err", err) - if r.nodeV1ClientCreator == nil { - return nil, errors.New("nodeV1ClientCreate is nil") + if r.nodeClientCreator == nil { + return nil, errors.New("failed to call NodeUnprepareResources. nodeClientCreator is nil") } - nodeClient, closer, err := r.nodeV1ClientCreator(r.addr) + nodeClient, nodeClientOld, closer, err := r.nodeClientCreator(r.addr) if err != nil { return nil, err } defer closer.Close() - req := &drapbv1.NodeUnprepareResourceRequest{ - Namespace: namespace, - ClaimUid: string(claimUID), - ClaimName: claimName, - ResourceHandle: resourceHandle, - } - ctx, cancel := context.WithTimeout(ctx, PluginClientTimeout) defer cancel() - return nodeClient.NodeUnprepareResource(ctx, req) + resp, err = nodeClient.NodeUnprepareResources(ctx, req) + if err != nil { + status, _ := grpcstatus.FromError(err) + if status.Code() == grpccodes.Unimplemented { + // Fall back to the older gRPC API. 
+ resp = &drapb.NodeUnprepareResourcesResponse{ + Claims: make(map[string]*drapb.NodeUnprepareResourceResponse), + } + err = nil + for _, claim := range req.Claims { + _, errOld := nodeClientOld.NodeUnprepareResource(ctx, + &drapbv1alpha2.NodeUnprepareResourceRequest{ + Namespace: claim.Namespace, + ClaimUid: claim.Uid, + ClaimName: claim.Name, + ResourceHandle: claim.ResourceHandle, + }) + result := &drapb.NodeUnprepareResourceResponse{} + if errOld != nil { + result.Error = errOld.Error() + } + resp.Claims[claim.Uid] = result + } + } + } + + return } func newGrpcConn(addr draAddr) (*grpc.ClientConn, error) { diff --git a/staging/src/k8s.io/dynamic-resource-allocation/kubeletplugin/draplugin.go b/staging/src/k8s.io/dynamic-resource-allocation/kubeletplugin/draplugin.go index 89c6a70790b..e3a0bafe2b8 100644 --- a/staging/src/k8s.io/dynamic-resource-allocation/kubeletplugin/draplugin.go +++ b/staging/src/k8s.io/dynamic-resource-allocation/kubeletplugin/draplugin.go @@ -24,7 +24,8 @@ import ( "google.golang.org/grpc" "k8s.io/klog/v2" - drapbv1 "k8s.io/kubelet/pkg/apis/dra/v1alpha2" + drapbv1alpha2 "k8s.io/kubelet/pkg/apis/dra/v1alpha2" + drapbv1alpha3 "k8s.io/kubelet/pkg/apis/dra/v1alpha3" registerapi "k8s.io/kubelet/pkg/apis/pluginregistration/v1" ) @@ -144,6 +145,24 @@ func GRPCInterceptor(interceptor grpc.UnaryServerInterceptor) Option { } } +// NodeV1alpha2 explicitly chooses whether the DRA gRPC API v1alpha2 +// gets enabled. +func NodeV1alpha2(enabled bool) Option { + return func(o *options) error { + o.nodeV1alpha2 = enabled + return nil + } +} + +// NodeV1alpha3 explicitly chooses whether the DRA gRPC API v1alpha3 +// gets enabled. 
+func NodeV1alpha3(enabled bool) Option { + return func(o *options) error { + o.nodeV1alpha3 = enabled + return nil + } +} + type options struct { logger klog.Logger grpcVerbosity int @@ -152,6 +171,8 @@ type options struct { draAddress string pluginRegistrationEndpoint endpoint interceptors []grpc.UnaryServerInterceptor + + nodeV1alpha2, nodeV1alpha3 bool } // draPlugin combines the kubelet registration service and the DRA node plugin @@ -162,13 +183,15 @@ type draPlugin struct { } // Start sets up two gRPC servers (one for registration, one for the DRA node -// client). -func Start(nodeServer drapbv1.NodeServer, opts ...Option) (result DRAPlugin, finalErr error) { +// client). By default, all APIs implemented by the nodeServer get registered. +func Start(nodeServer interface{}, opts ...Option) (result DRAPlugin, finalErr error) { d := &draPlugin{} o := options{ logger: klog.Background(), grpcVerbosity: 4, + nodeV1alpha2: true, + nodeV1alpha3: true, } for _, option := range opts { if err := option(&o); err != nil { @@ -191,8 +214,18 @@ func Start(nodeServer drapbv1.NodeServer, opts ...Option) (result DRAPlugin, fin } // Run the node plugin gRPC server first to ensure that it is ready. 
+ implemented := false plugin, err := startGRPCServer(klog.LoggerWithName(o.logger, "dra"), o.grpcVerbosity, o.interceptors, o.draEndpoint, func(grpcServer *grpc.Server) { - drapbv1.RegisterNodeServer(grpcServer, nodeServer) + if nodeServer, ok := nodeServer.(drapbv1alpha3.NodeServer); ok && o.nodeV1alpha3 { + o.logger.V(5).Info("registering drapbv1alpha3.NodeServer") + drapbv1alpha3.RegisterNodeServer(grpcServer, nodeServer) + implemented = true + } + if nodeServer, ok := nodeServer.(drapbv1alpha2.NodeServer); ok && o.nodeV1alpha2 { + o.logger.V(5).Info("registering drapbv1alpha2.NodeServer") + drapbv1alpha2.RegisterNodeServer(grpcServer, nodeServer) + implemented = true + } }) if err != nil { return nil, fmt.Errorf("start node client: %v", err) @@ -208,6 +241,9 @@ func Start(nodeServer drapbv1.NodeServer, opts ...Option) (result DRAPlugin, fin plugin.stop() } }() + if !implemented { + return nil, errors.New("no supported DRA gRPC API is implemented and enabled") + } // Now make it available to kubelet. registrar, err := startRegistrar(klog.LoggerWithName(o.logger, "registrar"), o.grpcVerbosity, o.interceptors, o.driverName, o.draAddress, o.pluginRegistrationEndpoint) diff --git a/staging/src/k8s.io/kubelet/pkg/apis/dra/v1alpha3/api.pb.go b/staging/src/k8s.io/kubelet/pkg/apis/dra/v1alpha3/api.pb.go new file mode 100644 index 00000000000..6d0310cabd3 --- /dev/null +++ b/staging/src/k8s.io/kubelet/pkg/apis/dra/v1alpha3/api.pb.go @@ -0,0 +1,2134 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: api.proto + +package v1alpha3 + +import ( + context "context" + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type NodePrepareResourcesRequest struct { + // The list of ResourceClaims that are to be prepared. 
+ Claims []*Claim `protobuf:"bytes,1,rep,name=claims,proto3" json:"claims,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodePrepareResourcesRequest) Reset() { *m = NodePrepareResourcesRequest{} } +func (*NodePrepareResourcesRequest) ProtoMessage() {} +func (*NodePrepareResourcesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{0} +} +func (m *NodePrepareResourcesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodePrepareResourcesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_NodePrepareResourcesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *NodePrepareResourcesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodePrepareResourcesRequest.Merge(m, src) +} +func (m *NodePrepareResourcesRequest) XXX_Size() int { + return m.Size() +} +func (m *NodePrepareResourcesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_NodePrepareResourcesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_NodePrepareResourcesRequest proto.InternalMessageInfo + +func (m *NodePrepareResourcesRequest) GetClaims() []*Claim { + if m != nil { + return m.Claims + } + return nil +} + +type NodePrepareResourcesResponse struct { + // The ResourceClaims for which preparation was done + // or attempted, with claim_uid as key. + // + // It is an error if some claim listed in NodePrepareResourcesRequest + // does not get prepared. NodePrepareResources + // will be called again for those that are missing. 
+ Claims map[string]*NodePrepareResourceResponse `protobuf:"bytes,1,rep,name=claims,proto3" json:"claims,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodePrepareResourcesResponse) Reset() { *m = NodePrepareResourcesResponse{} } +func (*NodePrepareResourcesResponse) ProtoMessage() {} +func (*NodePrepareResourcesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{1} +} +func (m *NodePrepareResourcesResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodePrepareResourcesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_NodePrepareResourcesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *NodePrepareResourcesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodePrepareResourcesResponse.Merge(m, src) +} +func (m *NodePrepareResourcesResponse) XXX_Size() int { + return m.Size() +} +func (m *NodePrepareResourcesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NodePrepareResourcesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_NodePrepareResourcesResponse proto.InternalMessageInfo + +func (m *NodePrepareResourcesResponse) GetClaims() map[string]*NodePrepareResourceResponse { + if m != nil { + return m.Claims + } + return nil +} + +type NodePrepareResourceResponse struct { + // These are the additional devices that kubelet must + // make available via the container runtime. A resource + // may have zero or more devices. + CDIDevices []string `protobuf:"bytes,1,rep,name=cdi_devices,json=cdiDevices,proto3" json:"cdi_devices,omitempty"` + // If non-empty, preparing the ResourceClaim failed. + // cdi_devices is ignored in that case. 
+ Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodePrepareResourceResponse) Reset() { *m = NodePrepareResourceResponse{} } +func (*NodePrepareResourceResponse) ProtoMessage() {} +func (*NodePrepareResourceResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{2} +} +func (m *NodePrepareResourceResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodePrepareResourceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_NodePrepareResourceResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *NodePrepareResourceResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodePrepareResourceResponse.Merge(m, src) +} +func (m *NodePrepareResourceResponse) XXX_Size() int { + return m.Size() +} +func (m *NodePrepareResourceResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NodePrepareResourceResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_NodePrepareResourceResponse proto.InternalMessageInfo + +func (m *NodePrepareResourceResponse) GetCDIDevices() []string { + if m != nil { + return m.CDIDevices + } + return nil +} + +func (m *NodePrepareResourceResponse) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +type NodeUnprepareResourcesRequest struct { + // The list of ResourceClaims that are to be unprepared. 
+ Claims []*Claim `protobuf:"bytes,1,rep,name=claims,proto3" json:"claims,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeUnprepareResourcesRequest) Reset() { *m = NodeUnprepareResourcesRequest{} } +func (*NodeUnprepareResourcesRequest) ProtoMessage() {} +func (*NodeUnprepareResourcesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{3} +} +func (m *NodeUnprepareResourcesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeUnprepareResourcesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_NodeUnprepareResourcesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *NodeUnprepareResourcesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeUnprepareResourcesRequest.Merge(m, src) +} +func (m *NodeUnprepareResourcesRequest) XXX_Size() int { + return m.Size() +} +func (m *NodeUnprepareResourcesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_NodeUnprepareResourcesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeUnprepareResourcesRequest proto.InternalMessageInfo + +func (m *NodeUnprepareResourcesRequest) GetClaims() []*Claim { + if m != nil { + return m.Claims + } + return nil +} + +type NodeUnprepareResourcesResponse struct { + // The ResourceClaims for which preparation was reverted. + // The same rules as for NodePrepareResourcesResponse.claims + // apply. 
+ Claims map[string]*NodeUnprepareResourceResponse `protobuf:"bytes,1,rep,name=claims,proto3" json:"claims,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeUnprepareResourcesResponse) Reset() { *m = NodeUnprepareResourcesResponse{} } +func (*NodeUnprepareResourcesResponse) ProtoMessage() {} +func (*NodeUnprepareResourcesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{4} +} +func (m *NodeUnprepareResourcesResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeUnprepareResourcesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_NodeUnprepareResourcesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *NodeUnprepareResourcesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeUnprepareResourcesResponse.Merge(m, src) +} +func (m *NodeUnprepareResourcesResponse) XXX_Size() int { + return m.Size() +} +func (m *NodeUnprepareResourcesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NodeUnprepareResourcesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeUnprepareResourcesResponse proto.InternalMessageInfo + +func (m *NodeUnprepareResourcesResponse) GetClaims() map[string]*NodeUnprepareResourceResponse { + if m != nil { + return m.Claims + } + return nil +} + +type NodeUnprepareResourceResponse struct { + // If non-empty, unpreparing the ResourceClaim failed. 
+ Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NodeUnprepareResourceResponse) Reset() { *m = NodeUnprepareResourceResponse{} } +func (*NodeUnprepareResourceResponse) ProtoMessage() {} +func (*NodeUnprepareResourceResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{5} +} +func (m *NodeUnprepareResourceResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeUnprepareResourceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_NodeUnprepareResourceResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *NodeUnprepareResourceResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeUnprepareResourceResponse.Merge(m, src) +} +func (m *NodeUnprepareResourceResponse) XXX_Size() int { + return m.Size() +} +func (m *NodeUnprepareResourceResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NodeUnprepareResourceResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeUnprepareResourceResponse proto.InternalMessageInfo + +func (m *NodeUnprepareResourceResponse) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +type Claim struct { + // The ResourceClaim namespace (ResourceClaim.meta.Namespace). + // This field is REQUIRED. + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` + // The UID of the Resource claim (ResourceClaim.meta.UUID). + // This field is REQUIRED. + Uid string `protobuf:"bytes,2,opt,name=uid,proto3" json:"uid,omitempty"` + // The name of the Resource claim (ResourceClaim.meta.Name) + // This field is REQUIRED. 
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + // Resource handle (AllocationResult.ResourceHandles[*].Data) + // This field is REQUIRED. + ResourceHandle string `protobuf:"bytes,4,opt,name=resource_handle,json=resourceHandle,proto3" json:"resource_handle,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Claim) Reset() { *m = Claim{} } +func (*Claim) ProtoMessage() {} +func (*Claim) Descriptor() ([]byte, []int) { + return fileDescriptor_00212fb1f9d3bf1c, []int{6} +} +func (m *Claim) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Claim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Claim.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Claim) XXX_Merge(src proto.Message) { + xxx_messageInfo_Claim.Merge(m, src) +} +func (m *Claim) XXX_Size() int { + return m.Size() +} +func (m *Claim) XXX_DiscardUnknown() { + xxx_messageInfo_Claim.DiscardUnknown(m) +} + +var xxx_messageInfo_Claim proto.InternalMessageInfo + +func (m *Claim) GetNamespace() string { + if m != nil { + return m.Namespace + } + return "" +} + +func (m *Claim) GetUid() string { + if m != nil { + return m.Uid + } + return "" +} + +func (m *Claim) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Claim) GetResourceHandle() string { + if m != nil { + return m.ResourceHandle + } + return "" +} + +func init() { + proto.RegisterType((*NodePrepareResourcesRequest)(nil), "v1alpha3.NodePrepareResourcesRequest") + proto.RegisterType((*NodePrepareResourcesResponse)(nil), "v1alpha3.NodePrepareResourcesResponse") + proto.RegisterMapType((map[string]*NodePrepareResourceResponse)(nil), "v1alpha3.NodePrepareResourcesResponse.ClaimsEntry") + proto.RegisterType((*NodePrepareResourceResponse)(nil), 
"v1alpha3.NodePrepareResourceResponse") + proto.RegisterType((*NodeUnprepareResourcesRequest)(nil), "v1alpha3.NodeUnprepareResourcesRequest") + proto.RegisterType((*NodeUnprepareResourcesResponse)(nil), "v1alpha3.NodeUnprepareResourcesResponse") + proto.RegisterMapType((map[string]*NodeUnprepareResourceResponse)(nil), "v1alpha3.NodeUnprepareResourcesResponse.ClaimsEntry") + proto.RegisterType((*NodeUnprepareResourceResponse)(nil), "v1alpha3.NodeUnprepareResourceResponse") + proto.RegisterType((*Claim)(nil), "v1alpha3.Claim") +} + +func init() { proto.RegisterFile("api.proto", fileDescriptor_00212fb1f9d3bf1c) } + +var fileDescriptor_00212fb1f9d3bf1c = []byte{ + // 500 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0x4d, 0x6f, 0xd3, 0x40, + 0x10, 0xcd, 0x36, 0x49, 0x45, 0x26, 0x52, 0x8b, 0x56, 0x15, 0xb2, 0x42, 0x31, 0x91, 0x45, 0x49, + 0x2e, 0xd8, 0x22, 0x05, 0xa9, 0x02, 0x71, 0x49, 0x0b, 0x2a, 0x08, 0x21, 0x64, 0x89, 0x0b, 0x97, + 0xb2, 0xb6, 0x07, 0xc7, 0x8a, 0xe3, 0x35, 0xbb, 0x76, 0xa4, 0xde, 0xf8, 0x09, 0xfc, 0xac, 0x1e, + 0x38, 0x20, 0x4e, 0x9c, 0x2a, 0x6a, 0xfe, 0x08, 0xf2, 0xda, 0x4e, 0x3f, 0xe4, 0x34, 0x95, 0x7a, + 0x9b, 0x7d, 0xbb, 0x33, 0x6f, 0xe6, 0xbd, 0xb1, 0xa1, 0xc3, 0xe2, 0xc0, 0x8c, 0x05, 0x4f, 0x38, + 0xbd, 0x33, 0x7f, 0xca, 0xc2, 0x78, 0xc2, 0x76, 0x7b, 0x4f, 0xfc, 0x20, 0x99, 0xa4, 0x8e, 0xe9, + 0xf2, 0x99, 0xe5, 0x73, 0x9f, 0x5b, 0xea, 0x81, 0x93, 0x7e, 0x55, 0x27, 0x75, 0x50, 0x51, 0x91, + 0x68, 0xbc, 0x81, 0xfb, 0x1f, 0xb8, 0x87, 0x1f, 0x05, 0xc6, 0x4c, 0xa0, 0x8d, 0x92, 0xa7, 0xc2, + 0x45, 0x69, 0xe3, 0xb7, 0x14, 0x65, 0x42, 0x07, 0xb0, 0xee, 0x86, 0x2c, 0x98, 0x49, 0x8d, 0xf4, + 0x9b, 0xc3, 0xee, 0x68, 0xd3, 0xac, 0x88, 0xcc, 0xfd, 0x1c, 0xb7, 0xcb, 0x6b, 0xe3, 0x27, 0x81, + 0xed, 0xfa, 0x42, 0x32, 0xe6, 0x91, 0x44, 0xfa, 0xee, 0x4a, 0xa5, 0xd1, 0x79, 0xa5, 0xeb, 0xf2, + 0x0a, 0x1a, 0xf9, 0x3a, 0x4a, 0xc4, 0x71, 0x45, 0xd6, 0xfb, 0x02, 0xdd, 0x0b, 0x30, 0xbd, 0x0b, + 0xcd, 
0x29, 0x1e, 0x6b, 0xa4, 0x4f, 0x86, 0x1d, 0x3b, 0x0f, 0xe9, 0x4b, 0x68, 0xcf, 0x59, 0x98, + 0xa2, 0xb6, 0xd6, 0x27, 0xc3, 0xee, 0x68, 0xe7, 0x5a, 0xae, 0x8a, 0xca, 0x2e, 0x72, 0x5e, 0xac, + 0xed, 0x11, 0xc3, 0xab, 0x95, 0x65, 0x31, 0x8c, 0x05, 0x5d, 0xd7, 0x0b, 0x8e, 0x3c, 0x9c, 0x07, + 0x2e, 0x16, 0x13, 0x75, 0xc6, 0x1b, 0xd9, 0xe9, 0x43, 0xd8, 0x3f, 0x78, 0x7b, 0x50, 0xa0, 0x36, + 0xb8, 0x5e, 0x50, 0xc6, 0x74, 0x0b, 0xda, 0x28, 0x04, 0x17, 0xaa, 0xa1, 0x8e, 0x5d, 0x1c, 0x8c, + 0x43, 0x78, 0x90, 0xb3, 0x7c, 0x8a, 0xe2, 0xdb, 0xca, 0xff, 0x9b, 0x80, 0xbe, 0xac, 0x54, 0xd9, + 0xf3, 0xfb, 0x2b, 0xb5, 0x9e, 0x5d, 0x16, 0x65, 0x79, 0x66, 0xad, 0x05, 0xce, 0x2a, 0x0b, 0x5e, + 0x5d, 0xb6, 0x60, 0xb0, 0x82, 0xad, 0xce, 0x84, 0xe7, 0x4b, 0xe4, 0x59, 0x8c, 0xb4, 0x50, 0x95, + 0x5c, 0x54, 0x35, 0x81, 0xb6, 0x6a, 0x8d, 0x6e, 0x43, 0x27, 0x62, 0x33, 0x94, 0x31, 0x73, 0xb1, + 0x7c, 0x72, 0x0e, 0xe4, 0x2d, 0xa7, 0x81, 0x57, 0x1a, 0x92, 0x87, 0x94, 0x42, 0x2b, 0xbf, 0xd6, + 0x9a, 0x0a, 0x52, 0x31, 0x1d, 0xc0, 0xa6, 0x28, 0x69, 0x8f, 0x26, 0x2c, 0xf2, 0x42, 0xd4, 0x5a, + 0xea, 0x7a, 0xa3, 0x82, 0x0f, 0x15, 0x3a, 0x3a, 0x25, 0xd0, 0xca, 0xbb, 0xa5, 0x3e, 0x6c, 0xd5, + 0x2d, 0x34, 0xdd, 0x59, 0xb5, 0xf0, 0xca, 0xf2, 0xde, 0xe3, 0x9b, 0x7d, 0x17, 0x46, 0x83, 0xce, + 0xe0, 0x5e, 0xbd, 0x71, 0x74, 0xb0, 0xda, 0xda, 0x82, 0x6c, 0x78, 0xd3, 0x1d, 0x30, 0x1a, 0xe3, + 0xf1, 0xc9, 0x99, 0x4e, 0xfe, 0x9c, 0xe9, 0x8d, 0xef, 0x99, 0x4e, 0x4e, 0x32, 0x9d, 0xfc, 0xca, + 0x74, 0xf2, 0x37, 0xd3, 0xc9, 0x8f, 0x7f, 0x7a, 0xe3, 0xf3, 0xa3, 0xe9, 0x9e, 0x34, 0x03, 0x6e, + 0x4d, 0x53, 0x07, 0x43, 0x4c, 0xac, 0x78, 0xea, 0x5b, 0x2c, 0x0e, 0xa4, 0xe5, 0x09, 0x66, 0x55, + 0x24, 0xce, 0xba, 0xfa, 0xe9, 0xec, 0xfe, 0x0f, 0x00, 0x00, 0xff, 0xff, 0x42, 0xff, 0x15, 0x6b, + 0xba, 0x04, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// NodeClient is the client API for Node service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type NodeClient interface { + // NodePrepareResources prepares several ResourceClaims + // for use on the node. If an error is returned, the + // response is ignored. Failures for individual claims + // can be reported inside NodePrepareResourcesResponse. + NodePrepareResources(ctx context.Context, in *NodePrepareResourcesRequest, opts ...grpc.CallOption) (*NodePrepareResourcesResponse, error) + // NodeUnprepareResources is the opposite of NodePrepareResources. + // The same error handling rules apply. + NodeUnprepareResources(ctx context.Context, in *NodeUnprepareResourcesRequest, opts ...grpc.CallOption) (*NodeUnprepareResourcesResponse, error) +} + +type nodeClient struct { + cc *grpc.ClientConn +} + +func NewNodeClient(cc *grpc.ClientConn) NodeClient { + return &nodeClient{cc} +} + +func (c *nodeClient) NodePrepareResources(ctx context.Context, in *NodePrepareResourcesRequest, opts ...grpc.CallOption) (*NodePrepareResourcesResponse, error) { + out := new(NodePrepareResourcesResponse) + err := c.cc.Invoke(ctx, "/v1alpha3.Node/NodePrepareResources", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeClient) NodeUnprepareResources(ctx context.Context, in *NodeUnprepareResourcesRequest, opts ...grpc.CallOption) (*NodeUnprepareResourcesResponse, error) { + out := new(NodeUnprepareResourcesResponse) + err := c.cc.Invoke(ctx, "/v1alpha3.Node/NodeUnprepareResources", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// NodeServer is the server API for Node service.
+type NodeServer interface { + // NodePrepareResources prepares several ResourceClaims + // for use on the node. If an error is returned, the + // response is ignored. Failures for individual claims + // can be reported inside NodePrepareResourcesResponse. + NodePrepareResources(context.Context, *NodePrepareResourcesRequest) (*NodePrepareResourcesResponse, error) + // NodeUnprepareResources is the opposite of NodePrepareResources. + // The same error handling rules apply. + NodeUnprepareResources(context.Context, *NodeUnprepareResourcesRequest) (*NodeUnprepareResourcesResponse, error) +} + +// UnimplementedNodeServer can be embedded to have forward compatible implementations. +type UnimplementedNodeServer struct { +} + +func (*UnimplementedNodeServer) NodePrepareResources(ctx context.Context, req *NodePrepareResourcesRequest) (*NodePrepareResourcesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method NodePrepareResources not implemented") +} +func (*UnimplementedNodeServer) NodeUnprepareResources(ctx context.Context, req *NodeUnprepareResourcesRequest) (*NodeUnprepareResourcesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method NodeUnprepareResources not implemented") +} + +func RegisterNodeServer(s *grpc.Server, srv NodeServer) { + s.RegisterService(&_Node_serviceDesc, srv) +} + +func _Node_NodePrepareResources_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodePrepareResourcesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodePrepareResources(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/v1alpha3.Node/NodePrepareResources", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodePrepareResources(ctx, req.(*NodePrepareResourcesRequest)) + } + return
interceptor(ctx, in, info, handler) +} + +func _Node_NodeUnprepareResources_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeUnprepareResourcesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeServer).NodeUnprepareResources(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/v1alpha3.Node/NodeUnprepareResources", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeServer).NodeUnprepareResources(ctx, req.(*NodeUnprepareResourcesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Node_serviceDesc = grpc.ServiceDesc{ + ServiceName: "v1alpha3.Node", + HandlerType: (*NodeServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "NodePrepareResources", + Handler: _Node_NodePrepareResources_Handler, + }, + { + MethodName: "NodeUnprepareResources", + Handler: _Node_NodeUnprepareResources_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "api.proto", +} + +func (m *NodePrepareResourcesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodePrepareResourcesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodePrepareResourcesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Claims) > 0 { + for iNdEx := len(m.Claims) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Claims[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *NodePrepareResourcesResponse) 
Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodePrepareResourcesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodePrepareResourcesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Claims) > 0 { + for k := range m.Claims { + v := m.Claims[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintApi(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintApi(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *NodePrepareResourceResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodePrepareResourceResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodePrepareResourceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Error) > 0 { + i -= len(m.Error) + copy(dAtA[i:], m.Error) + i = encodeVarintApi(dAtA, i, uint64(len(m.Error))) + i-- + dAtA[i] = 0x12 + } + if len(m.CDIDevices) > 0 { + for iNdEx := len(m.CDIDevices) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.CDIDevices[iNdEx]) + copy(dAtA[i:], m.CDIDevices[iNdEx]) + i = encodeVarintApi(dAtA, i, uint64(len(m.CDIDevices[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *NodeUnprepareResourcesRequest) Marshal() (dAtA []byte, err 
error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeUnprepareResourcesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeUnprepareResourcesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Claims) > 0 { + for iNdEx := len(m.Claims) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Claims[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *NodeUnprepareResourcesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeUnprepareResourcesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeUnprepareResourcesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Claims) > 0 { + for k := range m.Claims { + v := m.Claims[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintApi(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintApi(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *NodeUnprepareResourceResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + 
return dAtA[:n], nil +} + +func (m *NodeUnprepareResourceResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeUnprepareResourceResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Error) > 0 { + i -= len(m.Error) + copy(dAtA[i:], m.Error) + i = encodeVarintApi(dAtA, i, uint64(len(m.Error))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Claim) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Claim) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Claim) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ResourceHandle) > 0 { + i -= len(m.ResourceHandle) + copy(dAtA[i:], m.ResourceHandle) + i = encodeVarintApi(dAtA, i, uint64(len(m.ResourceHandle))) + i-- + dAtA[i] = 0x22 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintApi(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + } + if len(m.Uid) > 0 { + i -= len(m.Uid) + copy(dAtA[i:], m.Uid) + i = encodeVarintApi(dAtA, i, uint64(len(m.Uid))) + i-- + dAtA[i] = 0x12 + } + if len(m.Namespace) > 0 { + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintApi(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintApi(dAtA []byte, offset int, v uint64) int { + offset -= sovApi(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *NodePrepareResourcesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Claims) > 0 { + for _, e := range 
m.Claims { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + return n +} + +func (m *NodePrepareResourcesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Claims) > 0 { + for k, v := range m.Claims { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovApi(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovApi(uint64(len(k))) + l + n += mapEntrySize + 1 + sovApi(uint64(mapEntrySize)) + } + } + return n +} + +func (m *NodePrepareResourceResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.CDIDevices) > 0 { + for _, s := range m.CDIDevices { + l = len(s) + n += 1 + l + sovApi(uint64(l)) + } + } + l = len(m.Error) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *NodeUnprepareResourcesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Claims) > 0 { + for _, e := range m.Claims { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + return n +} + +func (m *NodeUnprepareResourcesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Claims) > 0 { + for k, v := range m.Claims { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovApi(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovApi(uint64(len(k))) + l + n += mapEntrySize + 1 + sovApi(uint64(mapEntrySize)) + } + } + return n +} + +func (m *NodeUnprepareResourceResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Error) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func (m *Claim) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Namespace) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.Uid) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.ResourceHandle) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func 
sovApi(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozApi(x uint64) (n int) { + return sovApi(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *NodePrepareResourcesRequest) String() string { + if this == nil { + return "nil" + } + repeatedStringForClaims := "[]*Claim{" + for _, f := range this.Claims { + repeatedStringForClaims += strings.Replace(f.String(), "Claim", "Claim", 1) + "," + } + repeatedStringForClaims += "}" + s := strings.Join([]string{`&NodePrepareResourcesRequest{`, + `Claims:` + repeatedStringForClaims + `,`, + `}`, + }, "") + return s +} +func (this *NodePrepareResourcesResponse) String() string { + if this == nil { + return "nil" + } + keysForClaims := make([]string, 0, len(this.Claims)) + for k := range this.Claims { + keysForClaims = append(keysForClaims, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForClaims) + mapStringForClaims := "map[string]*NodePrepareResourceResponse{" + for _, k := range keysForClaims { + mapStringForClaims += fmt.Sprintf("%v: %v,", k, this.Claims[k]) + } + mapStringForClaims += "}" + s := strings.Join([]string{`&NodePrepareResourcesResponse{`, + `Claims:` + mapStringForClaims + `,`, + `}`, + }, "") + return s +} +func (this *NodePrepareResourceResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodePrepareResourceResponse{`, + `CDIDevices:` + fmt.Sprintf("%v", this.CDIDevices) + `,`, + `Error:` + fmt.Sprintf("%v", this.Error) + `,`, + `}`, + }, "") + return s +} +func (this *NodeUnprepareResourcesRequest) String() string { + if this == nil { + return "nil" + } + repeatedStringForClaims := "[]*Claim{" + for _, f := range this.Claims { + repeatedStringForClaims += strings.Replace(f.String(), "Claim", "Claim", 1) + "," + } + repeatedStringForClaims += "}" + s := strings.Join([]string{`&NodeUnprepareResourcesRequest{`, + `Claims:` + repeatedStringForClaims + `,`, + `}`, + }, "") + return s +} +func (this 
*NodeUnprepareResourcesResponse) String() string { + if this == nil { + return "nil" + } + keysForClaims := make([]string, 0, len(this.Claims)) + for k := range this.Claims { + keysForClaims = append(keysForClaims, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForClaims) + mapStringForClaims := "map[string]*NodeUnprepareResourceResponse{" + for _, k := range keysForClaims { + mapStringForClaims += fmt.Sprintf("%v: %v,", k, this.Claims[k]) + } + mapStringForClaims += "}" + s := strings.Join([]string{`&NodeUnprepareResourcesResponse{`, + `Claims:` + mapStringForClaims + `,`, + `}`, + }, "") + return s +} +func (this *NodeUnprepareResourceResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeUnprepareResourceResponse{`, + `Error:` + fmt.Sprintf("%v", this.Error) + `,`, + `}`, + }, "") + return s +} +func (this *Claim) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Claim{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Uid:` + fmt.Sprintf("%v", this.Uid) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ResourceHandle:` + fmt.Sprintf("%v", this.ResourceHandle) + `,`, + `}`, + }, "") + return s +} +func valueToStringApi(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *NodePrepareResourcesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodePrepareResourcesRequest: wiretype end group for non-group") + } + if fieldNum 
<= 0 { + return fmt.Errorf("proto: NodePrepareResourcesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Claims", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Claims = append(m.Claims, &Claim{}) + if err := m.Claims[len(m.Claims)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodePrepareResourcesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodePrepareResourcesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodePrepareResourcesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
Claims", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Claims == nil { + m.Claims = make(map[string]*NodePrepareResourceResponse) + } + var mapkey string + var mapvalue *NodePrepareResourceResponse + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthApi + } + postmsgIndex := iNdEx + mapmsglen + 
if postmsgIndex < 0 { + return ErrInvalidLengthApi + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &NodePrepareResourceResponse{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Claims[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodePrepareResourceResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodePrepareResourceResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodePrepareResourceResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CDIDevices", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + 
if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CDIDevices = append(m.CDIDevices, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeUnprepareResourcesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeUnprepareResourcesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
NodeUnprepareResourcesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Claims", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Claims = append(m.Claims, &Claim{}) + if err := m.Claims[len(m.Claims)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeUnprepareResourcesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeUnprepareResourcesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeUnprepareResourcesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Claims", wireType) + } + var 
msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Claims == nil { + m.Claims = make(map[string]*NodeUnprepareResourceResponse) + } + var mapkey string + var mapvalue *NodeUnprepareResourceResponse + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthApi + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthApi + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthApi + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + 
return ErrInvalidLengthApi + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &NodeUnprepareResourceResponse{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Claims[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeUnprepareResourceResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeUnprepareResourceResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeUnprepareResourceResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break 
+ } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Claim) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Claim: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Claim: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { 
+ return fmt.Errorf("proto: wrong wireType = %d for field Uid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Uid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceHandle", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceHandle = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + 
default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipApi(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthApi + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupApi + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthApi + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthApi = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowApi = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupApi = fmt.Errorf("proto: unexpected end of group") +) diff --git a/staging/src/k8s.io/kubelet/pkg/apis/dra/v1alpha3/api.proto 
b/staging/src/k8s.io/kubelet/pkg/apis/dra/v1alpha3/api.proto new file mode 100644 index 00000000000..567842711be --- /dev/null +++ b/staging/src/k8s.io/kubelet/pkg/apis/dra/v1alpha3/api.proto @@ -0,0 +1,103 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// To regenerate api.pb.go run `hack/update-codegen.sh protobindings` + +syntax = "proto3"; + +package v1alpha3; +option go_package = "k8s.io/kubelet/pkg/apis/dra/v1alpha3"; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +option (gogoproto.goproto_stringer_all) = false; +option (gogoproto.stringer_all) = true; +option (gogoproto.goproto_getters_all) = true; +option (gogoproto.marshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (gogoproto.goproto_unrecognized_all) = false; + +service Node { + // NodePrepareResources prepares several ResourceClaims + // for use on the node. If an error is returned, the + // response is ignored. Failures for individual claims + // can be reported inside NodePrepareResourcesResponse. + rpc NodePrepareResources (NodePrepareResourcesRequest) + returns (NodePrepareResourcesResponse) {} + + // NodeUnprepareResources is the opposite of NodePrepareResources. 
+ // The same error handling rules apply. + rpc NodeUnprepareResources (NodeUnprepareResourcesRequest) + returns (NodeUnprepareResourcesResponse) {} +} + +message NodePrepareResourcesRequest { + // The list of ResourceClaims that are to be prepared. + repeated Claim claims = 1; +} + +message NodePrepareResourcesResponse { + // The ResourceClaims for which preparation was done + // or attempted, with claim_uid as key. + // + // It is an error if some claim listed in NodePrepareResourcesRequest + // does not get prepared. NodePrepareResources + // will be called again for those that are missing. + map<string, NodePrepareResourceResponse> claims = 1; +} + +message NodePrepareResourceResponse { + // These are the additional devices that kubelet must + // make available via the container runtime. A resource + // may have zero or more devices. + repeated string cdi_devices = 1 [(gogoproto.customname) = "CDIDevices"]; + // If non-empty, preparing the ResourceClaim failed. + // cdi_devices is ignored in that case. + string error = 2; +} + +message NodeUnprepareResourcesRequest { + // The list of ResourceClaims that are to be unprepared. + repeated Claim claims = 1; +} + +message NodeUnprepareResourcesResponse { + // The ResourceClaims for which preparation was reverted. + // The same rules as for NodePrepareResourcesResponse.claims + // apply. + map<string, NodeUnprepareResourceResponse> claims = 1; +} + +message NodeUnprepareResourceResponse { + // If non-empty, unpreparing the ResourceClaim failed. + string error = 1; +} + +message Claim { + // The ResourceClaim namespace (ResourceClaim.meta.Namespace). + // This field is REQUIRED. + string namespace = 1; + // The UID of the Resource claim (ResourceClaim.meta.UUID). + // This field is REQUIRED. + string uid = 2; + // The name of the Resource claim (ResourceClaim.meta.Name) + // This field is REQUIRED. + string name = 3; + // Resource handle (AllocationResult.ResourceHandles[*].Data) + // This field is REQUIRED. 
+ string resource_handle = 4; +} diff --git a/test/e2e/dra/deploy.go b/test/e2e/dra/deploy.go index 6d762b13c28..180c937e369 100644 --- a/test/e2e/dra/deploy.go +++ b/test/e2e/dra/deploy.go @@ -48,8 +48,10 @@ import ( ) const ( - NodePrepareResourceMethod = "/v1alpha2.Node/NodePrepareResource" - NodeUnprepareResourceMethod = "/v1alpha2.Node/NodeUnprepareResource" + NodePrepareResourceMethod = "/v1alpha2.Node/NodePrepareResource" + NodePrepareResourcesMethod = "/v1alpha3.Node/NodePrepareResources" + NodeUnprepareResourceMethod = "/v1alpha2.Node/NodeUnprepareResource" + NodeUnprepareResourcesMethod = "/v1alpha3.Node/NodeUnprepareResources" ) type Nodes struct { @@ -87,9 +89,11 @@ func NewNodes(f *framework.Framework, minNodes, maxNodes int) *Nodes { // up after the test. func NewDriver(f *framework.Framework, nodes *Nodes, configureResources func() app.Resources) *Driver { d := &Driver{ - f: f, - fail: map[MethodInstance]bool{}, - callCounts: map[MethodInstance]int64{}, + f: f, + fail: map[MethodInstance]bool{}, + callCounts: map[MethodInstance]int64{}, + NodeV1alpha2: true, + NodeV1alpha3: true, } ginkgo.BeforeEach(func() { @@ -121,6 +125,8 @@ type Driver struct { Name string Nodes map[string]*app.ExamplePlugin + NodeV1alpha2, NodeV1alpha3 bool + mutex sync.Mutex fail map[MethodInstance]bool callCounts map[MethodInstance]int64 @@ -229,6 +235,8 @@ func (d *Driver) SetUp(nodes *Nodes, resources app.Resources) { kubeletplugin.PluginListener(listen(ctx, d.f, pod.Name, "plugin", 9001)), kubeletplugin.RegistrarListener(listen(ctx, d.f, pod.Name, "registrar", 9000)), kubeletplugin.KubeletPluginSocketPath(draAddr), + kubeletplugin.NodeV1alpha2(d.NodeV1alpha2), + kubeletplugin.NodeV1alpha3(d.NodeV1alpha3), ) framework.ExpectNoError(err, "start kubelet plugin for node %s", pod.Spec.NodeName) d.cleanup = append(d.cleanup, func() { diff --git a/test/e2e/dra/dra.go b/test/e2e/dra/dra.go index df849d4810c..112fa0d733c 100644 --- a/test/e2e/dra/dra.go +++ b/test/e2e/dra/dra.go @@ 
-67,9 +67,9 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu ginkgo.By("the driver is running") }) - ginkgo.It("must retry NodePrepareResource", func(ctx context.Context) { + ginkgo.It("must retry NodePrepareResources", func(ctx context.Context) { // We have exactly one host. - m := MethodInstance{driver.Nodenames()[0], NodePrepareResourceMethod} + m := MethodInstance{driver.Nodenames()[0], NodePrepareResourcesMethod} driver.Fail(m, true) @@ -79,10 +79,10 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu b.create(ctx, parameters, pod, template) - ginkgo.By("wait for NodePrepareResource call") + ginkgo.By("wait for NodePrepareResources call") gomega.Eventually(ctx, func(ctx context.Context) error { if driver.CallCount(m) == 0 { - return errors.New("NodePrepareResource not called yet") + return errors.New("NodePrepareResources not called yet") } return nil }).WithTimeout(podStartTimeout).Should(gomega.Succeed()) @@ -93,7 +93,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu err := e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, pod.Namespace) framework.ExpectNoError(err, "start pod with inline resource claim") if driver.CallCount(m) == callCount { - framework.Fail("NodePrepareResource should have been called again") + framework.Fail("NodePrepareResources should have been called again") } }) @@ -593,44 +593,64 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu }) }) - ginkgo.Context("multiple drivers", func() { + multipleDrivers := func(nodeV1alpha2, nodeV1alpha3 bool) { nodes := NewNodes(f, 1, 4) driver1 := NewDriver(f, nodes, func() app.Resources { return app.Resources{ NodeLocal: true, - MaxAllocations: 1, + MaxAllocations: 2, Nodes: nodes.NodeNames, } }) + driver1.NodeV1alpha2 = nodeV1alpha2 + driver1.NodeV1alpha3 = nodeV1alpha3 b1 := newBuilder(f, driver1) driver2 := NewDriver(f, nodes, func() app.Resources 
{ return app.Resources{ NodeLocal: true, - MaxAllocations: 1, + MaxAllocations: 2, Nodes: nodes.NodeNames, } }) driver2.NameSuffix = "-other" + driver2.NodeV1alpha2 = nodeV1alpha2 + driver2.NodeV1alpha3 = nodeV1alpha3 b2 := newBuilder(f, driver2) ginkgo.It("work", func(ctx context.Context) { parameters1 := b1.parameters() parameters2 := b2.parameters() claim1 := b1.externalClaim(resourcev1alpha2.AllocationModeWaitForFirstConsumer) + claim1b := b1.externalClaim(resourcev1alpha2.AllocationModeWaitForFirstConsumer) claim2 := b2.externalClaim(resourcev1alpha2.AllocationModeWaitForFirstConsumer) + claim2b := b2.externalClaim(resourcev1alpha2.AllocationModeWaitForFirstConsumer) pod := b1.podExternal() - pod.Spec.ResourceClaims = append(pod.Spec.ResourceClaims, - v1.PodResourceClaim{ - Name: "claim2", - Source: v1.ClaimSource{ - ResourceClaimName: &claim2.Name, + for i, claim := range []*resourcev1alpha2.ResourceClaim{claim1b, claim2, claim2b} { + claim := claim + pod.Spec.ResourceClaims = append(pod.Spec.ResourceClaims, + v1.PodResourceClaim{ + Name: fmt.Sprintf("claim%d", i+1), + Source: v1.ClaimSource{ + ResourceClaimName: &claim.Name, + }, }, - }, - ) - b1.create(ctx, parameters1, parameters2, claim1, claim2, pod) + ) + } + b1.create(ctx, parameters1, parameters2, claim1, claim1b, claim2, claim2b, pod) b1.testPod(ctx, f.ClientSet, pod) }) + } + multipleDriversContext := func(prefix string, nodeV1alpha2, nodeV1alpha3 bool) { + ginkgo.Context(prefix, func() { + multipleDrivers(nodeV1alpha2, nodeV1alpha3) + }) + } + + ginkgo.Context("multiple drivers", func() { + multipleDriversContext("using only drapbv1alpha2", true, false) + multipleDriversContext("using only drapbv1alpha3", false, true) + multipleDriversContext("using both drapbv1alpha2 and drapbv1alpha3", true, true) }) }) diff --git a/test/e2e/dra/test-driver/app/gomega.go b/test/e2e/dra/test-driver/app/gomega.go index 7bb70984652..45f4ceff5b5 100644 --- a/test/e2e/dra/test-driver/app/gomega.go +++ 
b/test/e2e/dra/test-driver/app/gomega.go @@ -42,3 +42,13 @@ var NodePrepareResourceCalled = gcustom.MakeMatcher(func(actualCalls []GRPCCall) } return false, nil }).WithMessage("contain NodePrepareResource call") + +// NodePrepareResoucesCalled checks that NodePrepareResources API has been called +var NodePrepareResourcesCalled = gcustom.MakeMatcher(func(actualCalls []GRPCCall) (bool, error) { + for _, call := range actualCalls { + if strings.HasSuffix(call.FullMethod, "/NodePrepareResources") && call.Err == nil { + return true, nil + } + } + return false, nil +}).WithMessage("contain NodePrepareResources call") diff --git a/test/e2e/dra/test-driver/app/kubeletplugin.go b/test/e2e/dra/test-driver/app/kubeletplugin.go index a53de6b2116..6d9add80ad7 100644 --- a/test/e2e/dra/test-driver/app/kubeletplugin.go +++ b/test/e2e/dra/test-driver/app/kubeletplugin.go @@ -28,7 +28,8 @@ import ( "k8s.io/dynamic-resource-allocation/kubeletplugin" "k8s.io/klog/v2" - drapbv1 "k8s.io/kubelet/pkg/apis/dra/v1alpha2" + drapbv1alpha2 "k8s.io/kubelet/pkg/apis/dra/v1alpha2" + drapbv1alpha3 "k8s.io/kubelet/pkg/apis/dra/v1alpha3" ) type ExamplePlugin struct { @@ -69,7 +70,7 @@ type ClaimID struct { UID string } -var _ drapbv1.NodeServer = &ExamplePlugin{} +var _ drapbv1alpha2.NodeServer = &ExamplePlugin{} // getJSONFilePath returns the absolute path where CDI file is/should be. func (ex *ExamplePlugin) getJSONFilePath(claimUID string) string { @@ -147,7 +148,7 @@ func (ex *ExamplePlugin) Block() { // a deterministic name to simplify NodeUnprepareResource (no need to remember // or discover the name) and idempotency (when called again, the file simply // gets written again). 
-func (ex *ExamplePlugin) NodePrepareResource(ctx context.Context, req *drapbv1.NodePrepareResourceRequest) (*drapbv1.NodePrepareResourceResponse, error) { +func (ex *ExamplePlugin) NodePrepareResource(ctx context.Context, req *drapbv1alpha2.NodePrepareResourceRequest) (*drapbv1alpha2.NodePrepareResourceResponse, error) { logger := klog.FromContext(ctx) // Block to emulate plugin stuckness or slowness. @@ -201,7 +202,7 @@ func (ex *ExamplePlugin) NodePrepareResource(ctx context.Context, req *drapbv1.N } dev := vendor + "/" + class + "=" + deviceName - resp := &drapbv1.NodePrepareResourceResponse{CdiDevices: []string{dev}} + resp := &drapbv1alpha2.NodePrepareResourceResponse{CdiDevices: []string{dev}} ex.mutex.Lock() defer ex.mutex.Unlock() @@ -211,10 +212,34 @@ func (ex *ExamplePlugin) NodePrepareResource(ctx context.Context, req *drapbv1.N return resp, nil } +func (ex *ExamplePlugin) NodePrepareResources(ctx context.Context, req *drapbv1alpha3.NodePrepareResourcesRequest) (*drapbv1alpha3.NodePrepareResourcesResponse, error) { + resp := &drapbv1alpha3.NodePrepareResourcesResponse{ + Claims: make(map[string]*drapbv1alpha3.NodePrepareResourceResponse), + } + for _, claimReq := range req.Claims { + claimResp, err := ex.NodePrepareResource(ctx, &drapbv1alpha2.NodePrepareResourceRequest{ + Namespace: claimReq.Namespace, + ClaimName: claimReq.Name, + ClaimUid: claimReq.Uid, + ResourceHandle: claimReq.ResourceHandle, + }) + if err != nil { + resp.Claims[claimReq.Uid] = &drapbv1alpha3.NodePrepareResourceResponse{ + Error: err.Error(), + } + } else { + resp.Claims[claimReq.Uid] = &drapbv1alpha3.NodePrepareResourceResponse{ + CDIDevices: claimResp.CdiDevices, + } + } + } + return resp, nil +} + // NodeUnprepareResource removes the CDI file created by // NodePrepareResource. It's idempotent, therefore it is not an error when that // file is already gone. 
-func (ex *ExamplePlugin) NodeUnprepareResource(ctx context.Context, req *drapbv1.NodeUnprepareResourceRequest) (*drapbv1.NodeUnprepareResourceResponse, error) { +func (ex *ExamplePlugin) NodeUnprepareResource(ctx context.Context, req *drapbv1alpha2.NodeUnprepareResourceRequest) (*drapbv1alpha2.NodeUnprepareResourceResponse, error) { logger := klog.FromContext(ctx) // Block to emulate plugin stuckness or slowness. @@ -234,7 +259,29 @@ func (ex *ExamplePlugin) NodeUnprepareResource(ctx context.Context, req *drapbv1 defer ex.mutex.Unlock() delete(ex.prepared, ClaimID{Name: req.ClaimName, UID: req.ClaimUid}) - return &drapbv1.NodeUnprepareResourceResponse{}, nil + return &drapbv1alpha2.NodeUnprepareResourceResponse{}, nil +} + +func (ex *ExamplePlugin) NodeUnprepareResources(ctx context.Context, req *drapbv1alpha3.NodeUnprepareResourcesRequest) (*drapbv1alpha3.NodeUnprepareResourcesResponse, error) { + resp := &drapbv1alpha3.NodeUnprepareResourcesResponse{ + Claims: make(map[string]*drapbv1alpha3.NodeUnprepareResourceResponse), + } + for _, claimReq := range req.Claims { + _, err := ex.NodeUnprepareResource(ctx, &drapbv1alpha2.NodeUnprepareResourceRequest{ + Namespace: claimReq.Namespace, + ClaimName: claimReq.Name, + ClaimUid: claimReq.Uid, + ResourceHandle: claimReq.ResourceHandle, + }) + if err != nil { + resp.Claims[claimReq.Uid] = &drapbv1alpha3.NodeUnprepareResourceResponse{ + Error: err.Error(), + } + } else { + resp.Claims[claimReq.Uid] = &drapbv1alpha3.NodeUnprepareResourceResponse{} + } + } + return resp, nil } func (ex *ExamplePlugin) GetPreparedResources() []ClaimID { diff --git a/test/e2e_node/dra_test.go b/test/e2e_node/dra_test.go index c654fa04f3b..22e7221ea75 100644 --- a/test/e2e_node/dra_test.go +++ b/test/e2e_node/dra_test.go @@ -109,8 +109,8 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation][Node framework.ExpectNoError(err) }) - ginkgo.It("must keep pod in pending state if NodePrepareResource times out", func(ctx 
context.Context) { - ginkgo.By("set delay for the NodePrepareResource call") + ginkgo.It("must keep pod in pending state if NodePrepareResources times out", func(ctx context.Context) { + ginkgo.By("set delay for the NodePrepareResources call") kubeletPlugin.Block() pod := createTestObjects(ctx, f.ClientSet, getNodeName(ctx, f), f.Namespace.Name, "draclass", "external-claim", "drapod") @@ -120,8 +120,8 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation][Node }) framework.ExpectNoError(err) - ginkgo.By("wait for NodePrepareResource call") - gomega.Eventually(kubeletPlugin.GetGRPCCalls).WithTimeout(dra.PluginClientTimeout * 2).Should(testdriver.NodePrepareResourceCalled) + ginkgo.By("wait for NodePrepareResources call") + gomega.Eventually(kubeletPlugin.GetGRPCCalls).WithTimeout(dra.PluginClientTimeout * 2).Should(testdriver.NodePrepareResourcesCalled) // TODO: Check condition or event when implemented // see https://github.com/kubernetes/kubernetes/issues/118468 for details diff --git a/vendor/modules.txt b/vendor/modules.txt index 5fe0ab42620..fdd5396a4b0 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -2262,6 +2262,7 @@ k8s.io/kubelet/pkg/apis/credentialprovider/v1alpha1 k8s.io/kubelet/pkg/apis/credentialprovider/v1beta1 k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1 k8s.io/kubelet/pkg/apis/dra/v1alpha2 +k8s.io/kubelet/pkg/apis/dra/v1alpha3 k8s.io/kubelet/pkg/apis/pluginregistration/v1 k8s.io/kubelet/pkg/apis/podresources/v1 k8s.io/kubelet/pkg/apis/podresources/v1alpha1