From 99248b14064ee473d6753cfcbaea8844977a875d Mon Sep 17 00:00:00 2001
From: Patrick Ohly
Date: Mon, 8 Feb 2021 16:42:51 +0100
Subject: [PATCH 01/14] e2e: reduce worker threads during mock testing

We don't need much concurrency, and having too many worker threads has
one disadvantage (besides resource usage): when the sidecar loses the
connection to the CSI driver, it calls klog.Fatal, which prints all
goroutines. This can lead to a lot of output.
---
 .../testing-manifests/storage-csi/mock/csi-mock-driver.yaml | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver.yaml b/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver.yaml
index 13d97fb36b5..f5696bac6e4 100644
--- a/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver.yaml
+++ b/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver.yaml
@@ -24,6 +24,9 @@ spec:
         - "-v=5"
         # Needed for fsGroup support.
         - "--default-fstype=ext4"
+        # We don't need much concurrency, and having many goroutines
+        # makes the klog.Fatal output during shutdown very long.
+        - "--worker-threads=5"
         env:
         - name: ADDRESS
           value: /csi/csi.sock

From 21ffdd1a28f675a294e1157ae1fe5ac9edffa2c9 Mon Sep 17 00:00:00 2001
From: Patrick Ohly
Date: Thu, 25 Feb 2021 13:19:50 +0100
Subject: [PATCH 02/14] test: update CSI mock driver

The new release properly checks for the existence of the staging
directory. The names of the volumes are now the same as in
https://github.com/kubernetes-csi/csi-test/blob/master/mock/example/deploy/csi-mock-driver-deployment.yaml
---
 .../storage-csi/mock/csi-mock-driver.yaml     | 19 +++++++++++++------
 1 file changed, 13 insertions(+), 6 deletions(-)

diff --git a/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver.yaml b/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver.yaml
index f5696bac6e4..7ec5f427b3e 100644
--- a/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver.yaml
+++ b/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver.yaml
@@ -53,7 +53,7 @@ spec:
         - mountPath: /registration
           name: registration-dir
       - name: mock
-        image: k8s.gcr.io/sig-storage/mock-driver:v4.0.2
+        image: k8s.gcr.io/sig-storage/mock-driver:v4.1.0
         args:
         - "--name=mock.storage.k8s.io"
         - "-v=3" # enabled the gRPC call logging
@@ -70,11 +70,12 @@ spec:
         volumeMounts:
         - mountPath: /csi
          name: socket-dir
+        - mountPath: /etc/hooks
+          name: hooks
         - mountPath: /var/lib/kubelet/pods
-          mountPropagation: Bidirectional
-          name: mountpoint-dir
-        - name: hooks
-          mountPath: /etc/hooks
+          name: kubelet-pods-dir
+        - mountPath: /var/lib/kubelet/plugins/kubernetes.io/csi
+          name: kubelet-csi-dir
       volumes:
       - hostPath:
           path: /var/lib/kubelet/plugins/csi-mock
           type: DirectoryOrCreate
         name: socket-dir
       - hostPath:
           path: /var/lib/kubelet/pods
+          type: Directory
+          # mock driver doesn't make mounts and therefore doesn't need mount propagation.
+          # mountPropagation: Bidirectional
+        name: kubelet-pods-dir
+      - hostPath:
+          path: /var/lib/kubelet/plugins/kubernetes.io/csi
           type: DirectoryOrCreate
-        name: mountpoint-dir
+        name: kubelet-csi-dir
       - hostPath:
           path: /var/lib/kubelet/plugins_registry
           type: Directory

From 7f2b438020b2eefd9c42f92bfcc3cfebd3d0d3ae Mon Sep 17 00:00:00 2001
From: Patrick Ohly
Date: Wed, 25 Nov 2020 08:39:44 +0100
Subject: [PATCH 03/14] e2e: import csi-test mock driver

This is a verbatim copy of the corresponding files in csi-test
v4.0.2. They'll be modified in future commits to make the code usable
when embedded in e2e.test.
Some of those changes may be worth backporting to csi-test, but this
is uncertain at this time.
---
 .../csi-test/driver/driver-controller.go      | 110 +++
 .../drivers/csi-test/driver/driver-node.go    | 109 +++
 .../storage/drivers/csi-test/driver/driver.go | 312 +++++++
 .../drivers/csi-test/driver/driver.mock.go    | 392 ++++++++
 .../storage/drivers/csi-test/driver/mock.go   |  89 ++
 .../csi-test/mock/cache/SnapshotCache.go      |  89 ++
 .../csi-test/mock/service/controller.go       | 834 ++++++++++++++++++
 .../csi-test/mock/service/hooks-const.go      |  24 +
 .../drivers/csi-test/mock/service/identity.go |  74 ++
 .../drivers/csi-test/mock/service/node.go     | 460 ++++++++++
 .../drivers/csi-test/mock/service/service.go  | 293 ++++++
 11 files changed, 2786 insertions(+)
 create mode 100644 test/e2e/storage/drivers/csi-test/driver/driver-controller.go
 create mode 100644 test/e2e/storage/drivers/csi-test/driver/driver-node.go
 create mode 100644 test/e2e/storage/drivers/csi-test/driver/driver.go
 create mode 100644 test/e2e/storage/drivers/csi-test/driver/driver.mock.go
 create mode 100644 test/e2e/storage/drivers/csi-test/driver/mock.go
 create mode 100644 test/e2e/storage/drivers/csi-test/mock/cache/SnapshotCache.go
 create mode 100644 test/e2e/storage/drivers/csi-test/mock/service/controller.go
 create mode 100644 test/e2e/storage/drivers/csi-test/mock/service/hooks-const.go
 create mode 100644 test/e2e/storage/drivers/csi-test/mock/service/identity.go
 create mode 100644 test/e2e/storage/drivers/csi-test/mock/service/node.go
 create mode 100644 test/e2e/storage/drivers/csi-test/mock/service/service.go

diff --git a/test/e2e/storage/drivers/csi-test/driver/driver-controller.go b/test/e2e/storage/drivers/csi-test/driver/driver-controller.go
new file mode 100644
index 00000000000..1d8d2bd771e
--- /dev/null
+++ b/test/e2e/storage/drivers/csi-test/driver/driver-controller.go
@@ -0,0 +1,110 @@
+/*
+Copyright 2019 Kubernetes Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package driver
+
+import (
+	"context"
+	"net"
+	"sync"
+
+	"google.golang.org/grpc/reflection"
+
+	csi "github.com/container-storage-interface/spec/lib/go/csi"
+	"google.golang.org/grpc"
+)
+
+// CSIDriverControllerServer is the Controller service component of the driver.
+type CSIDriverControllerServer struct {
+	Controller csi.ControllerServer
+	Identity   csi.IdentityServer
+}
+
+// CSIDriverController is the CSI Driver Controller backend.
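+// It runs the Controller and (optionally) Identity services on their own
+// gRPC server, complementing CSIDriverNode when the mock driver is deployed
+// as separate controller and node components.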
+type CSIDriverController struct { + listener net.Listener + server *grpc.Server + controllerServer *CSIDriverControllerServer + wg sync.WaitGroup + running bool + lock sync.Mutex + creds *CSICreds +} + +func NewCSIDriverController(controllerServer *CSIDriverControllerServer) *CSIDriverController { + return &CSIDriverController{ + controllerServer: controllerServer, + } +} + +func (c *CSIDriverController) goServe(started chan<- bool) { + goServe(c.server, &c.wg, c.listener, started) +} + +func (c *CSIDriverController) Address() string { + return c.listener.Addr().String() +} + +func (c *CSIDriverController) Start(l net.Listener) error { + c.lock.Lock() + defer c.lock.Unlock() + + // Set listener. + c.listener = l + + // Create a new grpc server. + c.server = grpc.NewServer( + grpc.UnaryInterceptor(c.callInterceptor), + ) + + if c.controllerServer.Controller != nil { + csi.RegisterControllerServer(c.server, c.controllerServer.Controller) + } + if c.controllerServer.Identity != nil { + csi.RegisterIdentityServer(c.server, c.controllerServer.Identity) + } + + reflection.Register(c.server) + + waitForServer := make(chan bool) + c.goServe(waitForServer) + <-waitForServer + c.running = true + return nil +} + +func (c *CSIDriverController) Stop() { + stop(&c.lock, &c.wg, c.server, c.running) +} + +func (c *CSIDriverController) Close() { + c.server.Stop() +} + +func (c *CSIDriverController) IsRunning() bool { + c.lock.Lock() + defer c.lock.Unlock() + + return c.running +} + +func (c *CSIDriverController) SetDefaultCreds() { + setDefaultCreds(c.creds) +} + +func (c *CSIDriverController) callInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + return callInterceptor(ctx, c.creds, req, info, handler) +} diff --git a/test/e2e/storage/drivers/csi-test/driver/driver-node.go b/test/e2e/storage/drivers/csi-test/driver/driver-node.go new file mode 100644 index 00000000000..7720bfc493a --- /dev/null +++ b/test/e2e/storage/drivers/csi-test/driver/driver-node.go @@ -0,0 +1,109 @@ +/* +Copyright 2019 Kubernetes Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package driver + +import ( + context "context" + "net" + "sync" + + csi "github.com/container-storage-interface/spec/lib/go/csi" + "google.golang.org/grpc" + "google.golang.org/grpc/reflection" +) + +// CSIDriverNodeServer is the Node service component of the driver. +type CSIDriverNodeServer struct { + Node csi.NodeServer + Identity csi.IdentityServer +} + +// CSIDriverNode is the CSI Driver Node backend. 
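+// It runs the Node and (optionally) Identity services on their own gRPC
+// server, mirroring CSIDriverController for the node side of such a split
+// deployment.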
+type CSIDriverNode struct { + listener net.Listener + server *grpc.Server + nodeServer *CSIDriverNodeServer + wg sync.WaitGroup + running bool + lock sync.Mutex + creds *CSICreds +} + +func NewCSIDriverNode(nodeServer *CSIDriverNodeServer) *CSIDriverNode { + return &CSIDriverNode{ + nodeServer: nodeServer, + } +} + +func (c *CSIDriverNode) goServe(started chan<- bool) { + goServe(c.server, &c.wg, c.listener, started) +} + +func (c *CSIDriverNode) Address() string { + return c.listener.Addr().String() +} + +func (c *CSIDriverNode) Start(l net.Listener) error { + c.lock.Lock() + defer c.lock.Unlock() + + // Set listener. + c.listener = l + + // Create a new grpc server. + c.server = grpc.NewServer( + grpc.UnaryInterceptor(c.callInterceptor), + ) + + if c.nodeServer.Node != nil { + csi.RegisterNodeServer(c.server, c.nodeServer.Node) + } + if c.nodeServer.Identity != nil { + csi.RegisterIdentityServer(c.server, c.nodeServer.Identity) + } + + reflection.Register(c.server) + + waitForServer := make(chan bool) + c.goServe(waitForServer) + <-waitForServer + c.running = true + return nil +} + +func (c *CSIDriverNode) Stop() { + stop(&c.lock, &c.wg, c.server, c.running) +} + +func (c *CSIDriverNode) Close() { + c.server.Stop() +} + +func (c *CSIDriverNode) IsRunning() bool { + c.lock.Lock() + defer c.lock.Unlock() + + return c.running +} + +func (c *CSIDriverNode) SetDefaultCreds() { + setDefaultCreds(c.creds) +} + +func (c *CSIDriverNode) callInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + return callInterceptor(ctx, c.creds, req, info, handler) +} diff --git a/test/e2e/storage/drivers/csi-test/driver/driver.go b/test/e2e/storage/drivers/csi-test/driver/driver.go new file mode 100644 index 00000000000..33ffe99359d --- /dev/null +++ b/test/e2e/storage/drivers/csi-test/driver/driver.go @@ -0,0 +1,312 @@ +/* +Copyright 2017 Luis Pabón luis@portworx.com + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +//go:generate mockgen -package=driver -destination=driver.mock.go github.com/container-storage-interface/spec/lib/go/csi IdentityServer,ControllerServer,NodeServer + +package driver + +import ( + "context" + "encoding/json" + "errors" + "net" + "sync" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "k8s.io/klog" + + "github.com/container-storage-interface/spec/lib/go/csi" + "google.golang.org/grpc" + "google.golang.org/grpc/reflection" +) + +var ( + // ErrNoCredentials is the error when a secret is enabled but not passed in the request. + ErrNoCredentials = errors.New("secret must be provided") + // ErrAuthFailed is the error when the secret is incorrect. + ErrAuthFailed = errors.New("authentication failed") +) + +// CSIDriverServers is a unified driver component with both Controller and Node +// services. +type CSIDriverServers struct { + Controller csi.ControllerServer + Identity csi.IdentityServer + Node csi.NodeServer +} + +// This is the key name in all the CSI secret objects. 
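+// A request passes the authentication check when its secrets map contains
+// this key with the value expected for the call, for example
+// {"secretKey": "secretval1"} for CreateVolume with the default credentials.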
+const secretField = "secretKey" + +// CSICreds is a driver specific secret type. Drivers can have a key-val pair of +// secrets. This mock driver has a single string secret with secretField as the +// key. +type CSICreds struct { + CreateVolumeSecret string + DeleteVolumeSecret string + ControllerPublishVolumeSecret string + ControllerUnpublishVolumeSecret string + NodeStageVolumeSecret string + NodePublishVolumeSecret string + CreateSnapshotSecret string + DeleteSnapshotSecret string + ControllerValidateVolumeCapabilitiesSecret string +} + +type CSIDriver struct { + listener net.Listener + server *grpc.Server + servers *CSIDriverServers + wg sync.WaitGroup + running bool + lock sync.Mutex + creds *CSICreds +} + +func NewCSIDriver(servers *CSIDriverServers) *CSIDriver { + return &CSIDriver{ + servers: servers, + } +} + +func (c *CSIDriver) goServe(started chan<- bool) { + goServe(c.server, &c.wg, c.listener, started) +} + +func (c *CSIDriver) Address() string { + return c.listener.Addr().String() +} +func (c *CSIDriver) Start(l net.Listener) error { + c.lock.Lock() + defer c.lock.Unlock() + + // Set listener + c.listener = l + + // Create a new grpc server + c.server = grpc.NewServer( + grpc.UnaryInterceptor(c.callInterceptor), + ) + + // Register Mock servers + if c.servers.Controller != nil { + csi.RegisterControllerServer(c.server, c.servers.Controller) + } + if c.servers.Identity != nil { + csi.RegisterIdentityServer(c.server, c.servers.Identity) + } + if c.servers.Node != nil { + csi.RegisterNodeServer(c.server, c.servers.Node) + } + reflection.Register(c.server) + + // Start listening for requests + waitForServer := make(chan bool) + c.goServe(waitForServer) + <-waitForServer + c.running = true + return nil +} + +func (c *CSIDriver) Stop() { + stop(&c.lock, &c.wg, c.server, c.running) +} + +func (c *CSIDriver) Close() { + c.server.Stop() +} + +func (c *CSIDriver) IsRunning() bool { + c.lock.Lock() + defer c.lock.Unlock() + + return c.running +} + +// SetDefaultCreds sets the default secrets for CSI creds. +func (c *CSIDriver) SetDefaultCreds() { + setDefaultCreds(c.creds) +} + +func (c *CSIDriver) callInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + return callInterceptor(ctx, c.creds, req, info, handler) +} + +// goServe starts a grpc server. +func goServe(server *grpc.Server, wg *sync.WaitGroup, listener net.Listener, started chan<- bool) { + wg.Add(1) + go func() { + defer wg.Done() + started <- true + err := server.Serve(listener) + if err != nil { + panic(err.Error()) + } + }() +} + +// stop stops a grpc server. +func stop(lock *sync.Mutex, wg *sync.WaitGroup, server *grpc.Server, running bool) { + lock.Lock() + defer lock.Unlock() + + if !running { + return + } + + server.Stop() + wg.Wait() +} + +// setDefaultCreds sets the default credentials, given a CSICreds instance. 
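+// With credentials enabled, a request without secrets fails with
+// codes.InvalidArgument and a request with a wrong secret value fails with
+// codes.Unauthenticated (see authInterceptor).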
+func setDefaultCreds(creds *CSICreds) {
+	*creds = CSICreds{
+		CreateVolumeSecret:              "secretval1",
+		DeleteVolumeSecret:              "secretval2",
+		ControllerPublishVolumeSecret:   "secretval3",
+		ControllerUnpublishVolumeSecret: "secretval4",
+		NodeStageVolumeSecret:           "secretval5",
+		NodePublishVolumeSecret:         "secretval6",
+		CreateSnapshotSecret:            "secretval7",
+		DeleteSnapshotSecret:            "secretval8",
+		ControllerValidateVolumeCapabilitiesSecret: "secretval9",
+	}
+}
+
+func callInterceptor(ctx context.Context, creds *CSICreds, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
+	err := authInterceptor(creds, req)
+	if err != nil {
+		logGRPC(info.FullMethod, req, nil, err)
+		return nil, err
+	}
+	rsp, err := handler(ctx, req)
+	logGRPC(info.FullMethod, req, rsp, err)
+	return rsp, err
+}
+
+func authInterceptor(creds *CSICreds, req interface{}) error {
+	if creds != nil {
+		authenticated, authErr := isAuthenticated(req, creds)
+		if !authenticated {
+			if authErr == ErrNoCredentials {
+				return status.Error(codes.InvalidArgument, authErr.Error())
+			}
+			if authErr == ErrAuthFailed {
+				return status.Error(codes.Unauthenticated, authErr.Error())
+			}
+		}
+	}
+	return nil
+}
+
+func logGRPC(method string, request, reply interface{}, err error) {
+	// Log JSON with the request and response for easier parsing
+	logMessage := struct {
+		Method   string
+		Request  interface{}
+		Response interface{}
+		// Error as string, for backward compatibility.
+		// "" on no error.
+		Error string
+		// Full error dump, to be able to parse out full gRPC error code and message separately in a test.
+		FullError error
+	}{
+		Method:    method,
+		Request:   request,
+		Response:  reply,
+		FullError: err,
+	}
+
+	if err != nil {
+		logMessage.Error = err.Error()
+	}
+
+	msg, _ := json.Marshal(logMessage)
+	klog.V(3).Infof("gRPCCall: %s\n", msg)
+}
+
+func isAuthenticated(req interface{}, creds *CSICreds) (bool, error) {
+	switch r := req.(type) {
+	case *csi.CreateVolumeRequest:
+		return authenticateCreateVolume(r, creds)
+	case *csi.DeleteVolumeRequest:
+		return authenticateDeleteVolume(r, creds)
+	case *csi.ControllerPublishVolumeRequest:
+		return authenticateControllerPublishVolume(r, creds)
+	case *csi.ControllerUnpublishVolumeRequest:
+		return authenticateControllerUnpublishVolume(r, creds)
+	case *csi.NodeStageVolumeRequest:
+		return authenticateNodeStageVolume(r, creds)
+	case *csi.NodePublishVolumeRequest:
+		return authenticateNodePublishVolume(r, creds)
+	case *csi.CreateSnapshotRequest:
+		return authenticateCreateSnapshot(r, creds)
+	case *csi.DeleteSnapshotRequest:
+		return authenticateDeleteSnapshot(r, creds)
+	case *csi.ValidateVolumeCapabilitiesRequest:
+		return authenticateControllerValidateVolumeCapabilities(r, creds)
+	default:
+		return true, nil
+	}
+}
+
+func authenticateCreateVolume(req *csi.CreateVolumeRequest, creds *CSICreds) (bool, error) {
+	return credsCheck(req.GetSecrets(), creds.CreateVolumeSecret)
+}
+
+func authenticateDeleteVolume(req *csi.DeleteVolumeRequest, creds *CSICreds) (bool, error) {
+	return credsCheck(req.GetSecrets(), creds.DeleteVolumeSecret)
+}
+
+func authenticateControllerPublishVolume(req *csi.ControllerPublishVolumeRequest, creds *CSICreds) (bool, error) {
+	return credsCheck(req.GetSecrets(), creds.ControllerPublishVolumeSecret)
+}
+
+func authenticateControllerUnpublishVolume(req *csi.ControllerUnpublishVolumeRequest, creds *CSICreds) (bool, error) {
+	return credsCheck(req.GetSecrets(), creds.ControllerUnpublishVolumeSecret)
+}
+
+func 
authenticateNodeStageVolume(req *csi.NodeStageVolumeRequest, creds *CSICreds) (bool, error) { + return credsCheck(req.GetSecrets(), creds.NodeStageVolumeSecret) +} + +func authenticateNodePublishVolume(req *csi.NodePublishVolumeRequest, creds *CSICreds) (bool, error) { + return credsCheck(req.GetSecrets(), creds.NodePublishVolumeSecret) +} + +func authenticateCreateSnapshot(req *csi.CreateSnapshotRequest, creds *CSICreds) (bool, error) { + return credsCheck(req.GetSecrets(), creds.CreateSnapshotSecret) +} + +func authenticateDeleteSnapshot(req *csi.DeleteSnapshotRequest, creds *CSICreds) (bool, error) { + return credsCheck(req.GetSecrets(), creds.DeleteSnapshotSecret) +} + +func authenticateControllerValidateVolumeCapabilities(req *csi.ValidateVolumeCapabilitiesRequest, creds *CSICreds) (bool, error) { + return credsCheck(req.GetSecrets(), creds.ControllerValidateVolumeCapabilitiesSecret) +} + +func credsCheck(secrets map[string]string, secretVal string) (bool, error) { + if len(secrets) == 0 { + return false, ErrNoCredentials + } + + if secrets[secretField] != secretVal { + return false, ErrAuthFailed + } + return true, nil +} diff --git a/test/e2e/storage/drivers/csi-test/driver/driver.mock.go b/test/e2e/storage/drivers/csi-test/driver/driver.mock.go new file mode 100644 index 00000000000..7eeaca0f022 --- /dev/null +++ b/test/e2e/storage/drivers/csi-test/driver/driver.mock.go @@ -0,0 +1,392 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/container-storage-interface/spec/lib/go/csi (interfaces: IdentityServer,ControllerServer,NodeServer) + +// Package driver is a generated GoMock package. +package driver + +import ( + context "context" + csi "github.com/container-storage-interface/spec/lib/go/csi" + gomock "github.com/golang/mock/gomock" + reflect "reflect" +) + +// MockIdentityServer is a mock of IdentityServer interface +type MockIdentityServer struct { + ctrl *gomock.Controller + recorder *MockIdentityServerMockRecorder +} + +// MockIdentityServerMockRecorder is the mock recorder for MockIdentityServer +type MockIdentityServerMockRecorder struct { + mock *MockIdentityServer +} + +// NewMockIdentityServer creates a new mock instance +func NewMockIdentityServer(ctrl *gomock.Controller) *MockIdentityServer { + mock := &MockIdentityServer{ctrl: ctrl} + mock.recorder = &MockIdentityServerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockIdentityServer) EXPECT() *MockIdentityServerMockRecorder { + return m.recorder +} + +// GetPluginCapabilities mocks base method +func (m *MockIdentityServer) GetPluginCapabilities(arg0 context.Context, arg1 *csi.GetPluginCapabilitiesRequest) (*csi.GetPluginCapabilitiesResponse, error) { + ret := m.ctrl.Call(m, "GetPluginCapabilities", arg0, arg1) + ret0, _ := ret[0].(*csi.GetPluginCapabilitiesResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPluginCapabilities indicates an expected call of GetPluginCapabilities +func (mr *MockIdentityServerMockRecorder) GetPluginCapabilities(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPluginCapabilities", reflect.TypeOf((*MockIdentityServer)(nil).GetPluginCapabilities), arg0, arg1) +} + +// GetPluginInfo mocks base method +func (m *MockIdentityServer) GetPluginInfo(arg0 context.Context, arg1 *csi.GetPluginInfoRequest) (*csi.GetPluginInfoResponse, error) { + ret := m.ctrl.Call(m, "GetPluginInfo", arg0, arg1) + ret0, _ := 
ret[0].(*csi.GetPluginInfoResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPluginInfo indicates an expected call of GetPluginInfo +func (mr *MockIdentityServerMockRecorder) GetPluginInfo(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPluginInfo", reflect.TypeOf((*MockIdentityServer)(nil).GetPluginInfo), arg0, arg1) +} + +// Probe mocks base method +func (m *MockIdentityServer) Probe(arg0 context.Context, arg1 *csi.ProbeRequest) (*csi.ProbeResponse, error) { + ret := m.ctrl.Call(m, "Probe", arg0, arg1) + ret0, _ := ret[0].(*csi.ProbeResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Probe indicates an expected call of Probe +func (mr *MockIdentityServerMockRecorder) Probe(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Probe", reflect.TypeOf((*MockIdentityServer)(nil).Probe), arg0, arg1) +} + +// MockControllerServer is a mock of ControllerServer interface +type MockControllerServer struct { + ctrl *gomock.Controller + recorder *MockControllerServerMockRecorder +} + +// MockControllerServerMockRecorder is the mock recorder for MockControllerServer +type MockControllerServerMockRecorder struct { + mock *MockControllerServer +} + +// NewMockControllerServer creates a new mock instance +func NewMockControllerServer(ctrl *gomock.Controller) *MockControllerServer { + mock := &MockControllerServer{ctrl: ctrl} + mock.recorder = &MockControllerServerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockControllerServer) EXPECT() *MockControllerServerMockRecorder { + return m.recorder +} + +// ControllerExpandVolume mocks base method +func (m *MockControllerServer) ControllerExpandVolume(arg0 context.Context, arg1 *csi.ControllerExpandVolumeRequest) (*csi.ControllerExpandVolumeResponse, error) { + ret := m.ctrl.Call(m, "ControllerExpandVolume", arg0, arg1) + ret0, _ := ret[0].(*csi.ControllerExpandVolumeResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ControllerExpandVolume indicates an expected call of ControllerExpandVolume +func (mr *MockControllerServerMockRecorder) ControllerExpandVolume(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ControllerExpandVolume", reflect.TypeOf((*MockControllerServer)(nil).ControllerExpandVolume), arg0, arg1) +} + +// ControllerGetCapabilities mocks base method +func (m *MockControllerServer) ControllerGetCapabilities(arg0 context.Context, arg1 *csi.ControllerGetCapabilitiesRequest) (*csi.ControllerGetCapabilitiesResponse, error) { + ret := m.ctrl.Call(m, "ControllerGetCapabilities", arg0, arg1) + ret0, _ := ret[0].(*csi.ControllerGetCapabilitiesResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ControllerGetCapabilities indicates an expected call of ControllerGetCapabilities +func (mr *MockControllerServerMockRecorder) ControllerGetCapabilities(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ControllerGetCapabilities", reflect.TypeOf((*MockControllerServer)(nil).ControllerGetCapabilities), arg0, arg1) +} + +// ControllerPublishVolume mocks base method +func (m *MockControllerServer) ControllerPublishVolume(arg0 context.Context, arg1 *csi.ControllerPublishVolumeRequest) (*csi.ControllerPublishVolumeResponse, error) { + ret := m.ctrl.Call(m, "ControllerPublishVolume", arg0, arg1) + ret0, _ := 
ret[0].(*csi.ControllerPublishVolumeResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ControllerPublishVolume indicates an expected call of ControllerPublishVolume +func (mr *MockControllerServerMockRecorder) ControllerPublishVolume(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ControllerPublishVolume", reflect.TypeOf((*MockControllerServer)(nil).ControllerPublishVolume), arg0, arg1) +} + +// ControllerUnpublishVolume mocks base method +func (m *MockControllerServer) ControllerUnpublishVolume(arg0 context.Context, arg1 *csi.ControllerUnpublishVolumeRequest) (*csi.ControllerUnpublishVolumeResponse, error) { + ret := m.ctrl.Call(m, "ControllerUnpublishVolume", arg0, arg1) + ret0, _ := ret[0].(*csi.ControllerUnpublishVolumeResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ControllerUnpublishVolume indicates an expected call of ControllerUnpublishVolume +func (mr *MockControllerServerMockRecorder) ControllerUnpublishVolume(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ControllerUnpublishVolume", reflect.TypeOf((*MockControllerServer)(nil).ControllerUnpublishVolume), arg0, arg1) +} + +// CreateSnapshot mocks base method +func (m *MockControllerServer) CreateSnapshot(arg0 context.Context, arg1 *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) { + ret := m.ctrl.Call(m, "CreateSnapshot", arg0, arg1) + ret0, _ := ret[0].(*csi.CreateSnapshotResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateSnapshot indicates an expected call of CreateSnapshot +func (mr *MockControllerServerMockRecorder) CreateSnapshot(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateSnapshot", reflect.TypeOf((*MockControllerServer)(nil).CreateSnapshot), arg0, arg1) +} + +// CreateVolume mocks base method +func (m *MockControllerServer) CreateVolume(arg0 context.Context, arg1 *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) { + ret := m.ctrl.Call(m, "CreateVolume", arg0, arg1) + ret0, _ := ret[0].(*csi.CreateVolumeResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateVolume indicates an expected call of CreateVolume +func (mr *MockControllerServerMockRecorder) CreateVolume(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateVolume", reflect.TypeOf((*MockControllerServer)(nil).CreateVolume), arg0, arg1) +} + +// DeleteSnapshot mocks base method +func (m *MockControllerServer) DeleteSnapshot(arg0 context.Context, arg1 *csi.DeleteSnapshotRequest) (*csi.DeleteSnapshotResponse, error) { + ret := m.ctrl.Call(m, "DeleteSnapshot", arg0, arg1) + ret0, _ := ret[0].(*csi.DeleteSnapshotResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteSnapshot indicates an expected call of DeleteSnapshot +func (mr *MockControllerServerMockRecorder) DeleteSnapshot(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteSnapshot", reflect.TypeOf((*MockControllerServer)(nil).DeleteSnapshot), arg0, arg1) +} + +// DeleteVolume mocks base method +func (m *MockControllerServer) DeleteVolume(arg0 context.Context, arg1 *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) { + ret := m.ctrl.Call(m, "DeleteVolume", arg0, arg1) + ret0, _ := ret[0].(*csi.DeleteVolumeResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteVolume indicates an expected call of DeleteVolume +func (mr 
*MockControllerServerMockRecorder) DeleteVolume(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteVolume", reflect.TypeOf((*MockControllerServer)(nil).DeleteVolume), arg0, arg1) +} + +// GetCapacity mocks base method +func (m *MockControllerServer) GetCapacity(arg0 context.Context, arg1 *csi.GetCapacityRequest) (*csi.GetCapacityResponse, error) { + ret := m.ctrl.Call(m, "GetCapacity", arg0, arg1) + ret0, _ := ret[0].(*csi.GetCapacityResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetCapacity indicates an expected call of GetCapacity +func (mr *MockControllerServerMockRecorder) GetCapacity(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCapacity", reflect.TypeOf((*MockControllerServer)(nil).GetCapacity), arg0, arg1) +} + +// ListSnapshots mocks base method +func (m *MockControllerServer) ListSnapshots(arg0 context.Context, arg1 *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) { + ret := m.ctrl.Call(m, "ListSnapshots", arg0, arg1) + ret0, _ := ret[0].(*csi.ListSnapshotsResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListSnapshots indicates an expected call of ListSnapshots +func (mr *MockControllerServerMockRecorder) ListSnapshots(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListSnapshots", reflect.TypeOf((*MockControllerServer)(nil).ListSnapshots), arg0, arg1) +} + +// ListVolumes mocks base method +func (m *MockControllerServer) ListVolumes(arg0 context.Context, arg1 *csi.ListVolumesRequest) (*csi.ListVolumesResponse, error) { + ret := m.ctrl.Call(m, "ListVolumes", arg0, arg1) + ret0, _ := ret[0].(*csi.ListVolumesResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +func (m *MockControllerServer) ControllerGetVolume(arg0 context.Context, arg1 *csi.ControllerGetVolumeRequest) (*csi.ControllerGetVolumeResponse, error) { + ret := m.ctrl.Call(m, "ControllerGetVolume", arg0, arg1) + ret0, _ := ret[0].(*csi.ControllerGetVolumeResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ControllerGetVolume indicates an expected call of ControllerGetVolume +func (mr *MockControllerServerMockRecorder) ControllerGetVolume(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ControllerGetVolume", reflect.TypeOf((*MockControllerServer)(nil).ControllerGetVolume), arg0, arg1) +} + +// ListVolumes indicates an expected call of ListVolumes +func (mr *MockControllerServerMockRecorder) ListVolumes(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListVolumes", reflect.TypeOf((*MockControllerServer)(nil).ListVolumes), arg0, arg1) +} + +// ValidateVolumeCapabilities mocks base method +func (m *MockControllerServer) ValidateVolumeCapabilities(arg0 context.Context, arg1 *csi.ValidateVolumeCapabilitiesRequest) (*csi.ValidateVolumeCapabilitiesResponse, error) { + ret := m.ctrl.Call(m, "ValidateVolumeCapabilities", arg0, arg1) + ret0, _ := ret[0].(*csi.ValidateVolumeCapabilitiesResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ValidateVolumeCapabilities indicates an expected call of ValidateVolumeCapabilities +func (mr *MockControllerServerMockRecorder) ValidateVolumeCapabilities(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ValidateVolumeCapabilities", reflect.TypeOf((*MockControllerServer)(nil).ValidateVolumeCapabilities), arg0, arg1) +} + 
+// MockNodeServer is a mock of NodeServer interface +type MockNodeServer struct { + ctrl *gomock.Controller + recorder *MockNodeServerMockRecorder +} + +// MockNodeServerMockRecorder is the mock recorder for MockNodeServer +type MockNodeServerMockRecorder struct { + mock *MockNodeServer +} + +// NewMockNodeServer creates a new mock instance +func NewMockNodeServer(ctrl *gomock.Controller) *MockNodeServer { + mock := &MockNodeServer{ctrl: ctrl} + mock.recorder = &MockNodeServerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockNodeServer) EXPECT() *MockNodeServerMockRecorder { + return m.recorder +} + +// NodeExpandVolume mocks base method +func (m *MockNodeServer) NodeExpandVolume(arg0 context.Context, arg1 *csi.NodeExpandVolumeRequest) (*csi.NodeExpandVolumeResponse, error) { + ret := m.ctrl.Call(m, "NodeExpandVolume", arg0, arg1) + ret0, _ := ret[0].(*csi.NodeExpandVolumeResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NodeExpandVolume indicates an expected call of NodeExpandVolume +func (mr *MockNodeServerMockRecorder) NodeExpandVolume(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeExpandVolume", reflect.TypeOf((*MockNodeServer)(nil).NodeExpandVolume), arg0, arg1) +} + +// NodeGetCapabilities mocks base method +func (m *MockNodeServer) NodeGetCapabilities(arg0 context.Context, arg1 *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) { + ret := m.ctrl.Call(m, "NodeGetCapabilities", arg0, arg1) + ret0, _ := ret[0].(*csi.NodeGetCapabilitiesResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NodeGetCapabilities indicates an expected call of NodeGetCapabilities +func (mr *MockNodeServerMockRecorder) NodeGetCapabilities(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeGetCapabilities", reflect.TypeOf((*MockNodeServer)(nil).NodeGetCapabilities), arg0, arg1) +} + +// NodeGetInfo mocks base method +func (m *MockNodeServer) NodeGetInfo(arg0 context.Context, arg1 *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) { + ret := m.ctrl.Call(m, "NodeGetInfo", arg0, arg1) + ret0, _ := ret[0].(*csi.NodeGetInfoResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NodeGetInfo indicates an expected call of NodeGetInfo +func (mr *MockNodeServerMockRecorder) NodeGetInfo(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeGetInfo", reflect.TypeOf((*MockNodeServer)(nil).NodeGetInfo), arg0, arg1) +} + +// NodeGetVolumeStats mocks base method +func (m *MockNodeServer) NodeGetVolumeStats(arg0 context.Context, arg1 *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) { + ret := m.ctrl.Call(m, "NodeGetVolumeStats", arg0, arg1) + ret0, _ := ret[0].(*csi.NodeGetVolumeStatsResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NodeGetVolumeStats indicates an expected call of NodeGetVolumeStats +func (mr *MockNodeServerMockRecorder) NodeGetVolumeStats(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeGetVolumeStats", reflect.TypeOf((*MockNodeServer)(nil).NodeGetVolumeStats), arg0, arg1) +} + +// NodePublishVolume mocks base method +func (m *MockNodeServer) NodePublishVolume(arg0 context.Context, arg1 *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) { + ret := m.ctrl.Call(m, "NodePublishVolume", arg0, arg1) + ret0, _ 
:= ret[0].(*csi.NodePublishVolumeResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NodePublishVolume indicates an expected call of NodePublishVolume +func (mr *MockNodeServerMockRecorder) NodePublishVolume(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodePublishVolume", reflect.TypeOf((*MockNodeServer)(nil).NodePublishVolume), arg0, arg1) +} + +// NodeStageVolume mocks base method +func (m *MockNodeServer) NodeStageVolume(arg0 context.Context, arg1 *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) { + ret := m.ctrl.Call(m, "NodeStageVolume", arg0, arg1) + ret0, _ := ret[0].(*csi.NodeStageVolumeResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NodeStageVolume indicates an expected call of NodeStageVolume +func (mr *MockNodeServerMockRecorder) NodeStageVolume(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeStageVolume", reflect.TypeOf((*MockNodeServer)(nil).NodeStageVolume), arg0, arg1) +} + +// NodeUnpublishVolume mocks base method +func (m *MockNodeServer) NodeUnpublishVolume(arg0 context.Context, arg1 *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) { + ret := m.ctrl.Call(m, "NodeUnpublishVolume", arg0, arg1) + ret0, _ := ret[0].(*csi.NodeUnpublishVolumeResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NodeUnpublishVolume indicates an expected call of NodeUnpublishVolume +func (mr *MockNodeServerMockRecorder) NodeUnpublishVolume(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeUnpublishVolume", reflect.TypeOf((*MockNodeServer)(nil).NodeUnpublishVolume), arg0, arg1) +} + +// NodeUnstageVolume mocks base method +func (m *MockNodeServer) NodeUnstageVolume(arg0 context.Context, arg1 *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) { + ret := m.ctrl.Call(m, "NodeUnstageVolume", arg0, arg1) + ret0, _ := ret[0].(*csi.NodeUnstageVolumeResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// NodeUnstageVolume indicates an expected call of NodeUnstageVolume +func (mr *MockNodeServerMockRecorder) NodeUnstageVolume(arg0, arg1 interface{}) *gomock.Call { + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeUnstageVolume", reflect.TypeOf((*MockNodeServer)(nil).NodeUnstageVolume), arg0, arg1) +} diff --git a/test/e2e/storage/drivers/csi-test/driver/mock.go b/test/e2e/storage/drivers/csi-test/driver/mock.go new file mode 100644 index 00000000000..7e2b5020104 --- /dev/null +++ b/test/e2e/storage/drivers/csi-test/driver/mock.go @@ -0,0 +1,89 @@ +/* +Copyright 2017 Luis Pabón luis@portworx.com + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package driver + +import ( + "net" + + "github.com/kubernetes-csi/csi-test/v4/utils" + "google.golang.org/grpc" +) + +type MockCSIDriverServers struct { + Controller *MockControllerServer + Identity *MockIdentityServer + Node *MockNodeServer +} + +type MockCSIDriver struct { + CSIDriver + conn *grpc.ClientConn +} + +func NewMockCSIDriver(servers *MockCSIDriverServers) *MockCSIDriver { + return &MockCSIDriver{ + CSIDriver: CSIDriver{ + servers: &CSIDriverServers{ + Controller: servers.Controller, + Node: servers.Node, + Identity: servers.Identity, + }, + }, + } +} + +// StartOnAddress starts a new gRPC server listening on given address. +func (m *MockCSIDriver) StartOnAddress(network, address string) error { + l, err := net.Listen(network, address) + if err != nil { + return err + } + + if err := m.CSIDriver.Start(l); err != nil { + l.Close() + return err + } + + return nil +} + +// Start starts a new gRPC server listening on a random TCP loopback port. +func (m *MockCSIDriver) Start() error { + // Listen on a port assigned by the net package + return m.StartOnAddress("tcp", "127.0.0.1:0") +} + +func (m *MockCSIDriver) Nexus() (*grpc.ClientConn, error) { + // Start server + err := m.Start() + if err != nil { + return nil, err + } + + // Create a client connection + m.conn, err = utils.Connect(m.Address(), grpc.WithInsecure()) + if err != nil { + return nil, err + } + + return m.conn, nil +} + +func (m *MockCSIDriver) Close() { + m.conn.Close() + m.server.Stop() +} diff --git a/test/e2e/storage/drivers/csi-test/mock/cache/SnapshotCache.go b/test/e2e/storage/drivers/csi-test/mock/cache/SnapshotCache.go new file mode 100644 index 00000000000..89835e11f20 --- /dev/null +++ b/test/e2e/storage/drivers/csi-test/mock/cache/SnapshotCache.go @@ -0,0 +1,89 @@ +package cache + +import ( + "strings" + "sync" + + "github.com/container-storage-interface/spec/lib/go/csi" +) + +type SnapshotCache interface { + Add(snapshot Snapshot) + + Delete(i int) + + List(ready bool) []csi.Snapshot + + FindSnapshot(k, v string) (int, Snapshot) +} + +type Snapshot struct { + Name string + Parameters map[string]string + SnapshotCSI csi.Snapshot +} + +type snapshotCache struct { + snapshotsRWL sync.RWMutex + snapshots []Snapshot +} + +func NewSnapshotCache() SnapshotCache { + return &snapshotCache{ + snapshots: make([]Snapshot, 0), + } +} + +func (snap *snapshotCache) Add(snapshot Snapshot) { + snap.snapshotsRWL.Lock() + defer snap.snapshotsRWL.Unlock() + + snap.snapshots = append(snap.snapshots, snapshot) +} + +func (snap *snapshotCache) Delete(i int) { + snap.snapshotsRWL.Lock() + defer snap.snapshotsRWL.Unlock() + + copy(snap.snapshots[i:], snap.snapshots[i+1:]) + snap.snapshots = snap.snapshots[:len(snap.snapshots)-1] +} + +func (snap *snapshotCache) List(ready bool) []csi.Snapshot { + snap.snapshotsRWL.RLock() + defer snap.snapshotsRWL.RUnlock() + + snapshots := make([]csi.Snapshot, 0) + for _, v := range snap.snapshots { + if v.SnapshotCSI.GetReadyToUse() { + snapshots = append(snapshots, v.SnapshotCSI) + } + } + + return snapshots +} + +func (snap *snapshotCache) FindSnapshot(k, v string) (int, Snapshot) { + snap.snapshotsRWL.RLock() + defer snap.snapshotsRWL.RUnlock() + + snapshotIdx := -1 + for i, vi := range snap.snapshots { + switch k { + case "id": + if strings.EqualFold(v, vi.SnapshotCSI.GetSnapshotId()) { + return i, vi + } + case "sourceVolumeId": + if strings.EqualFold(v, vi.SnapshotCSI.SourceVolumeId) { + return i, vi + } + case "name": + if vi.Name == v { + return i, vi + } + } + } + + return 
snapshotIdx, Snapshot{} +} diff --git a/test/e2e/storage/drivers/csi-test/mock/service/controller.go b/test/e2e/storage/drivers/csi-test/mock/service/controller.go new file mode 100644 index 00000000000..a8192fedc0e --- /dev/null +++ b/test/e2e/storage/drivers/csi-test/mock/service/controller.go @@ -0,0 +1,834 @@ +package service + +import ( + "fmt" + "math" + "path" + "reflect" + "strconv" + + "github.com/container-storage-interface/spec/lib/go/csi" + log "github.com/sirupsen/logrus" + "golang.org/x/net/context" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +const ( + MaxStorageCapacity = tib + ReadOnlyKey = "readonly" +) + +func (s *service) CreateVolume( + ctx context.Context, + req *csi.CreateVolumeRequest) ( + *csi.CreateVolumeResponse, error) { + + if len(req.Name) == 0 { + return nil, status.Error(codes.InvalidArgument, "Volume Name cannot be empty") + } + if req.VolumeCapabilities == nil { + return nil, status.Error(codes.InvalidArgument, "Volume Capabilities cannot be empty") + } + if hookVal, hookMsg := s.execHook("CreateVolumeStart"); hookVal != codes.OK { + return nil, status.Errorf(hookVal, hookMsg) + } + // Check to see if the volume already exists. + if i, v := s.findVolByName(ctx, req.Name); i >= 0 { + // Requested volume name already exists, need to check if the existing volume's + // capacity is more or equal to new request's capacity. + if v.GetCapacityBytes() < req.GetCapacityRange().GetRequiredBytes() { + return nil, status.Error(codes.AlreadyExists, + fmt.Sprintf("Volume with name %s already exists", req.GetName())) + } + return &csi.CreateVolumeResponse{Volume: &v}, nil + } + + // If no capacity is specified then use 100GiB + capacity := gib100 + if cr := req.CapacityRange; cr != nil { + if rb := cr.RequiredBytes; rb > 0 { + capacity = rb + } + if lb := cr.LimitBytes; lb > 0 { + capacity = lb + } + } + // Check for maximum available capacity + if capacity >= MaxStorageCapacity { + return nil, status.Errorf(codes.OutOfRange, "Requested capacity %d exceeds maximum allowed %d", capacity, MaxStorageCapacity) + } + + var v csi.Volume + // Create volume from content source if provided. + if req.GetVolumeContentSource() != nil { + switch req.GetVolumeContentSource().GetType().(type) { + case *csi.VolumeContentSource_Snapshot: + sid := req.GetVolumeContentSource().GetSnapshot().GetSnapshotId() + // Check if the source snapshot exists. + if snapID, _ := s.snapshots.FindSnapshot("id", sid); snapID >= 0 { + v = s.newVolumeFromSnapshot(req.Name, capacity, snapID) + } else { + return nil, status.Errorf(codes.NotFound, "Requested source snapshot %s not found", sid) + } + case *csi.VolumeContentSource_Volume: + vid := req.GetVolumeContentSource().GetVolume().GetVolumeId() + // Check if the source volume exists. + if volID, _ := s.findVolNoLock("id", vid); volID >= 0 { + v = s.newVolumeFromVolume(req.Name, capacity, volID) + } else { + return nil, status.Errorf(codes.NotFound, "Requested source volume %s not found", vid) + } + } + } else { + v = s.newVolume(req.Name, capacity) + } + + // Add the created volume to the service's in-mem volume slice. 
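+	// It is also recorded in the MockVolumes map, which tracks per-volume
+	// staging and publishing state for later calls.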
+ s.volsRWL.Lock() + defer s.volsRWL.Unlock() + s.vols = append(s.vols, v) + MockVolumes[v.GetVolumeId()] = Volume{ + VolumeCSI: v, + NodeID: "", + ISStaged: false, + ISPublished: false, + StageTargetPath: "", + TargetPath: "", + } + + if hookVal, hookMsg := s.execHook("CreateVolumeEnd"); hookVal != codes.OK { + return nil, status.Errorf(hookVal, hookMsg) + } + + return &csi.CreateVolumeResponse{Volume: &v}, nil +} + +func (s *service) DeleteVolume( + ctx context.Context, + req *csi.DeleteVolumeRequest) ( + *csi.DeleteVolumeResponse, error) { + + s.volsRWL.Lock() + defer s.volsRWL.Unlock() + + // If the volume is not specified, return error + if len(req.VolumeId) == 0 { + return nil, status.Error(codes.InvalidArgument, "Volume ID cannot be empty") + } + + if hookVal, hookMsg := s.execHook("DeleteVolumeStart"); hookVal != codes.OK { + return nil, status.Errorf(hookVal, hookMsg) + } + + // If the volume does not exist then return an idempotent response. + i, _ := s.findVolNoLock("id", req.VolumeId) + if i < 0 { + return &csi.DeleteVolumeResponse{}, nil + } + + // This delete logic preserves order and prevents potential memory + // leaks. The slice's elements may not be pointers, but the structs + // themselves have fields that are. + copy(s.vols[i:], s.vols[i+1:]) + s.vols[len(s.vols)-1] = csi.Volume{} + s.vols = s.vols[:len(s.vols)-1] + log.WithField("volumeID", req.VolumeId).Debug("mock delete volume") + + if hookVal, hookMsg := s.execHook("DeleteVolumeEnd"); hookVal != codes.OK { + return nil, status.Errorf(hookVal, hookMsg) + } + return &csi.DeleteVolumeResponse{}, nil +} + +func (s *service) ControllerPublishVolume( + ctx context.Context, + req *csi.ControllerPublishVolumeRequest) ( + *csi.ControllerPublishVolumeResponse, error) { + + if s.config.DisableAttach { + return nil, status.Error(codes.Unimplemented, "ControllerPublish is not supported") + } + + if len(req.VolumeId) == 0 { + return nil, status.Error(codes.InvalidArgument, "Volume ID cannot be empty") + } + if len(req.NodeId) == 0 { + return nil, status.Error(codes.InvalidArgument, "Node ID cannot be empty") + } + if req.VolumeCapability == nil { + return nil, status.Error(codes.InvalidArgument, "Volume Capabilities cannot be empty") + } + + if req.NodeId != s.nodeID { + return nil, status.Errorf(codes.NotFound, "Not matching Node ID %s to Mock Node ID %s", req.NodeId, s.nodeID) + } + + if hookVal, hookMsg := s.execHook("ControllerPublishVolumeStart"); hookVal != codes.OK { + return nil, status.Errorf(hookVal, hookMsg) + } + + s.volsRWL.Lock() + defer s.volsRWL.Unlock() + + i, v := s.findVolNoLock("id", req.VolumeId) + if i < 0 { + return nil, status.Error(codes.NotFound, req.VolumeId) + } + + // devPathKey is the key in the volume's attributes that is set to a + // mock device path if the volume has been published by the controller + // to the specified node. + devPathKey := path.Join(req.NodeId, "dev") + + // Check to see if the volume is already published. + if device := v.VolumeContext[devPathKey]; device != "" { + var volRo bool + var roVal string + if ro, ok := v.VolumeContext[ReadOnlyKey]; ok { + roVal = ro + } + + if roVal == "true" { + volRo = true + } else { + volRo = false + } + + // Check if readonly flag is compatible with the publish request. 
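+	// A repeated ControllerPublishVolume call is idempotent only when it asks
+	// for the same readonly setting; the CSI spec requires ALREADY_EXISTS for
+	// an incompatible request.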
+ if req.GetReadonly() != volRo { + return nil, status.Error(codes.AlreadyExists, "Volume published but has incompatible readonly flag") + } + + return &csi.ControllerPublishVolumeResponse{ + PublishContext: map[string]string{ + "device": device, + "readonly": roVal, + }, + }, nil + } + + // Check attach limit before publishing only if attach limit is set. + if s.config.AttachLimit > 0 && s.getAttachCount(devPathKey) >= s.config.AttachLimit { + return nil, status.Errorf(codes.ResourceExhausted, "Cannot attach any more volumes to this node") + } + + var roVal string + if req.GetReadonly() { + roVal = "true" + } else { + roVal = "false" + } + + // Publish the volume. + device := "/dev/mock" + v.VolumeContext[devPathKey] = device + v.VolumeContext[ReadOnlyKey] = roVal + s.vols[i] = v + + if volInfo, ok := MockVolumes[req.VolumeId]; ok { + volInfo.ISControllerPublished = true + MockVolumes[req.VolumeId] = volInfo + } + + if hookVal, hookMsg := s.execHook("ControllerPublishVolumeEnd"); hookVal != codes.OK { + return nil, status.Errorf(hookVal, hookMsg) + } + + return &csi.ControllerPublishVolumeResponse{ + PublishContext: map[string]string{ + "device": device, + "readonly": roVal, + }, + }, nil +} + +func (s *service) ControllerUnpublishVolume( + ctx context.Context, + req *csi.ControllerUnpublishVolumeRequest) ( + *csi.ControllerUnpublishVolumeResponse, error) { + + if s.config.DisableAttach { + return nil, status.Error(codes.Unimplemented, "ControllerPublish is not supported") + } + + if len(req.VolumeId) == 0 { + return nil, status.Error(codes.InvalidArgument, "Volume ID cannot be empty") + } + nodeID := req.NodeId + if len(nodeID) == 0 { + // If node id is empty, no failure as per Spec + nodeID = s.nodeID + } + + if req.NodeId != s.nodeID { + return nil, status.Errorf(codes.NotFound, "Node ID %s does not match to expected Node ID %s", req.NodeId, s.nodeID) + } + + if hookVal, hookMsg := s.execHook("ControllerUnpublishVolumeStart"); hookVal != codes.OK { + return nil, status.Errorf(hookVal, hookMsg) + } + + s.volsRWL.Lock() + defer s.volsRWL.Unlock() + + i, v := s.findVolNoLock("id", req.VolumeId) + if i < 0 { + // Not an error: a non-existent volume is not published. + // See also https://github.com/kubernetes-csi/external-attacher/pull/165 + return &csi.ControllerUnpublishVolumeResponse{}, nil + } + + // devPathKey is the key in the volume's attributes that is set to a + // mock device path if the volume has been published by the controller + // to the specified node. + devPathKey := path.Join(nodeID, "dev") + + // Check to see if the volume is already unpublished. + if v.VolumeContext[devPathKey] == "" { + return &csi.ControllerUnpublishVolumeResponse{}, nil + } + + // Unpublish the volume. 
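+	// Removing the mock device path and readonly attribute marks the volume
+	// as unpublished, so a repeated call takes the idempotent early return
+	// above.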
+ delete(v.VolumeContext, devPathKey) + delete(v.VolumeContext, ReadOnlyKey) + s.vols[i] = v + + if hookVal, hookMsg := s.execHook("ControllerUnpublishVolumeEnd"); hookVal != codes.OK { + return nil, status.Errorf(hookVal, hookMsg) + } + + return &csi.ControllerUnpublishVolumeResponse{}, nil +} + +func (s *service) ValidateVolumeCapabilities( + ctx context.Context, + req *csi.ValidateVolumeCapabilitiesRequest) ( + *csi.ValidateVolumeCapabilitiesResponse, error) { + + if len(req.GetVolumeId()) == 0 { + return nil, status.Error(codes.InvalidArgument, "Volume ID cannot be empty") + } + if len(req.VolumeCapabilities) == 0 { + return nil, status.Error(codes.InvalidArgument, req.VolumeId) + } + i, _ := s.findVolNoLock("id", req.VolumeId) + if i < 0 { + return nil, status.Error(codes.NotFound, req.VolumeId) + } + + if hookVal, hookMsg := s.execHook("ValidateVolumeCapabilities"); hookVal != codes.OK { + return nil, status.Errorf(hookVal, hookMsg) + } + + return &csi.ValidateVolumeCapabilitiesResponse{ + Confirmed: &csi.ValidateVolumeCapabilitiesResponse_Confirmed{ + VolumeContext: req.GetVolumeContext(), + VolumeCapabilities: req.GetVolumeCapabilities(), + Parameters: req.GetParameters(), + }, + }, nil +} + +func (s *service) ControllerGetVolume( + ctx context.Context, + req *csi.ControllerGetVolumeRequest) ( + *csi.ControllerGetVolumeResponse, error) { + + if hookVal, hookMsg := s.execHook("GetVolumeStart"); hookVal != codes.OK { + return nil, status.Errorf(hookVal, hookMsg) + } + + resp := &csi.ControllerGetVolumeResponse{ + Status: &csi.ControllerGetVolumeResponse_VolumeStatus{ + VolumeCondition: &csi.VolumeCondition{}, + }, + } + i, v := s.findVolByID(ctx, req.VolumeId) + if i < 0 { + resp.Status.VolumeCondition.Abnormal = true + resp.Status.VolumeCondition.Message = "volume not found" + return resp, status.Error(codes.NotFound, req.VolumeId) + } + + resp.Volume = &v + if !s.config.DisableAttach { + resp.Status.PublishedNodeIds = []string{ + s.nodeID, + } + } + + if hookVal, hookMsg := s.execHook("GetVolumeEnd"); hookVal != codes.OK { + return nil, status.Errorf(hookVal, hookMsg) + } + + return resp, nil +} + +func (s *service) ListVolumes( + ctx context.Context, + req *csi.ListVolumesRequest) ( + *csi.ListVolumesResponse, error) { + + if hookVal, hookMsg := s.execHook("ListVolumesStart"); hookVal != codes.OK { + return nil, status.Errorf(hookVal, hookMsg) + } + + // Copy the mock volumes into a new slice in order to avoid + // locking the service's volume slice for the duration of the + // ListVolumes RPC. + var vols []csi.Volume + func() { + s.volsRWL.RLock() + defer s.volsRWL.RUnlock() + vols = make([]csi.Volume, len(s.vols)) + copy(vols, s.vols) + }() + + var ( + ulenVols = int32(len(vols)) + maxEntries = req.MaxEntries + startingToken int32 + ) + + if v := req.StartingToken; v != "" { + i, err := strconv.ParseUint(v, 10, 32) + if err != nil { + return nil, status.Errorf( + codes.Aborted, + "startingToken=%d !< int32=%d", + startingToken, math.MaxUint32) + } + startingToken = int32(i) + } + + if startingToken > ulenVols { + return nil, status.Errorf( + codes.Aborted, + "startingToken=%d > len(vols)=%d", + startingToken, ulenVols) + } + + // Discern the number of remaining entries. + rem := ulenVols - startingToken + + // If maxEntries is 0 or greater than the number of remaining entries then + // set maxEntries to the number of remaining entries. 
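+	// Example: with 5 volumes, startingToken=2 and maxEntries=0, the 3
+	// remaining volumes are returned in a single page and NextToken stays
+	// empty.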
+ if maxEntries == 0 || maxEntries > rem { + maxEntries = rem + } + + var ( + i int + j = startingToken + entries = make( + []*csi.ListVolumesResponse_Entry, + maxEntries) + ) + + for i = 0; i < len(entries); i++ { + volumeStatus := &csi.ListVolumesResponse_VolumeStatus{ + VolumeCondition: &csi.VolumeCondition{}, + } + + if !s.config.DisableAttach { + volumeStatus.PublishedNodeIds = []string{ + s.nodeID, + } + } + + entries[i] = &csi.ListVolumesResponse_Entry{ + Volume: &vols[j], + Status: volumeStatus, + } + j++ + } + + var nextToken string + if n := startingToken + int32(i); n < ulenVols { + nextToken = fmt.Sprintf("%d", n) + } + + if hookVal, hookMsg := s.execHook("ListVolumesEnd"); hookVal != codes.OK { + return nil, status.Errorf(hookVal, hookMsg) + } + + return &csi.ListVolumesResponse{ + Entries: entries, + NextToken: nextToken, + }, nil +} + +func (s *service) GetCapacity( + ctx context.Context, + req *csi.GetCapacityRequest) ( + *csi.GetCapacityResponse, error) { + + if hookVal, hookMsg := s.execHook("GetCapacity"); hookVal != codes.OK { + return nil, status.Errorf(hookVal, hookMsg) + } + + return &csi.GetCapacityResponse{ + AvailableCapacity: MaxStorageCapacity, + }, nil +} + +func (s *service) ControllerGetCapabilities( + ctx context.Context, + req *csi.ControllerGetCapabilitiesRequest) ( + *csi.ControllerGetCapabilitiesResponse, error) { + + if hookVal, hookMsg := s.execHook("ControllerGetCapabilitiesStart"); hookVal != codes.OK { + return nil, status.Errorf(hookVal, hookMsg) + } + + caps := []*csi.ControllerServiceCapability{ + { + Type: &csi.ControllerServiceCapability_Rpc{ + Rpc: &csi.ControllerServiceCapability_RPC{ + Type: csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME, + }, + }, + }, + { + Type: &csi.ControllerServiceCapability_Rpc{ + Rpc: &csi.ControllerServiceCapability_RPC{ + Type: csi.ControllerServiceCapability_RPC_LIST_VOLUMES, + }, + }, + }, + { + Type: &csi.ControllerServiceCapability_Rpc{ + Rpc: &csi.ControllerServiceCapability_RPC{ + Type: csi.ControllerServiceCapability_RPC_LIST_VOLUMES_PUBLISHED_NODES, + }, + }, + }, + { + Type: &csi.ControllerServiceCapability_Rpc{ + Rpc: &csi.ControllerServiceCapability_RPC{ + Type: csi.ControllerServiceCapability_RPC_GET_CAPACITY, + }, + }, + }, + { + Type: &csi.ControllerServiceCapability_Rpc{ + Rpc: &csi.ControllerServiceCapability_RPC{ + Type: csi.ControllerServiceCapability_RPC_LIST_SNAPSHOTS, + }, + }, + }, + { + Type: &csi.ControllerServiceCapability_Rpc{ + Rpc: &csi.ControllerServiceCapability_RPC{ + Type: csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT, + }, + }, + }, + { + Type: &csi.ControllerServiceCapability_Rpc{ + Rpc: &csi.ControllerServiceCapability_RPC{ + Type: csi.ControllerServiceCapability_RPC_PUBLISH_READONLY, + }, + }, + }, + { + Type: &csi.ControllerServiceCapability_Rpc{ + Rpc: &csi.ControllerServiceCapability_RPC{ + Type: csi.ControllerServiceCapability_RPC_CLONE_VOLUME, + }, + }, + }, + { + Type: &csi.ControllerServiceCapability_Rpc{ + Rpc: &csi.ControllerServiceCapability_RPC{ + Type: csi.ControllerServiceCapability_RPC_GET_VOLUME, + }, + }, + }, + { + Type: &csi.ControllerServiceCapability_Rpc{ + Rpc: &csi.ControllerServiceCapability_RPC{ + Type: csi.ControllerServiceCapability_RPC_VOLUME_CONDITION, + }, + }, + }, + } + + if !s.config.DisableAttach { + caps = append(caps, &csi.ControllerServiceCapability{ + Type: &csi.ControllerServiceCapability_Rpc{ + Rpc: &csi.ControllerServiceCapability_RPC{ + Type: csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME, + }, + }, + 
}) + } + + if !s.config.DisableControllerExpansion { + caps = append(caps, &csi.ControllerServiceCapability{ + Type: &csi.ControllerServiceCapability_Rpc{ + Rpc: &csi.ControllerServiceCapability_RPC{ + Type: csi.ControllerServiceCapability_RPC_EXPAND_VOLUME, + }, + }, + }) + } + + if hookVal, hookMsg := s.execHook("ControllerGetCapabilitiesEnd"); hookVal != codes.OK { + return nil, status.Errorf(hookVal, hookMsg) + } + + return &csi.ControllerGetCapabilitiesResponse{ + Capabilities: caps, + }, nil +} + +func (s *service) CreateSnapshot(ctx context.Context, + req *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) { + // Check arguments + if len(req.GetName()) == 0 { + return nil, status.Error(codes.InvalidArgument, "Snapshot Name cannot be empty") + } + if len(req.GetSourceVolumeId()) == 0 { + return nil, status.Error(codes.InvalidArgument, "Snapshot SourceVolumeId cannot be empty") + } + + if hookVal, hookMsg := s.execHook("CreateSnapshotStart"); hookVal != codes.OK { + return nil, status.Errorf(hookVal, hookMsg) + } + + // Check to see if the snapshot already exists. + if i, v := s.snapshots.FindSnapshot("name", req.GetName()); i >= 0 { + // Requested snapshot name already exists + if v.SnapshotCSI.GetSourceVolumeId() != req.GetSourceVolumeId() || !reflect.DeepEqual(v.Parameters, req.GetParameters()) { + return nil, status.Error(codes.AlreadyExists, + fmt.Sprintf("Snapshot with name %s already exists", req.GetName())) + } + return &csi.CreateSnapshotResponse{Snapshot: &v.SnapshotCSI}, nil + } + + // Create the snapshot and add it to the service's in-mem snapshot slice. + snapshot := s.newSnapshot(req.GetName(), req.GetSourceVolumeId(), req.GetParameters()) + s.snapshots.Add(snapshot) + + if hookVal, hookMsg := s.execHook("CreateSnapshotEnd"); hookVal != codes.OK { + return nil, status.Errorf(hookVal, hookMsg) + } + + return &csi.CreateSnapshotResponse{Snapshot: &snapshot.SnapshotCSI}, nil +} + +func (s *service) DeleteSnapshot(ctx context.Context, + req *csi.DeleteSnapshotRequest) (*csi.DeleteSnapshotResponse, error) { + + // If the snapshot is not specified, return error + if len(req.SnapshotId) == 0 { + return nil, status.Error(codes.InvalidArgument, "Snapshot ID cannot be empty") + } + + if hookVal, hookMsg := s.execHook("DeleteSnapshotStart"); hookVal != codes.OK { + return nil, status.Errorf(hookVal, hookMsg) + } + + // If the snapshot does not exist then return an idempotent response. + i, _ := s.snapshots.FindSnapshot("id", req.SnapshotId) + if i < 0 { + return &csi.DeleteSnapshotResponse{}, nil + } + + // This delete logic preserves order and prevents potential memory + // leaks. The slice's elements may not be pointers, but the structs + // themselves have fields that are. + s.snapshots.Delete(i) + log.WithField("SnapshotId", req.SnapshotId).Debug("mock delete snapshot") + + if hookVal, hookMsg := s.execHook("DeleteSnapshotEnd"); hookVal != codes.OK { + return nil, status.Errorf(hookVal, hookMsg) + } + + return &csi.DeleteSnapshotResponse{}, nil +} + +func (s *service) ListSnapshots(ctx context.Context, + req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) { + + if hookVal, hookMsg := s.execHook("ListSnapshots"); hookVal != codes.OK { + return nil, status.Errorf(hookVal, hookMsg) + } + + // case 1: SnapshotId is not empty, return snapshots that match the snapshot id. + if len(req.GetSnapshotId()) != 0 { + return getSnapshotById(s, req) + } + + // case 2: SourceVolumeId is not empty, return snapshots that match the source volume id. 
+ if len(req.GetSourceVolumeId()) != 0 { + return getSnapshotByVolumeId(s, req) + } + + // case 3: no parameter is set, so we return all the snapshots. + return getAllSnapshots(s, req) +} + +func (s *service) ControllerExpandVolume( + ctx context.Context, + req *csi.ControllerExpandVolumeRequest) (*csi.ControllerExpandVolumeResponse, error) { + if len(req.VolumeId) == 0 { + return nil, status.Error(codes.InvalidArgument, "Volume ID cannot be empty") + } + + if req.CapacityRange == nil { + return nil, status.Error(codes.InvalidArgument, "Request capacity cannot be empty") + } + + if hookVal, hookMsg := s.execHook("ControllerExpandVolumeStart"); hookVal != codes.OK { + return nil, status.Errorf(hookVal, hookMsg) + } + + s.volsRWL.Lock() + defer s.volsRWL.Unlock() + + i, v := s.findVolNoLock("id", req.VolumeId) + if i < 0 { + return nil, status.Error(codes.NotFound, req.VolumeId) + } + + if s.config.DisableOnlineExpansion && MockVolumes[v.GetVolumeId()].ISControllerPublished { + return nil, status.Error(codes.FailedPrecondition, "volume is published and online volume expansion is not supported") + } + + requestBytes := req.CapacityRange.RequiredBytes + + if v.CapacityBytes > requestBytes { + return nil, status.Error(codes.InvalidArgument, "cannot change volume capacity to a smaller size") + } + + resp := &csi.ControllerExpandVolumeResponse{ + CapacityBytes: requestBytes, + NodeExpansionRequired: s.config.NodeExpansionRequired, + } + + // Check to see if the volume already satisfied request size. + if v.CapacityBytes == requestBytes { + log.WithField("volumeID", v.VolumeId).Infof("Volume capacity is already %d, no need to expand", requestBytes) + return resp, nil + } + + // Update volume's capacity to the requested size. + v.CapacityBytes = requestBytes + s.vols[i] = v + + if hookVal, hookMsg := s.execHook("ControllerExpandVolumeEnd"); hookVal != codes.OK { + return nil, status.Errorf(hookVal, hookMsg) + } + + return resp, nil +} + +func getSnapshotById(s *service, req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) { + if len(req.GetSnapshotId()) != 0 { + i, snapshot := s.snapshots.FindSnapshot("id", req.GetSnapshotId()) + if i < 0 { + return &csi.ListSnapshotsResponse{}, nil + } + + if len(req.GetSourceVolumeId()) != 0 { + if snapshot.SnapshotCSI.GetSourceVolumeId() != req.GetSourceVolumeId() { + return &csi.ListSnapshotsResponse{}, nil + } + } + + return &csi.ListSnapshotsResponse{ + Entries: []*csi.ListSnapshotsResponse_Entry{ + { + Snapshot: &snapshot.SnapshotCSI, + }, + }, + }, nil + } + return nil, nil +} + +func getSnapshotByVolumeId(s *service, req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) { + if len(req.GetSourceVolumeId()) != 0 { + i, snapshot := s.snapshots.FindSnapshot("sourceVolumeId", req.SourceVolumeId) + if i < 0 { + return &csi.ListSnapshotsResponse{}, nil + } + return &csi.ListSnapshotsResponse{ + Entries: []*csi.ListSnapshotsResponse_Entry{ + { + Snapshot: &snapshot.SnapshotCSI, + }, + }, + }, nil + } + return nil, nil +} + +func getAllSnapshots(s *service, req *csi.ListSnapshotsRequest) (*csi.ListSnapshotsResponse, error) { + // Copy the mock snapshots into a new slice in order to avoid + // locking the service's snapshot slice for the duration of the + // ListSnapshots RPC. 
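+	// The pagination below mirrors ListVolumes: parse StartingToken, clamp
+	// MaxEntries to the number of remaining entries, and return a NextToken
+	// whenever more snapshots remain.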
+	readyToUse := true
+	snapshots := s.snapshots.List(readyToUse)
+
+	var (
+		ulenSnapshots = int32(len(snapshots))
+		maxEntries    = req.MaxEntries
+		startingToken int32
+	)
+
+	if v := req.StartingToken; v != "" {
+		i, err := strconv.ParseUint(v, 10, 32)
+		if err != nil {
+			return nil, status.Errorf(
+				codes.Aborted,
+				"startingToken=%d !< int32=%d",
+				startingToken, math.MaxUint32)
+		}
+		startingToken = int32(i)
+	}
+
+	if startingToken > ulenSnapshots {
+		return nil, status.Errorf(
+			codes.Aborted,
+			"startingToken=%d > len(snapshots)=%d",
+			startingToken, ulenSnapshots)
+	}
+
+	// Discern the number of remaining entries.
+	rem := ulenSnapshots - startingToken
+
+	// If maxEntries is 0 or greater than the number of remaining entries then
+	// set maxEntries to the number of remaining entries.
+	if maxEntries == 0 || maxEntries > rem {
+		maxEntries = rem
+	}
+
+	var (
+		i       int
+		j       = startingToken
+		entries = make(
+			[]*csi.ListSnapshotsResponse_Entry,
+			maxEntries)
+	)
+
+	for i = 0; i < len(entries); i++ {
+		entries[i] = &csi.ListSnapshotsResponse_Entry{
+			Snapshot: &snapshots[j],
+		}
+		j++
+	}
+
+	var nextToken string
+	if n := startingToken + int32(i); n < ulenSnapshots {
+		nextToken = fmt.Sprintf("%d", n)
+	}
+
+	return &csi.ListSnapshotsResponse{
+		Entries:   entries,
+		NextToken: nextToken,
+	}, nil
+}
diff --git a/test/e2e/storage/drivers/csi-test/mock/service/hooks-const.go b/test/e2e/storage/drivers/csi-test/mock/service/hooks-const.go
new file mode 100644
index 00000000000..46eed6af7ca
--- /dev/null
+++ b/test/e2e/storage/drivers/csi-test/mock/service/hooks-const.go
@@ -0,0 +1,24 @@
+package service
+
+// Predefined constants for the JavaScript hooks; they must correspond to the
+// error codes used by gRPC, see:
+// https://github.com/grpc/grpc-go/blob/master/codes/codes.go
+const (
+	grpcJSCodes string = `OK = 0;
+	CANCELED = 1;
+	UNKNOWN = 2;
+	INVALIDARGUMENT = 3;
+	DEADLINEEXCEEDED = 4;
+	NOTFOUND = 5;
+	ALREADYEXISTS = 6;
+	PERMISSIONDENIED = 7;
+	RESOURCEEXHAUSTED = 8;
+	FAILEDPRECONDITION = 9;
+	ABORTED = 10;
+	OUTOFRANGE = 11;
+	UNIMPLEMENTED = 12;
+	INTERNAL = 13;
+	UNAVAILABLE = 14;
+	DATALOSS = 15;
+	UNAUTHENTICATED = 16`
+)
diff --git a/test/e2e/storage/drivers/csi-test/mock/service/identity.go b/test/e2e/storage/drivers/csi-test/mock/service/identity.go
new file mode 100644
index 00000000000..837c8763c1c
--- /dev/null
+++ b/test/e2e/storage/drivers/csi-test/mock/service/identity.go
@@ -0,0 +1,74 @@
+package service
+
+import (
+	"golang.org/x/net/context"
+
+	"github.com/container-storage-interface/spec/lib/go/csi"
+	"github.com/golang/protobuf/ptypes/wrappers"
+)
+
+func (s *service) GetPluginInfo(
+	ctx context.Context,
+	req *csi.GetPluginInfoRequest) (
+	*csi.GetPluginInfoResponse, error) {
+
+	return &csi.GetPluginInfoResponse{
+		Name:          s.config.DriverName,
+		VendorVersion: VendorVersion,
+		Manifest:      Manifest,
+	}, nil
+}
+
+func (s *service) Probe(
+	ctx context.Context,
+	req *csi.ProbeRequest) (
+	*csi.ProbeResponse, error) {
+
+	return &csi.ProbeResponse{
+		Ready: &wrappers.BoolValue{Value: true},
+	}, nil
+}
+
+func (s *service) GetPluginCapabilities(
+	ctx context.Context,
+	req *csi.GetPluginCapabilitiesRequest) (
+	*csi.GetPluginCapabilitiesResponse, error) {
+
+	volExpType := csi.PluginCapability_VolumeExpansion_ONLINE
+
+	if s.config.DisableOnlineExpansion {
+		volExpType = csi.PluginCapability_VolumeExpansion_OFFLINE
+	}
+
+	capabilities := []*csi.PluginCapability{
+		{
+			Type: &csi.PluginCapability_Service_{
+				Service: &csi.PluginCapability_Service{
+
Type: csi.PluginCapability_Service_CONTROLLER_SERVICE,
+				},
+			},
+		},
+		{
+			Type: &csi.PluginCapability_VolumeExpansion_{
+				VolumeExpansion: &csi.PluginCapability_VolumeExpansion{
+					Type: volExpType,
+				},
+			},
+		},
+	}
+
+	if s.config.EnableTopology {
+		capabilities = append(capabilities,
+			&csi.PluginCapability{
+				Type: &csi.PluginCapability_Service_{
+					Service: &csi.PluginCapability_Service{
+						Type: csi.PluginCapability_Service_VOLUME_ACCESSIBILITY_CONSTRAINTS,
+					},
+				},
+			})
+	}
+
+	return &csi.GetPluginCapabilitiesResponse{
+		Capabilities: capabilities,
+	}, nil
+}
diff --git a/test/e2e/storage/drivers/csi-test/mock/service/node.go b/test/e2e/storage/drivers/csi-test/mock/service/node.go
new file mode 100644
index 00000000000..7c509150181
--- /dev/null
+++ b/test/e2e/storage/drivers/csi-test/mock/service/node.go
@@ -0,0 +1,460 @@
+package service
+
+import (
+	"fmt"
+	"os"
+	"path"
+	"strconv"
+
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+
+	"golang.org/x/net/context"
+
+	"github.com/container-storage-interface/spec/lib/go/csi"
+)
+
+func (s *service) NodeStageVolume(
+	ctx context.Context,
+	req *csi.NodeStageVolumeRequest) (
+	*csi.NodeStageVolumeResponse, error) {
+
+	if hookVal, hookMsg := s.execHook("NodeStageVolumeStart"); hookVal != codes.OK {
+		return nil, status.Errorf(hookVal, hookMsg)
+	}
+
+	device, ok := req.PublishContext["device"]
+	if !ok {
+		if s.config.DisableAttach {
+			device = "mock device"
+		} else {
+			return nil, status.Error(
+				codes.InvalidArgument,
+				"stage volume info 'device' key required")
+		}
+	}
+
+	if len(req.GetVolumeId()) == 0 {
+		return nil, status.Error(codes.InvalidArgument, "Volume ID cannot be empty")
+	}
+
+	if len(req.GetStagingTargetPath()) == 0 {
+		return nil, status.Error(codes.InvalidArgument, "Staging Target Path cannot be empty")
+	}
+
+	if req.GetVolumeCapability() == nil {
+		return nil, status.Error(codes.InvalidArgument, "Volume Capability cannot be empty")
+	}
+
+	exists, err := checkTargetExists(req.StagingTargetPath)
+	if err != nil {
+		return nil, status.Error(codes.Internal, err.Error())
+	}
+	if !exists {
+		return nil, status.Errorf(codes.Internal, "staging target path %s does not exist", req.StagingTargetPath)
+	}
+
+	s.volsRWL.Lock()
+	defer s.volsRWL.Unlock()
+
+	i, v := s.findVolNoLock("id", req.VolumeId)
+	if i < 0 {
+		return nil, status.Error(codes.NotFound, req.VolumeId)
+	}
+
+	// nodeStgPathKey is the key in the volume's attributes that is set to a
+	// mock stage path if the volume has been staged by the node
+	nodeStgPathKey := path.Join(s.nodeID, req.StagingTargetPath)
+
+	// Check to see if the volume has already been staged.
+	if v.VolumeContext[nodeStgPathKey] != "" {
+		// TODO: Check for the capabilities to be equal. Return "ALREADY_EXISTS"
+		// if the capabilities don't match.
+		return &csi.NodeStageVolumeResponse{}, nil
+	}
+
+	// Stage the volume.
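+	// In the mock driver this is pure bookkeeping: the device is recorded
+	// under a per-node key in the volume's context instead of mounting
+	// anything on the host.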
+	v.VolumeContext[nodeStgPathKey] = device
+	s.vols[i] = v
+
+	if hookVal, hookMsg := s.execHook("NodeStageVolumeEnd"); hookVal != codes.OK {
+		return nil, status.Errorf(hookVal, hookMsg)
+	}
+
+	return &csi.NodeStageVolumeResponse{}, nil
+}
+
+func (s *service) NodeUnstageVolume(
+	ctx context.Context,
+	req *csi.NodeUnstageVolumeRequest) (
+	*csi.NodeUnstageVolumeResponse, error) {
+
+	if len(req.GetVolumeId()) == 0 {
+		return nil, status.Error(codes.InvalidArgument, "Volume ID cannot be empty")
+	}
+
+	if len(req.GetStagingTargetPath()) == 0 {
+		return nil, status.Error(codes.InvalidArgument, "Staging Target Path cannot be empty")
+	}
+
+	if hookVal, hookMsg := s.execHook("NodeUnstageVolumeStart"); hookVal != codes.OK {
+		return nil, status.Errorf(hookVal, hookMsg)
+	}
+
+	s.volsRWL.Lock()
+	defer s.volsRWL.Unlock()
+
+	i, v := s.findVolNoLock("id", req.VolumeId)
+	if i < 0 {
+		return nil, status.Error(codes.NotFound, req.VolumeId)
+	}
+
+	// nodeStgPathKey is the key in the volume's attributes that is set to a
+	// mock stage path if the volume has been staged by the node
+	nodeStgPathKey := path.Join(s.nodeID, req.StagingTargetPath)
+
+	// Check to see if the volume has already been unstaged.
+	if v.VolumeContext[nodeStgPathKey] == "" {
+		return &csi.NodeUnstageVolumeResponse{}, nil
+	}
+
+	// Unstage the volume.
+	delete(v.VolumeContext, nodeStgPathKey)
+	s.vols[i] = v
+
+	if hookVal, hookMsg := s.execHook("NodeUnstageVolumeEnd"); hookVal != codes.OK {
+		return nil, status.Errorf(hookVal, hookMsg)
+	}
+	return &csi.NodeUnstageVolumeResponse{}, nil
+}
+
+func (s *service) NodePublishVolume(
+	ctx context.Context,
+	req *csi.NodePublishVolumeRequest) (
+	*csi.NodePublishVolumeResponse, error) {
+
+	if hookVal, hookMsg := s.execHook("NodePublishVolumeStart"); hookVal != codes.OK {
+		return nil, status.Errorf(hookVal, hookMsg)
+	}
+	ephemeralVolume := req.GetVolumeContext()["csi.storage.k8s.io/ephemeral"] == "true"
+	device, ok := req.PublishContext["device"]
+	if !ok {
+		if ephemeralVolume || s.config.DisableAttach {
+			device = "mock device"
+		} else {
+			return nil, status.Error(
+				codes.InvalidArgument,
+				"stage volume info 'device' key required")
+		}
+	}
+
+	if len(req.GetVolumeId()) == 0 {
+		return nil, status.Error(codes.InvalidArgument, "Volume ID cannot be empty")
+	}
+
+	if len(req.GetTargetPath()) == 0 {
+		return nil, status.Error(codes.InvalidArgument, "Target Path cannot be empty")
+	}
+
+	if req.GetVolumeCapability() == nil {
+		return nil, status.Error(codes.InvalidArgument, "Volume Capability cannot be empty")
+	}
+
+	// May happen with old (or, at this time, even the current) Kubernetes
+	// although it shouldn't (https://github.com/kubernetes/kubernetes/issues/75535).
+	exists, err := checkTargetExists(req.TargetPath)
+	if err != nil {
+		return nil, status.Error(codes.Internal, err.Error())
+	}
+	if !s.config.PermissiveTargetPath && exists {
+		return nil, status.Errorf(codes.Internal, "target path %s already exists", req.TargetPath)
+	}
+
+	s.volsRWL.Lock()
+	defer s.volsRWL.Unlock()
+
+	i, v := s.findVolNoLock("id", req.VolumeId)
+	if i < 0 && !ephemeralVolume {
+		return nil, status.Error(codes.NotFound, req.VolumeId)
+	}
+	if i >= 0 && ephemeralVolume {
+		return nil, status.Error(codes.AlreadyExists, req.VolumeId)
+	}
+
+	// nodeMntPathKey is the key in the volume's attributes that is set to a
+	// mock mount path if the volume has been published by the node
+	nodeMntPathKey := path.Join(s.nodeID, req.TargetPath)
+
+	// Check to see if the volume has already been published.
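+	// A repeated NodePublishVolume for the same target path must answer
+	// with success so that the call stays idempotent.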
+ if v.VolumeContext[nodeMntPathKey] != "" { + + // Requests marked Readonly fail due to volumes published by + // the Mock driver supporting only RW mode. + if req.Readonly { + return nil, status.Error(codes.AlreadyExists, req.VolumeId) + } + + return &csi.NodePublishVolumeResponse{}, nil + } + + // Publish the volume. + if ephemeralVolume { + MockVolumes[req.VolumeId] = Volume{ + ISEphemeral: true, + } + } else { + if req.GetTargetPath() != "" { + exists, err := checkTargetExists(req.GetTargetPath()) + if err != nil { + return nil, status.Error(codes.Internal, err.Error()) + } + if !exists { + // If target path does not exist we need to create the directory where volume will be staged + if err = os.Mkdir(req.TargetPath, os.FileMode(0755)); err != nil { + msg := fmt.Sprintf("NodePublishVolume: could not create target dir %q: %v", req.TargetPath, err) + return nil, status.Error(codes.Internal, msg) + } + } + v.VolumeContext[nodeMntPathKey] = req.GetTargetPath() + } else { + v.VolumeContext[nodeMntPathKey] = device + } + s.vols[i] = v + } + if hookVal, hookMsg := s.execHook("NodePublishVolumeEnd"); hookVal != codes.OK { + return nil, status.Errorf(hookVal, hookMsg) + } + + return &csi.NodePublishVolumeResponse{}, nil +} + +func (s *service) NodeUnpublishVolume( + ctx context.Context, + req *csi.NodeUnpublishVolumeRequest) ( + *csi.NodeUnpublishVolumeResponse, error) { + + if len(req.GetVolumeId()) == 0 { + return nil, status.Error(codes.InvalidArgument, "Volume ID cannot be empty") + } + if len(req.GetTargetPath()) == 0 { + return nil, status.Error(codes.InvalidArgument, "Target Path cannot be empty") + } + if hookVal, hookMsg := s.execHook("NodeUnpublishVolumeStart"); hookVal != codes.OK { + return nil, status.Errorf(hookVal, hookMsg) + } + + s.volsRWL.Lock() + defer s.volsRWL.Unlock() + + ephemeralVolume := MockVolumes[req.VolumeId].ISEphemeral + i, v := s.findVolNoLock("id", req.VolumeId) + if i < 0 && !ephemeralVolume { + return nil, status.Error(codes.NotFound, req.VolumeId) + } + + if ephemeralVolume { + delete(MockVolumes, req.VolumeId) + } else { + // nodeMntPathKey is the key in the volume's attributes that is set to a + // mock mount path if the volume has been published by the node + nodeMntPathKey := path.Join(s.nodeID, req.TargetPath) + + // Check to see if the volume has already been unpublished. + if v.VolumeContext[nodeMntPathKey] == "" { + return &csi.NodeUnpublishVolumeResponse{}, nil + } + + // Delete any created paths + err := os.RemoveAll(v.VolumeContext[nodeMntPathKey]) + if err != nil { + return nil, status.Errorf(codes.Internal, "Unable to delete previously created target directory") + } + + // Unpublish the volume. 
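+		// As with staging, this is just bookkeeping: drop the per-node
+		// mount key from the volume's context.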
+		delete(v.VolumeContext, nodeMntPathKey)
+		s.vols[i] = v
+	}
+	if hookVal, hookMsg := s.execHook("NodeUnpublishVolumeEnd"); hookVal != codes.OK {
+		return nil, status.Errorf(hookVal, hookMsg)
+	}
+
+	return &csi.NodeUnpublishVolumeResponse{}, nil
+}
+
+func (s *service) NodeExpandVolume(ctx context.Context, req *csi.NodeExpandVolumeRequest) (*csi.NodeExpandVolumeResponse, error) {
+	if len(req.GetVolumeId()) == 0 {
+		return nil, status.Error(codes.InvalidArgument, "Volume ID cannot be empty")
+	}
+	if len(req.GetVolumePath()) == 0 {
+		return nil, status.Error(codes.InvalidArgument, "Volume Path cannot be empty")
+	}
+	if hookVal, hookMsg := s.execHook("NodeExpandVolumeStart"); hookVal != codes.OK {
+		return nil, status.Errorf(hookVal, hookMsg)
+	}
+
+	s.volsRWL.Lock()
+	defer s.volsRWL.Unlock()
+
+	i, v := s.findVolNoLock("id", req.VolumeId)
+	if i < 0 {
+		return nil, status.Error(codes.NotFound, req.VolumeId)
+	}
+
+	// TODO: NodeExpandVolume MUST be called after a successful NodeStageVolume,
+	// since we have the STAGE_UNSTAGE_VOLUME node capability.
+	resp := &csi.NodeExpandVolumeResponse{}
+	var requestCapacity int64 = 0
+	if req.GetCapacityRange() != nil {
+		requestCapacity = req.CapacityRange.GetRequiredBytes()
+		resp.CapacityBytes = requestCapacity
+	}
+
+	// fsCapacityKey is the key in the volume's attributes that is set to the file system's size.
+	fsCapacityKey := path.Join(s.nodeID, req.GetVolumePath(), "size")
+	// Update volume's fs capacity to requested size.
+	if requestCapacity > 0 {
+		v.VolumeContext[fsCapacityKey] = strconv.FormatInt(requestCapacity, 10)
+		s.vols[i] = v
+	}
+	if hookVal, hookMsg := s.execHook("NodeExpandVolumeEnd"); hookVal != codes.OK {
+		return nil, status.Errorf(hookVal, hookMsg)
+	}
+
+	return resp, nil
+}
+
+func (s *service) NodeGetCapabilities(
+	ctx context.Context,
+	req *csi.NodeGetCapabilitiesRequest) (
+	*csi.NodeGetCapabilitiesResponse, error) {
+
+	if hookVal, hookMsg := s.execHook("NodeGetCapabilities"); hookVal != codes.OK {
+		return nil, status.Errorf(hookVal, hookMsg)
+	}
+	capabilities := []*csi.NodeServiceCapability{
+		{
+			Type: &csi.NodeServiceCapability_Rpc{
+				Rpc: &csi.NodeServiceCapability_RPC{
+					Type: csi.NodeServiceCapability_RPC_UNKNOWN,
+				},
+			},
+		},
+		{
+			Type: &csi.NodeServiceCapability_Rpc{
+				Rpc: &csi.NodeServiceCapability_RPC{
+					Type: csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME,
+				},
+			},
+		},
+		{
+			Type: &csi.NodeServiceCapability_Rpc{
+				Rpc: &csi.NodeServiceCapability_RPC{
+					Type: csi.NodeServiceCapability_RPC_GET_VOLUME_STATS,
+				},
+			},
+		},
+		{
+			Type: &csi.NodeServiceCapability_Rpc{
+				Rpc: &csi.NodeServiceCapability_RPC{
+					Type: csi.NodeServiceCapability_RPC_VOLUME_CONDITION,
+				},
+			},
+		},
+	}
+	if s.config.NodeExpansionRequired {
+		capabilities = append(capabilities, &csi.NodeServiceCapability{
+			Type: &csi.NodeServiceCapability_Rpc{
+				Rpc: &csi.NodeServiceCapability_RPC{
+					Type: csi.NodeServiceCapability_RPC_EXPAND_VOLUME,
+				},
+			},
+		})
+	}
+
+	return &csi.NodeGetCapabilitiesResponse{
+		Capabilities: capabilities,
+	}, nil
+}
+
+func (s *service) NodeGetInfo(ctx context.Context,
+	req *csi.NodeGetInfoRequest) (*csi.NodeGetInfoResponse, error) {
+	if hookVal, hookMsg := s.execHook("NodeGetInfo"); hookVal != codes.OK {
+		return nil, status.Errorf(hookVal, hookMsg)
+	}
+	csiNodeResponse := &csi.NodeGetInfoResponse{
+		NodeId: s.nodeID,
+	}
+	if s.config.AttachLimit > 0 {
+		csiNodeResponse.MaxVolumesPerNode = s.config.AttachLimit
+	}
+	if s.config.EnableTopology {
+		csiNodeResponse.AccessibleTopology = &csi.Topology{
+
Segments: map[string]string{
+				TopologyKey: TopologyValue,
+			},
+		}
+	}
+	return csiNodeResponse, nil
+}
+
+func (s *service) NodeGetVolumeStats(ctx context.Context,
+	req *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) {
+
+	if hookVal, hookMsg := s.execHook("NodeGetVolumeStatsStart"); hookVal != codes.OK {
+		return nil, status.Errorf(hookVal, hookMsg)
+	}
+
+	resp := &csi.NodeGetVolumeStatsResponse{
+		VolumeCondition: &csi.VolumeCondition{},
+	}
+
+	if len(req.GetVolumeId()) == 0 {
+		return nil, status.Error(codes.InvalidArgument, "Volume ID cannot be empty")
+	}
+
+	if len(req.GetVolumePath()) == 0 {
+		return nil, status.Error(codes.InvalidArgument, "Volume Path cannot be empty")
+	}
+
+	i, v := s.findVolNoLock("id", req.VolumeId)
+	if i < 0 {
+		resp.VolumeCondition.Abnormal = true
+		resp.VolumeCondition.Message = "Volume not found"
+		return resp, status.Error(codes.NotFound, req.VolumeId)
+	}
+
+	nodeMntPathKey := path.Join(s.nodeID, req.VolumePath)
+
+	_, exists := v.VolumeContext[nodeMntPathKey]
+	if !exists {
+		msg := fmt.Sprintf("volume %q does not exist on the specified path %q", req.VolumeId, req.VolumePath)
+		resp.VolumeCondition.Abnormal = true
+		resp.VolumeCondition.Message = msg
+		return resp, status.Errorf(codes.NotFound, msg)
+	}
+
+	if hookVal, hookMsg := s.execHook("NodeGetVolumeStatsEnd"); hookVal != codes.OK {
+		return nil, status.Errorf(hookVal, hookMsg)
+	}
+
+	resp.Usage = []*csi.VolumeUsage{
+		{
+			Total: v.GetCapacityBytes(),
+			Unit:  csi.VolumeUsage_BYTES,
+		},
+	}
+
+	return resp, nil
+}
+
+// checkTargetExists checks if a given path exists.
+func checkTargetExists(targetPath string) (bool, error) {
+	_, err := os.Stat(targetPath)
+	switch {
+	case err == nil:
+		return true, nil
+	case os.IsNotExist(err):
+		return false, nil
+	default:
+		return false, err
+	}
+}
diff --git a/test/e2e/storage/drivers/csi-test/mock/service/service.go b/test/e2e/storage/drivers/csi-test/mock/service/service.go
new file mode 100644
index 00000000000..ff54ae9e506
--- /dev/null
+++ b/test/e2e/storage/drivers/csi-test/mock/service/service.go
@@ -0,0 +1,293 @@
+package service
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"sync"
+	"sync/atomic"
+
+	"k8s.io/klog"
+
+	"github.com/container-storage-interface/spec/lib/go/csi"
+	"github.com/kubernetes-csi/csi-test/v4/mock/cache"
+	"golang.org/x/net/context"
+	"google.golang.org/grpc/codes"
+
+	"github.com/golang/protobuf/ptypes"
+
+	"github.com/robertkrimen/otto"
+)
+
+const (
+	// Name is the name of the CSI plug-in.
+	Name = "io.kubernetes.storage.mock"
+
+	// VendorVersion is the version returned by GetPluginInfo.
+	VendorVersion = "0.3.0"
+
+	// TopologyKey simulates a per-node topology.
+	TopologyKey = Name + "/node"
+
+	// TopologyValue is the one, fixed node on which the driver runs.
+	TopologyValue = "some-mock-node"
+)
+
+// Manifest is the SP's manifest.
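+// It is returned verbatim by the GetPluginInfo RPC.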
+var Manifest = map[string]string{ + "url": "https://github.com/kubernetes-csi/csi-test/mock", +} + +// JavaScript hooks to be run to perform various tests +type Hooks struct { + Globals string `yaml:"globals"` // will be executed once before all other scripts + CreateVolumeStart string `yaml:"createVolumeStart"` + CreateVolumeEnd string `yaml:"createVolumeEnd"` + DeleteVolumeStart string `yaml:"deleteVolumeStart"` + DeleteVolumeEnd string `yaml:"deleteVolumeEnd"` + ControllerPublishVolumeStart string `yaml:"controllerPublishVolumeStart"` + ControllerPublishVolumeEnd string `yaml:"controllerPublishVolumeEnd"` + ControllerUnpublishVolumeStart string `yaml:"controllerUnpublishVolumeStart"` + ControllerUnpublishVolumeEnd string `yaml:"controllerUnpublishVolumeEnd"` + ValidateVolumeCapabilities string `yaml:"validateVolumeCapabilities"` + ListVolumesStart string `yaml:"listVolumesStart"` + ListVolumesEnd string `yaml:"listVolumesEnd"` + GetCapacity string `yaml:"getCapacity"` + ControllerGetCapabilitiesStart string `yaml:"controllerGetCapabilitiesStart"` + ControllerGetCapabilitiesEnd string `yaml:"controllerGetCapabilitiesEnd"` + CreateSnapshotStart string `yaml:"createSnapshotStart"` + CreateSnapshotEnd string `yaml:"createSnapshotEnd"` + DeleteSnapshotStart string `yaml:"deleteSnapshotStart"` + DeleteSnapshotEnd string `yaml:"deleteSnapshotEnd"` + ListSnapshots string `yaml:"listSnapshots"` + ControllerExpandVolumeStart string `yaml:"controllerExpandVolumeStart"` + ControllerExpandVolumeEnd string `yaml:"controllerExpandVolumeEnd"` + NodeStageVolumeStart string `yaml:"nodeStageVolumeStart"` + NodeStageVolumeEnd string `yaml:"nodeStageVolumeEnd"` + NodeUnstageVolumeStart string `yaml:"nodeUnstageVolumeStart"` + NodeUnstageVolumeEnd string `yaml:"nodeUnstageVolumeEnd"` + NodePublishVolumeStart string `yaml:"nodePublishVolumeStart"` + NodePublishVolumeEnd string `yaml:"nodePublishVolumeEnd"` + NodeUnpublishVolumeStart string `yaml:"nodeUnpublishVolumeStart"` + NodeUnpublishVolumeEnd string `yaml:"nodeUnpublishVolumeEnd"` + NodeExpandVolumeStart string `yaml:"nodeExpandVolumeStart"` + NodeExpandVolumeEnd string `yaml:"nodeExpandVolumeEnd"` + NodeGetCapabilities string `yaml:"nodeGetCapabilities"` + NodeGetInfo string `yaml:"nodeGetInfo"` + NodeGetVolumeStatsStart string `yaml:"nodeGetVolumeStatsStart"` + NodeGetVolumeStatsEnd string `yaml:"nodeGetVolumeStatsEnd"` +} + +type Config struct { + DisableAttach bool + DriverName string + AttachLimit int64 + NodeExpansionRequired bool + DisableControllerExpansion bool + DisableOnlineExpansion bool + PermissiveTargetPath bool + EnableTopology bool + ExecHooks *Hooks +} + +// Service is the CSI Mock service provider. +type Service interface { + csi.ControllerServer + csi.IdentityServer + csi.NodeServer +} + +type service struct { + sync.Mutex + nodeID string + vols []csi.Volume + volsRWL sync.RWMutex + volsNID uint64 + snapshots cache.SnapshotCache + snapshotsNID uint64 + config Config + hooksVm *otto.Otto +} + +type Volume struct { + VolumeCSI csi.Volume + NodeID string + ISStaged bool + ISPublished bool + ISEphemeral bool + ISControllerPublished bool + StageTargetPath string + TargetPath string +} + +var MockVolumes map[string]Volume + +// New returns a new Service. 
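+// The service starts out with three mock volumes and three mock snapshots
+// already provisioned; optional JavaScript hooks from the config are loaded
+// into an otto VM once, up front.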
+func New(config Config) Service {
+	s := &service{
+		nodeID: config.DriverName,
+		config: config,
+	}
+	if config.ExecHooks != nil {
+		s.hooksVm = otto.New()
+		s.hooksVm.Run(grpcJSCodes) // set global variables with gRPC error codes
+		_, err := s.hooksVm.Run(s.config.ExecHooks.Globals)
+		if err != nil {
+			klog.Exitf("Error encountered in the global exec hook: %v. Exiting\n", err)
+		}
+	}
+	s.snapshots = cache.NewSnapshotCache()
+	s.vols = []csi.Volume{
+		s.newVolume("Mock Volume 1", gib100),
+		s.newVolume("Mock Volume 2", gib100),
+		s.newVolume("Mock Volume 3", gib100),
+	}
+	MockVolumes = map[string]Volume{}
+
+	s.snapshots.Add(s.newSnapshot("Mock Snapshot 1", "1", map[string]string{"Description": "snapshot 1"}))
+	s.snapshots.Add(s.newSnapshot("Mock Snapshot 2", "2", map[string]string{"Description": "snapshot 2"}))
+	s.snapshots.Add(s.newSnapshot("Mock Snapshot 3", "3", map[string]string{"Description": "snapshot 3"}))
+
+	return s
+}
+
+const (
+	kib    int64 = 1024
+	mib    int64 = kib * 1024
+	gib    int64 = mib * 1024
+	gib100 int64 = gib * 100
+	tib    int64 = gib * 1024
+	tib100 int64 = tib * 100
+)
+
+func (s *service) newVolume(name string, capacity int64) csi.Volume {
+	vol := csi.Volume{
+		VolumeId:      fmt.Sprintf("%d", atomic.AddUint64(&s.volsNID, 1)),
+		VolumeContext: map[string]string{"name": name},
+		CapacityBytes: capacity,
+	}
+	s.setTopology(&vol)
+	return vol
+}
+
+func (s *service) newVolumeFromSnapshot(name string, capacity int64, snapshotID int) csi.Volume {
+	vol := s.newVolume(name, capacity)
+	vol.ContentSource = &csi.VolumeContentSource{
+		Type: &csi.VolumeContentSource_Snapshot{
+			Snapshot: &csi.VolumeContentSource_SnapshotSource{
+				SnapshotId: fmt.Sprintf("%d", snapshotID),
+			},
+		},
+	}
+	s.setTopology(&vol)
+	return vol
+}
+
+func (s *service) newVolumeFromVolume(name string, capacity int64, volumeID int) csi.Volume {
+	vol := s.newVolume(name, capacity)
+	vol.ContentSource = &csi.VolumeContentSource{
+		Type: &csi.VolumeContentSource_Volume{
+			Volume: &csi.VolumeContentSource_VolumeSource{
+				VolumeId: fmt.Sprintf("%d", volumeID),
+			},
+		},
+	}
+	s.setTopology(&vol)
+	return vol
+}
+
+func (s *service) setTopology(vol *csi.Volume) {
+	if s.config.EnableTopology {
+		vol.AccessibleTopology = []*csi.Topology{
+			&csi.Topology{
+				Segments: map[string]string{
+					TopologyKey: TopologyValue,
+				},
+			},
+		}
+	}
+}
+
+func (s *service) findVol(k, v string) (volIdx int, volInfo csi.Volume) {
+	s.volsRWL.RLock()
+	defer s.volsRWL.RUnlock()
+	return s.findVolNoLock(k, v)
+}
+
+func (s *service) findVolNoLock(k, v string) (volIdx int, volInfo csi.Volume) {
+	volIdx = -1
+
+	for i, vi := range s.vols {
+		switch k {
+		case "id":
+			if strings.EqualFold(v, vi.GetVolumeId()) {
+				return i, vi
+			}
+		case "name":
+			if n, ok := vi.VolumeContext["name"]; ok && strings.EqualFold(v, n) {
+				return i, vi
+			}
+		}
+	}
+
+	return
+}
+
+func (s *service) findVolByName(
+	ctx context.Context, name string) (int, csi.Volume) {
+
+	return s.findVol("name", name)
+}
+
+func (s *service) findVolByID(
+	ctx context.Context, id string) (int, csi.Volume) {
+
+	return s.findVol("id", id)
+}
+
+func (s *service) newSnapshot(name, sourceVolumeId string, parameters map[string]string) cache.Snapshot {
+
+	ptime := ptypes.TimestampNow()
+	return cache.Snapshot{
+		Name:       name,
+		Parameters: parameters,
+		SnapshotCSI: csi.Snapshot{
+			SnapshotId:     fmt.Sprintf("%d", atomic.AddUint64(&s.snapshotsNID, 1)),
+			CreationTime:   ptime,
+			SourceVolumeId: sourceVolumeId,
+			ReadyToUse:     true,
+		},
+	}
+}
+
+// getAttachCount returns the number
of attached volumes on the node. +func (s *service) getAttachCount(devPathKey string) int64 { + var count int64 + for _, v := range s.vols { + if device := v.VolumeContext[devPathKey]; device != "" { + count++ + } + } + return count +} + +func (s *service) execHook(hookName string) (codes.Code, string) { + if s.hooksVm != nil { + script := reflect.ValueOf(*s.config.ExecHooks).FieldByName(hookName).String() + if len(script) > 0 { + result, err := s.hooksVm.Run(script) + if err != nil { + klog.Exitf("Exec hook %s error: %v; exiting\n", hookName, err) + } + rv, err := result.ToInteger() + if err == nil { + // Function returned an integer, use it + return codes.Code(rv), fmt.Sprintf("Exec hook %s returned non-OK code", hookName) + } else { + // Function returned non-integer data type, discard it + return codes.OK, "" + } + } + } + return codes.OK, "" +} From 700819609387e24e66abc82f301a5f987175fec4 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Wed, 24 Feb 2021 15:24:58 +0100 Subject: [PATCH 04/14] csi-test: import copyright clarification The code originates in csi-test and this copyright change was made by the original author in https://github.com/kubernetes-csi/csi-test/pull/324 --- .../storage/drivers/csi-test/driver/driver.go | 2 +- .../drivers/csi-test/driver/driver.mock.go | 23 +++++++++++++++---- .../storage/drivers/csi-test/driver/mock.go | 2 +- .../csi-test/mock/cache/SnapshotCache.go | 16 +++++++++++++ .../csi-test/mock/service/controller.go | 16 +++++++++++++ .../drivers/csi-test/mock/service/identity.go | 16 +++++++++++++ .../drivers/csi-test/mock/service/node.go | 16 +++++++++++++ .../drivers/csi-test/mock/service/service.go | 16 +++++++++++++ 8 files changed, 101 insertions(+), 6 deletions(-) diff --git a/test/e2e/storage/drivers/csi-test/driver/driver.go b/test/e2e/storage/drivers/csi-test/driver/driver.go index 33ffe99359d..de6c06e164a 100644 --- a/test/e2e/storage/drivers/csi-test/driver/driver.go +++ b/test/e2e/storage/drivers/csi-test/driver/driver.go @@ -1,5 +1,5 @@ /* -Copyright 2017 Luis Pabón luis@portworx.com +Copyright 2021 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/test/e2e/storage/drivers/csi-test/driver/driver.mock.go b/test/e2e/storage/drivers/csi-test/driver/driver.mock.go index 7eeaca0f022..a4800c32fe4 100644 --- a/test/e2e/storage/drivers/csi-test/driver/driver.mock.go +++ b/test/e2e/storage/drivers/csi-test/driver/driver.mock.go @@ -1,14 +1,29 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/container-storage-interface/spec/lib/go/csi (interfaces: IdentityServer,ControllerServer,NodeServer) +/* +Copyright 2021 The Kubernetes Authors. -// Package driver is a generated GoMock package. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package driver is a generated GoMock package, with required copyright +// header added manually. 
package driver import ( context "context" + reflect "reflect" + csi "github.com/container-storage-interface/spec/lib/go/csi" gomock "github.com/golang/mock/gomock" - reflect "reflect" ) // MockIdentityServer is a mock of IdentityServer interface diff --git a/test/e2e/storage/drivers/csi-test/driver/mock.go b/test/e2e/storage/drivers/csi-test/driver/mock.go index 7e2b5020104..6b8a08c26f7 100644 --- a/test/e2e/storage/drivers/csi-test/driver/mock.go +++ b/test/e2e/storage/drivers/csi-test/driver/mock.go @@ -1,5 +1,5 @@ /* -Copyright 2017 Luis Pabón luis@portworx.com +Copyright 2021 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/test/e2e/storage/drivers/csi-test/mock/cache/SnapshotCache.go b/test/e2e/storage/drivers/csi-test/mock/cache/SnapshotCache.go index 89835e11f20..f3569ede84a 100644 --- a/test/e2e/storage/drivers/csi-test/mock/cache/SnapshotCache.go +++ b/test/e2e/storage/drivers/csi-test/mock/cache/SnapshotCache.go @@ -1,3 +1,19 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package cache import ( diff --git a/test/e2e/storage/drivers/csi-test/mock/service/controller.go b/test/e2e/storage/drivers/csi-test/mock/service/controller.go index a8192fedc0e..21fd2edc03c 100644 --- a/test/e2e/storage/drivers/csi-test/mock/service/controller.go +++ b/test/e2e/storage/drivers/csi-test/mock/service/controller.go @@ -1,3 +1,19 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package service import ( diff --git a/test/e2e/storage/drivers/csi-test/mock/service/identity.go b/test/e2e/storage/drivers/csi-test/mock/service/identity.go index 837c8763c1c..2f375e79679 100644 --- a/test/e2e/storage/drivers/csi-test/mock/service/identity.go +++ b/test/e2e/storage/drivers/csi-test/mock/service/identity.go @@ -1,3 +1,19 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + package service import ( diff --git a/test/e2e/storage/drivers/csi-test/mock/service/node.go b/test/e2e/storage/drivers/csi-test/mock/service/node.go index 7c509150181..e79d2561a34 100644 --- a/test/e2e/storage/drivers/csi-test/mock/service/node.go +++ b/test/e2e/storage/drivers/csi-test/mock/service/node.go @@ -1,3 +1,19 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package service import ( diff --git a/test/e2e/storage/drivers/csi-test/mock/service/service.go b/test/e2e/storage/drivers/csi-test/mock/service/service.go index ff54ae9e506..bf6f416991a 100644 --- a/test/e2e/storage/drivers/csi-test/mock/service/service.go +++ b/test/e2e/storage/drivers/csi-test/mock/service/service.go @@ -1,3 +1,19 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + package service import ( From df6d3bc7dd1452400331d24bcfd012579375ce06 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Fri, 4 Dec 2020 15:20:45 +0100 Subject: [PATCH 05/14] CSI mock driver: fix faulty error message Caught by verify-typecheck.sh after importing the code into Kubernetes: ERROR(linux/arm): /home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/test/e2e/storage/drivers/csi-test/mock/service/controller.go:404:20: math.MaxUint32 (untyped int constant 4294967295) overflows int ERROR(linux/arm): /home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/test/e2e/storage/drivers/csi-test/mock/service/controller.go:795:20: math.MaxUint32 (untyped int constant 4294967295) overflows int ERROR(linux/386): /home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/test/e2e/storage/drivers/csi-test/mock/service/controller.go:404:20: math.MaxUint32 (untyped int constant 4294967295) overflows int ERROR(linux/386): /home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/test/e2e/storage/drivers/csi-test/mock/service/controller.go:795:20: math.MaxUint32 (untyped int constant 4294967295) overflows int ERROR(windows/386): /home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/test/e2e/storage/drivers/csi-test/mock/service/controller.go:404:20: math.MaxUint32 (untyped int constant 4294967295) overflows int ERROR(windows/386): /home/prow/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/test/e2e/storage/drivers/csi-test/mock/service/controller.go:795:20: math.MaxUint32 (untyped int constant 4294967295) overflows int Instead of producing our own error message, we can show the original value and the error from strconv. --- .../storage/drivers/csi-test/mock/service/controller.go | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/test/e2e/storage/drivers/csi-test/mock/service/controller.go b/test/e2e/storage/drivers/csi-test/mock/service/controller.go index 21fd2edc03c..7ff84d643b4 100644 --- a/test/e2e/storage/drivers/csi-test/mock/service/controller.go +++ b/test/e2e/storage/drivers/csi-test/mock/service/controller.go @@ -18,7 +18,6 @@ package service import ( "fmt" - "math" "path" "reflect" "strconv" @@ -412,8 +411,8 @@ func (s *service) ListVolumes( if err != nil { return nil, status.Errorf( codes.Aborted, - "startingToken=%d !< int32=%d", - startingToken, math.MaxUint32) + "startingToken=%s: %v", + v, err) } startingToken = int32(i) } @@ -801,8 +800,8 @@ func getAllSnapshots(s *service, req *csi.ListSnapshotsRequest) (*csi.ListSnapsh if err != nil { return nil, status.Errorf( codes.Aborted, - "startingToken=%d !< int32=%d", - startingToken, math.MaxUint32) + "startingToken=%s: %v", + v, err) } startingToken = int32(i) } From ab365c091cf5b86234316533e42342511c324b6f Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Fri, 5 Feb 2021 19:42:12 +0100 Subject: [PATCH 06/14] mock driver: replace logrus with klog klog now has structured logging. 
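As a rough sketch (illustrative only, not part of the patch itself), the
conversion pattern looks like this: a logrus call that attaches fields
through a fluent API becomes a single klog call that takes the message
followed by alternating key/value arguments, with V(5) standing in for
logrus' debug level:

    // logrus style, as used by the mock driver so far:
    log.WithField("volumeID", req.VolumeId).Debug("mock delete volume")

    // klog structured style, as introduced by this patch:
    klog.V(5).InfoS("mock delete volume", "volumeID", req.VolumeId)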
--- .../storage/drivers/csi-test/mock/service/controller.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/test/e2e/storage/drivers/csi-test/mock/service/controller.go b/test/e2e/storage/drivers/csi-test/mock/service/controller.go index 7ff84d643b4..fb38d8d02e9 100644 --- a/test/e2e/storage/drivers/csi-test/mock/service/controller.go +++ b/test/e2e/storage/drivers/csi-test/mock/service/controller.go @@ -23,10 +23,11 @@ import ( "strconv" "github.com/container-storage-interface/spec/lib/go/csi" - log "github.com/sirupsen/logrus" "golang.org/x/net/context" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + + "k8s.io/klog/v2" ) const ( @@ -148,7 +149,7 @@ func (s *service) DeleteVolume( copy(s.vols[i:], s.vols[i+1:]) s.vols[len(s.vols)-1] = csi.Volume{} s.vols = s.vols[:len(s.vols)-1] - log.WithField("volumeID", req.VolumeId).Debug("mock delete volume") + klog.V(5).InfoS("mock delete volume", "volumeID", req.VolumeId) if hookVal, hookMsg := s.execHook("DeleteVolumeEnd"); hookVal != codes.OK { return nil, status.Errorf(hookVal, hookMsg) @@ -656,7 +657,7 @@ func (s *service) DeleteSnapshot(ctx context.Context, // leaks. The slice's elements may not be pointers, but the structs // themselves have fields that are. s.snapshots.Delete(i) - log.WithField("SnapshotId", req.SnapshotId).Debug("mock delete snapshot") + klog.V(5).InfoS("mock delete snapshot", "SnapshotId", req.SnapshotId) if hookVal, hookMsg := s.execHook("DeleteSnapshotEnd"); hookVal != codes.OK { return nil, status.Errorf(hookVal, hookMsg) @@ -726,7 +727,7 @@ func (s *service) ControllerExpandVolume( // Check to see if the volume already satisfied request size. if v.CapacityBytes == requestBytes { - log.WithField("volumeID", v.VolumeId).Infof("Volume capacity is already %d, no need to expand", requestBytes) + klog.V(5).InfoS("volume capacity sufficient, no need to expand", "requested", requestBytes, "current", v.CapacityBytes, "volumeID", v.VolumeId) return resp, nil } From a2a34bb7447221215769e3f7b180b9ef1d81bce3 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Mon, 8 Feb 2021 20:16:13 +0100 Subject: [PATCH 07/14] mock driver fixes --- test/e2e/storage/drivers/csi-test/mock/service/service.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/storage/drivers/csi-test/mock/service/service.go b/test/e2e/storage/drivers/csi-test/mock/service/service.go index bf6f416991a..246bb374364 100644 --- a/test/e2e/storage/drivers/csi-test/mock/service/service.go +++ b/test/e2e/storage/drivers/csi-test/mock/service/service.go @@ -215,7 +215,7 @@ func (s *service) newVolumeFromVolume(name string, capacity int64, volumeID int) func (s *service) setTopology(vol *csi.Volume) { if s.config.EnableTopology { vol.AccessibleTopology = []*csi.Topology{ - &csi.Topology{ + { Segments: map[string]string{ TopologyKey: TopologyValue, }, From 92bac8afc190fe9a8e133099d5df01e933630ab9 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Mon, 8 Feb 2021 20:20:25 +0100 Subject: [PATCH 08/14] mock driver: fix no-op setDefaultCreds The function must modify the content of the "creds" pointer, not the pointer. Found via hack/verify-staticcheck.sh after importing the code into Kubernetes. It is uncertain whether this bug had any consequences. 
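For context, a minimal illustration of why the original code was a no-op
(function names here are made up for the example): assigning a new address
to a pointer parameter only rebinds the local copy of that pointer, while
assigning through the pointer mutates the caller's value:

    // Broken: the caller's CSICreds value is never touched.
    func setDefaultCredsBroken(creds *CSICreds) {
        creds = &CSICreds{CreateVolumeSecret: "secretval1"}
    }

    // Fixed (as in this patch): write through the pointer.
    func setDefaultCredsFixed(creds *CSICreds) {
        *creds = CSICreds{CreateVolumeSecret: "secretval1"}
    }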
--- test/e2e/storage/drivers/csi-test/driver/driver.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/storage/drivers/csi-test/driver/driver.go b/test/e2e/storage/drivers/csi-test/driver/driver.go index de6c06e164a..0a61ae7c48e 100644 --- a/test/e2e/storage/drivers/csi-test/driver/driver.go +++ b/test/e2e/storage/drivers/csi-test/driver/driver.go @@ -174,7 +174,7 @@ func stop(lock *sync.Mutex, wg *sync.WaitGroup, server *grpc.Server, running boo // setDefaultCreds sets the default credentials, given a CSICreds instance. func setDefaultCreds(creds *CSICreds) { - creds = &CSICreds{ + *creds = CSICreds{ CreateVolumeSecret: "secretval1", DeleteVolumeSecret: "secretval2", ControllerPublishVolumeSecret: "secretval3", From 3adcf11b45770dc2a6ef6231b453733ae43641c5 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Fri, 4 Dec 2020 12:55:36 +0100 Subject: [PATCH 09/14] e2e storage: use embedded mock CSI driver This replaces embedding of JavaScript code into the mock driver that runs inside the cluster with Go callbacks which run inside the e2e.test suite itself. In contrast to the JavaScript hooks, they have direct access to all parameters and can fabricate arbitrary responses, not just error codes. Because the callbacks run in the same process as the test itself, it is possible to set up two-way communication via shared variables or channels. This opens the door for writing better tests. Some of the existing tests that poll mock driver output could be simplified, but that can be addressed later. For now, only tests using hooks use embedding. How gRPC calls are retrieved is abstracted behind the CSIMockTestDriver interface, so tests don't need to be modified when switching between embedding and remote mock driver. --- test/e2e/storage/csi_mock_volume.go | 264 ++++++------- .../csi-test/driver/driver-controller.go | 110 ------ .../drivers/csi-test/driver/driver-node.go | 109 ------ .../storage/drivers/csi-test/driver/driver.go | 32 +- .../storage/drivers/csi-test/driver/mock.go | 25 +- .../csi-test/mock/service/controller.go | 8 +- .../csi-test/mock/service/hooks-const.go | 24 -- .../drivers/csi-test/mock/service/node.go | 36 +- .../drivers/csi-test/mock/service/service.go | 113 ++---- test/e2e/storage/drivers/csi.go | 354 +++++++++++++++--- test/e2e/storage/drivers/proxy/io.go | 82 ++++ test/e2e/storage/drivers/proxy/portproxy.go | 344 +++++++++++++++++ .../storage-csi/mock/csi-mock-driver.yaml | 5 - .../storage-csi/mock/csi-mock-proxy.yaml | 105 ++++++ 14 files changed, 1027 insertions(+), 584 deletions(-) delete mode 100644 test/e2e/storage/drivers/csi-test/driver/driver-controller.go delete mode 100644 test/e2e/storage/drivers/csi-test/driver/driver-node.go delete mode 100644 test/e2e/storage/drivers/csi-test/mock/service/hooks-const.go create mode 100644 test/e2e/storage/drivers/proxy/io.go create mode 100644 test/e2e/storage/drivers/proxy/portproxy.go create mode 100644 test/e2e/testing-manifests/storage-csi/mock/csi-mock-proxy.yaml diff --git a/test/e2e/storage/csi_mock_volume.go b/test/e2e/storage/csi_mock_volume.go index 36f673f6fd4..7f38af408ec 100644 --- a/test/e2e/storage/csi_mock_volume.go +++ b/test/e2e/storage/csi_mock_volume.go @@ -19,15 +19,16 @@ package storage import ( "context" "crypto/sha256" - "encoding/json" "errors" "fmt" "math/rand" "strconv" "strings" + "sync/atomic" "time" "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" v1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" storagev1alpha1 "k8s.io/api/storage/v1alpha1" @@ 
-75,13 +76,6 @@ const (
 	// How long to wait for kubelet to unstage a volume after a pod is deleted
 	csiUnstageWaitTimeout = 1 * time.Minute
-
-	// Name of CSI driver pod name (it's in a StatefulSet with a stable name)
-	driverPodName = "csi-mockplugin-0"
-	// Name of CSI driver container name
-	driverContainerName = "mock"
-	// Prefix of the mock driver grpc log
-	grpcCallPrefix = "gRPCCall:"
 )
 
 // csiCall represents an expected call from Kubernetes to CSI mock driver and
@@ -113,7 +107,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 		// just disable resizing on the driver; it overrides the enableResizing flag for the CSI mock driver
 		disableResizingOnDriver bool
 		enableSnapshot          bool
-		javascriptHooks         map[string]string
+		hooks                   *drivers.Hooks
 		tokenRequests           []storagev1.TokenRequest
 		requiresRepublish       *bool
 		fsGroupPolicy           *storagev1.FSGroupPolicy
@@ -127,7 +121,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 		pvcs        []*v1.PersistentVolumeClaim
 		sc          map[string]*storagev1.StorageClass
 		vsc         map[string]*unstructured.Unstructured
-		driver      storageframework.TestDriver
+		driver      drivers.MockCSITestDriver
 		provisioner string
 		tp          testParameters
 	}
@@ -155,12 +149,29 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 			EnableResizing:      tp.enableResizing,
 			EnableNodeExpansion: tp.enableNodeExpansion,
 			EnableSnapshot:      tp.enableSnapshot,
-			JavascriptHooks:     tp.javascriptHooks,
 			TokenRequests:       tp.tokenRequests,
 			RequiresRepublish:   tp.requiresRepublish,
 			FSGroupPolicy:       tp.fsGroupPolicy,
 		}
 
+		// At the moment, only tests which need hooks are
+		// using the embedded CSI mock driver. The rest run
+		// the driver inside the cluster although they could
+		// be changed to use embedding merely by setting
+		// driverOpts.embedded to true.
+		//
+		// Not enabling it for all tests minimizes
+		// the risk that the introduction of embedding breaks
+		// some existing tests and avoids a dependency
+		// on port forwarding, which is important if some of
+		// these tests are supposed to become part of
+		// conformance testing (port forwarding isn't
+		// currently required).
+		if tp.hooks != nil {
+			driverOpts.Embedded = true
+			driverOpts.Hooks = *tp.hooks
+		}
+
 		// this just disables resizing on the driver, keeping resizing on the SC enabled.
 		if tp.disableResizingOnDriver {
 			driverOpts.EnableResizing = false
@@ -188,10 +199,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 
 	createPod := func(ephemeral bool) (class *storagev1.StorageClass, claim *v1.PersistentVolumeClaim, pod *v1.Pod) {
 		ginkgo.By("Creating pod")
-		var sc *storagev1.StorageClass
-		if dDriver, ok := m.driver.(storageframework.DynamicPVTestDriver); ok {
-			sc = dDriver.GetDynamicProvisionStorageClass(m.config, "")
-		}
+		sc := m.driver.GetDynamicProvisionStorageClass(m.config, "")
 		scTest := testsuites.StorageClassTest{
 			Name:     m.driver.GetDriverInfo().Name,
 			Timeouts: f.Timeouts,
@@ -237,10 +245,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 	createPodWithFSGroup := func(fsGroup *int64) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) {
 		ginkgo.By("Creating pod with fsGroup")
 		nodeSelection := m.config.ClientNodeSelection
-		var sc *storagev1.StorageClass
-		if dDriver, ok := m.driver.(storageframework.DynamicPVTestDriver); ok {
-			sc = dDriver.GetDynamicProvisionStorageClass(m.config, "")
-		}
+		sc := m.driver.GetDynamicProvisionStorageClass(m.config, "")
 		scTest := testsuites.StorageClassTest{
 			Name:        m.driver.GetDriverInfo().Name,
 			Provisioner: sc.Provisioner,
@@ -514,7 +519,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 			framework.ExpectNoError(err, "while deleting")
 
 			ginkgo.By("Checking CSI driver logs")
-			err = checkPodLogs(m.cs, m.config.DriverNamespace.Name, driverPodName, driverContainerName, pod, test.expectPodInfo, test.expectEphemeral, csiInlineVolumesEnabled, false, 1)
+			err = checkPodLogs(m.driver.GetCalls, pod, test.expectPodInfo, test.expectEphemeral, csiInlineVolumesEnabled, false, 1)
 			framework.ExpectNoError(err)
 		})
 	}
@@ -727,19 +732,19 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 	})
 
 	ginkgo.Context("CSI NodeStage error cases [Slow]", func() {
-		// Global variable in all scripts (called before each test)
-		globalScript := `counter=0; console.log("globals loaded", OK, INVALIDARGUMENT)`
 		trackedCalls := []string{
 			"NodeStageVolume",
 			"NodeUnstageVolume",
 		}
 
 		tests := []struct {
-			name              string
-			expectPodRunning  bool
-			expectedCalls     []csiCall
-			nodeStageScript   string
-			nodeUnstageScript string
+			name             string
+			expectPodRunning bool
+			expectedCalls    []csiCall
+
+			// Called for each NodeStageVolume call, with the counter incremented
+			// atomically before the invocation (i.e. the first value will be 1).
+			nodeStageHook func(counter int64) error
 		}{
 			{
 				// This is already tested elsewhere, adding a simple good case here to test the test framework.
@@ -749,7 +754,6 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { {expectedMethod: "NodeStageVolume", expectedError: codes.OK, deletePod: true}, {expectedMethod: "NodeUnstageVolume", expectedError: codes.OK}, }, - nodeStageScript: `OK;`, }, { // Kubelet should repeat NodeStage as long as the pod exists @@ -762,7 +766,12 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { {expectedMethod: "NodeUnstageVolume", expectedError: codes.OK}, }, // Fail first 3 NodeStage requests, 4th succeeds - nodeStageScript: `console.log("Counter:", ++counter); if (counter < 4) { INVALIDARGUMENT; } else { OK; }`, + nodeStageHook: func(counter int64) error { + if counter < 4 { + return status.Error(codes.InvalidArgument, "fake error") + } + return nil + }, }, { // Kubelet should repeat NodeStage as long as the pod exists @@ -775,7 +784,12 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { {expectedMethod: "NodeUnstageVolume", expectedError: codes.OK}, }, // Fail first 3 NodeStage requests, 4th succeeds - nodeStageScript: `console.log("Counter:", ++counter); if (counter < 4) { DEADLINEEXCEEDED; } else { OK; }`, + nodeStageHook: func(counter int64) error { + if counter < 4 { + return status.Error(codes.DeadlineExceeded, "fake error") + } + return nil + }, }, { // After NodeUnstage with ephemeral error, the driver may continue staging the volume. @@ -789,7 +803,9 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { {expectedMethod: "NodeStageVolume", expectedError: codes.DeadlineExceeded, deletePod: true}, {expectedMethod: "NodeUnstageVolume", expectedError: codes.OK}, }, - nodeStageScript: `DEADLINEEXCEEDED;`, + nodeStageHook: func(counter int64) error { + return status.Error(codes.DeadlineExceeded, "fake error") + }, }, { // After NodeUnstage with final error, kubelet can be sure the volume is not staged. @@ -801,21 +817,23 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { // This matches all repeated NodeStage calls with InvalidArgument error (due to exp. backoff). 
 					{expectedMethod: "NodeStageVolume", expectedError: codes.InvalidArgument, deletePod: true},
 				},
-				nodeStageScript: `INVALIDARGUMENT;`,
+				nodeStageHook: func(counter int64) error {
+					return status.Error(codes.InvalidArgument, "fake error")
+				},
 			},
 		}
 		for _, t := range tests {
 			test := t
 			ginkgo.It(test.name, func() {
-				scripts := map[string]string{
-					"globals":                globalScript,
-					"nodeStageVolumeStart":   test.nodeStageScript,
-					"nodeUnstageVolumeStart": test.nodeUnstageScript,
+				var hooks *drivers.Hooks
+				if test.nodeStageHook != nil {
+					hooks = createPreHook("NodeStageVolume", test.nodeStageHook)
 				}
 				init(testParameters{
-					disableAttach:   true,
-					registerDriver:  true,
-					javascriptHooks: scripts,
+					disableAttach:  true,
+					registerDriver: true,
+					hooks:          hooks,
 				})
 				defer cleanup()
@@ -836,7 +854,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 						framework.Failf("timed out waiting for the CSI call that indicates that the pod can be deleted: %v", test.expectedCalls)
 					}
 					time.Sleep(1 * time.Second)
-					_, index, err := compareCSICalls(trackedCalls, test.expectedCalls, m.cs, m.config.DriverNamespace.Name, driverPodName, driverContainerName)
+					_, index, err := compareCSICalls(trackedCalls, test.expectedCalls, m.driver.GetCalls)
 					framework.ExpectNoError(err, "while waiting for initial CSI calls")
 					if index == 0 {
 						// No CSI call received yet
@@ -860,7 +878,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {

 				ginkgo.By("Waiting for all remaining expected CSI calls")
 				err = wait.Poll(time.Second, csiUnstageWaitTimeout, func() (done bool, err error) {
-					_, index, err := compareCSICalls(trackedCalls, test.expectedCalls, m.cs, m.config.DriverNamespace.Name, driverPodName, driverContainerName)
+					_, index, err := compareCSICalls(trackedCalls, test.expectedCalls, m.driver.GetCalls)
 					if err != nil {
 						return true, err
 					}
@@ -946,11 +964,12 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 			}

 			if test.resourceExhausted {
-				params.javascriptHooks = map[string]string{
-					"globals": `counter=0; console.log("globals loaded", OK, INVALIDARGUMENT)`,
-					// Every second call returns RESOURCEEXHAUSTED, starting with the first one.
-					"createVolumeStart": `console.log("Counter:", ++counter); if (counter % 2) { RESOURCEEXHAUSTED; } else { OK; }`,
-				}
+				params.hooks = createPreHook("CreateVolume", func(counter int64) error {
+					if counter%2 != 0 {
+						return status.Error(codes.ResourceExhausted, "fake error")
+					}
+					return nil
+				})
 			}

 			init(params)
@@ -1006,9 +1025,9 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 				expected = append(expected, normal...)
 			}
-			var calls []mockCSICall
+			var calls []drivers.MockCSICall
 			err = wait.PollImmediateUntil(time.Second, func() (done bool, err error) {
-				c, index, err := compareCSICalls(deterministicCalls, expected, m.cs, m.config.DriverNamespace.Name, driverPodName, driverContainerName)
+				c, index, err := compareCSICalls(deterministicCalls, expected, m.driver.GetCalls)
 				if err != nil {
 					return true, fmt.Errorf("error waiting for expected CSI calls: %s", err)
 				}
@@ -1221,31 +1240,32 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 	})

 	ginkgo.Context("CSI Volume Snapshots [Feature:VolumeSnapshotDataSource]", func() {
-		// Global variable in all scripts (called before each test)
-		globalScript := `counter=0; console.log("globals loaded", OK, DEADLINEEXCEEDED)`
 		tests := []struct {
-			name                 string
-			createVolumeScript   string
-			createSnapshotScript string
+			name               string
+			createSnapshotHook func(counter int64) error
 		}{
 			{
-				name:                 "volumesnapshotcontent and pvc in Bound state with deletion timestamp set should not get deleted while snapshot finalizer exists",
-				createVolumeScript:   `OK`,
-				createSnapshotScript: `console.log("Counter:", ++counter); if (counter < 8) { DEADLINEEXCEEDED; } else { OK; }`,
+				name: "volumesnapshotcontent and pvc in Bound state with deletion timestamp set should not get deleted while snapshot finalizer exists",
+				createSnapshotHook: func(counter int64) error {
+					if counter < 8 {
+						return status.Error(codes.DeadlineExceeded, "fake error")
+					}
+					return nil
+				},
 			},
 		}
 		for _, test := range tests {
+			test := test
 			ginkgo.It(test.name, func() {
-				scripts := map[string]string{
-					"globals":             globalScript,
-					"createVolumeStart":   test.createVolumeScript,
-					"createSnapshotStart": test.createSnapshotScript,
+				var hooks *drivers.Hooks
+				if test.createSnapshotHook != nil {
+					hooks = createPreHook("CreateSnapshot", test.createSnapshotHook)
 				}
 				init(testParameters{
-					disableAttach:   true,
-					registerDriver:  true,
-					enableSnapshot:  true,
-					javascriptHooks: scripts,
+					disableAttach:  true,
+					registerDriver: true,
+					enableSnapshot: true,
+					hooks:          hooks,
 				})
 				sDriver, ok := m.driver.(storageframework.SnapshottableTestDriver)
 				if !ok {
@@ -1256,10 +1276,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 				defer cancel()
 				defer cleanup()

-				var sc *storagev1.StorageClass
-				if dDriver, ok := m.driver.(storageframework.DynamicPVTestDriver); ok {
-					sc = dDriver.GetDynamicProvisionStorageClass(m.config, "")
-				}
+				sc := m.driver.GetDynamicProvisionStorageClass(m.config, "")
 				ginkgo.By("Creating storage class")
 				class, err := m.cs.StorageV1().StorageClasses().Create(context.TODO(), sc, metav1.CreateOptions{})
 				framework.ExpectNoError(err, "Failed to create class: %v", err)
@@ -1402,7 +1419,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 			framework.ExpectNoError(err, "while deleting")

 			ginkgo.By("Checking CSI driver logs")
-			err = checkPodLogs(m.cs, m.config.DriverNamespace.Name, driverPodName, driverContainerName, pod, false, false, false, test.deployCSIDriverObject && csiServiceAccountTokenEnabled, numNodePublishVolume)
+			err = checkPodLogs(m.driver.GetCalls, pod, false, false, false, test.deployCSIDriverObject && csiServiceAccountTokenEnabled, numNodePublishVolume)
 			framework.ExpectNoError(err)
 		})
 	}
@@ -1507,33 +1524,30 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 		annotations interface{}
 	)

-	// Global variable in all scripts (called before each test)
-	globalScript := `counter=0; console.log("globals loaded", OK, DEADLINEEXCEEDED)`
 	tests := []struct {
-		name                 string
-		createVolumeScript   string
-		createSnapshotScript string
+		name               string
+		createSnapshotHook func(counter int64) error
 	}{
 		{
 			// volume snapshot should be created using secrets successfully even if there is a failure in the first few attempts,
-			name:               "volume snapshot create/delete with secrets",
-			createVolumeScript: `OK`,
+			name: "volume snapshot create/delete with secrets",
 			// Fail the first 8 calls to create snapshot and succeed the 9th call.
-			createSnapshotScript: `console.log("Counter:", ++counter); if (counter < 8) { DEADLINEEXCEEDED; } else { OK; }`,
+			createSnapshotHook: func(counter int64) error {
+				if counter < 8 {
+					return status.Error(codes.DeadlineExceeded, "fake error")
+				}
+				return nil
+			},
 		},
 	}
 	for _, test := range tests {
 		ginkgo.It(test.name, func() {
-			scripts := map[string]string{
-				"globals":             globalScript,
-				"createVolumeStart":   test.createVolumeScript,
-				"createSnapshotStart": test.createSnapshotScript,
-			}
+			hooks := createPreHook("CreateSnapshot", test.createSnapshotHook)
 			init(testParameters{
-				disableAttach:   true,
-				registerDriver:  true,
-				enableSnapshot:  true,
-				javascriptHooks: scripts,
+				disableAttach:  true,
+				registerDriver: true,
+				enableSnapshot: true,
+				hooks:          hooks,
 			})

 			sDriver, ok := m.driver.(storageframework.SnapshottableTestDriver)
@@ -1895,24 +1909,9 @@ func startBusyBoxPodWithVolumeSource(cs clientset.Interface, volumeSource v1.Vol
 	return cs.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
 }

-// Dummy structure that parses just volume_attributes and error code out of logged CSI call
-type mockCSICall struct {
-	json string // full log entry
-
-	Method  string
-	Request struct {
-		VolumeContext map[string]string `json:"volume_context"`
-	}
-	FullError struct {
-		Code    codes.Code `json:"code"`
-		Message string     `json:"message"`
-	}
-	Error string
-}
-
 // checkPodLogs tests that NodePublish was called with expected volume_context and (for ephemeral inline volumes)
 // has the matching NodeUnpublish
-func checkPodLogs(cs clientset.Interface, namespace, driverPodName, driverContainerName string, pod *v1.Pod, expectPodInfo, ephemeralVolume, csiInlineVolumesEnabled, csiServiceAccountTokenEnabled bool, expectedNumNodePublish int) error {
+func checkPodLogs(getCalls func() ([]drivers.MockCSICall, error), pod *v1.Pod, expectPodInfo, ephemeralVolume, csiInlineVolumesEnabled, csiServiceAccountTokenEnabled bool, expectedNumNodePublish int) error {
 	expectedAttributes := map[string]string{}
 	if expectPodInfo {
 		expectedAttributes["csi.storage.k8s.io/pod.name"] = pod.Name
@@ -1934,10 +1933,11 @@ func checkPodLogs(cs clientset.Interface, namespace, driverPodName, driverContai
 	foundAttributes := sets.NewString()
 	numNodePublishVolume := 0
 	numNodeUnpublishVolume := 0
-	calls, err := parseMockLogs(cs, namespace, driverPodName, driverContainerName)
+	calls, err := getCalls()
 	if err != nil {
 		return err
 	}
+
 	for _, call := range calls {
 		switch call.Method {
 		case "NodePublishVolume":
@@ -1970,39 +1970,6 @@ func checkPodLogs(cs clientset.Interface, namespace, driverPodName, driverContai
 	return nil
 }

-func parseMockLogs(cs clientset.Interface, namespace, driverPodName, driverContainerName string) ([]mockCSICall, error) {
-	// Load logs of driver pod
-	log, err := e2epod.GetPodLogs(cs, namespace, driverPodName, driverContainerName)
-	if err != nil {
-		return nil, fmt.Errorf("could not load CSI driver logs: %s", err)
-	}
-
-	logLines := strings.Split(log, "\n")
-	var calls []mockCSICall
-	for _, line := range logLines {
-		index := strings.Index(line, grpcCallPrefix)
-		if index == -1 {
-			continue
-		}
-		line = line[index+len(grpcCallPrefix):]
-		call := mockCSICall{
-			json: string(line),
-		}
-		err := json.Unmarshal([]byte(line), &call)
-		if err != nil {
-			framework.Logf("Could not parse CSI driver log line %q: %s", line, err)
-			continue
-		}
-
-		// Trim gRPC service name, i.e. "/csi.v1.Identity/Probe" -> "Probe"
-		methodParts := strings.Split(call.Method, "/")
-		call.Method = methodParts[len(methodParts)-1]
-
-		calls = append(calls, call)
-	}
-	return calls, nil
-}
-
 // compareCSICalls compares expectedCalls with logs of the mock driver.
 // It returns index of the first expectedCall that was *not* received
 // yet or error when calls do not match.
@@ -2011,8 +1978,8 @@ func parseMockLogs(cs clientset.Interface, namespace, driverPodName, driverConta
 //
 // Only permanent errors are returned. Other errors are logged and no
 // calls are returned. The caller is expected to retry.
-func compareCSICalls(trackedCalls []string, expectedCallSequence []csiCall, cs clientset.Interface, namespace, driverPodName, driverContainerName string) ([]mockCSICall, int, error) {
-	allCalls, err := parseMockLogs(cs, namespace, driverPodName, driverContainerName)
+func compareCSICalls(trackedCalls []string, expectedCallSequence []csiCall, getCalls func() ([]drivers.MockCSICall, error)) ([]drivers.MockCSICall, int, error) {
+	allCalls, err := getCalls()
 	if err != nil {
 		framework.Logf("intermittent (?) log retrieval error, proceeding without output: %v", err)
 		return nil, 0, nil
@@ -2020,8 +1987,8 @@ func compareCSICalls(trackedCalls []string, expectedCallSequence []csiCall, cs c

 	// Remove all repeated and ignored calls
 	tracked := sets.NewString(trackedCalls...)
-	var calls []mockCSICall
-	var last mockCSICall
+	var calls []drivers.MockCSICall
+	var last drivers.MockCSICall
 	for _, c := range allCalls {
 		if !tracked.Has(c.Method) {
 			continue
@@ -2145,3 +2112,20 @@ func checkDeleteSnapshotSecrets(cs clientset.Interface, annotations interface{})
 	return err
 }
+
+// createPreHook counts invocations of a certain method (identified by a substring in the full gRPC method name).
+func createPreHook(method string, callback func(counter int64) error) *drivers.Hooks {
+	var counter int64
+
+	return &drivers.Hooks{
+		Pre: func() func(ctx context.Context, fullMethod string, request interface{}) (reply interface{}, err error) {
+			return func(ctx context.Context, fullMethod string, request interface{}) (reply interface{}, err error) {
+				if strings.Contains(fullMethod, method) {
+					// Use a separate name for the incremented value so that it
+					// does not shadow the shared counter variable.
+					count := atomic.AddInt64(&counter, 1)
+					return nil, callback(count)
+				}
+				return nil, nil
+			}
+		}(),
+	}
+}
diff --git a/test/e2e/storage/drivers/csi-test/driver/driver-controller.go b/test/e2e/storage/drivers/csi-test/driver/driver-controller.go
deleted file mode 100644
index 1d8d2bd771e..00000000000
--- a/test/e2e/storage/drivers/csi-test/driver/driver-controller.go
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
-Copyright 2019 Kubernetes Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package driver
-
-import (
-	"context"
-	"net"
-	"sync"
-
-	"google.golang.org/grpc/reflection"
-
-	csi "github.com/container-storage-interface/spec/lib/go/csi"
-	"google.golang.org/grpc"
-)
-
-// CSIDriverControllerServer is the Controller service component of the driver.
-type CSIDriverControllerServer struct {
-	Controller csi.ControllerServer
-	Identity   csi.IdentityServer
-}
-
-// CSIDriverController is the CSI Driver Controller backend.
-type CSIDriverController struct {
-	listener         net.Listener
-	server           *grpc.Server
-	controllerServer *CSIDriverControllerServer
-	wg               sync.WaitGroup
-	running          bool
-	lock             sync.Mutex
-	creds            *CSICreds
-}
-
-func NewCSIDriverController(controllerServer *CSIDriverControllerServer) *CSIDriverController {
-	return &CSIDriverController{
-		controllerServer: controllerServer,
-	}
-}
-
-func (c *CSIDriverController) goServe(started chan<- bool) {
-	goServe(c.server, &c.wg, c.listener, started)
-}
-
-func (c *CSIDriverController) Address() string {
-	return c.listener.Addr().String()
-}
-
-func (c *CSIDriverController) Start(l net.Listener) error {
-	c.lock.Lock()
-	defer c.lock.Unlock()
-
-	// Set listener.
-	c.listener = l
-
-	// Create a new grpc server.
-	c.server = grpc.NewServer(
-		grpc.UnaryInterceptor(c.callInterceptor),
-	)
-
-	if c.controllerServer.Controller != nil {
-		csi.RegisterControllerServer(c.server, c.controllerServer.Controller)
-	}
-	if c.controllerServer.Identity != nil {
-		csi.RegisterIdentityServer(c.server, c.controllerServer.Identity)
-	}
-
-	reflection.Register(c.server)
-
-	waitForServer := make(chan bool)
-	c.goServe(waitForServer)
-	<-waitForServer
-	c.running = true
-	return nil
-}
-
-func (c *CSIDriverController) Stop() {
-	stop(&c.lock, &c.wg, c.server, c.running)
-}
-
-func (c *CSIDriverController) Close() {
-	c.server.Stop()
-}
-
-func (c *CSIDriverController) IsRunning() bool {
-	c.lock.Lock()
-	defer c.lock.Unlock()
-
-	return c.running
-}
-
-func (c *CSIDriverController) SetDefaultCreds() {
-	setDefaultCreds(c.creds)
-}
-
-func (c *CSIDriverController) callInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
-	return callInterceptor(ctx, c.creds, req, info, handler)
-}
diff --git a/test/e2e/storage/drivers/csi-test/driver/driver-node.go b/test/e2e/storage/drivers/csi-test/driver/driver-node.go
deleted file mode 100644
index 7720bfc493a..00000000000
--- a/test/e2e/storage/drivers/csi-test/driver/driver-node.go
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
-Copyright 2019 Kubernetes Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package driver
-
-import (
-	context "context"
-	"net"
-	"sync"
-
-	csi "github.com/container-storage-interface/spec/lib/go/csi"
-	"google.golang.org/grpc"
-	"google.golang.org/grpc/reflection"
-)
-
-// CSIDriverNodeServer is the Node service component of the driver.
-type CSIDriverNodeServer struct {
-	Node     csi.NodeServer
-	Identity csi.IdentityServer
-}
-
-// CSIDriverNode is the CSI Driver Node backend.
-type CSIDriverNode struct {
-	listener   net.Listener
-	server     *grpc.Server
-	nodeServer *CSIDriverNodeServer
-	wg         sync.WaitGroup
-	running    bool
-	lock       sync.Mutex
-	creds      *CSICreds
-}
-
-func NewCSIDriverNode(nodeServer *CSIDriverNodeServer) *CSIDriverNode {
-	return &CSIDriverNode{
-		nodeServer: nodeServer,
-	}
-}
-
-func (c *CSIDriverNode) goServe(started chan<- bool) {
-	goServe(c.server, &c.wg, c.listener, started)
-}
-
-func (c *CSIDriverNode) Address() string {
-	return c.listener.Addr().String()
-}
-
-func (c *CSIDriverNode) Start(l net.Listener) error {
-	c.lock.Lock()
-	defer c.lock.Unlock()
-
-	// Set listener.
-	c.listener = l
-
-	// Create a new grpc server.
-	c.server = grpc.NewServer(
-		grpc.UnaryInterceptor(c.callInterceptor),
-	)
-
-	if c.nodeServer.Node != nil {
-		csi.RegisterNodeServer(c.server, c.nodeServer.Node)
-	}
-	if c.nodeServer.Identity != nil {
-		csi.RegisterIdentityServer(c.server, c.nodeServer.Identity)
-	}
-
-	reflection.Register(c.server)
-
-	waitForServer := make(chan bool)
-	c.goServe(waitForServer)
-	<-waitForServer
-	c.running = true
-	return nil
-}
-
-func (c *CSIDriverNode) Stop() {
-	stop(&c.lock, &c.wg, c.server, c.running)
-}
-
-func (c *CSIDriverNode) Close() {
-	c.server.Stop()
-}
-
-func (c *CSIDriverNode) IsRunning() bool {
-	c.lock.Lock()
-	defer c.lock.Unlock()
-
-	return c.running
-}
-
-func (c *CSIDriverNode) SetDefaultCreds() {
-	setDefaultCreds(c.creds)
-}
-
-func (c *CSIDriverNode) callInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
-	return callInterceptor(ctx, c.creds, req, info, handler)
-}
diff --git a/test/e2e/storage/drivers/csi-test/driver/driver.go b/test/e2e/storage/drivers/csi-test/driver/driver.go
index 0a61ae7c48e..ceddd7174ec 100644
--- a/test/e2e/storage/drivers/csi-test/driver/driver.go
+++ b/test/e2e/storage/drivers/csi-test/driver/driver.go
@@ -27,11 +27,10 @@ import (
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"

-	"k8s.io/klog"
+	"k8s.io/klog/v2"

 	"github.com/container-storage-interface/spec/lib/go/csi"
 	"google.golang.org/grpc"
-	"google.golang.org/grpc/reflection"
 )

 var (
@@ -75,8 +74,11 @@ type CSIDriver struct {
 	running bool
 	lock    sync.Mutex
 	creds   *CSICreds
+	logGRPC LogGRPC
 }

+type LogGRPC func(method string, request, reply interface{}, err error)
+
 func NewCSIDriver(servers *CSIDriverServers) *CSIDriver {
 	return &CSIDriver{
 		servers: servers,
@@ -90,7 +92,12 @@ func (c *CSIDriver) goServe(started chan<- bool) {
 func (c *CSIDriver) Address() string {
 	return c.listener.Addr().String()
 }
-func (c *CSIDriver) Start(l net.Listener) error {
+
+// Start runs a gRPC server with all enabled services. If an interceptor
+// is given, then it will be used. Otherwise, an interceptor which
+// handles simple credential checks and logs gRPC calls in JSON format
+// will be used.
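+//
+// A minimal caller sketch (hypothetical, not part of this change):
+//
+//	l, err := net.Listen("tcp", "127.0.0.1:0")
+//	if err != nil {
+//		return err
+//	}
+//	// nil selects the default credential-checking/logging interceptor.
+//	if err := driver.Start(l, nil); err != nil {
+//		return err
+//	}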
+func (c *CSIDriver) Start(l net.Listener, interceptor grpc.UnaryServerInterceptor) error {
 	c.lock.Lock()
 	defer c.lock.Unlock()

@@ -98,9 +105,10 @@ func (c *CSIDriver) Start(l net.Listener) error {
 	c.listener = l

 	// Create a new grpc server
-	c.server = grpc.NewServer(
-		grpc.UnaryInterceptor(c.callInterceptor),
-	)
+	if interceptor == nil {
+		interceptor = c.callInterceptor
+	}
+	c.server = grpc.NewServer(grpc.UnaryInterceptor(interceptor))

 	// Register Mock servers
 	if c.servers.Controller != nil {
@@ -112,7 +120,6 @@ func (c *CSIDriver) Start(l net.Listener) error {
 	if c.servers.Node != nil {
 		csi.RegisterNodeServer(c.server, c.servers.Node)
 	}
-	reflection.Register(c.server)

 	// Start listening for requests
 	waitForServer := make(chan bool)
@@ -142,10 +149,6 @@ func (c *CSIDriver) SetDefaultCreds() {
 	setDefaultCreds(c.creds)
 }

-func (c *CSIDriver) callInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
-	return callInterceptor(ctx, c.creds, req, info, handler)
-}
-
 // goServe starts a grpc server.
 func goServe(server *grpc.Server, wg *sync.WaitGroup, listener net.Listener, started chan<- bool) {
 	wg.Add(1)
@@ -187,14 +190,17 @@ func setDefaultCreds(creds *CSICreds) {
 	}
 }

-func callInterceptor(ctx context.Context, creds *CSICreds, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
-	err := authInterceptor(creds, req)
+func (c *CSIDriver) callInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
+	err := authInterceptor(c.creds, req)
 	if err != nil {
 		logGRPC(info.FullMethod, req, nil, err)
 		return nil, err
 	}
 	rsp, err := handler(ctx, req)
 	logGRPC(info.FullMethod, req, rsp, err)
+	if c.logGRPC != nil {
+		c.logGRPC(info.FullMethod, req, rsp, err)
+	}
 	return rsp, err
 }

diff --git a/test/e2e/storage/drivers/csi-test/driver/mock.go b/test/e2e/storage/drivers/csi-test/driver/mock.go
index 6b8a08c26f7..c6560f99ed3 100644
--- a/test/e2e/storage/drivers/csi-test/driver/mock.go
+++ b/test/e2e/storage/drivers/csi-test/driver/mock.go
@@ -19,7 +19,6 @@ package driver
 import (
 	"net"

-	"github.com/kubernetes-csi/csi-test/v4/utils"
 	"google.golang.org/grpc"
 )

@@ -31,10 +30,11 @@ type MockCSIDriverServers struct {
 type MockCSIDriver struct {
 	CSIDriver
-	conn *grpc.ClientConn
+	conn        *grpc.ClientConn
+	interceptor grpc.UnaryServerInterceptor
 }

-func NewMockCSIDriver(servers *MockCSIDriverServers) *MockCSIDriver {
+func NewMockCSIDriver(servers *MockCSIDriverServers, interceptor grpc.UnaryServerInterceptor) *MockCSIDriver {
 	return &MockCSIDriver{
 		CSIDriver: CSIDriver{
 			servers: &CSIDriverServers{
@@ -43,6 +43,7 @@ func NewMockCSIDriver(servers *MockCSIDriverServers) *MockCSIDriver {
 				Identity:   servers.Identity,
 			},
 		},
+		interceptor: interceptor,
 	}
 }

@@ -53,7 +54,7 @@ func (m *MockCSIDriver) StartOnAddress(network, address string) error {
 		return err
 	}

-	if err := m.CSIDriver.Start(l); err != nil {
+	if err := m.CSIDriver.Start(l, m.interceptor); err != nil {
 		l.Close()
 		return err
 	}
@@ -67,22 +68,6 @@ func (m *MockCSIDriver) Start() error {
 	return m.StartOnAddress("tcp", "127.0.0.1:0")
 }

-func (m *MockCSIDriver) Nexus() (*grpc.ClientConn, error) {
-	// Start server
-	err := m.Start()
-	if err != nil {
-		return nil, err
-	}
-
-	// Create a client connection
-	m.conn, err = utils.Connect(m.Address(), grpc.WithInsecure())
-	if err != nil {
-		return nil, err
-	}
-
-	return m.conn, nil
-}
-
 func (m *MockCSIDriver) Close() {
 	m.conn.Close()
 	m.server.Stop()
diff --git a/test/e2e/storage/drivers/csi-test/mock/service/controller.go b/test/e2e/storage/drivers/csi-test/mock/service/controller.go
index fb38d8d02e9..7e8c1abad1f 100644
--- a/test/e2e/storage/drivers/csi-test/mock/service/controller.go
+++ b/test/e2e/storage/drivers/csi-test/mock/service/controller.go
@@ -46,9 +46,7 @@ func (s *service) CreateVolume(
 	if req.VolumeCapabilities == nil {
 		return nil, status.Error(codes.InvalidArgument, "Volume Capabilities cannot be empty")
 	}
-	if hookVal, hookMsg := s.execHook("CreateVolumeStart"); hookVal != codes.OK {
-		return nil, status.Errorf(hookVal, hookMsg)
-	}
+
 	// Check to see if the volume already exists.
 	if i, v := s.findVolByName(ctx, req.Name); i >= 0 {
 		// Requested volume name already exists, need to check if the existing volume's
@@ -610,10 +608,6 @@ func (s *service) CreateSnapshot(ctx context.Context,
 		return nil, status.Error(codes.InvalidArgument, "Snapshot SourceVolumeId cannot be empty")
 	}

-	if hookVal, hookMsg := s.execHook("CreateSnapshotStart"); hookVal != codes.OK {
-		return nil, status.Errorf(hookVal, hookMsg)
-	}
-
 	// Check to see if the snapshot already exists.
 	if i, v := s.snapshots.FindSnapshot("name", req.GetName()); i >= 0 {
 		// Requested snapshot name already exists
diff --git a/test/e2e/storage/drivers/csi-test/mock/service/hooks-const.go b/test/e2e/storage/drivers/csi-test/mock/service/hooks-const.go
deleted file mode 100644
index 46eed6af7ca..00000000000
--- a/test/e2e/storage/drivers/csi-test/mock/service/hooks-const.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package service
-
-// Predefinded constants for the JavaScript hooks, they must correspond to the
-// error codes used by gRPC, see:
-// https://github.com/grpc/grpc-go/blob/master/codes/codes.go
-const (
-	grpcJSCodes string = `OK = 0;
-	CANCELED = 1;
-	UNKNOWN = 2;
-	INVALIDARGUMENT = 3;
-	DEADLINEEXCEEDED = 4;
-	NOTFOUND = 5;
-	ALREADYEXISTS = 6;
-	PERMISSIONDENIED = 7;
-	RESOURCEEXHAUSTED = 8;
-	FAILEDPRECONDITION = 9;
-	ABORTED = 10;
-	OUTOFRANGE = 11;
-	UNIMPLEMENTED = 12;
-	INTERNAL = 13;
-	UNAVAILABLE = 14;
-	DATALOSS = 15;
-	UNAUTHENTICATED = 16`
-)
diff --git a/test/e2e/storage/drivers/csi-test/mock/service/node.go b/test/e2e/storage/drivers/csi-test/mock/service/node.go
index e79d2561a34..ddffea048af 100644
--- a/test/e2e/storage/drivers/csi-test/mock/service/node.go
+++ b/test/e2e/storage/drivers/csi-test/mock/service/node.go
@@ -18,7 +18,6 @@ package service
 import (
 	"fmt"
-	"os"
 	"path"
 	"strconv"

@@ -35,10 +34,6 @@ func (s *service) NodeStageVolume(
 	req *csi.NodeStageVolumeRequest) (
 	*csi.NodeStageVolumeResponse, error) {

-	if hookVal, hookMsg := s.execHook("NodeStageVolumeStart"); hookVal != codes.OK {
-		return nil, status.Errorf(hookVal, hookMsg)
-	}
-
 	device, ok := req.PublishContext["device"]
 	if !ok {
 		if s.config.DisableAttach {
@@ -62,7 +57,7 @@ func (s *service) NodeStageVolume(
 		return nil, status.Error(codes.InvalidArgument, "Volume Capability cannot be empty")
 	}

-	exists, err := checkTargetExists(req.StagingTargetPath)
+	exists, err := s.config.IO.DirExists(req.StagingTargetPath)
 	if err != nil {
 		return nil, status.Error(codes.Internal, err.Error())
 	}
@@ -113,10 +108,6 @@ func (s *service) NodeUnstageVolume(
 		return nil, status.Error(codes.InvalidArgument, "Staging Target Path cannot be empty")
 	}

-	if hookVal, hookMsg := s.execHook("NodeUnstageVolumeStart"); hookVal != codes.OK {
-		return nil, status.Errorf(hookVal, hookMsg)
-	}
-
 	s.volsRWL.Lock()
 	defer s.volsRWL.Unlock()

@@ -178,7 +169,7 @@ func (s *service) NodePublishVolume(
 	// May happen with old (or, at this time, even the current) Kubernetes
 	// although it shouldn't (https://github.com/kubernetes/kubernetes/issues/75535).
-	exists, err := checkTargetExists(req.TargetPath)
+	exists, err := s.config.IO.DirExists(req.TargetPath)
 	if err != nil {
 		return nil, status.Error(codes.Internal, err.Error())
 	}
@@ -220,13 +211,13 @@ func (s *service) NodePublishVolume(
 		}
 	} else {
 		if req.GetTargetPath() != "" {
-			exists, err := checkTargetExists(req.GetTargetPath())
+			exists, err := s.config.IO.DirExists(req.GetTargetPath())
 			if err != nil {
 				return nil, status.Error(codes.Internal, err.Error())
 			}
 			if !exists {
 				// If target path does not exist we need to create the directory where volume will be staged
-				if err = os.Mkdir(req.TargetPath, os.FileMode(0755)); err != nil {
+				if err = s.config.IO.Mkdir(req.TargetPath); err != nil {
 					msg := fmt.Sprintf("NodePublishVolume: could not create target dir %q: %v", req.TargetPath, err)
 					return nil, status.Error(codes.Internal, msg)
 				}
@@ -281,7 +272,7 @@ func (s *service) NodeUnpublishVolume(
 	}

 	// Delete any created paths
-	err := os.RemoveAll(v.VolumeContext[nodeMntPathKey])
+	err := s.config.IO.RemoveAll(v.VolumeContext[nodeMntPathKey])
 	if err != nil {
 		return nil, status.Errorf(codes.Internal, "Unable to delete previously created target directory")
 	}
@@ -415,10 +406,6 @@ func (s *service) NodeGetInfo(ctx context.Context,
 func (s *service) NodeGetVolumeStats(ctx context.Context,
 	req *csi.NodeGetVolumeStatsRequest) (*csi.NodeGetVolumeStatsResponse, error) {

-	if hookVal, hookMsg := s.execHook("NodeGetVolumeStatsStart"); hookVal != codes.OK {
-		return nil, status.Errorf(hookVal, hookMsg)
-	}
-
 	resp := &csi.NodeGetVolumeStatsResponse{
 		VolumeCondition: &csi.VolumeCondition{},
 	}
@@ -461,16 +448,3 @@ func (s *service) NodeGetVolumeStats(ctx context.Context,

 	return resp, nil
 }
-
-// checkTargetExists checks if a given path exists.
-func checkTargetExists(targetPath string) (bool, error) {
-	_, err := os.Stat(targetPath)
-	switch {
-	case err == nil:
-		return true, nil
-	case os.IsNotExist(err):
-		return false, nil
-	default:
-		return false, err
-	}
-}
diff --git a/test/e2e/storage/drivers/csi-test/mock/service/service.go b/test/e2e/storage/drivers/csi-test/mock/service/service.go
index 246bb374364..93edbf300ed 100644
--- a/test/e2e/storage/drivers/csi-test/mock/service/service.go
+++ b/test/e2e/storage/drivers/csi-test/mock/service/service.go
@@ -18,21 +18,17 @@ package service
 import (
 	"fmt"
-	"reflect"
+	"os"
 	"strings"
 	"sync"
 	"sync/atomic"

-	"k8s.io/klog"
-
 	"github.com/container-storage-interface/spec/lib/go/csi"
-	"github.com/kubernetes-csi/csi-test/v4/mock/cache"
 	"golang.org/x/net/context"
 	"google.golang.org/grpc/codes"
+	"k8s.io/kubernetes/test/e2e/storage/drivers/csi-test/mock/cache"

 	"github.com/golang/protobuf/ptypes"
-
-	"github.com/robertkrimen/otto"
 )

 const (
@@ -51,47 +47,7 @@ const (
 // Manifest is the SP's manifest.
 var Manifest = map[string]string{
-	"url": "https://github.com/kubernetes-csi/csi-test/mock",
-}
-
-// JavaScript hooks to be run to perform various tests
-type Hooks struct {
-	Globals                        string `yaml:"globals"` // will be executed once before all other scripts
-	CreateVolumeStart              string `yaml:"createVolumeStart"`
-	CreateVolumeEnd                string `yaml:"createVolumeEnd"`
-	DeleteVolumeStart              string `yaml:"deleteVolumeStart"`
-	DeleteVolumeEnd                string `yaml:"deleteVolumeEnd"`
-	ControllerPublishVolumeStart   string `yaml:"controllerPublishVolumeStart"`
-	ControllerPublishVolumeEnd     string `yaml:"controllerPublishVolumeEnd"`
-	ControllerUnpublishVolumeStart string `yaml:"controllerUnpublishVolumeStart"`
-	ControllerUnpublishVolumeEnd   string `yaml:"controllerUnpublishVolumeEnd"`
-	ValidateVolumeCapabilities     string `yaml:"validateVolumeCapabilities"`
-	ListVolumesStart               string `yaml:"listVolumesStart"`
-	ListVolumesEnd                 string `yaml:"listVolumesEnd"`
-	GetCapacity                    string `yaml:"getCapacity"`
-	ControllerGetCapabilitiesStart string `yaml:"controllerGetCapabilitiesStart"`
-	ControllerGetCapabilitiesEnd   string `yaml:"controllerGetCapabilitiesEnd"`
-	CreateSnapshotStart            string `yaml:"createSnapshotStart"`
-	CreateSnapshotEnd              string `yaml:"createSnapshotEnd"`
-	DeleteSnapshotStart            string `yaml:"deleteSnapshotStart"`
-	DeleteSnapshotEnd              string `yaml:"deleteSnapshotEnd"`
-	ListSnapshots                  string `yaml:"listSnapshots"`
-	ControllerExpandVolumeStart    string `yaml:"controllerExpandVolumeStart"`
-	ControllerExpandVolumeEnd      string `yaml:"controllerExpandVolumeEnd"`
-	NodeStageVolumeStart           string `yaml:"nodeStageVolumeStart"`
-	NodeStageVolumeEnd             string `yaml:"nodeStageVolumeEnd"`
-	NodeUnstageVolumeStart         string `yaml:"nodeUnstageVolumeStart"`
-	NodeUnstageVolumeEnd           string `yaml:"nodeUnstageVolumeEnd"`
-	NodePublishVolumeStart         string `yaml:"nodePublishVolumeStart"`
-	NodePublishVolumeEnd           string `yaml:"nodePublishVolumeEnd"`
-	NodeUnpublishVolumeStart       string `yaml:"nodeUnpublishVolumeStart"`
-	NodeUnpublishVolumeEnd         string `yaml:"nodeUnpublishVolumeEnd"`
-	NodeExpandVolumeStart          string `yaml:"nodeExpandVolumeStart"`
-	NodeExpandVolumeEnd            string `yaml:"nodeExpandVolumeEnd"`
-	NodeGetCapabilities            string `yaml:"nodeGetCapabilities"`
-	NodeGetInfo                    string `yaml:"nodeGetInfo"`
-	NodeGetVolumeStatsStart        string `yaml:"nodeGetVolumeStatsStart"`
-	NodeGetVolumeStatsEnd          string `yaml:"nodeGetVolumeStatsEnd"`
+	"url": "https://k8s.io/kubernetes/test/e2e/storage/drivers/csi-test/mock",
 }

 type Config struct {
@@ -103,7 +59,41 @@ type Config struct {
 	DisableOnlineExpansion bool
 	PermissiveTargetPath   bool
 	EnableTopology         bool
-	ExecHooks              *Hooks
+	IO                     DirIO
+}
+
+// DirIO is an abstraction over direct os calls.
+type DirIO interface {
+	// DirExists returns false if the path doesn't exist, true if it exists and is a directory, an error otherwise.
+	DirExists(path string) (bool, error)
+	// Mkdir creates the directory, but not its parents, with 0755 permissions.
+	Mkdir(path string) error
+	// RemoveAll removes the path and everything contained inside it. It's not an error if the path does not exist.
+	RemoveAll(path string) error
+}
+
+type OSDirIO struct{}
+
+func (o OSDirIO) DirExists(path string) (bool, error) {
+	info, err := os.Stat(path)
+	switch {
+	case err == nil && !info.IsDir():
+		return false, fmt.Errorf("%s: not a directory", path)
+	case err == nil:
+		return true, nil
+	case os.IsNotExist(err):
+		return false, nil
+	default:
+		return false, err
+	}
+}
+
+func (o OSDirIO) Mkdir(path string) error {
+	return os.Mkdir(path, os.FileMode(0755))
+}
+
+func (o OSDirIO) RemoveAll(path string) error {
+	return os.RemoveAll(path)
 }

 // Service is the CSI Mock service provider.
@@ -122,7 +112,6 @@ type service struct {
 	snapshots    cache.SnapshotCache
 	snapshotsNID uint64
 	config       Config
-	hooksVm      *otto.Otto
 }

 type Volume struct {
@@ -144,13 +133,8 @@ func New(config Config) Service {
 		nodeID: config.DriverName,
 		config: config,
 	}
-	if config.ExecHooks != nil {
-		s.hooksVm = otto.New()
-		s.hooksVm.Run(grpcJSCodes) // set global variables with gRPC error codes
-		_, err := s.hooksVm.Run(s.config.ExecHooks.Globals)
-		if err != nil {
-			klog.Exitf("Error encountered in the global exec hook: %v. Exiting\n", err)
-		}
+	if s.config.IO == nil {
+		s.config.IO = OSDirIO{}
 	}
 	s.snapshots = cache.NewSnapshotCache()
 	s.vols = []csi.Volume{
@@ -288,22 +272,5 @@ func (s *service) getAttachCount(devPathKey string) int64 {
 }

 func (s *service) execHook(hookName string) (codes.Code, string) {
-	if s.hooksVm != nil {
-		script := reflect.ValueOf(*s.config.ExecHooks).FieldByName(hookName).String()
-		if len(script) > 0 {
-			result, err := s.hooksVm.Run(script)
-			if err != nil {
-				klog.Exitf("Exec hook %s error: %v; exiting\n", hookName, err)
-			}
-			rv, err := result.ToInteger()
-			if err == nil {
-				// Function returned an integer, use it
-				return codes.Code(rv), fmt.Sprintf("Exec hook %s returned non-OK code", hookName)
-			} else {
-				// Function returned non-integer data type, discard it
-				return codes.OK, ""
-			}
-		}
-	}
 	return codes.OK, ""
 }
diff --git a/test/e2e/storage/drivers/csi.go b/test/e2e/storage/drivers/csi.go
index f6c2c253611..17c61ababa1 100644
--- a/test/e2e/storage/drivers/csi.go
+++ b/test/e2e/storage/drivers/csi.go
@@ -37,13 +37,16 @@ package drivers
 import (
 	"context"
+	"encoding/json"
+	"errors"
 	"fmt"
 	"strconv"
+	"strings"
+	"sync"
 	"time"

-	"gopkg.in/yaml.v2"
-
 	"github.com/onsi/ginkgo"
+	"google.golang.org/grpc/codes"
 	v1 "k8s.io/api/core/v1"
 	storagev1 "k8s.io/api/storage/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -51,14 +54,20 @@ import (
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/client-go/kubernetes"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 	e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
+	mockdriver "k8s.io/kubernetes/test/e2e/storage/drivers/csi-test/driver"
+	mockservice "k8s.io/kubernetes/test/e2e/storage/drivers/csi-test/mock/service"
+	"k8s.io/kubernetes/test/e2e/storage/drivers/proxy"
 	storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
+
+	"google.golang.org/grpc"
 )

 const (
@@ -232,10 +241,42 @@ type mockCSIDriver struct {
 	attachLimit         int
 	enableTopology      bool
 	enableNodeExpansion bool
-	javascriptHooks     map[string]string
+	hooks               Hooks
 	tokenRequests       []storagev1.TokenRequest
 	requiresRepublish   *bool
 	fsGroupPolicy       *storagev1.FSGroupPolicy
+
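+	// The next three fields are only used when the mock driver runs embedded
+	// inside the e2e.test binary (see Embedded in CSIMockDriverOpts).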
+	embedded          bool
+	calls             MockCSICalls
+	embeddedCSIDriver *mockdriver.CSIDriver
+
+	// Additional values set during PrepareTest
+	clientSet       kubernetes.Interface
+	driverNamespace *v1.Namespace
+}
+
+// Hooks to be run while handling gRPC calls.
+//
+// At the moment, only generic pre- and post-function call
+// hooks are implemented. Those hooks can cast the request and
+// response values if needed. More hooks inside specific
+// functions could be added if needed.
+type Hooks struct {
+	// Pre is called before invoking the mock driver's implementation of a method.
+	// If either a non-nil reply or error are returned, then those are returned to the caller.
+	Pre func(ctx context.Context, method string, request interface{}) (reply interface{}, err error)
+
+	// Post is called after invoking the mock driver's implementation of a method.
+	// What it returns is used as actual result.
+	Post func(ctx context.Context, method string, request, reply interface{}, err error) (finalReply interface{}, finalErr error)
+}
+
+// MockCSITestDriver provides additional functions specific to the CSI mock driver.
+type MockCSITestDriver interface {
+	storageframework.DynamicPVTestDriver
+
+	// GetCalls returns all currently observed gRPC calls. Only valid
+	// after PrepareTest.
+	GetCalls() ([]MockCSICall, error)
 }

 // CSIMockDriverOpts defines options used for csi driver
@@ -249,10 +290,94 @@ type CSIMockDriverOpts struct {
 	EnableResizing      bool
 	EnableNodeExpansion bool
 	EnableSnapshot      bool
-	JavascriptHooks     map[string]string
 	TokenRequests       []storagev1.TokenRequest
 	RequiresRepublish   *bool
 	FSGroupPolicy       *storagev1.FSGroupPolicy
+
+	// Embedded defines whether the CSI mock driver runs
+	// inside the cluster (false, the default) or just a proxy
+	// runs inside the cluster and all gRPC calls are handled
+	// inside the e2e.test binary.
+	Embedded bool
+
+	// Hooks that will be called if (and only if!) the embedded
+	// mock driver is used. Beware that hooks are invoked
+	// asynchronously in different goroutines.
+	Hooks Hooks
+}
+
+// MockCSICall is a dummy structure that parses just volume_attributes and error code out of a logged CSI call.
+type MockCSICall struct {
+	json string // full log entry
+
+	Method  string
+	Request struct {
+		VolumeContext map[string]string `json:"volume_context"`
+	}
+	FullError struct {
+		Code    codes.Code `json:"code"`
+		Message string     `json:"message"`
+	}
+	Error string
+}
+
+// MockCSICalls is a thread-safe storage for MockCSICall instances.
+type MockCSICalls struct {
+	calls []MockCSICall
+	mutex sync.Mutex
+}
+
+// Get returns all currently recorded calls.
+func (c *MockCSICalls) Get() []MockCSICall {
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+
+	return c.calls[:]
+}
+
+// Add appends one new call at the end.
+func (c *MockCSICalls) Add(call MockCSICall) {
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+
+	c.calls = append(c.calls, call)
+}
+
+// LogGRPC takes individual parameters from the mock CSI driver and adds them.
+func (c *MockCSICalls) LogGRPC(method string, request, reply interface{}, err error) {
+	// Encoding to JSON and decoding mirrors the traditional way of capturing calls.
+	// Probably could be simplified now...
+	logMessage := struct {
+		Method   string
+		Request  interface{}
+		Response interface{}
+		// Error as string, for backward compatibility.
+		// "" on no error.
+		Error string
+		// Full error dump, to be able to parse out full gRPC error code and message separately in a test.
+		FullError error
+	}{
+		Method:    method,
+		Request:   request,
+		Response:  reply,
+		FullError: err,
+	}

+	if err != nil {
+		logMessage.Error = err.Error()
+	}
+
+	msg, _ := json.Marshal(logMessage)
+	call := MockCSICall{
+		json: string(msg),
+	}
+	json.Unmarshal(msg, &call)
+
+	// Trim gRPC service name, i.e. "/csi.v1.Identity/Probe" -> "Probe"
+	methodParts := strings.Split(call.Method, "/")
+	call.Method = methodParts[len(methodParts)-1]
+
+	c.Add(call)
 }

 var _ storageframework.TestDriver = &mockCSIDriver{}
@@ -260,7 +385,7 @@ var _ storageframework.DynamicPVTestDriver = &mockCSIDriver{}
 var _ storageframework.SnapshottableTestDriver = &mockCSIDriver{}

 // InitMockCSIDriver returns a mockCSIDriver that implements TestDriver interface
-func InitMockCSIDriver(driverOpts CSIMockDriverOpts) storageframework.TestDriver {
+func InitMockCSIDriver(driverOpts CSIMockDriverOpts) MockCSITestDriver {
 	driverManifests := []string{
 		"test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml",
 		"test/e2e/testing-manifests/storage-csi/external-provisioner/rbac.yaml",
@@ -268,7 +393,11 @@ func InitMockCSIDriver(driverOpts CSIMockDriverOpts) storageframework.TestDriver
 		"test/e2e/testing-manifests/storage-csi/external-snapshotter/rbac.yaml",
 		"test/e2e/testing-manifests/storage-csi/mock/csi-mock-rbac.yaml",
 		"test/e2e/testing-manifests/storage-csi/mock/csi-storageclass.yaml",
-		"test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver.yaml",
+	}
+	if driverOpts.Embedded {
+		driverManifests = append(driverManifests, "test/e2e/testing-manifests/storage-csi/mock/csi-mock-proxy.yaml")
+	} else {
+		driverManifests = append(driverManifests, "test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver.yaml")
 	}

 	if driverOpts.RegisterDriver {
@@ -309,10 +438,11 @@ func InitMockCSIDriver(driverOpts CSIMockDriverOpts) storageframework.TestDriver
 		attachable:          !driverOpts.DisableAttach,
 		attachLimit:         driverOpts.AttachLimit,
 		enableNodeExpansion: driverOpts.EnableNodeExpansion,
-		javascriptHooks:     driverOpts.JavascriptHooks,
 		tokenRequests:       driverOpts.TokenRequests,
 		requiresRepublish:   driverOpts.RequiresRepublish,
 		fsGroupPolicy:       driverOpts.FSGroupPolicy,
+		embedded:            driverOpts.Embedded,
+		hooks:               driverOpts.Hooks,
 	}
 }

@@ -340,62 +470,108 @@ func (m *mockCSIDriver) GetSnapshotClass(config *storageframework.PerTestConfig,
 }

 func (m *mockCSIDriver) PrepareTest(f *framework.Framework) (*storageframework.PerTestConfig, func()) {
+	m.clientSet = f.ClientSet
+
 	// Create secondary namespace which will be used for creating driver
-	driverNamespace := utils.CreateDriverNamespace(f)
-	driverns := driverNamespace.Name
+	m.driverNamespace = utils.CreateDriverNamespace(f)
+	driverns := m.driverNamespace.Name
 	testns := f.Namespace.Name

-	ginkgo.By("deploying csi mock driver")
-	cancelLogging := utils.StartPodLogs(f, driverNamespace)
+	if m.embedded {
+		ginkgo.By("deploying csi mock proxy")
+	} else {
+		ginkgo.By("deploying csi mock driver")
+	}
+	cancelLogging := utils.StartPodLogs(f, m.driverNamespace)
 	cs := f.ClientSet

 	// pods should be scheduled on the node
 	node, err := e2enode.GetRandomReadySchedulableNode(cs)
 	framework.ExpectNoError(err)
+
+	embeddedCleanup := func() {}
+	containerArgs := []string{}
+	if m.embedded {
+		// Run embedded CSI driver.
+		//
+		// For now we start exactly one instance which implements controller,
+		// node and identity services. It matches with the one pod that we run
+		// inside the cluster. The name and namespace of that one is deterministic,
+		// so we know what to connect to.
+		//
+		// Long-term we could also deploy one central controller and multiple
+		// node instances, with knowledge about provisioned volumes shared in
+		// this process.
+		podname := "csi-mockplugin-0"
+		containername := "mock"
+		ctx, cancel := context.WithCancel(context.Background())
+		serviceConfig := mockservice.Config{
+			DisableAttach:         !m.attachable,
+			DriverName:            "csi-mock-" + f.UniqueName,
+			AttachLimit:           int64(m.attachLimit),
+			NodeExpansionRequired: m.enableNodeExpansion,
+			EnableTopology:        m.enableTopology,
+			IO: proxy.PodDirIO{
+				F:             f,
+				Namespace:     m.driverNamespace.Name,
+				PodName:       podname,
+				ContainerName: "busybox",
+			},
+		}
+		s := mockservice.New(serviceConfig)
+		servers := &mockdriver.CSIDriverServers{
+			Controller: s,
+			Identity:   s,
+			Node:       s,
+		}
+		m.embeddedCSIDriver = mockdriver.NewCSIDriver(servers)
+
+		l, err := proxy.Listen(ctx, f.ClientSet, f.ClientConfig(),
+			proxy.Addr{
+				Namespace:     m.driverNamespace.Name,
+				PodName:       podname,
+				ContainerName: containername,
+				Port:          9000,
+			},
+		)
+		framework.ExpectNoError(err, "start connecting to proxy pod")
+		err = m.embeddedCSIDriver.Start(l, m.interceptGRPC)
+		framework.ExpectNoError(err, "start mock driver")
+
+		embeddedCleanup = func() {
+			// Kill all goroutines and delete resources of the mock driver.
+			m.embeddedCSIDriver.Stop()
+			l.Close()
+			cancel()
+		}
+	} else {
+		// When using the mock driver inside the cluster it has to be reconfigured
+		// via command line parameters.
+		containerArgs = append(containerArgs, "--name=csi-mock-"+f.UniqueName)
+
+		if !m.attachable {
+			containerArgs = append(containerArgs, "--disable-attach")
+		}
+
+		if m.enableTopology {
+			containerArgs = append(containerArgs, "--enable-topology")
+		}
+
+		if m.attachLimit > 0 {
+			containerArgs = append(containerArgs, "--attach-limit", strconv.Itoa(m.attachLimit))
+		}
+
+		if m.enableNodeExpansion {
+			containerArgs = append(containerArgs, "--node-expand-required=true")
+		}
+	}
+
 	config := &storageframework.PerTestConfig{
 		Driver:              m,
 		Prefix:              "mock",
 		Framework:           f,
 		ClientNodeSelection: e2epod.NodeSelection{Name: node.Name},
-		DriverNamespace:     driverNamespace,
-	}
-
-	containerArgs := []string{"--name=csi-mock-" + f.UniqueName}
-	if !m.attachable {
-		containerArgs = append(containerArgs, "--disable-attach")
-	}
-
-	if m.enableTopology {
-		containerArgs = append(containerArgs, "--enable-topology")
-	}
-
-	if m.attachLimit > 0 {
-		containerArgs = append(containerArgs, "--attach-limit", strconv.Itoa(m.attachLimit))
-	}
-
-	if m.enableNodeExpansion {
-		containerArgs = append(containerArgs, "--node-expand-required=true")
-	}
-
-	// Create a config map with javascript hooks. Create it even when javascriptHooks
-	// are empty, so we can unconditionally add it to the mock pod.
-	const hooksConfigMapName = "mock-driver-hooks"
-	hooksYaml, err := yaml.Marshal(m.javascriptHooks)
-	framework.ExpectNoError(err)
-	hooks := &v1.ConfigMap{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: hooksConfigMapName,
-		},
-		Data: map[string]string{
-			"hooks.yaml": string(hooksYaml),
-		},
-	}
-
-	_, err = f.ClientSet.CoreV1().ConfigMaps(driverns).Create(context.TODO(), hooks, metav1.CreateOptions{})
-	framework.ExpectNoError(err)
-
-	if len(m.javascriptHooks) > 0 {
-		containerArgs = append(containerArgs, "--hooks-file=/etc/hooks/hooks.yaml")
+		DriverNamespace:     m.driverNamespace,
 	}

 	o := utils.PatchCSIOptions{
@@ -416,7 +592,7 @@ func (m *mockCSIDriver) PrepareTest(f *framework.Framework) (*storageframework.P
 		RequiresRepublish: m.requiresRepublish,
 		FSGroupPolicy:     m.fsGroupPolicy,
 	}
-	cleanup, err := utils.CreateFromManifests(f, driverNamespace, func(item interface{}) error {
+	cleanup, err := utils.CreateFromManifests(f, m.driverNamespace, func(item interface{}) error {
 		return utils.PatchCSIDeployment(f, o, item)
 	}, m.manifests...)

@@ -424,7 +600,7 @@ func (m *mockCSIDriver) PrepareTest(f *framework.Framework) (*storageframework.P
 		framework.Failf("deploying csi mock driver: %v", err)
 	}

-	cleanupFunc := generateDriverCleanupFunc(
+	driverCleanupFunc := generateDriverCleanupFunc(
 		f,
 		"mock",
 		testns,
@@ -432,9 +608,83 @@ func (m *mockCSIDriver) PrepareTest(f *framework.Framework) (*storageframework.P
 		cleanup,
 		cancelLogging)

+	cleanupFunc := func() {
+		embeddedCleanup()
+		driverCleanupFunc()
+	}
+
 	return config, cleanupFunc
 }

+func (m *mockCSIDriver) interceptGRPC(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {
+	defer func() {
+		// Always log the call and its final result,
+		// regardless whether the result was from the real
+		// implementation or a hook.
+		m.calls.LogGRPC(info.FullMethod, req, resp, err)
+	}()
+
+	if m.hooks.Pre != nil {
+		resp, err = m.hooks.Pre(ctx, info.FullMethod, req)
+		if resp != nil || err != nil {
+			return
+		}
+	}
+	resp, err = handler(ctx, req)
+	if m.hooks.Post != nil {
+		resp, err = m.hooks.Post(ctx, info.FullMethod, req, resp, err)
+	}
+	return
+}
+
+func (m *mockCSIDriver) GetCalls() ([]MockCSICall, error) {
+	if m.embedded {
+		return m.calls.Get(), nil
+	}
+
+	if m.driverNamespace == nil {
+		return nil, errors.New("PrepareTest not called yet")
+	}
+
+	// Name of the CSI driver pod (it's in a StatefulSet with a stable name)
+	driverPodName := "csi-mockplugin-0"
+	// Name of the CSI driver container
+	driverContainerName := "mock"
+	// Prefix of the mock driver grpc log
+	grpcCallPrefix := "gRPCCall:"
+
+	// Load logs of driver pod
+	log, err := e2epod.GetPodLogs(m.clientSet, m.driverNamespace.Name, driverPodName, driverContainerName)
+	if err != nil {
+		return nil, fmt.Errorf("could not load CSI driver logs: %s", err)
+	}
+
+	logLines := strings.Split(log, "\n")
+	var calls []MockCSICall
+	for _, line := range logLines {
+		index := strings.Index(line, grpcCallPrefix)
+		if index == -1 {
+			continue
+		}
+		line = line[index+len(grpcCallPrefix):]
+		call := MockCSICall{
+			json: string(line),
+		}
+		err := json.Unmarshal([]byte(line), &call)
+		if err != nil {
+			framework.Logf("Could not parse CSI driver log line %q: %s", line, err)
+			continue
+		}
+
+		// Trim gRPC service name, i.e. "/csi.v1.Identity/Probe" -> "Probe"
"/csi.v1.Identity/Probe" -> "Probe" + methodParts := strings.Split(call.Method, "/") + call.Method = methodParts[len(methodParts)-1] + + calls = append(calls, call) + } + return calls, nil +} + // gce-pd type gcePDCSIDriver struct { driverInfo storageframework.DriverInfo diff --git a/test/e2e/storage/drivers/proxy/io.go b/test/e2e/storage/drivers/proxy/io.go new file mode 100644 index 00000000000..74fb6e1fa48 --- /dev/null +++ b/test/e2e/storage/drivers/proxy/io.go @@ -0,0 +1,82 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package proxy + +import ( + "fmt" + + "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/storage/drivers/csi-test/mock/service" +) + +type PodDirIO struct { + F *framework.Framework + Namespace string + PodName string + ContainerName string +} + +var _ service.DirIO = PodDirIO{} + +func (p PodDirIO) DirExists(path string) (bool, error) { + stdout, stderr, err := p.execute([]string{ + "sh", + "-c", + fmt.Sprintf("if ! [ -e '%s' ]; then echo notexist; elif [ -d '%s' ]; then echo dir; else echo nodir; fi", path, path), + }) + if err != nil { + return false, fmt.Errorf("error executing dir test commands: stderr=%q, %v", stderr, err) + } + switch stdout { + case "notexist": + return false, nil + case "nodir": + return false, fmt.Errorf("%s: not a directory", path) + case "dir": + return true, nil + default: + return false, fmt.Errorf("unexpected output from dir test commands: %q", stdout) + } +} + +func (p PodDirIO) Mkdir(path string) error { + _, stderr, err := p.execute([]string{"mkdir", path}) + if err != nil { + return fmt.Errorf("mkdir %q: stderr=%q, %v", path, stderr, err) + } + return nil +} + +func (p PodDirIO) RemoveAll(path string) error { + _, stderr, err := p.execute([]string{"rm", "-rf", path}) + if err != nil { + return fmt.Errorf("rm -rf %q: stderr=%q, %v", path, stderr, err) + } + return nil +} + +func (p PodDirIO) execute(command []string) (string, string, error) { + return p.F.ExecWithOptions(framework.ExecOptions{ + Command: command, + Namespace: p.Namespace, + PodName: p.PodName, + ContainerName: p.ContainerName, + CaptureStdout: true, + CaptureStderr: true, + Quiet: true, + }) +} diff --git a/test/e2e/storage/drivers/proxy/portproxy.go b/test/e2e/storage/drivers/proxy/portproxy.go new file mode 100644 index 00000000000..9050cac075e --- /dev/null +++ b/test/e2e/storage/drivers/proxy/portproxy.go @@ -0,0 +1,344 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package proxy
+
+import (
+	"bufio"
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/kubernetes/scheme"
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/tools/portforward"
+	"k8s.io/client-go/transport/spdy"
+	"k8s.io/klog/v2"
+)
+
+// Maximum number of forwarded connections. In practice we don't
+// need more than one per sidecar and kubelet. Keeping this reasonably
+// small ensures that we don't establish connections through the apiserver
+// and the remote kernel which then aren't needed.
+const maxConcurrentConnections = 10
+
+// Listen creates a listener which returns new connections whenever someone connects
+// to a socat or mock driver proxy instance running inside the given pod.
+//
+// socat must be started with "<listen>,fork TCP-LISTEN:<port>,reuseport"
+// for this to work. "<listen>" can be anything that accepts connections,
+// for example "UNIX-LISTEN:/csi/csi.sock". In this mode, socat will
+// accept exactly one connection on the given port for each connection
+// that socat itself accepted.
+//
+// Listening stops when the context is done or Close() is called.
+func Listen(ctx context.Context, clientset kubernetes.Interface, restConfig *rest.Config, addr Addr) (net.Listener, error) {
+	// We connect through port forwarding. Strictly
+	// speaking this is overkill because we don't need a local
+	// port. But this way we can reuse client-go/tools/portforward
+	// instead of having to replicate handleConnection
+	// in our own code.
+	restClient := clientset.CoreV1().RESTClient()
+	if restConfig.GroupVersion == nil {
+		restConfig.GroupVersion = &schema.GroupVersion{}
+	}
+	if restConfig.NegotiatedSerializer == nil {
+		restConfig.NegotiatedSerializer = scheme.Codecs
+	}
+
+	// The setup code around the actual portforward is from
+	// https://github.com/kubernetes/kubernetes/blob/c652ffbe4a29143623a1aaec39f745575f7e43ad/staging/src/k8s.io/kubectl/pkg/cmd/portforward/portforward.go
+	req := restClient.Post().
+		Resource("pods").
+		Namespace(addr.Namespace).
+		Name(addr.PodName).
+		SubResource("portforward")
+	transport, upgrader, err := spdy.RoundTripperFor(restConfig)
+	if err != nil {
+		return nil, fmt.Errorf("create round tripper: %v", err)
+	}
+	dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, "POST", req.URL())
+
+	prefix := fmt.Sprintf("port forwarding for %s", addr)
+	ctx, cancel := context.WithCancel(ctx)
+	l := &listener{
+		connections: make(chan *connection),
+		ctx:         ctx,
+		cancel:      cancel,
+		addr:        addr,
+	}
+
+	// Port forwarding is allowed to fail and will be restarted when it does.
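+	// prepareForwarding below only constructs a port forwarder; runForwarding
+	// drives it and polls the forwarded port for new connections until the
+	// context is done or forwarding fails, after which the goroutine at the
+	// end of Listen starts the sequence over.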
+	prepareForwarding := func() (*portforward.PortForwarder, error) {
+		pod, err := clientset.CoreV1().Pods(addr.Namespace).Get(ctx, addr.PodName, metav1.GetOptions{})
+		if err != nil {
+			return nil, err
+		}
+		for i, status := range pod.Status.ContainerStatuses {
+			if pod.Spec.Containers[i].Name == addr.ContainerName &&
+				status.State.Running == nil {
+				return nil, fmt.Errorf("container %q is not running", addr.ContainerName)
+			}
+		}
+		readyChannel := make(chan struct{})
+		fw, err := portforward.New(dialer,
+			[]string{fmt.Sprintf("0:%d", addr.Port)},
+			ctx.Done(),
+			readyChannel,
+			klogWriter(false, prefix),
+			klogWriter(true, prefix))
+		if err != nil {
+			return nil, err
+		}
+		return fw, nil
+	}
+
+	var connectionsCreated, connectionsClosed int32
+
+	runForwarding := func(fw *portforward.PortForwarder) {
+		defer fw.Close()
+		klog.V(5).Infof("%s: starting connection polling", prefix)
+		defer klog.V(5).Infof("%s: connection polling ended", prefix)
+
+		failed := make(chan struct{})
+		go func() {
+			defer close(failed)
+			klog.V(5).Infof("%s: starting port forwarding", prefix)
+			defer klog.V(5).Infof("%s: port forwarding ended", prefix)
+
+			err := fw.ForwardPorts()
+			if err != nil {
+				if ctx.Err() == nil {
+					// Something failed unexpectedly.
+					klog.Errorf("%s: %v", prefix, err)
+				} else {
+					// Context is done, log error anyway.
+					klog.V(5).Infof("%s: %v", prefix, err)
+				}
+			}
+		}()
+
+		// Wait for port forwarding to be ready.
+		select {
+		case <-ctx.Done():
+			return
+		case <-failed:
+			// The reason was logged above.
+			return
+		case <-fw.Ready:
+			// Proceed...
+		}
+
+		// This delay determines how quickly we notice when someone has
+		// connected inside the cluster. With socat, we cannot make this too small
+		// because otherwise we get many rejected connections. With the mock
+		// driver as proxy that doesn't happen as long as we don't
+		// ask for too many concurrent connections because the mock driver
+		// keeps the listening port open at all times and the Linux
+		// kernel automatically accepts our connection requests.
+		tryConnect := time.NewTicker(100 * time.Millisecond)
+		defer tryConnect.Stop()
+		for {
+			select {
+			case <-ctx.Done():
+				return
+			case <-failed:
+				// The reason was logged above.
+				return
+			case <-tryConnect.C:
+				currentClosed := atomic.LoadInt32(&connectionsClosed)
+				openConnections := connectionsCreated - currentClosed
+				if openConnections >= maxConcurrentConnections {
+					break
+				}
+
+				// Check whether we can establish a connection through the
+				// forwarded port.
+				ports, err := fw.GetPorts()
+				if err != nil {
+					// We checked for "port forwarding ready" above, so this
+					// shouldn't happen.
+					klog.Errorf("%s: no forwarded ports: %v", prefix, err)
+					return
+				}
+
+				// We don't want to be blocked for too long because we need to check
+				// for a port forwarding failure occasionally.
+				timeout := 10 * time.Second
+				deadline, ok := ctx.Deadline()
+				if ok {
+					untilDeadline := deadline.Sub(time.Now())
+					if untilDeadline < timeout {
+						timeout = untilDeadline
+					}
+				}
+
+				klog.V(5).Infof("%s: trying to create a new connection #%d, %d open", prefix, connectionsCreated, openConnections)
+				c, err := net.DialTimeout("tcp", fmt.Sprintf("localhost:%d", ports[0].Local), timeout)
+				if err != nil {
+					klog.V(5).Infof("%s: no connection: %v", prefix, err)
+					break
+				}
+				// Make the connection available to Accept below.
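+				// listener.Accept blocks reading from l.connections, so this
+				// send hands the freshly dialed TCP connection over to the
+				// gRPC server that is serving on this listener.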
+ klog.V(5).Infof("%s: created a new connection #%d", prefix, connectionsCreated) + l.connections <- &connection{ + Conn: c, + addr: addr, + counter: connectionsCreated, + closed: &connectionsClosed, + } + connectionsCreated++ + } + } + } + + // Portforwarding and polling for connections run in the background. + go func() { + for { + fw, err := prepareForwarding() + if err == nil { + runForwarding(fw) + } else { + if apierrors.IsNotFound(err) { + // This is normal, the pod isn't running yet. Log with lower severity. + klog.V(5).Infof("prepare forwarding %s: %v", addr, err) + } else { + klog.Errorf("prepare forwarding %s: %v", addr, err) + } + } + + select { + case <-ctx.Done(): + return + // Sleep a bit before restarting. This is + // where we potentially wait for the pod to + // start. + case <-time.After(1 * time.Second): + } + } + }() + + return l, nil +} + +// Addr contains all relevant parameters for a certain port in a pod. +// The container must be running before connections are attempted. +type Addr struct { + Namespace, PodName, ContainerName string + Port int +} + +var _ net.Addr = Addr{} + +func (a Addr) Network() string { + return "port-forwarding" +} + +func (a Addr) String() string { + return fmt.Sprintf("%s/%s:%d", a.Namespace, a.PodName, a.Port) +} + +type listener struct { + addr Addr + connections chan *connection + ctx context.Context + cancel func() +} + +var _ net.Listener = &listener{} + +func (l *listener) Close() error { + klog.V(5).Infof("forward listener for %s: closing", l.addr) + l.cancel() + return nil +} + +func (l *listener) Accept() (net.Conn, error) { + select { + case <-l.ctx.Done(): + return nil, errors.New("listening was stopped") + case c := <-l.connections: + klog.V(5).Infof("forward listener for %s: got a new connection #%d", l.addr, c.counter) + return c, nil + } +} + +type connection struct { + net.Conn + addr Addr + counter int32 + closed *int32 + mutex sync.Mutex +} + +func (c *connection) Read(b []byte) (int, error) { + n, err := c.Conn.Read(b) + if errors.Is(err, io.EOF) { + klog.V(5).Infof("forward connection #%d for %s: remote side closed the stream", c.counter, c.addr) + } + return n, err +} + +func (c *connection) Write(b []byte) (int, error) { + n, err := c.Conn.Write(b) + if errors.Is(err, io.EOF) { + klog.V(5).Infof("forward connection #%d for %s: remote side closed the stream", c.counter, c.addr) + } + return n, err +} + +func (c *connection) Close() error { + c.mutex.Lock() + defer c.mutex.Unlock() + if c.closed != nil { + // Do the logging and book-keeping only once. The function itself may be called more than once. 
+ klog.V(5).Infof("forward connection #%d for %s: closing our side", c.counter, c.addr) + atomic.AddInt32(c.closed, 1) + c.closed = nil + } + return c.Conn.Close() +} + +func (l *listener) Addr() net.Addr { + return l.addr +} + +func klogWriter(isError bool, prefix string) io.Writer { + reader, writer := io.Pipe() + go func() { + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + text := scanner.Text() + if isError { + klog.Errorf("%s: %s", prefix, text) + } else { + klog.V(5).Infof("%s: %s", prefix, text) + } + } + }() + + return writer +} diff --git a/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver.yaml b/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver.yaml index 7ec5f427b3e..fe3f062afb4 100644 --- a/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver.yaml +++ b/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver.yaml @@ -70,8 +70,6 @@ spec: volumeMounts: - mountPath: /csi name: socket-dir - - mountPath: /etc/hooks - name: hooks - mountPath: /var/lib/kubelet/pods name: kubelet-pods-dir - mountPath: /var/lib/kubelet/plugins/kubernetes.io/csi @@ -95,6 +93,3 @@ spec: path: /var/lib/kubelet/plugins_registry type: Directory name: registration-dir - - name: hooks - configMap: - name: mock-driver-hooks diff --git a/test/e2e/testing-manifests/storage-csi/mock/csi-mock-proxy.yaml b/test/e2e/testing-manifests/storage-csi/mock/csi-mock-proxy.yaml new file mode 100644 index 00000000000..7892392931e --- /dev/null +++ b/test/e2e/testing-manifests/storage-csi/mock/csi-mock-proxy.yaml @@ -0,0 +1,105 @@ +kind: StatefulSet +apiVersion: apps/v1 +metadata: + name: csi-mockplugin +spec: + selector: + matchLabels: + app: csi-mockplugin + replicas: 1 + template: + metadata: + labels: + app: csi-mockplugin + spec: + serviceAccountName: csi-mock + containers: + - name: csi-provisioner + image: k8s.gcr.io/sig-storage/csi-provisioner:v2.1.0 + args: + - "--csi-address=$(ADDRESS)" + # Topology support is needed for the pod rescheduling test + # ("storage capacity" in csi_mock_volume.go). + - "--feature-gates=Topology=true" + - "-v=5" + - "--timeout=1m" + # Needed for fsGroup support. + - "--default-fstype=ext4" + # We don't need much concurrency and having many gouroutines + # makes klog.Fatal during shutdown very long. + - "--worker-threads=5" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - mountPath: /csi + name: socket-dir + - name: driver-registrar + image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.1.0 + args: + - --v=5 + - --csi-address=/csi/csi.sock + - --kubelet-registration-path=/var/lib/kubelet/plugins/csi-mock/csi.sock + - --timeout=1m + env: + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: /registration + name: registration-dir + - name: mock + image: k8s.gcr.io/sig-storage/mock-driver:v4.1.0 + args: + # -v3 shows when connections get established. Higher log levels print information about + # transferred bytes, but cannot print message content (no gRPC parsing), so this is usually + # not interesting. + - -v=3 + - -proxy-endpoint=tcp://:9000 + env: + - name: CSI_ENDPOINT + value: /csi/csi.sock + ports: + - containerPort: 9000 + name: socat + volumeMounts: + - mountPath: /csi + name: socket-dir + # The busybox container is needed for running shell commands which + # test for directories or create them. It needs additional privileges + # for that. 
+ - name: busybox + image: k8s.gcr.io/busybox + securityContext: + privileged: true + command: + - sleep + - "100000" + volumeMounts: + - mountPath: /var/lib/kubelet/pods + name: kubelet-pods-dir + - mountPath: /var/lib/kubelet/plugins/kubernetes.io/csi + name: kubelet-csi-dir + volumes: + - hostPath: + path: /var/lib/kubelet/plugins/csi-mock + type: DirectoryOrCreate + name: socket-dir + - hostPath: + path: /var/lib/kubelet/pods + type: Directory + # mock driver doesn't make mounts and therefore doesn't need mount propagation. + # mountPropagation: Bidirectional + name: kubelet-pods-dir + - hostPath: + path: /var/lib/kubelet/plugins/kubernetes.io/csi + type: DirectoryOrCreate + name: kubelet-csi-dir + - hostPath: + path: /var/lib/kubelet/plugins_registry + type: Directory + name: registration-dir From d43308e64c24b4cfaed3ad7931c0997dcb5e864a Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Fri, 26 Feb 2021 17:47:00 +0100 Subject: [PATCH 10/14] e2e storage: simpler port forwarding Instead of trying to use the client-go portforward package as-is, it is simpler to copy some code from it and then use the http stream directly. That way we don't need to go through a local listening socket, and error handling and logging become simpler. --- test/e2e/storage/drivers/proxy/portproxy.go | 188 +++++++++++--------- 1 file changed, 99 insertions(+), 89 deletions(-) diff --git a/test/e2e/storage/drivers/proxy/portproxy.go b/test/e2e/storage/drivers/proxy/portproxy.go index 9050cac075e..7f1d604882c 100644 --- a/test/e2e/storage/drivers/proxy/portproxy.go +++ b/test/e2e/storage/drivers/proxy/portproxy.go @@ -17,20 +17,23 @@ limitations under the License. package proxy import ( - "bufio" "context" "errors" "fmt" "io" + "io/ioutil" "net" "net/http" + "strconv" "sync" "sync/atomic" "time" + v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/httpstream" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" @@ -92,7 +95,7 @@ func Listen(ctx context.Context, clientset kubernetes.Interface, restConfig *res } // Port forwarding is allowed to fail and will be restarted when it does.
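+ // With the stream-based approach below, the SPDY connection is dialed once in prepareForwarding and dial() then creates one error/data stream pair per connection attempt.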
- prepareForwarding := func() (*portforward.PortForwarder, error) { + prepareForwarding := func() (*remotePort, error) { pod, err := clientset.CoreV1().Pods(addr.Namespace).Get(ctx, addr.PodName, metav1.GetOptions{}) if err != nil { return nil, err @@ -103,55 +106,24 @@ func Listen(ctx context.Context, clientset kubernetes.Interface, restConfig *res return nil, fmt.Errorf("container %q is not running", addr.ContainerName) } } - readyChannel := make(chan struct{}) - fw, err := portforward.New(dialer, - []string{fmt.Sprintf("0:%d", addr.Port)}, - ctx.Done(), - readyChannel, - klogWriter(false, prefix), - klogWriter(true, prefix)) + + streamConn, _, err := dialer.Dial(portforward.PortForwardProtocolV1Name) if err != nil { - return nil, err + return nil, fmt.Errorf("dialer failed: %v", err) } - return fw, nil + rp := &remotePort{ + streamConn: streamConn, + } + return rp, nil } var connectionsCreated, connectionsClosed int32 - runForwarding := func(fw *portforward.PortForwarder) { - defer fw.Close() + runForwarding := func(rp *remotePort) { + defer rp.Close() klog.V(5).Infof("%s: starting connection polling", prefix) defer klog.V(5).Infof("%s: connection polling ended", prefix) - failed := make(chan struct{}) - go func() { - defer close(failed) - klog.V(5).Infof("%s: starting port forwarding", prefix) - defer klog.V(5).Infof("%s: port forwarding ended", prefix) - - err := fw.ForwardPorts() - if err != nil { - if ctx.Err() == nil { - // Something failed unexpectedly. - klog.Errorf("%s: %v", prefix, err) - } else { - // Context is done, log error anyway. - klog.V(5).Infof("%s: %v", prefix, err) - } - } - }() - - // Wait for port forwarding to be ready. - select { - case <-ctx.Done(): - return - case <-failed: - // The reason was logged above. - return - case <-fw.Ready: - // Proceed... - } - // This delay determines how quickly we notice when someone has // connected inside the cluster. With socat, we cannot make this too small // because otherwise we get many rejected connections. With the mock @@ -165,9 +137,6 @@ func Listen(ctx context.Context, clientset kubernetes.Interface, restConfig *res select { case <-ctx.Done(): return - case <-failed: - // The reason was logged above. - return case <-tryConnect.C: currentClosed := atomic.LoadInt32(&connectionsClosed) openConnections := connectionsCreated - currentClosed @@ -175,29 +144,8 @@ func Listen(ctx context.Context, clientset kubernetes.Interface, restConfig *res break } - // Check whether we can establish a connection through the - // forwarded port. - ports, err := fw.GetPorts() - if err != nil { - // We checked for "port forwarding ready" above, so this - // shouldn't happen. - klog.Errorf("%s: no forwarded ports: %v", prefix, err) - return - } - - // We don't want to be blocked too long because we need to check - // for a port forwarding failure occasionally. - timeout := 10 * time.Second - deadline, ok := ctx.Deadline() - if ok { - untilDeadline := deadline.Sub(time.Now()) - if untilDeadline < timeout { - timeout = untilDeadline - } - } - klog.V(5).Infof("%s: trying to create a new connection #%d, %d open", prefix, connectionsCreated, openConnections) - c, err := net.DialTimeout("tcp", fmt.Sprintf("localhost:%d", ports[0].Local), timeout) + stream, err := rp.dial(ctx, prefix, addr.Port) if err != nil { klog.V(5).Infof("%s: no connection: %v", prefix, err) break @@ -205,7 +153,7 @@ func Listen(ctx context.Context, clientset kubernetes.Interface, restConfig *res // Make the connection available to Accept below.
klog.V(5).Infof("%s: created a new connection #%d", prefix, connectionsCreated) l.connections <- &connection{ - Conn: c, + stream: stream, addr: addr, counter: connectionsCreated, closed: &connectionsClosed, @@ -261,6 +209,63 @@ func (a Addr) String() string { return fmt.Sprintf("%s/%s:%d", a.Namespace, a.PodName, a.Port) } +// remotePort is a stripped down version of client-go/tools/portforward minus +// the local listeners. +type remotePort struct { + streamConn httpstream.Connection + + requestIDLock sync.Mutex + requestID int +} + +func (rp *remotePort) dial(ctx context.Context, prefix string, port int) (httpstream.Stream, error) { + requestID := rp.nextRequestID() + + // create error stream + headers := http.Header{} + headers.Set(v1.StreamType, v1.StreamTypeError) + headers.Set(v1.PortHeader, fmt.Sprintf("%d", port)) + headers.Set(v1.PortForwardRequestIDHeader, strconv.Itoa(requestID)) + + // We're not writing to this stream, just reading an error message from it. + // This happens asynchronously. + errorStream, err := rp.streamConn.CreateStream(headers) + if err != nil { + return nil, fmt.Errorf("error creating error stream: %v", err) + } + errorStream.Close() + go func() { + message, err := ioutil.ReadAll(errorStream) + switch { + case err != nil: + klog.Errorf("%s: error reading from error stream: %v", prefix, err) + case len(message) > 0: + klog.Errorf("%s: an error occurred connecting to the remote port: %v", prefix, string(message)) + } + }() + + // create data stream + headers.Set(v1.StreamType, v1.StreamTypeData) + dataStream, err := rp.streamConn.CreateStream(headers) + if err != nil { + return nil, fmt.Errorf("error creating data stream: %v", err) + } + + return dataStream, nil +} + +func (rp *remotePort) Close() { + rp.streamConn.Close() +} + +func (rp *remotePort) nextRequestID() int { + rp.requestIDLock.Lock() + defer rp.requestIDLock.Unlock() + id := rp.requestID + rp.requestID++ + return id +} + type listener struct { addr Addr connections chan *connection @@ -287,15 +292,37 @@ func (l *listener) Accept() (net.Conn, error) { } type connection struct { - net.Conn + stream httpstream.Stream addr Addr counter int32 closed *int32 mutex sync.Mutex } +var _ net.Conn = &connection{} + +func (c *connection) LocalAddr() net.Addr { + return c.addr +} + +func (c *connection) RemoteAddr() net.Addr { + return c.addr +} + +func (c *connection) SetDeadline(t time.Time) error { + return nil +} + +func (c *connection) SetReadDeadline(t time.Time) error { + return nil +} + +func (c *connection) SetWriteDeadline(t time.Time) error { + return nil +} + func (c *connection) Read(b []byte) (int, error) { - n, err := c.Conn.Read(b) + n, err := c.stream.Read(b) if errors.Is(err, io.EOF) { klog.V(5).Infof("forward connection #%d for %s: remote side closed the stream", c.counter, c.addr) } @@ -303,7 +330,7 @@ func (c *connection) Read(b []byte) (int, error) { } func (c *connection) Write(b []byte) (int, error) { - n, err := c.Conn.Write(b) + n, err := c.stream.Write(b) if errors.Is(err, io.EOF) { klog.V(5).Infof("forward connection #%d for %s: remote side closed the stream", c.counter, c.addr) } @@ -319,26 +346,9 @@ func (c *connection) Close() error { atomic.AddInt32(c.closed, 1) c.closed = nil } - return c.Conn.Close() + return c.stream.Close() } func (l *listener) Addr() net.Addr { return l.addr } - -func klogWriter(isError bool, prefix string) io.Writer { - reader, writer := io.Pipe() - go func() { - scanner := bufio.NewScanner(reader) - for scanner.Scan() { - text := scanner.Text() - if 
isError { - klog.Errorf("%s: %s", prefix, text) - } else { - klog.V(5).Infof("%s: %s", prefix, text) - } - } - }() - - return writer -} From baecaa82091a899e747e573b5d8c1457168b7099 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Fri, 26 Feb 2021 20:46:53 +0100 Subject: [PATCH 11/14] e2e test: log gRPC calls in embedded CSI driver It is useful to see all calls as they occur. The output format is the more readable JSON representation. --- test/e2e/storage/drivers/csi.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/test/e2e/storage/drivers/csi.go b/test/e2e/storage/drivers/csi.go index 17c61ababa1..d82e50025d9 100644 --- a/test/e2e/storage/drivers/csi.go +++ b/test/e2e/storage/drivers/csi.go @@ -56,6 +56,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes" + "k8s.io/klog/v2" "k8s.io/kubernetes/test/e2e/framework" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" @@ -75,6 +76,9 @@ const ( GCEPDCSIDriverName = "pd.csi.storage.gke.io" // GCEPDCSIZoneTopologyKey is the key of GCE Persistent Disk CSI zone topology GCEPDCSIZoneTopologyKey = "topology.gke.io/zone" + + // Prefix of the mock driver grpc log + grpcCallPrefix = "gRPCCall:" ) // hostpathCSI @@ -373,6 +377,8 @@ func (c *MockCSICalls) LogGRPC(method string, request, reply interface{}, err er } json.Unmarshal(msg, &call) + klog.Infof("%s %s", grpcCallPrefix, string(msg)) + // Trim gRPC service name, i.e. "/csi.v1.Identity/Probe" -> "Probe" methodParts := strings.Split(call.Method, "/") call.Method = methodParts[len(methodParts)-1] @@ -650,8 +656,6 @@ func (m *mockCSIDriver) GetCalls() ([]MockCSICall, error) { driverPodName := "csi-mockplugin-0" // Name of CSI driver container name driverContainerName := "mock" - // Prefix of the mock driver grpc log - grpcCallPrefix := "gRPCCall:" // Load logs of driver pod log, err := e2epod.GetPodLogs(m.clientSet, m.driverNamespace.Name, driverPodName, driverContainerName) From 5089af1f23c7c7de357b1a46429c9424d9a0aa2e Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Fri, 26 Feb 2021 20:48:02 +0100 Subject: [PATCH 12/14] e2e test: relax CSI call expectations NodeUnstageVolume and DeleteVolume are not necessarily ordered and in practice, DeleteVolume was indeed encountered first after changing the timing by embedding the CSI driver: Kubernetes e2e suite: [sig-storage] CSI mock volume storage capacity exhausted, immediate binding expand_less 7m34s test/e2e/storage/csi_mock_volume.go:953 Feb 26 18:34:04.037: while waiting for all CSI calls Unexpected error: <*errors.errorString | 0xc00322b9c0>: { s: "error waiting for expected CSI calls: Unexpected CSI call 3: expected NodeUnstageVolume (0), got DeleteVolume (0)", } error waiting for expected CSI calls: Unexpected CSI call 3: expected NodeUnstageVolume (0), got DeleteVolume (0) occurred test/e2e/storage/csi_mock_volume.go:1045 --- test/e2e/storage/csi_mock_volume.go | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/test/e2e/storage/csi_mock_volume.go b/test/e2e/storage/csi_mock_volume.go index 7f38af408ec..b785f5d416b 100644 --- a/test/e2e/storage/csi_mock_volume.go +++ b/test/e2e/storage/csi_mock_volume.go @@ -927,9 +927,9 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { createVolume := "CreateVolume" deleteVolume := "DeleteVolume" // publishVolume := "NodePublishVolume" - unpublishVolume := "NodeUnpublishVolume" + // unpublishVolume := "NodeUnpublishVolume" // 
stageVolume := "NodeStageVolume" - unstageVolume := "NodeUnstageVolume" + // unstageVolume := "NodeUnstageVolume" // These calls are assumed to occur in this order for // each test run. NodeStageVolume and @@ -939,12 +939,17 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { // (https://github.com/kubernetes/kubernetes/issues/90250). // Therefore they are temporarily commented out until // that issue is resolved. + // + // NodeUnpublishVolume and NodeUnstageVolume are racing + // with DeleteVolume, so we cannot assume a deterministic + // order and have to ignore them + // (https://github.com/kubernetes/kubernetes/issues/94108). deterministicCalls := []string{ createVolume, // stageVolume, // publishVolume, - unpublishVolume, - unstageVolume, + // unpublishVolume, + // unstageVolume, deleteVolume, } From 06ffdbc784a0795121e893deefc22fa9d1716751 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Sat, 27 Feb 2021 08:45:28 +0100 Subject: [PATCH 13/14] e2e test: use one connection per stream Sharing the same connection for multiple streams should have worked, but ran into unexpected timeouts: I0227 08:07:49.754263 80029 portproxy.go:109] container "mock" in pod csi-mock-volumes-4037-2061/csi-mockplugin-0 is running E0227 08:07:49.779359 80029 portproxy.go:178] prepare forwarding csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: dialer failed: unable to upgrade connection: pod not found ("csi-mockplugin-0_csi-mock-volumes-4037-2061") I0227 08:07:50.782705 80029 portproxy.go:109] container "mock" in pod csi-mock-volumes-4037-2061/csi-mockplugin-0 is running I0227 08:07:50.809326 80029 portproxy.go:125] port forwarding for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: starting connection polling I0227 08:07:50.909544 80029 portproxy.go:148] port forwarding for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: trying to create a new connection #0, 0 open I0227 08:07:50.912436 80029 portproxy.go:155] port forwarding for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: created a new connection #0 I0227 08:07:50.912503 80029 portproxy.go:286] forward listener for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: got a new connection #0 I0227 08:07:50.913161 80029 portproxy.go:322] forward connection #0 for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: remote side closed the stream E0227 08:07:50.913324 80029 portproxy.go:242] port forwarding for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: an error occurred connecting to the remote port: error forwarding port 9000 to pod 66662ea1ab30b4193dac0102c49be840971d337c802cc0c8bbc074214522bd13, uid : failed to execute portforward in network namespace "/var/run/netns/cni-c15e4e36-dad9-8316-c301-33af9dad5717": failed to dial 9000: dial tcp4 127.0.0.1:9000: connect: connection refused I0227 08:07:50.913371 80029 portproxy.go:340] forward connection #0 for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: closing our side W0227 08:07:50.913487 80029 server.go:669] grpc: Server.Serve failed to create ServerTransport: connection error: desc = "transport: http2Server.HandleStreams failed to receive the preface from client: EOF" I0227 08:07:51.009519 80029 portproxy.go:148] port forwarding for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: trying to create a new connection #1, 0 open I0227 08:07:51.011912 80029 portproxy.go:155] port forwarding for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: created a new connection #1 I0227 08:07:51.011973 80029 portproxy.go:286] forward listener for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: got a 
new connection #1 I0227 08:07:51.013677 80029 portproxy.go:322] forward connection #1 for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: remote side closed the stream I0227 08:07:51.013720 80029 portproxy.go:340] forward connection #1 for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: closing our side W0227 08:07:51.013794 80029 server.go:669] grpc: Server.Serve failed to create ServerTransport: connection error: desc = "transport: http2Server.HandleStreams failed to receive the preface from client: EOF" E0227 08:07:51.017026 80029 portproxy.go:242] port forwarding for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: an error occurred connecting to the remote port: error forwarding port 9000 to pod 66662ea1ab30b4193dac0102c49be840971d337c802cc0c8bbc074214522bd13, uid : failed to execute portforward in network namespace "/var/run/netns/cni-c15e4e36-dad9-8316-c301-33af9dad5717": failed to dial 9000: dial tcp4 127.0.0.1:9000: connect: connection refused I0227 08:07:51.109515 80029 portproxy.go:148] port forwarding for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: trying to create a new connection #2, 0 open I0227 08:07:51.111479 80029 portproxy.go:155] port forwarding for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: created a new connection #2 I0227 08:07:51.111519 80029 portproxy.go:286] forward listener for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: got a new connection #2 I0227 08:07:51.209519 80029 portproxy.go:148] port forwarding for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: trying to create a new connection #3, 1 open I0227 08:07:51.766305 80029 csi.go:377] gRPC call: {"Method":"/csi.v1.Identity/Probe","Request":{},"Response":{"ready":{"value":true}},"Error":"","FullError":null} I0227 08:07:51.768304 80029 csi.go:377] gRPC call: {"Method":"/csi.v1.Identity/GetPluginInfo","Request":{},"Response":{"name":"csi-mock-csi-mock-volumes-4037","vendor_version":"0.3.0","manifest":{"url":"https://k8s.io/kubernetes/test/e2e/storage/drivers/csi-test/mock"}},"Error":"","FullError":null} I0227 08:07:51.770494 80029 csi.go:377] gRPC call: {"Method":"/csi.v1.Identity/GetPluginCapabilities","Request":{},"Response":{"capabilities":[{"Type":{"Service":{"type":1}}},{"Type":{"VolumeExpansion":{"type":1}}},{"Type":{"Service":{"type":2}}}]},"Error":"","FullError":null} I0227 08:07:51.772899 80029 csi.go:377] gRPC call: {"Method":"/csi.v1.Controller/ControllerGetCapabilities","Request":{},"Response":{"capabilities":[{"Type":{"Rpc":{"type":1}}},{"Type":{"Rpc":{"type":3}}},{"Type":{"Rpc":{"type":10}}},{"Type":{"Rpc":{"type":4}}},{"Type":{"Rpc":{"type":6}}},{"Type":{"Rpc":{"type":5}}},{"Type":{"Rpc":{"type":8}}},{"Type":{"Rpc":{"type":7}}},{"Type":{"Rpc":{"type":12}}},{"Type":{"Rpc":{"type":11}}},{"Type":{"Rpc":{"type":9}}}]},"Error":"","FullError":null} I0227 08:08:21.209901 80029 portproxy.go:151] port forwarding for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: no connection: error creating error stream: Timeout occurred I0227 08:08:21.209980 80029 portproxy.go:148] port forwarding for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: trying to create a new connection #3, 1 open I0227 08:08:51.211522 80029 portproxy.go:151] port forwarding for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: no connection: error creating data stream: Timeout occurred I0227 08:08:51.211566 80029 portproxy.go:148] port forwarding for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: trying to create a new connection #3, 1 open I0227 08:08:51.213451 80029 portproxy.go:155] port forwarding for 
csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: created a new connection #3 I0227 08:08:51.213498 80029 portproxy.go:286] forward listener for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: got a new connection #3 I0227 08:08:51.309540 80029 portproxy.go:148] port forwarding for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: trying to create a new connection #4, 2 open I0227 08:08:52.215358 80029 portproxy.go:322] forward connection #3 for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: remote side closed the stream I0227 08:08:52.215475 80029 portproxy.go:340] forward connection #3 for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: closing our side I0227 08:09:21.310003 80029 portproxy.go:151] port forwarding for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: no connection: error creating error stream: Timeout occurred I0227 08:09:21.310086 80029 portproxy.go:148] port forwarding for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: trying to create a new connection #4, 1 open I0227 08:09:51.311854 80029 portproxy.go:151] port forwarding for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: no connection: error creating data stream: Timeout occurred I0227 08:09:51.311908 80029 portproxy.go:148] port forwarding for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: trying to create a new connection #4, 1 open I0227 08:09:51.314415 80029 portproxy.go:155] port forwarding for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: created a new connection #4 I0227 08:09:51.314497 80029 portproxy.go:286] forward listener for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: got a new connection #4 I0227 08:09:51.409527 80029 portproxy.go:148] port forwarding for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: trying to create a new connection #5, 2 open I0227 08:09:52.326203 80029 portproxy.go:322] forward connection #4 for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: remote side closed the stream I0227 08:09:52.326277 80029 portproxy.go:340] forward connection #4 for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: closing our side I0227 08:10:21.409892 80029 portproxy.go:151] port forwarding for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: no connection: error creating error stream: Timeout occurred I0227 08:10:21.409954 80029 portproxy.go:148] port forwarding for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: trying to create a new connection #5, 1 open I0227 08:10:51.411455 80029 portproxy.go:151] port forwarding for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: no connection: error creating data stream: Timeout occurred I0227 08:10:51.411557 80029 portproxy.go:148] port forwarding for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: trying to create a new connection #5, 1 open I0227 08:10:51.413229 80029 portproxy.go:155] port forwarding for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: created a new connection #5 I0227 08:10:51.413274 80029 portproxy.go:286] forward listener for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: got a new connection #5 I0227 08:10:51.509508 80029 portproxy.go:148] port forwarding for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: trying to create a new connection #6, 2 open I0227 08:10:52.414862 80029 portproxy.go:322] forward connection #5 for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: remote side closed the stream I0227 08:10:52.414931 80029 portproxy.go:340] forward connection #5 for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: closing our side I0227 08:11:21.509879 80029 portproxy.go:151] port 
forwarding for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: no connection: error creating error stream: Timeout occurred I0227 08:11:21.509934 80029 portproxy.go:148] port forwarding for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: trying to create a new connection #6, 1 open I0227 08:11:51.511519 80029 portproxy.go:151] port forwarding for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: no connection: error creating data stream: Timeout occurred I0227 08:11:51.511568 80029 portproxy.go:148] port forwarding for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: trying to create a new connection #6, 1 open I0227 08:11:51.513519 80029 portproxy.go:155] port forwarding for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: created a new connection #6 I0227 08:11:51.513571 80029 portproxy.go:286] forward listener for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: got a new connection #6 I0227 08:11:51.609504 80029 portproxy.go:148] port forwarding for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: trying to create a new connection #7, 2 open I0227 08:11:52.517799 80029 portproxy.go:322] forward connection #6 for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: remote side closed the stream I0227 08:11:52.517918 80029 portproxy.go:340] forward connection #6 for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: closing our side I0227 08:12:21.609856 80029 portproxy.go:151] port forwarding for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: no connection: error creating error stream: Timeout occurred I0227 08:12:21.609909 80029 portproxy.go:148] port forwarding for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: trying to create a new connection #7, 1 open I0227 08:12:51.611494 80029 portproxy.go:151] port forwarding for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: no connection: error creating data stream: Timeout occurred I0227 08:12:51.611555 80029 portproxy.go:148] port forwarding for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: trying to create a new connection #7, 1 open I0227 08:12:51.613289 80029 portproxy.go:155] port forwarding for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: created a new connection #7 I0227 08:12:51.613343 80029 portproxy.go:286] forward listener for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: got a new connection #7 I0227 08:12:51.709535 80029 portproxy.go:148] port forwarding for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: trying to create a new connection #8, 2 open I0227 08:12:52.615858 80029 portproxy.go:322] forward connection #7 for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: remote side closed the stream I0227 08:12:52.615989 80029 portproxy.go:340] forward connection #7 for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: closing our side W0227 08:12:52.616116 80029 server.go:669] grpc: Server.Serve failed to create ServerTransport: connection error: desc = "transport: http2Server.HandleStreams failed to receive the preface from client: EOF" I0227 08:13:21.709934 80029 portproxy.go:151] port forwarding for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: no connection: error creating error stream: Timeout occurred I0227 08:13:21.709997 80029 portproxy.go:148] port forwarding for csi-mock-volumes-4037-2061/csi-mockplugin-0:9000: trying to create a new connection #8, 1 open Feb 27 08:13:30.916: FAIL: Failed to register CSIDriver csi-mock-csi-mock-volumes-4037 Unexpected error: <*errors.errorString | 0xc002666220>: { s: "error waiting for CSI driver csi-mock-csi-mock-volumes-4037 registration on node 
kind-worker2: timed out waiting for the condition", } error waiting for CSI driver csi-mock-csi-mock-volumes-4037 registration on node kind-worker2: timed out waiting for the condition occurred --- test/e2e/storage/drivers/proxy/portproxy.go | 110 ++++++++------------ 1 file changed, 46 insertions(+), 64 deletions(-) diff --git a/test/e2e/storage/drivers/proxy/portproxy.go b/test/e2e/storage/drivers/proxy/portproxy.go index 7f1d604882c..aef56974ad8 100644 --- a/test/e2e/storage/drivers/proxy/portproxy.go +++ b/test/e2e/storage/drivers/proxy/portproxy.go @@ -24,13 +24,11 @@ import ( "io/ioutil" "net" "net/http" - "strconv" "sync" "sync/atomic" "time" v1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/httpstream" @@ -94,35 +92,11 @@ func Listen(ctx context.Context, clientset kubernetes.Interface, restConfig *res addr: addr, } - // Port forwarding is allowed to fail and will be restarted when it does. - prepareForwarding := func() (*remotePort, error) { - pod, err := clientset.CoreV1().Pods(addr.Namespace).Get(ctx, addr.PodName, metav1.GetOptions{}) - if err != nil { - return nil, err - } - for i, status := range pod.Status.ContainerStatuses { - if pod.Spec.Containers[i].Name == addr.ContainerName && - status.State.Running == nil { - return nil, fmt.Errorf("container %q is not running", addr.ContainerName) - } - } - - streamConn, _, err := dialer.Dial(portforward.PortForwardProtocolV1Name) - if err != nil { - return nil, fmt.Errorf("dialer failed: %v", err) - } - rp := &remotePort{ - streamConn: streamConn, - } - return rp, nil - } - var connectionsCreated, connectionsClosed int32 - runForwarding := func(rp *remotePort) { - defer rp.Close() - klog.V(5).Infof("%s: starting connection polling", prefix) - defer klog.V(5).Infof("%s: connection polling ended", prefix) + runForwarding := func() { + klog.V(2).Infof("%s: starting connection polling", prefix) + defer klog.V(2).Infof("%s: connection polling ended", prefix) // This delay determines how quickly we notice when someone has // connected inside the cluster. With socat, we cannot make this too small @@ -145,9 +119,9 @@ func Listen(ctx context.Context, clientset kubernetes.Interface, restConfig *res } klog.V(5).Infof("%s: trying to create a new connection #%d, %d open", prefix, connectionsCreated, openConnections) - stream, err := rp.dial(ctx, prefix, addr.Port) + stream, err := dial(ctx, fmt.Sprintf("%s #%d", prefix, connectionsCreated), dialer, addr.Port) if err != nil { - klog.V(5).Infof("%s: no connection: %v", prefix, err) + klog.Errorf("%s: no connection: %v", prefix, err) break } // Make the connection available to Accept below. @@ -166,18 +140,24 @@ func Listen(ctx context.Context, clientset kubernetes.Interface, restConfig *res // Portforwarding and polling for connections run in the background. go func() { for { - fw, err := prepareForwarding() - if err == nil { - runForwarding(fw) - } else { - if apierrors.IsNotFound(err) { - // This is normal, the pod isn't running yet. Log with lower severity. 
- klog.V(5).Infof("prepare forwarding %s: %v", addr, err) - } else { - klog.Errorf("prepare forwarding %s: %v", addr, err) + running := false + pod, err := clientset.CoreV1().Pods(addr.Namespace).Get(ctx, addr.PodName, metav1.GetOptions{}) + if err != nil { + klog.V(5).Infof("checking for container %q in pod %s/%s: %v", addr.ContainerName, addr.Namespace, addr.PodName, err) + } + for i, status := range pod.Status.ContainerStatuses { + if pod.Spec.Containers[i].Name == addr.ContainerName && + status.State.Running != nil { + running = true + break } } + if running { + klog.V(2).Infof("container %q in pod %s/%s is running", addr.ContainerName, addr.Namespace, addr.PodName) + runForwarding() + } + select { case <-ctx.Done(): return @@ -209,27 +189,32 @@ func (a Addr) String() string { return fmt.Sprintf("%s/%s:%d", a.Namespace, a.PodName, a.Port) } -// remotePort is a stripped down version of client-go/tools/portforward minus -// the local listeners. -type remotePort struct { +type stream struct { + httpstream.Stream streamConn httpstream.Connection - - requestIDLock sync.Mutex - requestID int } -func (rp *remotePort) dial(ctx context.Context, prefix string, port int) (httpstream.Stream, error) { - requestID := rp.nextRequestID() +func dial(ctx context.Context, prefix string, dialer httpstream.Dialer, port int) (s *stream, finalErr error) { + streamConn, _, err := dialer.Dial(portforward.PortForwardProtocolV1Name) + if err != nil { + return nil, fmt.Errorf("dialer failed: %v", err) + } + requestID := "1" + defer func() { + if finalErr != nil { + streamConn.Close() + } + }() // create error stream headers := http.Header{} headers.Set(v1.StreamType, v1.StreamTypeError) headers.Set(v1.PortHeader, fmt.Sprintf("%d", port)) - headers.Set(v1.PortForwardRequestIDHeader, strconv.Itoa(requestID)) + headers.Set(v1.PortForwardRequestIDHeader, requestID) // We're not writing to this stream, just reading an error message from it. // This happens asynchronously. 
- errorStream, err := rp.streamConn.CreateStream(headers) + errorStream, err := streamConn.CreateStream(headers) if err != nil { return nil, fmt.Errorf("error creating error stream: %v", err) } @@ -246,24 +231,20 @@ func (rp *remotePort) dial(ctx context.Context, prefix string, port int) (httpst // create data stream headers.Set(v1.StreamType, v1.StreamTypeData) - dataStream, err := rp.streamConn.CreateStream(headers) + dataStream, err := streamConn.CreateStream(headers) if err != nil { return nil, fmt.Errorf("error creating data stream: %v", err) } - return dataStream, nil + return &stream{ + Stream: dataStream, + streamConn: streamConn, + }, nil } -func (rp *remotePort) Close() { - rp.streamConn.Close() -} - -func (rp *remotePort) nextRequestID() int { - rp.requestIDLock.Lock() - defer rp.requestIDLock.Unlock() - id := rp.requestID - rp.requestID++ - return id +func (s *stream) Close() { + s.Stream.Close() + s.streamConn.Close() } type listener struct { @@ -292,7 +273,7 @@ func (l *listener) Accept() (net.Conn, error) { } type connection struct { - stream httpstream.Stream + stream *stream addr Addr counter int32 closed *int32 @@ -346,7 +327,8 @@ func (c *connection) Close() error { atomic.AddInt32(c.closed, 1) c.closed = nil } - return c.stream.Close() + c.stream.Close() + return nil } func (l *listener) Addr() net.Addr { From 9ef648d9360afafa847633149032e0ed4e29a556 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Fri, 5 Feb 2021 15:58:36 +0100 Subject: [PATCH 14/14] generated files due to modified dependencies --- go.mod | 1 + vendor/modules.txt | 1 + 2 files changed, 2 insertions(+) diff --git a/go.mod b/go.mod index 1adb5034cd9..bb5d34ebd09 100644 --- a/go.mod +++ b/go.mod @@ -53,6 +53,7 @@ require ( github.com/gogo/protobuf v1.3.2 github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e github.com/golang/mock v1.4.4 + github.com/golang/protobuf v1.4.3 github.com/google/cadvisor v0.38.8 github.com/google/go-cmp v0.5.2 github.com/google/gofuzz v1.1.0 diff --git a/vendor/modules.txt b/vendor/modules.txt index a4fa1590d82..304177c1316 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -499,6 +499,7 @@ github.com/golang/groupcache/lru # github.com/golang/mock => github.com/golang/mock v1.4.4 github.com/golang/mock/gomock # github.com/golang/protobuf v1.4.3 => github.com/golang/protobuf v1.4.3 +## explicit # github.com/golang/protobuf => github.com/golang/protobuf v1.4.3 github.com/golang/protobuf/jsonpb github.com/golang/protobuf/proto