Move mount/fake.go to mount/fake_mounter.go

This patch moves fake.go to fake_mounter.go and follows the principle of
always returning a concrete type rather than an interface. All callers
of "FakeMounter" are changed to use "NewFakeMounter()" instead. The
FakeMounter "Log" struct member is no longer exported and is instead
accessed only through a new "GetLog()" method. The fake Exec helpers are
likewise split out into fake_exec.go, and osExec is exported as OSExec so
that NewOSExec() also returns a concrete type.
Travis Rhoden 2019-08-26 22:52:08 -06:00
parent 47dc1d6af1
commit 1fd8921546
37 changed files with 161 additions and 185 deletions
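
For reference, a minimal sketch of how a caller migrates to the new API; the package name, helper function, devices, and paths below are placeholders for illustration, not taken from any file in this patch:

package example

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util/mount"
)

func demoFakeMounter() {
	// Before this patch, tests built the fake directly and read the exported Log field:
	//   fm := &mount.FakeMounter{MountPoints: mps}
	//   actions := fm.Log
	// After this patch, tests use the constructor and the GetLog accessor.
	fm := mount.NewFakeMounter([]mount.MountPoint{
		{Device: "/dev/sdb", Path: "/mnt/data", Type: "ext4"},
	})
	_ = fm.Mount("/dev/sdc", "/mnt/scratch", "ext4", nil)
	for _, a := range fm.GetLog() {
		fmt.Printf("%s: %s -> %s\n", a.Action, a.Source, a.Target)
	}
	fm.ResetLog()
}

Returning *FakeMounter instead of the interface keeps the fake's extra knobs (MountCheckErrors, GetLog, ResetLog) reachable without a type assertion, while the concrete type still satisfies mount.Interface wherever tests need it.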

View File

@ -31,8 +31,8 @@ import (
)
func fakeContainerMgrMountInt() mount.Interface {
return &mount.FakeMounter{
MountPoints: []mount.MountPoint{
return mount.NewFakeMounter(
[]mount.MountPoint{
{
Device: "cgroup",
Type: "cgroup",
@ -53,8 +53,7 @@ func fakeContainerMgrMountInt() mount.Interface {
Type: "cgroup",
Opts: []string{"rw", "relatime", "memory"},
},
},
}
})
}
func TestCgroupMountValidationSuccess(t *testing.T) {
@ -64,8 +63,8 @@ func TestCgroupMountValidationSuccess(t *testing.T) {
}
func TestCgroupMountValidationMemoryMissing(t *testing.T) {
mountInt := &mount.FakeMounter{
MountPoints: []mount.MountPoint{
mountInt := mount.NewFakeMounter(
[]mount.MountPoint{
{
Device: "cgroup",
Type: "cgroup",
@ -81,15 +80,14 @@ func TestCgroupMountValidationMemoryMissing(t *testing.T) {
Type: "cgroup",
Opts: []string{"rw", "relatime", "cpuacct"},
},
},
}
})
_, err := validateSystemRequirements(mountInt)
assert.Error(t, err)
}
func TestCgroupMountValidationMultipleSubsystem(t *testing.T) {
mountInt := &mount.FakeMounter{
MountPoints: []mount.MountPoint{
mountInt := mount.NewFakeMounter(
[]mount.MountPoint{
{
Device: "cgroup",
Type: "cgroup",
@ -105,8 +103,7 @@ func TestCgroupMountValidationMultipleSubsystem(t *testing.T) {
Type: "cgroup",
Opts: []string{"rw", "relatime", "cpuacct"},
},
},
}
})
_, err := validateSystemRequirements(mountInt)
assert.Nil(t, err)
}
@ -118,8 +115,8 @@ func TestSoftRequirementsValidationSuccess(t *testing.T) {
defer os.RemoveAll(tempDir)
req.NoError(ioutil.WriteFile(path.Join(tempDir, "cpu.cfs_period_us"), []byte("0"), os.ModePerm))
req.NoError(ioutil.WriteFile(path.Join(tempDir, "cpu.cfs_quota_us"), []byte("0"), os.ModePerm))
mountInt := &mount.FakeMounter{
MountPoints: []mount.MountPoint{
mountInt := mount.NewFakeMounter(
[]mount.MountPoint{
{
Device: "cgroup",
Type: "cgroup",
@ -136,8 +133,7 @@ func TestSoftRequirementsValidationSuccess(t *testing.T) {
Type: "cgroup",
Opts: []string{"rw", "relatime", "cpuacct", "memory"},
},
},
}
})
f, err := validateSystemRequirements(mountInt)
assert.NoError(t, err)
assert.True(t, f.cpuHardcapping, "cpu hardcapping is expected to be enabled")

View File

@ -161,7 +161,7 @@ func newTestKubeletWithImageList(
kubelet.kubeClient = fakeKubeClient
kubelet.heartbeatClient = fakeKubeClient
kubelet.os = &containertest.FakeOS{}
kubelet.mounter = &mount.FakeMounter{}
kubelet.mounter = mount.NewFakeMounter(nil)
kubelet.hostutil = hostutil.NewFakeHostUtil(nil)
kubelet.subpather = &subpath.FakeSubpath{}

View File

@ -129,7 +129,7 @@ func TestRunOnce(t *testing.T) {
kb.evictionManager = evictionManager
kb.admitHandlers.AddPodAdmitHandler(evictionAdmitHandler)
kb.mounter = &mount.FakeMounter{}
kb.mounter = mount.NewFakeMounter(nil)
if err := kb.setupDataDirs(); err != nil {
t.Errorf("Failed to init data dirs: %v", err)
}

View File

@ -84,7 +84,7 @@ func Test_Run_Positive_DoNothing(t *testing.T) {
asw,
hasAddedPods,
oex,
&mount.FakeMounter{},
mount.NewFakeMounter(nil),
hostutil.NewFakeHostUtil(nil),
volumePluginMgr,
kubeletPodsDir)
@ -128,7 +128,7 @@ func Test_Run_Positive_VolumeAttachAndMount(t *testing.T) {
asw,
hasAddedPods,
oex,
&mount.FakeMounter{},
mount.NewFakeMounter(nil),
hostutil.NewFakeHostUtil(nil),
volumePluginMgr,
kubeletPodsDir)
@ -206,7 +206,7 @@ func Test_Run_Positive_VolumeMountControllerAttachEnabled(t *testing.T) {
asw,
hasAddedPods,
oex,
&mount.FakeMounter{},
mount.NewFakeMounter(nil),
hostutil.NewFakeHostUtil(nil),
volumePluginMgr,
kubeletPodsDir)
@ -285,7 +285,7 @@ func Test_Run_Positive_VolumeAttachMountUnmountDetach(t *testing.T) {
asw,
hasAddedPods,
oex,
&mount.FakeMounter{},
mount.NewFakeMounter(nil),
hostutil.NewFakeHostUtil(nil),
volumePluginMgr,
kubeletPodsDir)
@ -375,7 +375,7 @@ func Test_Run_Positive_VolumeUnmountControllerAttachEnabled(t *testing.T) {
asw,
hasAddedPods,
oex,
&mount.FakeMounter{},
mount.NewFakeMounter(nil),
hostutil.NewFakeHostUtil(nil),
volumePluginMgr,
kubeletPodsDir)
@ -502,7 +502,7 @@ func Test_Run_Positive_VolumeAttachAndMap(t *testing.T) {
asw,
hasAddedPods,
oex,
&mount.FakeMounter{},
mount.NewFakeMounter(nil),
hostutil.NewFakeHostUtil(nil),
volumePluginMgr,
kubeletPodsDir)
@ -608,7 +608,7 @@ func Test_Run_Positive_BlockVolumeMapControllerAttachEnabled(t *testing.T) {
asw,
hasAddedPods,
oex,
&mount.FakeMounter{},
mount.NewFakeMounter(nil),
hostutil.NewFakeHostUtil(nil),
volumePluginMgr,
kubeletPodsDir)
@ -709,7 +709,7 @@ func Test_Run_Positive_BlockVolumeAttachMapUnmapDetach(t *testing.T) {
asw,
hasAddedPods,
oex,
&mount.FakeMounter{},
mount.NewFakeMounter(nil),
hostutil.NewFakeHostUtil(nil),
volumePluginMgr,
kubeletPodsDir)
@ -823,7 +823,7 @@ func Test_Run_Positive_VolumeUnmapControllerAttachEnabled(t *testing.T) {
asw,
hasAddedPods,
oex,
&mount.FakeMounter{},
mount.NewFakeMounter(nil),
hostutil.NewFakeHostUtil(nil),
volumePluginMgr,
kubeletPodsDir)
@ -1096,7 +1096,7 @@ func Test_Run_Positive_VolumeFSResizeControllerAttachEnabled(t *testing.T) {
asw,
hasAddedPods,
oex,
&mount.FakeMounter{},
mount.NewFakeMounter(nil),
hostutil.NewFakeHostUtil(nil),
volumePluginMgr,
kubeletPodsDir)
@ -1278,7 +1278,7 @@ func Test_Run_Positive_VolumeMountControllerAttachEnabledRace(t *testing.T) {
asw,
hasAddedPods,
oex,
&mount.FakeMounter{},
mount.NewFakeMounter(nil),
hostutil.NewFakeHostUtil(nil),
volumePluginMgr,
kubeletPodsDir)

View File

@ -302,7 +302,7 @@ func newTestVolumeManager(tmpDir string, podManager kubepod.Manager, kubeClient
kubeClient,
plugMgr,
&containertest.FakeRuntime{},
&mount.FakeMounter{},
mount.NewFakeMounter(nil),
hostutil.NewFakeHostUtil(nil),
"",
fakeRecorder,

View File

@ -5,7 +5,8 @@ go_library(
srcs = [
"doc.go",
"exec.go",
"fake.go",
"fake_exec.go",
"fake_mounter.go",
"mount.go",
"mount_helper_common.go",
"mount_helper_unix.go",

View File

@ -19,35 +19,18 @@ package mount
import "k8s.io/utils/exec"
// NewOSExec returns a new Exec interface implementation based on exec()
func NewOSExec() Exec {
return &osExec{}
func NewOSExec() *OSExec {
return &OSExec{}
}
// Real implementation of Exec interface that uses simple utils.Exec
type osExec struct{}
// OSExec is an implementation of the Exec interface that uses simple utils.Exec
type OSExec struct{}
var _ Exec = &osExec{}
var _ Exec = &OSExec{}
func (e *osExec) Run(cmd string, args ...string) ([]byte, error) {
// Run executes the given cmd and args and returns stdout and stderr as a
// combined byte stream
func (e *OSExec) Run(cmd string, args ...string) ([]byte, error) {
exe := exec.New()
return exe.Command(cmd, args...).CombinedOutput()
}
// NewFakeExec returns a new FakeExec
func NewFakeExec(run runHook) *FakeExec {
return &FakeExec{runHook: run}
}
// FakeExec for testing.
type FakeExec struct {
runHook runHook
}
type runHook func(cmd string, args ...string) ([]byte, error)
// Run executes the command using the optional runhook, if given
func (f *FakeExec) Run(cmd string, args ...string) ([]byte, error) {
if f.runHook != nil {
return f.runHook(cmd, args...)
}
return nil, nil
}
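
The exec side follows the same pattern; a small, hypothetical sketch (again with a throwaway package and function, and "echo" standing in for a real command):

package example

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util/mount"
)

func demoOSExec() {
	// NewOSExec returns the concrete *mount.OSExec; since it implements
	// mount.Exec, it can still be stored behind the interface.
	var e mount.Exec = mount.NewOSExec()
	out, err := e.Run("echo", "hello")
	fmt.Println(string(out), err)

	// Tests can swap in the fake the same way.
	var fake mount.Exec = mount.NewFakeExec(nil)
	_, _ = fake.Run("echo", "hello") // returns nil, nil because no runHook was given
}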

View File

@ -16,23 +16,6 @@ limitations under the License.
package mount
import "k8s.io/utils/exec"
// NewOSExec returns a new Exec interface implementation based on exec()
func NewOSExec() Exec {
return &osExec{}
}
// Real implementation of Exec interface that uses simple utils.Exec
type osExec struct{}
var _ Exec = &osExec{}
func (e *osExec) Run(cmd string, args ...string) ([]byte, error) {
exe := exec.New()
return exe.Command(cmd, args...).CombinedOutput()
}
// NewFakeExec returns a new FakeExec
func NewFakeExec(run runHook) *FakeExec {
return &FakeExec{runHook: run}

View File

@ -27,7 +27,7 @@ import (
// FakeMounter implements mount.Interface for tests.
type FakeMounter struct {
MountPoints []MountPoint
Log []FakeAction
log []FakeAction
// Error to return for a path when calling IsLikelyNotMountPoint
MountCheckErrors map[string]error
// Some tests run things in parallel, make sure the mounter does not produce
@ -55,12 +55,26 @@ type FakeAction struct {
FSType string // applies only to "mount" actions
}
func NewFakeMounter(mps []MountPoint) *FakeMounter {
return &FakeMounter{
MountPoints: mps,
}
}
// ResetLog clears all the log entries in FakeMounter
func (f *FakeMounter) ResetLog() {
f.mutex.Lock()
defer f.mutex.Unlock()
f.Log = []FakeAction{}
f.log = []FakeAction{}
}
// GetLog returns the slice of FakeActions taken by the mounter
func (f *FakeMounter) GetLog() []FakeAction {
f.mutex.Lock()
defer f.mutex.Unlock()
return f.log
}
// Mount records the mount event and updates the in-memory mount points for FakeMounter
@ -102,7 +116,7 @@ func (f *FakeMounter) Mount(source string, target string, fstype string, options
}
f.MountPoints = append(f.MountPoints, MountPoint{Device: source, Path: absTarget, Type: fstype, Opts: opts})
klog.V(5).Infof("Fake mounter: mounted %s to %s", source, absTarget)
f.Log = append(f.Log, FakeAction{Action: FakeActionMount, Target: absTarget, Source: source, FSType: fstype})
f.log = append(f.log, FakeAction{Action: FakeActionMount, Target: absTarget, Source: source, FSType: fstype})
return nil
}
@ -133,7 +147,7 @@ func (f *FakeMounter) Unmount(target string) error {
newMountpoints = append(newMountpoints, MountPoint{Device: mp.Device, Path: mp.Path, Type: mp.Type})
}
f.MountPoints = newMountpoints
f.Log = append(f.Log, FakeAction{Action: FakeActionUnmount, Target: absTarget})
f.log = append(f.log, FakeAction{Action: FakeActionUnmount, Target: absTarget})
delete(f.MountCheckErrors, target)
return nil
}

View File

@ -93,10 +93,10 @@ func TestDoCleanupMountPoint(t *testing.T) {
t.Fatalf("failed to prepare test: %v", err)
}
fake := &FakeMounter{
MountPoints: []MountPoint{mountPoint},
MountCheckErrors: map[string]error{mountPoint.Path: mountError},
}
fake := NewFakeMounter(
[]MountPoint{mountPoint},
)
fake.MountCheckErrors = map[string]error{mountPoint.Path: mountError}
err = doCleanupMountPoint(mountPoint.Path, fake, true, tt.corruptedMnt)
if tt.expectErr {

View File

@ -73,15 +73,14 @@ func mountPointsEqual(a, b *MountPoint) bool {
}
func TestGetMountRefs(t *testing.T) {
fm := &FakeMounter{
MountPoints: []MountPoint{
fm := NewFakeMounter(
[]MountPoint{
{Device: "/dev/sdb", Path: "/var/lib/kubelet/plugins/kubernetes.io/gce-pd/mounts/gce-pd"},
{Device: "/dev/sdb", Path: "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~gce-pd/gce-pd-in-pod"},
{Device: "/dev/sdc", Path: "/var/lib/kubelet/plugins/kubernetes.io/gce-pd/mounts/gce-pd2"},
{Device: "/dev/sdc", Path: "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~gce-pd/gce-pd2-in-pod1"},
{Device: "/dev/sdc", Path: "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~gce-pd/gce-pd2-in-pod2"},
},
}
})
tests := []struct {
mountPath string
@ -137,14 +136,13 @@ func setEquivalent(set1, set2 []string) bool {
}
func TestGetDeviceNameFromMount(t *testing.T) {
fm := &FakeMounter{
MountPoints: []MountPoint{
fm := NewFakeMounter(
[]MountPoint{
{Device: "/dev/disk/by-path/prefix-lun-1",
Path: "/mnt/111"},
{Device: "/dev/disk/by-path/prefix-lun-1",
Path: "/mnt/222"},
},
}
})
tests := []struct {
mountPath string
@ -166,15 +164,14 @@ func TestGetDeviceNameFromMount(t *testing.T) {
}
func TestGetMountRefsByDev(t *testing.T) {
fm := &FakeMounter{
MountPoints: []MountPoint{
fm := NewFakeMounter(
[]MountPoint{
{Device: "/dev/sdb", Path: "/var/lib/kubelet/plugins/kubernetes.io/gce-pd/mounts/gce-pd"},
{Device: "/dev/sdb", Path: "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~gce-pd/gce-pd-in-pod"},
{Device: "/dev/sdc", Path: "/var/lib/kubelet/plugins/kubernetes.io/gce-pd/mounts/gce-pd2"},
{Device: "/dev/sdc", Path: "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~gce-pd/gce-pd2-in-pod1"},
{Device: "/dev/sdc", Path: "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~gce-pd/gce-pd2-in-pod2"},
},
}
})
tests := []struct {
mountPath string

View File

@ -224,7 +224,7 @@ func TestIsLikelyNotMountPoint(t *testing.T) {
}
func TestFormatAndMount(t *testing.T) {
fakeMounter := ErrorMounter{&FakeMounter{}, 0, nil}
fakeMounter := ErrorMounter{NewFakeMounter(nil), 0, nil}
execCallback := func(cmd string, args ...string) ([]byte, error) {
for j := range args {
if strings.Contains(args[j], "Get-Disk -Number") {

View File

@ -208,7 +208,7 @@ func TestSafeFormatAndMount(t *testing.T) {
return []byte(script.output), script.err
}
fakeMounter := ErrorMounter{&FakeMounter{}, 0, test.mountErrs}
fakeMounter := ErrorMounter{NewFakeMounter(nil), 0, test.mountErrs}
fakeExec := NewFakeExec(execCallback)
mounter := SafeFormatAndMount{
Interface: &fakeMounter,

View File

@ -125,7 +125,7 @@ func TestPlugin(t *testing.T) {
},
}
fakeManager := &fakePDManager{}
fakeMounter := &mount.FakeMounter{}
fakeMounter := mount.NewFakeMounter(nil)
mounter, err := plug.(*awsElasticBlockStorePlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), fakeManager, fakeMounter)
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
@ -314,7 +314,7 @@ func TestMounterAndUnmounterTypeAssert(t *testing.T) {
},
}
mounter, err := plug.(*awsElasticBlockStorePlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &fakePDManager{}, &mount.FakeMounter{})
mounter, err := plug.(*awsElasticBlockStorePlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &fakePDManager{}, mount.NewFakeMounter(nil))
if err != nil {
t.Errorf("Error creating new mounter:%v", err)
}
@ -322,7 +322,7 @@ func TestMounterAndUnmounterTypeAssert(t *testing.T) {
t.Errorf("Volume Mounter can be type-assert to Unmounter")
}
unmounter, err := plug.(*awsElasticBlockStorePlugin).newUnmounterInternal("vol1", types.UID("poduid"), &fakePDManager{}, &mount.FakeMounter{})
unmounter, err := plug.(*awsElasticBlockStorePlugin).newUnmounterInternal("vol1", types.UID("poduid"), &fakePDManager{}, mount.NewFakeMounter(nil))
if err != nil {
t.Errorf("Error creating new unmounter:%v", err)
}
@ -361,7 +361,7 @@ func TestMountOptions(t *testing.T) {
}
fakeManager := &fakePDManager{}
fakeMounter := &mount.FakeMounter{}
fakeMounter := mount.NewFakeMounter(nil)
mounter, err := plug.(*awsElasticBlockStorePlugin).newMounterInternal(volume.NewSpecFromPersistentVolume(pv, false), types.UID("poduid"), fakeManager, fakeMounter)
if err != nil {

View File

@ -138,7 +138,7 @@ func testPlugin(t *testing.T, tmpDir string, volumeHost volume.VolumeHost) {
},
},
}
fake := &mount.FakeMounter{}
fake := mount.NewFakeMounter(nil)
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
mounter, err := plug.(*azureFilePlugin).newMounterInternal(volume.NewSpecFromVolume(spec), pod, &fakeAzureSvc{}, fake)
if err != nil {
@ -164,7 +164,7 @@ func testPlugin(t *testing.T, tmpDir string, volumeHost volume.VolumeHost) {
}
}
unmounter, err := plug.(*azureFilePlugin).newUnmounterInternal("vol1", types.UID("poduid"), &mount.FakeMounter{})
unmounter, err := plug.(*azureFilePlugin).newUnmounterInternal("vol1", types.UID("poduid"), mount.NewFakeMounter(nil))
if err != nil {
t.Errorf("Failed to make a new Unmounter: %v", err)
}
@ -260,7 +260,7 @@ func TestMounterAndUnmounterTypeAssert(t *testing.T) {
},
},
}
fake := &mount.FakeMounter{}
fake := mount.NewFakeMounter(nil)
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
mounter, err := plug.(*azureFilePlugin).newMounterInternal(volume.NewSpecFromVolume(spec), pod, &fakeAzureSvc{}, fake)
if err != nil {
@ -270,7 +270,7 @@ func TestMounterAndUnmounterTypeAssert(t *testing.T) {
t.Errorf("Volume Mounter can be type-assert to Unmounter")
}
unmounter, err := plug.(*azureFilePlugin).newUnmounterInternal("vol1", types.UID("poduid"), &mount.FakeMounter{})
unmounter, err := plug.(*azureFilePlugin).newUnmounterInternal("vol1", types.UID("poduid"), mount.NewFakeMounter(nil))
if err != nil {
t.Errorf("MounterInternal() failed: %v", err)
}

View File

@ -21,7 +21,7 @@ import (
"path/filepath"
"testing"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/util/mount"
@ -75,7 +75,7 @@ func TestPlugin(t *testing.T) {
},
},
}
mounter, err := plug.(*cephfsPlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &mount.FakeMounter{}, "secrets")
mounter, err := plug.(*cephfsPlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), mount.NewFakeMounter(nil), "secrets")
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
}
@ -97,7 +97,7 @@ func TestPlugin(t *testing.T) {
t.Errorf("SetUp() failed: %v", err)
}
}
unmounter, err := plug.(*cephfsPlugin).newUnmounterInternal("vol1", types.UID("poduid"), &mount.FakeMounter{})
unmounter, err := plug.(*cephfsPlugin).newUnmounterInternal("vol1", types.UID("poduid"), mount.NewFakeMounter(nil))
if err != nil {
t.Errorf("Failed to make a new Unmounter: %v", err)
}

View File

@ -25,7 +25,7 @@ import (
"testing"
"time"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/util/mount"
@ -155,7 +155,7 @@ func TestPlugin(t *testing.T) {
},
},
}
mounter, err := plug.(*cinderPlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &fakePDManager{0}, &mount.FakeMounter{})
mounter, err := plug.(*cinderPlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &fakePDManager{0}, mount.NewFakeMounter(nil))
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
}
@ -179,7 +179,7 @@ func TestPlugin(t *testing.T) {
}
}
unmounter, err := plug.(*cinderPlugin).newUnmounterInternal("vol1", types.UID("poduid"), &fakePDManager{0}, &mount.FakeMounter{})
unmounter, err := plug.(*cinderPlugin).newUnmounterInternal("vol1", types.UID("poduid"), &fakePDManager{0}, mount.NewFakeMounter(nil))
if err != nil {
t.Errorf("Failed to make a new Unmounter: %v", err)
}

View File

@ -21,7 +21,7 @@ import (
"os"
"path/filepath"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"

View File

@ -23,7 +23,7 @@ import (
"path/filepath"
"testing"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
@ -117,7 +117,7 @@ func doTestPlugin(t *testing.T, config pluginTestConfig) {
VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{Medium: config.medium}},
}
physicalMounter = mount.FakeMounter{}
physicalMounter = mount.NewFakeMounter(nil)
mountDetector = fakeMountDetector{}
pod = &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
@ -148,7 +148,7 @@ func doTestPlugin(t *testing.T, config pluginTestConfig) {
mounter, err := plug.(*emptyDirPlugin).newMounterInternal(volume.NewSpecFromVolume(spec),
pod,
&physicalMounter,
physicalMounter,
&mountDetector,
volume.VolumeOptions{})
if err != nil {
@ -186,12 +186,13 @@ func doTestPlugin(t *testing.T, config pluginTestConfig) {
t.Errorf("Volume directory was created unexpectedly")
}
log := physicalMounter.GetLog()
// Check the number of mounts performed during setup
if e, a := config.expectedSetupMounts, len(physicalMounter.Log); e != a {
if e, a := config.expectedSetupMounts, len(log); e != a {
t.Errorf("Expected %v physicalMounter calls during setup, got %v", e, a)
} else if config.expectedSetupMounts == 1 &&
(physicalMounter.Log[0].Action != mount.FakeActionMount || (physicalMounter.Log[0].FSType != "tmpfs" && physicalMounter.Log[0].FSType != "hugetlbfs")) {
t.Errorf("Unexpected physicalMounter action during setup: %#v", physicalMounter.Log[0])
(log[0].Action != mount.FakeActionMount || (log[0].FSType != "tmpfs" && log[0].FSType != "hugetlbfs")) {
t.Errorf("Unexpected physicalMounter action during setup: %#v", log[0])
}
physicalMounter.ResetLog()
@ -201,7 +202,7 @@ func doTestPlugin(t *testing.T, config pluginTestConfig) {
teardownMedium = v1.StorageMediumMemory
}
unmounterMountDetector := &fakeMountDetector{medium: teardownMedium, isMount: config.shouldBeMountedBeforeTeardown}
unmounter, err := plug.(*emptyDirPlugin).newUnmounterInternal(volumeName, types.UID("poduid"), &physicalMounter, unmounterMountDetector)
unmounter, err := plug.(*emptyDirPlugin).newUnmounterInternal(volumeName, types.UID("poduid"), physicalMounter, unmounterMountDetector)
if err != nil {
t.Errorf("Failed to make a new Unmounter: %v", err)
}
@ -219,11 +220,12 @@ func doTestPlugin(t *testing.T, config pluginTestConfig) {
t.Errorf("TearDown() failed: %v", err)
}
log = physicalMounter.GetLog()
// Check the number of physicalMounter calls during teardown
if e, a := config.expectedTeardownMounts, len(physicalMounter.Log); e != a {
if e, a := config.expectedTeardownMounts, len(log); e != a {
t.Errorf("Expected %v physicalMounter calls during teardown, got %v", e, a)
} else if config.expectedTeardownMounts == 1 && physicalMounter.Log[0].Action != mount.FakeActionUnmount {
t.Errorf("Unexpected physicalMounter action during teardown: %#v", physicalMounter.Log[0])
} else if config.expectedTeardownMounts == 1 && log[0].Action != mount.FakeActionUnmount {
t.Errorf("Unexpected physicalMounter action during teardown: %#v", log[0])
}
physicalMounter.ResetLog()
}

View File

@ -24,7 +24,7 @@ import (
"strings"
"testing"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes/fake"
@ -163,7 +163,7 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
}
fakeManager := newFakeDiskManager()
defer fakeManager.Cleanup()
fakeMounter := &mount.FakeMounter{}
fakeMounter := mount.NewFakeMounter(nil)
fakeExec := mount.NewFakeExec(nil)
mounter, err := plug.(*fcPlugin).newMounterInternal(spec, types.UID("poduid"), fakeManager, fakeMounter, fakeExec)
if err != nil {
@ -226,7 +226,7 @@ func doTestPluginNilMounter(t *testing.T, spec *volume.Spec) {
}
fakeManager := newFakeDiskManager()
defer fakeManager.Cleanup()
fakeMounter := &mount.FakeMounter{}
fakeMounter := mount.NewFakeMounter(nil)
fakeExec := mount.NewFakeExec(nil)
mounter, err := plug.(*fcPlugin).newMounterInternal(spec, types.UID("poduid"), fakeManager, fakeMounter, fakeExec)
if err == nil {
@ -437,14 +437,13 @@ func Test_ConstructVolumeSpec(t *testing.T) {
if runtime.GOOS == "darwin" {
t.Skipf("Test_ConstructVolumeSpec is not supported on GOOS=%s", runtime.GOOS)
}
fm := &mount.FakeMounter{
MountPoints: []mount.MountPoint{
fm := mount.NewFakeMounter(
[]mount.MountPoint{
{Device: "/dev/sdb", Path: "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~fc/fc-in-pod1"},
{Device: "/dev/sdb", Path: "/var/lib/kubelet/plugins/kubernetes.io/fc/50060e801049cfd1-lun-0"},
{Device: "/dev/sdc", Path: "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~fc/fc-in-pod2"},
{Device: "/dev/sdc", Path: "/var/lib/kubelet/plugins/kubernetes.io/fc/volumeDevices/3600508b400105e210000900000490000"},
},
}
})
mountPaths := []string{
"/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~fc/fc-in-pod1",
"/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~fc/fc-in-pod2",
@ -488,11 +487,10 @@ func Test_ConstructVolumeSpec(t *testing.T) {
}
func Test_ConstructVolumeSpecNoRefs(t *testing.T) {
fm := &mount.FakeMounter{
MountPoints: []mount.MountPoint{
fm := mount.NewFakeMounter(
[]mount.MountPoint{
{Device: "/dev/sdd", Path: "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~fc/fc-in-pod1"},
},
}
})
mountPaths := []string{
"/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~fc/fc-in-pod1",
}

View File

@ -19,7 +19,7 @@ package flexvolume
import (
"testing"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/util/mount"
@ -42,7 +42,7 @@ func TestSetUpAt(tt *testing.T) {
ServiceAccountName: "my-sa",
},
}
mounter := &mount.FakeMounter{}
mounter := mount.NewFakeMounter(nil)
plugin, rootDir := testPlugin(t)
plugin.unsupportedCommands = []string{"unsupportedCmd"}

View File

@ -28,7 +28,7 @@ func TestTearDownAt(tt *testing.T) {
t := harness.For(tt)
defer t.Close()
mounter := &mount.FakeMounter{}
mounter := mount.NewFakeMounter(nil)
plugin, rootDir := testPlugin(t)
plugin.runner = fakeRunner(

View File

@ -21,7 +21,7 @@ import (
"os"
"testing"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/util/mount"
@ -153,7 +153,7 @@ func TestPlugin(t *testing.T) {
},
}
fakeManager := &fakeFlockerUtil{}
fakeMounter := &mount.FakeMounter{}
fakeMounter := mount.NewFakeMounter(nil)
mounter, err := plug.(*flockerPlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), fakeManager, fakeMounter)
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)

View File

@ -128,7 +128,7 @@ func TestPlugin(t *testing.T) {
},
}
fakeManager := &fakePDManager{}
fakeMounter := &mount.FakeMounter{}
fakeMounter := mount.NewFakeMounter(nil)
mounter, err := plug.(*gcePersistentDiskPlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), fakeManager, fakeMounter)
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
@ -271,7 +271,7 @@ func TestMountOptions(t *testing.T) {
}
fakeManager := &fakePDManager{}
fakeMounter := &mount.FakeMounter{}
fakeMounter := mount.NewFakeMounter(nil)
mounter, err := plug.(*gcePersistentDiskPlugin).newMounterInternal(volume.NewSpecFromPersistentVolume(pv, false), types.UID("poduid"), fakeManager, fakeMounter)
if err != nil {

View File

@ -23,7 +23,7 @@ import (
"testing"
gapi "github.com/heketi/heketi/pkg/glusterfs/api"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
@ -106,7 +106,7 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
ep := &v1.Endpoints{ObjectMeta: metav1.ObjectMeta{Name: "foo"}, Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}}}}}
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
mounter, err := plug.(*glusterfsPlugin).newMounterInternal(spec, ep, pod, &mount.FakeMounter{})
mounter, err := plug.(*glusterfsPlugin).newMounterInternal(spec, ep, pod, mount.NewFakeMounter(nil))
volumePath := mounter.GetPath()
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
@ -128,7 +128,7 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
t.Errorf("SetUp() failed: %v", err)
}
}
unmounter, err := plug.(*glusterfsPlugin).newUnmounterInternal("vol1", types.UID("poduid"), &mount.FakeMounter{})
unmounter, err := plug.(*glusterfsPlugin).newUnmounterInternal("vol1", types.UID("poduid"), mount.NewFakeMounter(nil))
if err != nil {
t.Errorf("Failed to make a new Unmounter: %v", err)
}

View File

@ -22,7 +22,7 @@ import (
"strings"
"testing"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes/fake"
@ -159,7 +159,7 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
}
fakeManager := NewFakeDiskManager()
defer fakeManager.Cleanup()
fakeMounter := &mount.FakeMounter{}
fakeMounter := mount.NewFakeMounter(nil)
fakeExec := mount.NewFakeExec(nil)
mounter, err := plug.(*iscsiPlugin).newMounterInternal(spec, types.UID("poduid"), fakeManager, fakeMounter, fakeExec, nil)
if err != nil {

View File

@ -570,7 +570,7 @@ func TestMountOptions(t *testing.T) {
}
// Wrap with FakeMounter.
fakeMounter := &mount.FakeMounter{}
fakeMounter := mount.NewFakeMounter(nil)
mounter.(*localVolumeMounter).mounter = fakeMounter
if err := mounter.SetUp(volume.MounterArgs{}); err != nil {

View File

@ -21,7 +21,7 @@ import (
"os"
"testing"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes/fake"
@ -108,7 +108,7 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
if err != nil {
t.Errorf("Can't find the plugin by name")
}
fake := &mount.FakeMounter{}
fake := mount.NewFakeMounter(nil)
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
mounter, err := plug.(*nfsPlugin).newMounterInternal(spec, pod, fake)
if err != nil {
@ -135,11 +135,12 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
if mounter.(*nfsMounter).readOnly {
t.Errorf("The volume source should not be read-only and it is.")
}
if len(fake.Log) != 1 {
t.Errorf("Mount was not called exactly one time. It was called %d times.", len(fake.Log))
log := fake.GetLog()
if len(log) != 1 {
t.Errorf("Mount was not called exactly one time. It was called %d times.", len(log))
} else {
if fake.Log[0].Action != mount.FakeActionMount {
t.Errorf("Unexpected mounter action: %#v", fake.Log[0])
if log[0].Action != mount.FakeActionMount {
t.Errorf("Unexpected mounter action: %#v", log[0])
}
}
fake.ResetLog()
@ -159,11 +160,12 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
} else if !os.IsNotExist(err) {
t.Errorf("TearDown() failed: %v", err)
}
if len(fake.Log) != 1 {
t.Errorf("Unmount was not called exactly one time. It was called %d times.", len(fake.Log))
log = fake.GetLog()
if len(log) != 1 {
t.Errorf("Unmount was not called exactly one time. It was called %d times.", len(log))
} else {
if fake.Log[0].Action != mount.FakeActionUnmount {
t.Errorf("Unexpected unmounter action: %#v", fake.Log[0])
if log[0].Action != mount.FakeActionUnmount {
t.Errorf("Unexpected unmounter action: %#v", log[0])
}
}

View File

@ -22,7 +22,7 @@ import (
"path/filepath"
"testing"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types"
utiltesting "k8s.io/client-go/util/testing"
@ -148,7 +148,7 @@ func TestPlugin(t *testing.T) {
}
fakeManager := &fakePortworxManager{}
// Test Mounter
fakeMounter := &mount.FakeMounter{}
fakeMounter := mount.NewFakeMounter(nil)
mounter, err := plug.(*portworxVolumePlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), fakeManager, fakeMounter)
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)

View File

@ -21,7 +21,7 @@ import (
"os"
"testing"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes/fake"
@ -89,7 +89,7 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
}
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
mounter, err := plug.(*quobytePlugin).newMounterInternal(spec, pod, &mount.FakeMounter{})
mounter, err := plug.(*quobytePlugin).newMounterInternal(spec, pod, mount.NewFakeMounter(nil))
volumePath := mounter.GetPath()
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
@ -104,7 +104,7 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
if err := mounter.SetUp(volume.MounterArgs{}); err != nil {
t.Errorf("Expected success, got: %v", err)
}
unmounter, err := plug.(*quobytePlugin).newUnmounterInternal("vol", types.UID("poduid"), &mount.FakeMounter{})
unmounter, err := plug.(*quobytePlugin).newUnmounterInternal("vol", types.UID("poduid"), mount.NewFakeMounter(nil))
if err != nil {
t.Errorf("Failed to make a new unmounter: %v", err)
}

View File

@ -27,7 +27,7 @@ import (
"testing"
"time"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
@ -234,11 +234,12 @@ func (fake *fakeDiskManager) ExpandImage(rbdExpander *rbdVolumeExpander, oldSize
// checkMounterLog checks that fakeMounter has the expected number of log entries, and that the last action equals expectedAction.
func checkMounterLog(t *testing.T, fakeMounter *mount.FakeMounter, expected int, expectedAction mount.FakeAction) {
if len(fakeMounter.Log) != expected {
t.Fatalf("fakeMounter should have %d logs, actual: %d", expected, len(fakeMounter.Log))
log := fakeMounter.GetLog()
if len(log) != expected {
t.Fatalf("fakeMounter should have %d logs, actual: %d", expected, len(log))
}
lastIndex := len(fakeMounter.Log) - 1
lastAction := fakeMounter.Log[lastIndex]
lastIndex := len(log) - 1
lastAction := log[lastIndex]
if !reflect.DeepEqual(expectedAction, lastAction) {
t.Fatalf("fakeMounter.Log[%d] should be %#v, not: %#v", lastIndex, expectedAction, lastAction)
}

View File

@ -22,7 +22,7 @@ import (
"path/filepath"
"testing"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes/fake"
@ -194,7 +194,7 @@ func TestPlugin(t *testing.T) {
t.Errorf("Couldn't get secret from %v/%v", pod.Namespace, secretName)
}
mounter, err := plug.(*storageosPlugin).newMounterInternal(volume.NewSpecFromVolume(spec), pod, apiCfg, fakeManager, &mount.FakeMounter{}, mount.NewFakeExec(nil))
mounter, err := plug.(*storageosPlugin).newMounterInternal(volume.NewSpecFromVolume(spec), pod, apiCfg, fakeManager, mount.NewFakeMounter(nil), mount.NewFakeExec(nil))
if err != nil {
t.Fatalf("Failed to make a new Mounter: %v", err)
}
@ -231,7 +231,7 @@ func TestPlugin(t *testing.T) {
// Test Unmounter
fakeManager = &fakePDManager{}
unmounter, err := plug.(*storageosPlugin).newUnmounterInternal("vol1-pvname", types.UID("poduid"), fakeManager, &mount.FakeMounter{}, mount.NewFakeExec(nil))
unmounter, err := plug.(*storageosPlugin).newUnmounterInternal("vol1-pvname", types.UID("poduid"), fakeManager, mount.NewFakeMounter(nil), mount.NewFakeExec(nil))
if err != nil {
t.Errorf("Failed to make a new Unmounter: %v", err)
}
@ -372,7 +372,7 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
fakeManager := &fakePDManager{}
fakeConfig := &fakeConfig{}
apiCfg := fakeConfig.GetAPIConfig()
mounter, err := plug.(*storageosPlugin).newMounterInternal(spec, pod, apiCfg, fakeManager, &mount.FakeMounter{}, mount.NewFakeExec(nil))
mounter, err := plug.(*storageosPlugin).newMounterInternal(spec, pod, apiCfg, fakeManager, mount.NewFakeMounter(nil), mount.NewFakeExec(nil))
if err != nil {
t.Fatalf("error creating a new internal mounter:%v", err)
}

View File

@ -21,7 +21,7 @@ import (
"os"
storageostypes "github.com/storageos/go-api/types"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
@ -221,7 +221,7 @@ func TestAttachVolume(t *testing.T) {
volName: testVolName,
volNamespace: testNamespace,
manager: util,
mounter: &mount.FakeMounter{},
mounter: mount.NewFakeMounter(nil),
plugin: plug.(*storageosPlugin),
},
deviceDir: tmpDir,

View File

@ -109,7 +109,7 @@ func NewFakeVolumeHostWithCSINodeName(rootDir string, kubeClient clientset.Inter
func newFakeVolumeHost(rootDir string, kubeClient clientset.Interface, plugins []VolumePlugin, cloud cloudprovider.Interface, pathToTypeMap map[string]hostutil.FileType) *fakeVolumeHost {
host := &fakeVolumeHost{rootDir: rootDir, kubeClient: kubeClient, cloud: cloud}
host.mounter = &mount.FakeMounter{}
host.mounter = mount.NewFakeMounter(nil)
host.hostUtil = hostutil.NewFakeHostUtil(pathToTypeMap)
host.exec = mount.NewFakeExec(nil)
host.pluginMgr.InitPlugins(plugins, nil /* prober */, host)

View File

@ -21,6 +21,10 @@ package fsquota
import (
"fmt"
"io/ioutil"
"os"
"strings"
"testing"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature"
@ -28,9 +32,6 @@ import (
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume/util/fsquota/common"
"os"
"strings"
"testing"
)
const dummyMountData = `sysfs /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0
@ -44,8 +45,8 @@ tmpfs /tmp tmpfs rw,nosuid,nodev 0 0
`
func dummyFakeMount1() mount.Interface {
return &mount.FakeMounter{
MountPoints: []mount.MountPoint{
return mount.NewFakeMounter(
[]mount.MountPoint{
{
Device: "tmpfs",
Path: "/tmp",
@ -76,8 +77,7 @@ func dummyFakeMount1() mount.Interface {
Type: "xfs",
Opts: []string{"rw", "relatime", "attr2", "inode64", "usrquota", "prjquota"},
},
},
}
})
}
type backingDevTest struct {
@ -234,9 +234,7 @@ var dummyMountPoints = []mount.MountPoint{
}
func dummyQuotaTest() mount.Interface {
return &mount.FakeMounter{
MountPoints: dummyMountPoints,
}
return mount.NewFakeMounter(dummyMountPoints)
}
func dummySetFSInfo(path string) {

View File

@ -612,7 +612,8 @@ func TestCleanSubPaths(t *testing.T) {
t.Fatalf("failed to prepare test %q: %v", test.name, err.Error())
}
fm := &mount.FakeMounter{MountPoints: mounts, UnmountFunc: test.unmount}
fm := mount.NewFakeMounter(mounts)
fm.UnmountFunc = test.unmount
err = doCleanSubPaths(fm, base, testVol)
if err != nil && !test.expectError {
@ -641,7 +642,7 @@ func setupFakeMounter(testMounts []string) *mount.FakeMounter {
for _, mountPoint := range testMounts {
mounts = append(mounts, mount.MountPoint{Device: "/foo", Path: mountPoint})
}
return &mount.FakeMounter{MountPoints: mounts}
return mount.NewFakeMounter(mounts)
}
func getTestPaths(base string) (string, string) {

View File

@ -24,7 +24,7 @@ import (
"path/filepath"
"testing"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
utiltesting "k8s.io/client-go/util/testing"
cloudprovider "k8s.io/cloud-provider"
@ -110,7 +110,7 @@ func TestPlugin(t *testing.T) {
// Test Mounter
fakeManager := &fakePDManager{}
fakeMounter := &mount.FakeMounter{}
fakeMounter := mount.NewFakeMounter(nil)
mounter, err := plug.(*vsphereVolumePlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), fakeManager, fakeMounter)
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)