Move mount/fake.go to mount/fake_mounter.go

This patch moves fake.go to fake_mounter.go and follows the principle of
always returning a concrete type rather than an interface. All callers
of "FakeMounter" are changed to use "NewFakeMounter()" instead. The
FakeMounter "Log" struct member is no longer exported and is instead
accessed only through a new "GetLog()" method.
Travis Rhoden 2019-08-26 22:52:08 -06:00
parent 47dc1d6af1
commit 1fd8921546
GPG Key ID: 6B4B921EC4ECF91A
37 changed files with 161 additions and 185 deletions
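
For illustration only (not part of the patch): a minimal sketch of how test code uses the new constructor and accessor described above. It assumes the k8s.io/kubernetes/pkg/util/mount import path seen in the diffs below; the test name and the example devices and paths are hypothetical.

package example

import (
	"testing"

	"k8s.io/kubernetes/pkg/util/mount"
)

func TestFakeMounterSketch(t *testing.T) {
	// Seed the fake with an initial mount table via the constructor instead
	// of populating the MountPoints field of a struct literal.
	fm := mount.NewFakeMounter([]mount.MountPoint{
		{Device: "/dev/sdb", Path: "/mnt/data", Type: "ext4"},
	})

	if err := fm.Mount("/dev/sdc", "/mnt/other", "ext4", nil); err != nil {
		t.Fatalf("Mount failed: %v", err)
	}

	// The Log field is no longer exported; recorded actions are read back
	// through GetLog() and cleared with ResetLog().
	log := fm.GetLog()
	if len(log) != 1 || log[0].Action != mount.FakeActionMount {
		t.Fatalf("unexpected fake mounter log: %#v", log)
	}
	fm.ResetLog()
}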


@ -31,8 +31,8 @@ import (
) )
func fakeContainerMgrMountInt() mount.Interface { func fakeContainerMgrMountInt() mount.Interface {
return &mount.FakeMounter{ return mount.NewFakeMounter(
MountPoints: []mount.MountPoint{ []mount.MountPoint{
{ {
Device: "cgroup", Device: "cgroup",
Type: "cgroup", Type: "cgroup",
@ -53,8 +53,7 @@ func fakeContainerMgrMountInt() mount.Interface {
Type: "cgroup", Type: "cgroup",
Opts: []string{"rw", "relatime", "memory"}, Opts: []string{"rw", "relatime", "memory"},
}, },
}, })
}
} }
func TestCgroupMountValidationSuccess(t *testing.T) { func TestCgroupMountValidationSuccess(t *testing.T) {
@ -64,8 +63,8 @@ func TestCgroupMountValidationSuccess(t *testing.T) {
} }
func TestCgroupMountValidationMemoryMissing(t *testing.T) { func TestCgroupMountValidationMemoryMissing(t *testing.T) {
mountInt := &mount.FakeMounter{ mountInt := mount.NewFakeMounter(
MountPoints: []mount.MountPoint{ []mount.MountPoint{
{ {
Device: "cgroup", Device: "cgroup",
Type: "cgroup", Type: "cgroup",
@ -81,15 +80,14 @@ func TestCgroupMountValidationMemoryMissing(t *testing.T) {
Type: "cgroup", Type: "cgroup",
Opts: []string{"rw", "relatime", "cpuacct"}, Opts: []string{"rw", "relatime", "cpuacct"},
}, },
}, })
}
_, err := validateSystemRequirements(mountInt) _, err := validateSystemRequirements(mountInt)
assert.Error(t, err) assert.Error(t, err)
} }
func TestCgroupMountValidationMultipleSubsystem(t *testing.T) { func TestCgroupMountValidationMultipleSubsystem(t *testing.T) {
mountInt := &mount.FakeMounter{ mountInt := mount.NewFakeMounter(
MountPoints: []mount.MountPoint{ []mount.MountPoint{
{ {
Device: "cgroup", Device: "cgroup",
Type: "cgroup", Type: "cgroup",
@ -105,8 +103,7 @@ func TestCgroupMountValidationMultipleSubsystem(t *testing.T) {
Type: "cgroup", Type: "cgroup",
Opts: []string{"rw", "relatime", "cpuacct"}, Opts: []string{"rw", "relatime", "cpuacct"},
}, },
}, })
}
_, err := validateSystemRequirements(mountInt) _, err := validateSystemRequirements(mountInt)
assert.Nil(t, err) assert.Nil(t, err)
} }
@ -118,8 +115,8 @@ func TestSoftRequirementsValidationSuccess(t *testing.T) {
defer os.RemoveAll(tempDir) defer os.RemoveAll(tempDir)
req.NoError(ioutil.WriteFile(path.Join(tempDir, "cpu.cfs_period_us"), []byte("0"), os.ModePerm)) req.NoError(ioutil.WriteFile(path.Join(tempDir, "cpu.cfs_period_us"), []byte("0"), os.ModePerm))
req.NoError(ioutil.WriteFile(path.Join(tempDir, "cpu.cfs_quota_us"), []byte("0"), os.ModePerm)) req.NoError(ioutil.WriteFile(path.Join(tempDir, "cpu.cfs_quota_us"), []byte("0"), os.ModePerm))
mountInt := &mount.FakeMounter{ mountInt := mount.NewFakeMounter(
MountPoints: []mount.MountPoint{ []mount.MountPoint{
{ {
Device: "cgroup", Device: "cgroup",
Type: "cgroup", Type: "cgroup",
@ -136,8 +133,7 @@ func TestSoftRequirementsValidationSuccess(t *testing.T) {
Type: "cgroup", Type: "cgroup",
Opts: []string{"rw", "relatime", "cpuacct", "memory"}, Opts: []string{"rw", "relatime", "cpuacct", "memory"},
}, },
}, })
}
f, err := validateSystemRequirements(mountInt) f, err := validateSystemRequirements(mountInt)
assert.NoError(t, err) assert.NoError(t, err)
assert.True(t, f.cpuHardcapping, "cpu hardcapping is expected to be enabled") assert.True(t, f.cpuHardcapping, "cpu hardcapping is expected to be enabled")


@ -161,7 +161,7 @@ func newTestKubeletWithImageList(
kubelet.kubeClient = fakeKubeClient kubelet.kubeClient = fakeKubeClient
kubelet.heartbeatClient = fakeKubeClient kubelet.heartbeatClient = fakeKubeClient
kubelet.os = &containertest.FakeOS{} kubelet.os = &containertest.FakeOS{}
kubelet.mounter = &mount.FakeMounter{} kubelet.mounter = mount.NewFakeMounter(nil)
kubelet.hostutil = hostutil.NewFakeHostUtil(nil) kubelet.hostutil = hostutil.NewFakeHostUtil(nil)
kubelet.subpather = &subpath.FakeSubpath{} kubelet.subpather = &subpath.FakeSubpath{}


@ -129,7 +129,7 @@ func TestRunOnce(t *testing.T) {
kb.evictionManager = evictionManager kb.evictionManager = evictionManager
kb.admitHandlers.AddPodAdmitHandler(evictionAdmitHandler) kb.admitHandlers.AddPodAdmitHandler(evictionAdmitHandler)
kb.mounter = &mount.FakeMounter{} kb.mounter = mount.NewFakeMounter(nil)
if err := kb.setupDataDirs(); err != nil { if err := kb.setupDataDirs(); err != nil {
t.Errorf("Failed to init data dirs: %v", err) t.Errorf("Failed to init data dirs: %v", err)
} }


@ -84,7 +84,7 @@ func Test_Run_Positive_DoNothing(t *testing.T) {
asw, asw,
hasAddedPods, hasAddedPods,
oex, oex,
&mount.FakeMounter{}, mount.NewFakeMounter(nil),
hostutil.NewFakeHostUtil(nil), hostutil.NewFakeHostUtil(nil),
volumePluginMgr, volumePluginMgr,
kubeletPodsDir) kubeletPodsDir)
@ -128,7 +128,7 @@ func Test_Run_Positive_VolumeAttachAndMount(t *testing.T) {
asw, asw,
hasAddedPods, hasAddedPods,
oex, oex,
&mount.FakeMounter{}, mount.NewFakeMounter(nil),
hostutil.NewFakeHostUtil(nil), hostutil.NewFakeHostUtil(nil),
volumePluginMgr, volumePluginMgr,
kubeletPodsDir) kubeletPodsDir)
@ -206,7 +206,7 @@ func Test_Run_Positive_VolumeMountControllerAttachEnabled(t *testing.T) {
asw, asw,
hasAddedPods, hasAddedPods,
oex, oex,
&mount.FakeMounter{}, mount.NewFakeMounter(nil),
hostutil.NewFakeHostUtil(nil), hostutil.NewFakeHostUtil(nil),
volumePluginMgr, volumePluginMgr,
kubeletPodsDir) kubeletPodsDir)
@ -285,7 +285,7 @@ func Test_Run_Positive_VolumeAttachMountUnmountDetach(t *testing.T) {
asw, asw,
hasAddedPods, hasAddedPods,
oex, oex,
&mount.FakeMounter{}, mount.NewFakeMounter(nil),
hostutil.NewFakeHostUtil(nil), hostutil.NewFakeHostUtil(nil),
volumePluginMgr, volumePluginMgr,
kubeletPodsDir) kubeletPodsDir)
@ -375,7 +375,7 @@ func Test_Run_Positive_VolumeUnmountControllerAttachEnabled(t *testing.T) {
asw, asw,
hasAddedPods, hasAddedPods,
oex, oex,
&mount.FakeMounter{}, mount.NewFakeMounter(nil),
hostutil.NewFakeHostUtil(nil), hostutil.NewFakeHostUtil(nil),
volumePluginMgr, volumePluginMgr,
kubeletPodsDir) kubeletPodsDir)
@ -502,7 +502,7 @@ func Test_Run_Positive_VolumeAttachAndMap(t *testing.T) {
asw, asw,
hasAddedPods, hasAddedPods,
oex, oex,
&mount.FakeMounter{}, mount.NewFakeMounter(nil),
hostutil.NewFakeHostUtil(nil), hostutil.NewFakeHostUtil(nil),
volumePluginMgr, volumePluginMgr,
kubeletPodsDir) kubeletPodsDir)
@ -608,7 +608,7 @@ func Test_Run_Positive_BlockVolumeMapControllerAttachEnabled(t *testing.T) {
asw, asw,
hasAddedPods, hasAddedPods,
oex, oex,
&mount.FakeMounter{}, mount.NewFakeMounter(nil),
hostutil.NewFakeHostUtil(nil), hostutil.NewFakeHostUtil(nil),
volumePluginMgr, volumePluginMgr,
kubeletPodsDir) kubeletPodsDir)
@ -709,7 +709,7 @@ func Test_Run_Positive_BlockVolumeAttachMapUnmapDetach(t *testing.T) {
asw, asw,
hasAddedPods, hasAddedPods,
oex, oex,
&mount.FakeMounter{}, mount.NewFakeMounter(nil),
hostutil.NewFakeHostUtil(nil), hostutil.NewFakeHostUtil(nil),
volumePluginMgr, volumePluginMgr,
kubeletPodsDir) kubeletPodsDir)
@ -823,7 +823,7 @@ func Test_Run_Positive_VolumeUnmapControllerAttachEnabled(t *testing.T) {
asw, asw,
hasAddedPods, hasAddedPods,
oex, oex,
&mount.FakeMounter{}, mount.NewFakeMounter(nil),
hostutil.NewFakeHostUtil(nil), hostutil.NewFakeHostUtil(nil),
volumePluginMgr, volumePluginMgr,
kubeletPodsDir) kubeletPodsDir)
@ -1096,7 +1096,7 @@ func Test_Run_Positive_VolumeFSResizeControllerAttachEnabled(t *testing.T) {
asw, asw,
hasAddedPods, hasAddedPods,
oex, oex,
&mount.FakeMounter{}, mount.NewFakeMounter(nil),
hostutil.NewFakeHostUtil(nil), hostutil.NewFakeHostUtil(nil),
volumePluginMgr, volumePluginMgr,
kubeletPodsDir) kubeletPodsDir)
@ -1278,7 +1278,7 @@ func Test_Run_Positive_VolumeMountControllerAttachEnabledRace(t *testing.T) {
asw, asw,
hasAddedPods, hasAddedPods,
oex, oex,
&mount.FakeMounter{}, mount.NewFakeMounter(nil),
hostutil.NewFakeHostUtil(nil), hostutil.NewFakeHostUtil(nil),
volumePluginMgr, volumePluginMgr,
kubeletPodsDir) kubeletPodsDir)


@ -302,7 +302,7 @@ func newTestVolumeManager(tmpDir string, podManager kubepod.Manager, kubeClient
kubeClient, kubeClient,
plugMgr, plugMgr,
&containertest.FakeRuntime{}, &containertest.FakeRuntime{},
&mount.FakeMounter{}, mount.NewFakeMounter(nil),
hostutil.NewFakeHostUtil(nil), hostutil.NewFakeHostUtil(nil),
"", "",
fakeRecorder, fakeRecorder,


@ -5,7 +5,8 @@ go_library(
srcs = [ srcs = [
"doc.go", "doc.go",
"exec.go", "exec.go",
"fake.go", "fake_exec.go",
"fake_mounter.go",
"mount.go", "mount.go",
"mount_helper_common.go", "mount_helper_common.go",
"mount_helper_unix.go", "mount_helper_unix.go",


@ -19,35 +19,18 @@ package mount
import "k8s.io/utils/exec" import "k8s.io/utils/exec"
// NewOSExec returns a new Exec interface implementation based on exec() // NewOSExec returns a new Exec interface implementation based on exec()
func NewOSExec() Exec { func NewOSExec() *OSExec {
return &osExec{} return &OSExec{}
} }
// Real implementation of Exec interface that uses simple utils.Exec // OSExec is an implementation of Exec interface that uses simple utils.Exec
type osExec struct{} type OSExec struct{}
var _ Exec = &osExec{} var _ Exec = &OSExec{}
func (e *osExec) Run(cmd string, args ...string) ([]byte, error) { // Run executes the given cmd and args and returns stdout and stderr as a
// combined byte stream
func (e *OSExec) Run(cmd string, args ...string) ([]byte, error) {
exe := exec.New() exe := exec.New()
return exe.Command(cmd, args...).CombinedOutput() return exe.Command(cmd, args...).CombinedOutput()
} }
// NewFakeExec returns a new FakeExec
func NewFakeExec(run runHook) *FakeExec {
return &FakeExec{runHook: run}
}
// FakeExec for testing.
type FakeExec struct {
runHook runHook
}
type runHook func(cmd string, args ...string) ([]byte, error)
// Run executes the command using the optional runhook, if given
func (f *FakeExec) Run(cmd string, args ...string) ([]byte, error) {
if f.runHook != nil {
return f.runHook(cmd, args...)
}
return nil, nil
}
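
For illustration only (not part of the patch): a minimal sketch of driving FakeExec through its runHook, mirroring the NewFakeExec(execCallback) call sites shown further down; the command, arguments, and test name here are hypothetical.

package example

import (
	"fmt"
	"testing"

	"k8s.io/kubernetes/pkg/util/mount"
)

func TestFakeExecSketch(t *testing.T) {
	// The hook receives whatever command the code under test tries to run
	// and returns the faked combined output and error.
	fe := mount.NewFakeExec(func(cmd string, args ...string) ([]byte, error) {
		if cmd == "blkid" {
			return []byte("ext4"), nil
		}
		return nil, fmt.Errorf("unexpected command %q", cmd)
	})

	out, err := fe.Run("blkid", "-o", "value", "-s", "TYPE", "/dev/sdb")
	if err != nil || string(out) != "ext4" {
		t.Fatalf("unexpected result: %q, %v", out, err)
	}
}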


@ -16,23 +16,6 @@ limitations under the License.
package mount package mount
import "k8s.io/utils/exec"
// NewOSExec returns a new Exec interface implementation based on exec()
func NewOSExec() Exec {
return &osExec{}
}
// Real implementation of Exec interface that uses simple utils.Exec
type osExec struct{}
var _ Exec = &osExec{}
func (e *osExec) Run(cmd string, args ...string) ([]byte, error) {
exe := exec.New()
return exe.Command(cmd, args...).CombinedOutput()
}
// NewFakeExec returns a new FakeExec // NewFakeExec returns a new FakeExec
func NewFakeExec(run runHook) *FakeExec { func NewFakeExec(run runHook) *FakeExec {
return &FakeExec{runHook: run} return &FakeExec{runHook: run}


@ -27,7 +27,7 @@ import (
// FakeMounter implements mount.Interface for tests. // FakeMounter implements mount.Interface for tests.
type FakeMounter struct { type FakeMounter struct {
MountPoints []MountPoint MountPoints []MountPoint
Log []FakeAction log []FakeAction
// Error to return for a path when calling IsLikelyNotMountPoint // Error to return for a path when calling IsLikelyNotMountPoint
MountCheckErrors map[string]error MountCheckErrors map[string]error
// Some tests run things in parallel, make sure the mounter does not produce // Some tests run things in parallel, make sure the mounter does not produce
@ -55,12 +55,26 @@ type FakeAction struct {
FSType string // applies only to "mount" actions FSType string // applies only to "mount" actions
} }
func NewFakeMounter(mps []MountPoint) *FakeMounter {
return &FakeMounter{
MountPoints: mps,
}
}
// ResetLog clears all the log entries in FakeMounter // ResetLog clears all the log entries in FakeMounter
func (f *FakeMounter) ResetLog() { func (f *FakeMounter) ResetLog() {
f.mutex.Lock() f.mutex.Lock()
defer f.mutex.Unlock() defer f.mutex.Unlock()
f.Log = []FakeAction{} f.log = []FakeAction{}
}
// GetLog returns the slice of FakeActions taken by the mounter
func (f *FakeMounter) GetLog() []FakeAction {
f.mutex.Lock()
defer f.mutex.Unlock()
return f.log
} }
// Mount records the mount event and updates the in-memory mount points for FakeMounter // Mount records the mount event and updates the in-memory mount points for FakeMounter
@ -102,7 +116,7 @@ func (f *FakeMounter) Mount(source string, target string, fstype string, options
} }
f.MountPoints = append(f.MountPoints, MountPoint{Device: source, Path: absTarget, Type: fstype, Opts: opts}) f.MountPoints = append(f.MountPoints, MountPoint{Device: source, Path: absTarget, Type: fstype, Opts: opts})
klog.V(5).Infof("Fake mounter: mounted %s to %s", source, absTarget) klog.V(5).Infof("Fake mounter: mounted %s to %s", source, absTarget)
f.Log = append(f.Log, FakeAction{Action: FakeActionMount, Target: absTarget, Source: source, FSType: fstype}) f.log = append(f.log, FakeAction{Action: FakeActionMount, Target: absTarget, Source: source, FSType: fstype})
return nil return nil
} }
@ -133,7 +147,7 @@ func (f *FakeMounter) Unmount(target string) error {
newMountpoints = append(newMountpoints, MountPoint{Device: mp.Device, Path: mp.Path, Type: mp.Type}) newMountpoints = append(newMountpoints, MountPoint{Device: mp.Device, Path: mp.Path, Type: mp.Type})
} }
f.MountPoints = newMountpoints f.MountPoints = newMountpoints
f.Log = append(f.Log, FakeAction{Action: FakeActionUnmount, Target: absTarget}) f.log = append(f.log, FakeAction{Action: FakeActionUnmount, Target: absTarget})
delete(f.MountCheckErrors, target) delete(f.MountCheckErrors, target)
return nil return nil
} }


@ -93,10 +93,10 @@ func TestDoCleanupMountPoint(t *testing.T) {
t.Fatalf("failed to prepare test: %v", err) t.Fatalf("failed to prepare test: %v", err)
} }
fake := &FakeMounter{ fake := NewFakeMounter(
MountPoints: []MountPoint{mountPoint}, []MountPoint{mountPoint},
MountCheckErrors: map[string]error{mountPoint.Path: mountError}, )
} fake.MountCheckErrors = map[string]error{mountPoint.Path: mountError}
err = doCleanupMountPoint(mountPoint.Path, fake, true, tt.corruptedMnt) err = doCleanupMountPoint(mountPoint.Path, fake, true, tt.corruptedMnt)
if tt.expectErr { if tt.expectErr {


@ -73,15 +73,14 @@ func mountPointsEqual(a, b *MountPoint) bool {
} }
func TestGetMountRefs(t *testing.T) { func TestGetMountRefs(t *testing.T) {
fm := &FakeMounter{ fm := NewFakeMounter(
MountPoints: []MountPoint{ []MountPoint{
{Device: "/dev/sdb", Path: "/var/lib/kubelet/plugins/kubernetes.io/gce-pd/mounts/gce-pd"}, {Device: "/dev/sdb", Path: "/var/lib/kubelet/plugins/kubernetes.io/gce-pd/mounts/gce-pd"},
{Device: "/dev/sdb", Path: "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~gce-pd/gce-pd-in-pod"}, {Device: "/dev/sdb", Path: "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~gce-pd/gce-pd-in-pod"},
{Device: "/dev/sdc", Path: "/var/lib/kubelet/plugins/kubernetes.io/gce-pd/mounts/gce-pd2"}, {Device: "/dev/sdc", Path: "/var/lib/kubelet/plugins/kubernetes.io/gce-pd/mounts/gce-pd2"},
{Device: "/dev/sdc", Path: "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~gce-pd/gce-pd2-in-pod1"}, {Device: "/dev/sdc", Path: "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~gce-pd/gce-pd2-in-pod1"},
{Device: "/dev/sdc", Path: "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~gce-pd/gce-pd2-in-pod2"}, {Device: "/dev/sdc", Path: "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~gce-pd/gce-pd2-in-pod2"},
}, })
}
tests := []struct { tests := []struct {
mountPath string mountPath string
@ -137,14 +136,13 @@ func setEquivalent(set1, set2 []string) bool {
} }
func TestGetDeviceNameFromMount(t *testing.T) { func TestGetDeviceNameFromMount(t *testing.T) {
fm := &FakeMounter{ fm := NewFakeMounter(
MountPoints: []MountPoint{ []MountPoint{
{Device: "/dev/disk/by-path/prefix-lun-1", {Device: "/dev/disk/by-path/prefix-lun-1",
Path: "/mnt/111"}, Path: "/mnt/111"},
{Device: "/dev/disk/by-path/prefix-lun-1", {Device: "/dev/disk/by-path/prefix-lun-1",
Path: "/mnt/222"}, Path: "/mnt/222"},
}, })
}
tests := []struct { tests := []struct {
mountPath string mountPath string
@ -166,15 +164,14 @@ func TestGetDeviceNameFromMount(t *testing.T) {
} }
func TestGetMountRefsByDev(t *testing.T) { func TestGetMountRefsByDev(t *testing.T) {
fm := &FakeMounter{ fm := NewFakeMounter(
MountPoints: []MountPoint{ []MountPoint{
{Device: "/dev/sdb", Path: "/var/lib/kubelet/plugins/kubernetes.io/gce-pd/mounts/gce-pd"}, {Device: "/dev/sdb", Path: "/var/lib/kubelet/plugins/kubernetes.io/gce-pd/mounts/gce-pd"},
{Device: "/dev/sdb", Path: "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~gce-pd/gce-pd-in-pod"}, {Device: "/dev/sdb", Path: "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~gce-pd/gce-pd-in-pod"},
{Device: "/dev/sdc", Path: "/var/lib/kubelet/plugins/kubernetes.io/gce-pd/mounts/gce-pd2"}, {Device: "/dev/sdc", Path: "/var/lib/kubelet/plugins/kubernetes.io/gce-pd/mounts/gce-pd2"},
{Device: "/dev/sdc", Path: "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~gce-pd/gce-pd2-in-pod1"}, {Device: "/dev/sdc", Path: "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~gce-pd/gce-pd2-in-pod1"},
{Device: "/dev/sdc", Path: "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~gce-pd/gce-pd2-in-pod2"}, {Device: "/dev/sdc", Path: "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~gce-pd/gce-pd2-in-pod2"},
}, })
}
tests := []struct { tests := []struct {
mountPath string mountPath string


@ -224,7 +224,7 @@ func TestIsLikelyNotMountPoint(t *testing.T) {
} }
func TestFormatAndMount(t *testing.T) { func TestFormatAndMount(t *testing.T) {
fakeMounter := ErrorMounter{&FakeMounter{}, 0, nil} fakeMounter := ErrorMounter{NewFakeMounter(nil), 0, nil}
execCallback := func(cmd string, args ...string) ([]byte, error) { execCallback := func(cmd string, args ...string) ([]byte, error) {
for j := range args { for j := range args {
if strings.Contains(args[j], "Get-Disk -Number") { if strings.Contains(args[j], "Get-Disk -Number") {


@ -208,7 +208,7 @@ func TestSafeFormatAndMount(t *testing.T) {
return []byte(script.output), script.err return []byte(script.output), script.err
} }
fakeMounter := ErrorMounter{&FakeMounter{}, 0, test.mountErrs} fakeMounter := ErrorMounter{NewFakeMounter(nil), 0, test.mountErrs}
fakeExec := NewFakeExec(execCallback) fakeExec := NewFakeExec(execCallback)
mounter := SafeFormatAndMount{ mounter := SafeFormatAndMount{
Interface: &fakeMounter, Interface: &fakeMounter,


@ -125,7 +125,7 @@ func TestPlugin(t *testing.T) {
}, },
} }
fakeManager := &fakePDManager{} fakeManager := &fakePDManager{}
fakeMounter := &mount.FakeMounter{} fakeMounter := mount.NewFakeMounter(nil)
mounter, err := plug.(*awsElasticBlockStorePlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), fakeManager, fakeMounter) mounter, err := plug.(*awsElasticBlockStorePlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), fakeManager, fakeMounter)
if err != nil { if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err) t.Errorf("Failed to make a new Mounter: %v", err)
@ -314,7 +314,7 @@ func TestMounterAndUnmounterTypeAssert(t *testing.T) {
}, },
} }
mounter, err := plug.(*awsElasticBlockStorePlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &fakePDManager{}, &mount.FakeMounter{}) mounter, err := plug.(*awsElasticBlockStorePlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &fakePDManager{}, mount.NewFakeMounter(nil))
if err != nil { if err != nil {
t.Errorf("Error creating new mounter:%v", err) t.Errorf("Error creating new mounter:%v", err)
} }
@ -322,7 +322,7 @@ func TestMounterAndUnmounterTypeAssert(t *testing.T) {
t.Errorf("Volume Mounter can be type-assert to Unmounter") t.Errorf("Volume Mounter can be type-assert to Unmounter")
} }
unmounter, err := plug.(*awsElasticBlockStorePlugin).newUnmounterInternal("vol1", types.UID("poduid"), &fakePDManager{}, &mount.FakeMounter{}) unmounter, err := plug.(*awsElasticBlockStorePlugin).newUnmounterInternal("vol1", types.UID("poduid"), &fakePDManager{}, mount.NewFakeMounter(nil))
if err != nil { if err != nil {
t.Errorf("Error creating new unmounter:%v", err) t.Errorf("Error creating new unmounter:%v", err)
} }
@ -361,7 +361,7 @@ func TestMountOptions(t *testing.T) {
} }
fakeManager := &fakePDManager{} fakeManager := &fakePDManager{}
fakeMounter := &mount.FakeMounter{} fakeMounter := mount.NewFakeMounter(nil)
mounter, err := plug.(*awsElasticBlockStorePlugin).newMounterInternal(volume.NewSpecFromPersistentVolume(pv, false), types.UID("poduid"), fakeManager, fakeMounter) mounter, err := plug.(*awsElasticBlockStorePlugin).newMounterInternal(volume.NewSpecFromPersistentVolume(pv, false), types.UID("poduid"), fakeManager, fakeMounter)
if err != nil { if err != nil {


@ -138,7 +138,7 @@ func testPlugin(t *testing.T, tmpDir string, volumeHost volume.VolumeHost) {
}, },
}, },
} }
fake := &mount.FakeMounter{} fake := mount.NewFakeMounter(nil)
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}} pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
mounter, err := plug.(*azureFilePlugin).newMounterInternal(volume.NewSpecFromVolume(spec), pod, &fakeAzureSvc{}, fake) mounter, err := plug.(*azureFilePlugin).newMounterInternal(volume.NewSpecFromVolume(spec), pod, &fakeAzureSvc{}, fake)
if err != nil { if err != nil {
@ -164,7 +164,7 @@ func testPlugin(t *testing.T, tmpDir string, volumeHost volume.VolumeHost) {
} }
} }
unmounter, err := plug.(*azureFilePlugin).newUnmounterInternal("vol1", types.UID("poduid"), &mount.FakeMounter{}) unmounter, err := plug.(*azureFilePlugin).newUnmounterInternal("vol1", types.UID("poduid"), mount.NewFakeMounter(nil))
if err != nil { if err != nil {
t.Errorf("Failed to make a new Unmounter: %v", err) t.Errorf("Failed to make a new Unmounter: %v", err)
} }
@ -260,7 +260,7 @@ func TestMounterAndUnmounterTypeAssert(t *testing.T) {
}, },
}, },
} }
fake := &mount.FakeMounter{} fake := mount.NewFakeMounter(nil)
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}} pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
mounter, err := plug.(*azureFilePlugin).newMounterInternal(volume.NewSpecFromVolume(spec), pod, &fakeAzureSvc{}, fake) mounter, err := plug.(*azureFilePlugin).newMounterInternal(volume.NewSpecFromVolume(spec), pod, &fakeAzureSvc{}, fake)
if err != nil { if err != nil {
@ -270,7 +270,7 @@ func TestMounterAndUnmounterTypeAssert(t *testing.T) {
t.Errorf("Volume Mounter can be type-assert to Unmounter") t.Errorf("Volume Mounter can be type-assert to Unmounter")
} }
unmounter, err := plug.(*azureFilePlugin).newUnmounterInternal("vol1", types.UID("poduid"), &mount.FakeMounter{}) unmounter, err := plug.(*azureFilePlugin).newUnmounterInternal("vol1", types.UID("poduid"), mount.NewFakeMounter(nil))
if err != nil { if err != nil {
t.Errorf("MounterInternal() failed: %v", err) t.Errorf("MounterInternal() failed: %v", err)
} }


@ -21,7 +21,7 @@ import (
"path/filepath" "path/filepath"
"testing" "testing"
"k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
utiltesting "k8s.io/client-go/util/testing" utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/mount"
@ -75,7 +75,7 @@ func TestPlugin(t *testing.T) {
}, },
}, },
} }
mounter, err := plug.(*cephfsPlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &mount.FakeMounter{}, "secrets") mounter, err := plug.(*cephfsPlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), mount.NewFakeMounter(nil), "secrets")
if err != nil { if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err) t.Errorf("Failed to make a new Mounter: %v", err)
} }
@ -97,7 +97,7 @@ func TestPlugin(t *testing.T) {
t.Errorf("SetUp() failed: %v", err) t.Errorf("SetUp() failed: %v", err)
} }
} }
unmounter, err := plug.(*cephfsPlugin).newUnmounterInternal("vol1", types.UID("poduid"), &mount.FakeMounter{}) unmounter, err := plug.(*cephfsPlugin).newUnmounterInternal("vol1", types.UID("poduid"), mount.NewFakeMounter(nil))
if err != nil { if err != nil {
t.Errorf("Failed to make a new Unmounter: %v", err) t.Errorf("Failed to make a new Unmounter: %v", err)
} }


@ -25,7 +25,7 @@ import (
"testing" "testing"
"time" "time"
"k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
utiltesting "k8s.io/client-go/util/testing" utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/mount"
@ -155,7 +155,7 @@ func TestPlugin(t *testing.T) {
}, },
}, },
} }
mounter, err := plug.(*cinderPlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &fakePDManager{0}, &mount.FakeMounter{}) mounter, err := plug.(*cinderPlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &fakePDManager{0}, mount.NewFakeMounter(nil))
if err != nil { if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err) t.Errorf("Failed to make a new Mounter: %v", err)
} }
@ -179,7 +179,7 @@ func TestPlugin(t *testing.T) {
} }
} }
unmounter, err := plug.(*cinderPlugin).newUnmounterInternal("vol1", types.UID("poduid"), &fakePDManager{0}, &mount.FakeMounter{}) unmounter, err := plug.(*cinderPlugin).newUnmounterInternal("vol1", types.UID("poduid"), &fakePDManager{0}, mount.NewFakeMounter(nil))
if err != nil { if err != nil {
t.Errorf("Failed to make a new Unmounter: %v", err) t.Errorf("Failed to make a new Unmounter: %v", err)
} }


@ -21,7 +21,7 @@ import (
"os" "os"
"path/filepath" "path/filepath"
"k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"


@ -23,7 +23,7 @@ import (
"path/filepath" "path/filepath"
"testing" "testing"
"k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
@ -117,7 +117,7 @@ func doTestPlugin(t *testing.T, config pluginTestConfig) {
VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{Medium: config.medium}}, VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{Medium: config.medium}},
} }
physicalMounter = mount.FakeMounter{} physicalMounter = mount.NewFakeMounter(nil)
mountDetector = fakeMountDetector{} mountDetector = fakeMountDetector{}
pod = &v1.Pod{ pod = &v1.Pod{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
@ -148,7 +148,7 @@ func doTestPlugin(t *testing.T, config pluginTestConfig) {
mounter, err := plug.(*emptyDirPlugin).newMounterInternal(volume.NewSpecFromVolume(spec), mounter, err := plug.(*emptyDirPlugin).newMounterInternal(volume.NewSpecFromVolume(spec),
pod, pod,
&physicalMounter, physicalMounter,
&mountDetector, &mountDetector,
volume.VolumeOptions{}) volume.VolumeOptions{})
if err != nil { if err != nil {
@ -186,12 +186,13 @@ func doTestPlugin(t *testing.T, config pluginTestConfig) {
t.Errorf("Volume directory was created unexpectedly") t.Errorf("Volume directory was created unexpectedly")
} }
log := physicalMounter.GetLog()
// Check the number of mounts performed during setup // Check the number of mounts performed during setup
if e, a := config.expectedSetupMounts, len(physicalMounter.Log); e != a { if e, a := config.expectedSetupMounts, len(log); e != a {
t.Errorf("Expected %v physicalMounter calls during setup, got %v", e, a) t.Errorf("Expected %v physicalMounter calls during setup, got %v", e, a)
} else if config.expectedSetupMounts == 1 && } else if config.expectedSetupMounts == 1 &&
(physicalMounter.Log[0].Action != mount.FakeActionMount || (physicalMounter.Log[0].FSType != "tmpfs" && physicalMounter.Log[0].FSType != "hugetlbfs")) { (log[0].Action != mount.FakeActionMount || (log[0].FSType != "tmpfs" && log[0].FSType != "hugetlbfs")) {
t.Errorf("Unexpected physicalMounter action during setup: %#v", physicalMounter.Log[0]) t.Errorf("Unexpected physicalMounter action during setup: %#v", log[0])
} }
physicalMounter.ResetLog() physicalMounter.ResetLog()
@ -201,7 +202,7 @@ func doTestPlugin(t *testing.T, config pluginTestConfig) {
teardownMedium = v1.StorageMediumMemory teardownMedium = v1.StorageMediumMemory
} }
unmounterMountDetector := &fakeMountDetector{medium: teardownMedium, isMount: config.shouldBeMountedBeforeTeardown} unmounterMountDetector := &fakeMountDetector{medium: teardownMedium, isMount: config.shouldBeMountedBeforeTeardown}
unmounter, err := plug.(*emptyDirPlugin).newUnmounterInternal(volumeName, types.UID("poduid"), &physicalMounter, unmounterMountDetector) unmounter, err := plug.(*emptyDirPlugin).newUnmounterInternal(volumeName, types.UID("poduid"), physicalMounter, unmounterMountDetector)
if err != nil { if err != nil {
t.Errorf("Failed to make a new Unmounter: %v", err) t.Errorf("Failed to make a new Unmounter: %v", err)
} }
@ -219,11 +220,12 @@ func doTestPlugin(t *testing.T, config pluginTestConfig) {
t.Errorf("TearDown() failed: %v", err) t.Errorf("TearDown() failed: %v", err)
} }
log = physicalMounter.GetLog()
// Check the number of physicalMounter calls during teardown // Check the number of physicalMounter calls during teardown
if e, a := config.expectedTeardownMounts, len(physicalMounter.Log); e != a { if e, a := config.expectedTeardownMounts, len(log); e != a {
t.Errorf("Expected %v physicalMounter calls during teardown, got %v", e, a) t.Errorf("Expected %v physicalMounter calls during teardown, got %v", e, a)
} else if config.expectedTeardownMounts == 1 && physicalMounter.Log[0].Action != mount.FakeActionUnmount { } else if config.expectedTeardownMounts == 1 && log[0].Action != mount.FakeActionUnmount {
t.Errorf("Unexpected physicalMounter action during teardown: %#v", physicalMounter.Log[0]) t.Errorf("Unexpected physicalMounter action during teardown: %#v", log[0])
} }
physicalMounter.ResetLog() physicalMounter.ResetLog()
} }


@ -24,7 +24,7 @@ import (
"strings" "strings"
"testing" "testing"
"k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/kubernetes/fake"
@ -163,7 +163,7 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
} }
fakeManager := newFakeDiskManager() fakeManager := newFakeDiskManager()
defer fakeManager.Cleanup() defer fakeManager.Cleanup()
fakeMounter := &mount.FakeMounter{} fakeMounter := mount.NewFakeMounter(nil)
fakeExec := mount.NewFakeExec(nil) fakeExec := mount.NewFakeExec(nil)
mounter, err := plug.(*fcPlugin).newMounterInternal(spec, types.UID("poduid"), fakeManager, fakeMounter, fakeExec) mounter, err := plug.(*fcPlugin).newMounterInternal(spec, types.UID("poduid"), fakeManager, fakeMounter, fakeExec)
if err != nil { if err != nil {
@ -226,7 +226,7 @@ func doTestPluginNilMounter(t *testing.T, spec *volume.Spec) {
} }
fakeManager := newFakeDiskManager() fakeManager := newFakeDiskManager()
defer fakeManager.Cleanup() defer fakeManager.Cleanup()
fakeMounter := &mount.FakeMounter{} fakeMounter := mount.NewFakeMounter(nil)
fakeExec := mount.NewFakeExec(nil) fakeExec := mount.NewFakeExec(nil)
mounter, err := plug.(*fcPlugin).newMounterInternal(spec, types.UID("poduid"), fakeManager, fakeMounter, fakeExec) mounter, err := plug.(*fcPlugin).newMounterInternal(spec, types.UID("poduid"), fakeManager, fakeMounter, fakeExec)
if err == nil { if err == nil {
@ -437,14 +437,13 @@ func Test_ConstructVolumeSpec(t *testing.T) {
if runtime.GOOS == "darwin" { if runtime.GOOS == "darwin" {
t.Skipf("Test_ConstructVolumeSpec is not supported on GOOS=%s", runtime.GOOS) t.Skipf("Test_ConstructVolumeSpec is not supported on GOOS=%s", runtime.GOOS)
} }
fm := &mount.FakeMounter{ fm := mount.NewFakeMounter(
MountPoints: []mount.MountPoint{ []mount.MountPoint{
{Device: "/dev/sdb", Path: "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~fc/fc-in-pod1"}, {Device: "/dev/sdb", Path: "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~fc/fc-in-pod1"},
{Device: "/dev/sdb", Path: "/var/lib/kubelet/plugins/kubernetes.io/fc/50060e801049cfd1-lun-0"}, {Device: "/dev/sdb", Path: "/var/lib/kubelet/plugins/kubernetes.io/fc/50060e801049cfd1-lun-0"},
{Device: "/dev/sdc", Path: "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~fc/fc-in-pod2"}, {Device: "/dev/sdc", Path: "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~fc/fc-in-pod2"},
{Device: "/dev/sdc", Path: "/var/lib/kubelet/plugins/kubernetes.io/fc/volumeDevices/3600508b400105e210000900000490000"}, {Device: "/dev/sdc", Path: "/var/lib/kubelet/plugins/kubernetes.io/fc/volumeDevices/3600508b400105e210000900000490000"},
}, })
}
mountPaths := []string{ mountPaths := []string{
"/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~fc/fc-in-pod1", "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~fc/fc-in-pod1",
"/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~fc/fc-in-pod2", "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~fc/fc-in-pod2",
@ -488,11 +487,10 @@ func Test_ConstructVolumeSpec(t *testing.T) {
} }
func Test_ConstructVolumeSpecNoRefs(t *testing.T) { func Test_ConstructVolumeSpecNoRefs(t *testing.T) {
fm := &mount.FakeMounter{ fm := mount.NewFakeMounter(
MountPoints: []mount.MountPoint{ []mount.MountPoint{
{Device: "/dev/sdd", Path: "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~fc/fc-in-pod1"}, {Device: "/dev/sdd", Path: "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~fc/fc-in-pod1"},
}, })
}
mountPaths := []string{ mountPaths := []string{
"/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~fc/fc-in-pod1", "/var/lib/kubelet/pods/some-pod/volumes/kubernetes.io~fc/fc-in-pod1",
} }


@ -19,7 +19,7 @@ package flexvolume
import ( import (
"testing" "testing"
"k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/mount"
@ -42,7 +42,7 @@ func TestSetUpAt(tt *testing.T) {
ServiceAccountName: "my-sa", ServiceAccountName: "my-sa",
}, },
} }
mounter := &mount.FakeMounter{} mounter := mount.NewFakeMounter(nil)
plugin, rootDir := testPlugin(t) plugin, rootDir := testPlugin(t)
plugin.unsupportedCommands = []string{"unsupportedCmd"} plugin.unsupportedCommands = []string{"unsupportedCmd"}


@ -28,7 +28,7 @@ func TestTearDownAt(tt *testing.T) {
t := harness.For(tt) t := harness.For(tt)
defer t.Close() defer t.Close()
mounter := &mount.FakeMounter{} mounter := mount.NewFakeMounter(nil)
plugin, rootDir := testPlugin(t) plugin, rootDir := testPlugin(t)
plugin.runner = fakeRunner( plugin.runner = fakeRunner(


@ -21,7 +21,7 @@ import (
"os" "os"
"testing" "testing"
"k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
utiltesting "k8s.io/client-go/util/testing" utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/mount"
@ -153,7 +153,7 @@ func TestPlugin(t *testing.T) {
}, },
} }
fakeManager := &fakeFlockerUtil{} fakeManager := &fakeFlockerUtil{}
fakeMounter := &mount.FakeMounter{} fakeMounter := mount.NewFakeMounter(nil)
mounter, err := plug.(*flockerPlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), fakeManager, fakeMounter) mounter, err := plug.(*flockerPlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), fakeManager, fakeMounter)
if err != nil { if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err) t.Errorf("Failed to make a new Mounter: %v", err)


@ -128,7 +128,7 @@ func TestPlugin(t *testing.T) {
}, },
} }
fakeManager := &fakePDManager{} fakeManager := &fakePDManager{}
fakeMounter := &mount.FakeMounter{} fakeMounter := mount.NewFakeMounter(nil)
mounter, err := plug.(*gcePersistentDiskPlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), fakeManager, fakeMounter) mounter, err := plug.(*gcePersistentDiskPlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), fakeManager, fakeMounter)
if err != nil { if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err) t.Errorf("Failed to make a new Mounter: %v", err)
@ -271,7 +271,7 @@ func TestMountOptions(t *testing.T) {
} }
fakeManager := &fakePDManager{} fakeManager := &fakePDManager{}
fakeMounter := &mount.FakeMounter{} fakeMounter := mount.NewFakeMounter(nil)
mounter, err := plug.(*gcePersistentDiskPlugin).newMounterInternal(volume.NewSpecFromPersistentVolume(pv, false), types.UID("poduid"), fakeManager, fakeMounter) mounter, err := plug.(*gcePersistentDiskPlugin).newMounterInternal(volume.NewSpecFromPersistentVolume(pv, false), types.UID("poduid"), fakeManager, fakeMounter)
if err != nil { if err != nil {


@ -23,7 +23,7 @@ import (
"testing" "testing"
gapi "github.com/heketi/heketi/pkg/glusterfs/api" gapi "github.com/heketi/heketi/pkg/glusterfs/api"
"k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
@ -106,7 +106,7 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
ep := &v1.Endpoints{ObjectMeta: metav1.ObjectMeta{Name: "foo"}, Subsets: []v1.EndpointSubset{{ ep := &v1.Endpoints{ObjectMeta: metav1.ObjectMeta{Name: "foo"}, Subsets: []v1.EndpointSubset{{
Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}}}}} Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}}}}}
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}} pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
mounter, err := plug.(*glusterfsPlugin).newMounterInternal(spec, ep, pod, &mount.FakeMounter{}) mounter, err := plug.(*glusterfsPlugin).newMounterInternal(spec, ep, pod, mount.NewFakeMounter(nil))
volumePath := mounter.GetPath() volumePath := mounter.GetPath()
if err != nil { if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err) t.Errorf("Failed to make a new Mounter: %v", err)
@ -128,7 +128,7 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
t.Errorf("SetUp() failed: %v", err) t.Errorf("SetUp() failed: %v", err)
} }
} }
unmounter, err := plug.(*glusterfsPlugin).newUnmounterInternal("vol1", types.UID("poduid"), &mount.FakeMounter{}) unmounter, err := plug.(*glusterfsPlugin).newUnmounterInternal("vol1", types.UID("poduid"), mount.NewFakeMounter(nil))
if err != nil { if err != nil {
t.Errorf("Failed to make a new Unmounter: %v", err) t.Errorf("Failed to make a new Unmounter: %v", err)
} }


@ -22,7 +22,7 @@ import (
"strings" "strings"
"testing" "testing"
"k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/kubernetes/fake"
@ -159,7 +159,7 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
} }
fakeManager := NewFakeDiskManager() fakeManager := NewFakeDiskManager()
defer fakeManager.Cleanup() defer fakeManager.Cleanup()
fakeMounter := &mount.FakeMounter{} fakeMounter := mount.NewFakeMounter(nil)
fakeExec := mount.NewFakeExec(nil) fakeExec := mount.NewFakeExec(nil)
mounter, err := plug.(*iscsiPlugin).newMounterInternal(spec, types.UID("poduid"), fakeManager, fakeMounter, fakeExec, nil) mounter, err := plug.(*iscsiPlugin).newMounterInternal(spec, types.UID("poduid"), fakeManager, fakeMounter, fakeExec, nil)
if err != nil { if err != nil {


@ -570,7 +570,7 @@ func TestMountOptions(t *testing.T) {
} }
// Wrap with FakeMounter. // Wrap with FakeMounter.
fakeMounter := &mount.FakeMounter{} fakeMounter := mount.NewFakeMounter(nil)
mounter.(*localVolumeMounter).mounter = fakeMounter mounter.(*localVolumeMounter).mounter = fakeMounter
if err := mounter.SetUp(volume.MounterArgs{}); err != nil { if err := mounter.SetUp(volume.MounterArgs{}); err != nil {


@ -21,7 +21,7 @@ import (
"os" "os"
"testing" "testing"
"k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/kubernetes/fake"
@ -108,7 +108,7 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
if err != nil { if err != nil {
t.Errorf("Can't find the plugin by name") t.Errorf("Can't find the plugin by name")
} }
fake := &mount.FakeMounter{} fake := mount.NewFakeMounter(nil)
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}} pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
mounter, err := plug.(*nfsPlugin).newMounterInternal(spec, pod, fake) mounter, err := plug.(*nfsPlugin).newMounterInternal(spec, pod, fake)
if err != nil { if err != nil {
@ -135,11 +135,12 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
if mounter.(*nfsMounter).readOnly { if mounter.(*nfsMounter).readOnly {
t.Errorf("The volume source should not be read-only and it is.") t.Errorf("The volume source should not be read-only and it is.")
} }
if len(fake.Log) != 1 { log := fake.GetLog()
t.Errorf("Mount was not called exactly one time. It was called %d times.", len(fake.Log)) if len(log) != 1 {
t.Errorf("Mount was not called exactly one time. It was called %d times.", len(log))
} else { } else {
if fake.Log[0].Action != mount.FakeActionMount { if log[0].Action != mount.FakeActionMount {
t.Errorf("Unexpected mounter action: %#v", fake.Log[0]) t.Errorf("Unexpected mounter action: %#v", log[0])
} }
} }
fake.ResetLog() fake.ResetLog()
@ -159,11 +160,12 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
} else if !os.IsNotExist(err) { } else if !os.IsNotExist(err) {
t.Errorf("TearDown() failed: %v", err) t.Errorf("TearDown() failed: %v", err)
} }
if len(fake.Log) != 1 { log = fake.GetLog()
t.Errorf("Unmount was not called exactly one time. It was called %d times.", len(fake.Log)) if len(log) != 1 {
t.Errorf("Unmount was not called exactly one time. It was called %d times.", len(log))
} else { } else {
if fake.Log[0].Action != mount.FakeActionUnmount { if log[0].Action != mount.FakeActionUnmount {
t.Errorf("Unexpected unmounter action: %#v", fake.Log[0]) t.Errorf("Unexpected unmounter action: %#v", log[0])
} }
} }


@ -22,7 +22,7 @@ import (
"path/filepath" "path/filepath"
"testing" "testing"
"k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
utiltesting "k8s.io/client-go/util/testing" utiltesting "k8s.io/client-go/util/testing"
@ -148,7 +148,7 @@ func TestPlugin(t *testing.T) {
} }
fakeManager := &fakePortworxManager{} fakeManager := &fakePortworxManager{}
// Test Mounter // Test Mounter
fakeMounter := &mount.FakeMounter{} fakeMounter := mount.NewFakeMounter(nil)
mounter, err := plug.(*portworxVolumePlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), fakeManager, fakeMounter) mounter, err := plug.(*portworxVolumePlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), fakeManager, fakeMounter)
if err != nil { if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err) t.Errorf("Failed to make a new Mounter: %v", err)


@ -21,7 +21,7 @@ import (
"os" "os"
"testing" "testing"
"k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/kubernetes/fake"
@ -89,7 +89,7 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
} }
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}} pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
mounter, err := plug.(*quobytePlugin).newMounterInternal(spec, pod, &mount.FakeMounter{}) mounter, err := plug.(*quobytePlugin).newMounterInternal(spec, pod, mount.NewFakeMounter(nil))
volumePath := mounter.GetPath() volumePath := mounter.GetPath()
if err != nil { if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err) t.Errorf("Failed to make a new Mounter: %v", err)
@ -104,7 +104,7 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
if err := mounter.SetUp(volume.MounterArgs{}); err != nil { if err := mounter.SetUp(volume.MounterArgs{}); err != nil {
t.Errorf("Expected success, got: %v", err) t.Errorf("Expected success, got: %v", err)
} }
unmounter, err := plug.(*quobytePlugin).newUnmounterInternal("vol", types.UID("poduid"), &mount.FakeMounter{}) unmounter, err := plug.(*quobytePlugin).newUnmounterInternal("vol", types.UID("poduid"), mount.NewFakeMounter(nil))
if err != nil { if err != nil {
t.Errorf("Failed to make a new unmounter: %v", err) t.Errorf("Failed to make a new unmounter: %v", err)
} }


@ -27,7 +27,7 @@ import (
"testing" "testing"
"time" "time"
"k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
@ -234,11 +234,12 @@ func (fake *fakeDiskManager) ExpandImage(rbdExpander *rbdVolumeExpander, oldSize
// checkMounterLog checks fakeMounter must have expected logs, and the last action must equal expectedAction. func checkMounterLog(t *testing.T, fakeMounter *mount.FakeMounter, expected int, expectedAction mount.FakeAction) {
func checkMounterLog(t *testing.T, fakeMounter *mount.FakeMounter, expected int, expectedAction mount.FakeAction) { func checkMounterLog(t *testing.T, fakeMounter *mount.FakeMounter, expected int, expectedAction mount.FakeAction) {
if len(fakeMounter.Log) != expected { log := fakeMounter.GetLog()
t.Fatalf("fakeMounter should have %d logs, actual: %d", expected, len(fakeMounter.Log)) if len(log) != expected {
t.Fatalf("fakeMounter should have %d logs, actual: %d", expected, len(log))
} }
lastIndex := len(fakeMounter.Log) - 1 lastIndex := len(log) - 1
lastAction := fakeMounter.Log[lastIndex] lastAction := log[lastIndex]
if !reflect.DeepEqual(expectedAction, lastAction) { if !reflect.DeepEqual(expectedAction, lastAction) {
t.Fatalf("fakeMounter.Log[%d] should be %#v, not: %#v", lastIndex, expectedAction, lastAction) t.Fatalf("fakeMounter.Log[%d] should be %#v, not: %#v", lastIndex, expectedAction, lastAction)
} }


@ -22,7 +22,7 @@ import (
"path/filepath" "path/filepath"
"testing" "testing"
"k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/kubernetes/fake"
@ -194,7 +194,7 @@ func TestPlugin(t *testing.T) {
t.Errorf("Couldn't get secret from %v/%v", pod.Namespace, secretName) t.Errorf("Couldn't get secret from %v/%v", pod.Namespace, secretName)
} }
mounter, err := plug.(*storageosPlugin).newMounterInternal(volume.NewSpecFromVolume(spec), pod, apiCfg, fakeManager, &mount.FakeMounter{}, mount.NewFakeExec(nil)) mounter, err := plug.(*storageosPlugin).newMounterInternal(volume.NewSpecFromVolume(spec), pod, apiCfg, fakeManager, mount.NewFakeMounter(nil), mount.NewFakeExec(nil))
if err != nil { if err != nil {
t.Fatalf("Failed to make a new Mounter: %v", err) t.Fatalf("Failed to make a new Mounter: %v", err)
} }
@ -231,7 +231,7 @@ func TestPlugin(t *testing.T) {
// Test Unmounter // Test Unmounter
fakeManager = &fakePDManager{} fakeManager = &fakePDManager{}
unmounter, err := plug.(*storageosPlugin).newUnmounterInternal("vol1-pvname", types.UID("poduid"), fakeManager, &mount.FakeMounter{}, mount.NewFakeExec(nil)) unmounter, err := plug.(*storageosPlugin).newUnmounterInternal("vol1-pvname", types.UID("poduid"), fakeManager, mount.NewFakeMounter(nil), mount.NewFakeExec(nil))
if err != nil { if err != nil {
t.Errorf("Failed to make a new Unmounter: %v", err) t.Errorf("Failed to make a new Unmounter: %v", err)
} }
@ -372,7 +372,7 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
fakeManager := &fakePDManager{} fakeManager := &fakePDManager{}
fakeConfig := &fakeConfig{} fakeConfig := &fakeConfig{}
apiCfg := fakeConfig.GetAPIConfig() apiCfg := fakeConfig.GetAPIConfig()
mounter, err := plug.(*storageosPlugin).newMounterInternal(spec, pod, apiCfg, fakeManager, &mount.FakeMounter{}, mount.NewFakeExec(nil)) mounter, err := plug.(*storageosPlugin).newMounterInternal(spec, pod, apiCfg, fakeManager, mount.NewFakeMounter(nil), mount.NewFakeExec(nil))
if err != nil { if err != nil {
t.Fatalf("error creating a new internal mounter:%v", err) t.Fatalf("error creating a new internal mounter:%v", err)
} }


@ -21,7 +21,7 @@ import (
"os" "os"
storageostypes "github.com/storageos/go-api/types" storageostypes "github.com/storageos/go-api/types"
"k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
utiltesting "k8s.io/client-go/util/testing" utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume"
@ -221,7 +221,7 @@ func TestAttachVolume(t *testing.T) {
volName: testVolName, volName: testVolName,
volNamespace: testNamespace, volNamespace: testNamespace,
manager: util, manager: util,
mounter: &mount.FakeMounter{}, mounter: mount.NewFakeMounter(nil),
plugin: plug.(*storageosPlugin), plugin: plug.(*storageosPlugin),
}, },
deviceDir: tmpDir, deviceDir: tmpDir,


@ -109,7 +109,7 @@ func NewFakeVolumeHostWithCSINodeName(rootDir string, kubeClient clientset.Inter
func newFakeVolumeHost(rootDir string, kubeClient clientset.Interface, plugins []VolumePlugin, cloud cloudprovider.Interface, pathToTypeMap map[string]hostutil.FileType) *fakeVolumeHost { func newFakeVolumeHost(rootDir string, kubeClient clientset.Interface, plugins []VolumePlugin, cloud cloudprovider.Interface, pathToTypeMap map[string]hostutil.FileType) *fakeVolumeHost {
host := &fakeVolumeHost{rootDir: rootDir, kubeClient: kubeClient, cloud: cloud} host := &fakeVolumeHost{rootDir: rootDir, kubeClient: kubeClient, cloud: cloud}
host.mounter = &mount.FakeMounter{} host.mounter = mount.NewFakeMounter(nil)
host.hostUtil = hostutil.NewFakeHostUtil(pathToTypeMap) host.hostUtil = hostutil.NewFakeHostUtil(pathToTypeMap)
host.exec = mount.NewFakeExec(nil) host.exec = mount.NewFakeExec(nil)
host.pluginMgr.InitPlugins(plugins, nil /* prober */, host) host.pluginMgr.InitPlugins(plugins, nil /* prober */, host)


@ -21,6 +21,10 @@ package fsquota
import ( import (
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"os"
"strings"
"testing"
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature" utilfeature "k8s.io/apiserver/pkg/util/feature"
@ -28,9 +32,6 @@ import (
"k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume/util/fsquota/common" "k8s.io/kubernetes/pkg/volume/util/fsquota/common"
"os"
"strings"
"testing"
) )
const dummyMountData = `sysfs /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0 const dummyMountData = `sysfs /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0
@ -44,8 +45,8 @@ tmpfs /tmp tmpfs rw,nosuid,nodev 0 0
` `
func dummyFakeMount1() mount.Interface { func dummyFakeMount1() mount.Interface {
return &mount.FakeMounter{ return mount.NewFakeMounter(
MountPoints: []mount.MountPoint{ []mount.MountPoint{
{ {
Device: "tmpfs", Device: "tmpfs",
Path: "/tmp", Path: "/tmp",
@ -76,8 +77,7 @@ func dummyFakeMount1() mount.Interface {
Type: "xfs", Type: "xfs",
Opts: []string{"rw", "relatime", "attr2", "inode64", "usrquota", "prjquota"}, Opts: []string{"rw", "relatime", "attr2", "inode64", "usrquota", "prjquota"},
}, },
}, })
}
} }
type backingDevTest struct { type backingDevTest struct {
@ -234,9 +234,7 @@ var dummyMountPoints = []mount.MountPoint{
} }
func dummyQuotaTest() mount.Interface { func dummyQuotaTest() mount.Interface {
return &mount.FakeMounter{ return mount.NewFakeMounter(dummyMountPoints)
MountPoints: dummyMountPoints,
}
} }
func dummySetFSInfo(path string) { func dummySetFSInfo(path string) {


@ -612,7 +612,8 @@ func TestCleanSubPaths(t *testing.T) {
t.Fatalf("failed to prepare test %q: %v", test.name, err.Error()) t.Fatalf("failed to prepare test %q: %v", test.name, err.Error())
} }
fm := &mount.FakeMounter{MountPoints: mounts, UnmountFunc: test.unmount} fm := mount.NewFakeMounter(mounts)
fm.UnmountFunc = test.unmount
err = doCleanSubPaths(fm, base, testVol) err = doCleanSubPaths(fm, base, testVol)
if err != nil && !test.expectError { if err != nil && !test.expectError {
@ -641,7 +642,7 @@ func setupFakeMounter(testMounts []string) *mount.FakeMounter {
for _, mountPoint := range testMounts { for _, mountPoint := range testMounts {
mounts = append(mounts, mount.MountPoint{Device: "/foo", Path: mountPoint}) mounts = append(mounts, mount.MountPoint{Device: "/foo", Path: mountPoint})
} }
return &mount.FakeMounter{MountPoints: mounts} return mount.NewFakeMounter(mounts)
} }
func getTestPaths(base string) (string, string) { func getTestPaths(base string) (string, string) {


@ -24,7 +24,7 @@ import (
"path/filepath" "path/filepath"
"testing" "testing"
"k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
utiltesting "k8s.io/client-go/util/testing" utiltesting "k8s.io/client-go/util/testing"
cloudprovider "k8s.io/cloud-provider" cloudprovider "k8s.io/cloud-provider"
@ -110,7 +110,7 @@ func TestPlugin(t *testing.T) {
// Test Mounter // Test Mounter
fakeManager := &fakePDManager{} fakeManager := &fakePDManager{}
fakeMounter := &mount.FakeMounter{} fakeMounter := mount.NewFakeMounter(nil)
mounter, err := plug.(*vsphereVolumePlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), fakeManager, fakeMounter) mounter, err := plug.(*vsphereVolumePlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), fakeManager, fakeMounter)
if err != nil { if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err) t.Errorf("Failed to make a new Mounter: %v", err)