Merge pull request #97354 from chrishenzie/separate-volume-hosts

Split the test VolumeHosts type into one per interface
Kubernetes Prow Robot authored 2020-12-17 19:26:24 -08:00, committed by GitHub
commit e7d23042e1
21 changed files with 556 additions and 338 deletions
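For orientation, here is a condensed sketch of the structure this PR ends up with, drawn from the new pkg/volume/testing/volume_host.go further down (fields and method bodies elided):

// Shared base fake, embedded by the interface-specific fakes below.
type fakeVolumeHost struct { /* rootDir, kubeClient, pluginMgr, mounter, ... */ }

var _ VolumeHost = &fakeVolumeHost{}

// Kubelet-side fake host.
type fakeKubeletVolumeHost struct {
	fakeVolumeHost
}

var _ KubeletVolumeHost = &fakeKubeletVolumeHost{}

// Attach/detach-controller-side fake host.
type fakeAttachDetachVolumeHost struct {
	fakeVolumeHost
}

var _ AttachDetachVolumeHost = &fakeAttachDetachVolumeHost{}

Kubelet-oriented tests switch to the kubelet constructors, e.g. volumetesting.GetTestKubeletVolumePluginMgr(t) and volumetest.NewFakeKubeletVolumeHost(t, tmpDir, nil, nil), while the plain NewFakeVolumeHost remains available for exercising plugins against an unsupported VolumeHost.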

View File

@ -36,7 +36,7 @@ var emptyVolumeName = v1.UniqueVolumeName("")
// Verifies newly added volume doesn't exist in GetGloballyMountedVolumes()
func Test_MarkVolumeAsAttached_Positive_NewVolume(t *testing.T) {
// Arrange
volumePluginMgr, plugin := volumetesting.GetTestVolumePluginMgr(t)
volumePluginMgr, plugin := volumetesting.GetTestKubeletVolumePluginMgr(t)
asw := NewActualStateOfWorld("mynode" /* nodeName */, volumePluginMgr)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
@ -83,7 +83,7 @@ func Test_MarkVolumeAsAttached_Positive_NewVolume(t *testing.T) {
// Verifies newly added volume doesn't exist in GetGloballyMountedVolumes()
func Test_MarkVolumeAsAttached_SuppliedVolumeName_Positive_NewVolume(t *testing.T) {
// Arrange
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
volumePluginMgr, _ := volumetesting.GetTestKubeletVolumePluginMgr(t)
asw := NewActualStateOfWorld("mynode" /* nodeName */, volumePluginMgr)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
@ -126,7 +126,7 @@ func Test_MarkVolumeAsAttached_SuppliedVolumeName_Positive_NewVolume(t *testing.
// Verifies newly added volume doesn't exist in GetGloballyMountedVolumes()
func Test_MarkVolumeAsAttached_Positive_ExistingVolume(t *testing.T) {
// Arrange
volumePluginMgr, plugin := volumetesting.GetTestVolumePluginMgr(t)
volumePluginMgr, plugin := volumetesting.GetTestKubeletVolumePluginMgr(t)
devicePath := "fake/device/path"
asw := NewActualStateOfWorld("mynode" /* nodeName */, volumePluginMgr)
pod := &v1.Pod{
@ -176,7 +176,7 @@ func Test_MarkVolumeAsAttached_Positive_ExistingVolume(t *testing.T) {
// Verifies volume/pod combo exist using PodExistsInVolume()
func Test_AddPodToVolume_Positive_ExistingVolumeNewNode(t *testing.T) {
// Arrange
volumePluginMgr, plugin := volumetesting.GetTestVolumePluginMgr(t)
volumePluginMgr, plugin := volumetesting.GetTestKubeletVolumePluginMgr(t)
asw := NewActualStateOfWorld("mynode" /* nodeName */, volumePluginMgr)
devicePath := "fake/device/path"
@ -249,7 +249,7 @@ func Test_AddPodToVolume_Positive_ExistingVolumeNewNode(t *testing.T) {
// did not fail.
func Test_AddPodToVolume_Positive_ExistingVolumeExistingNode(t *testing.T) {
// Arrange
volumePluginMgr, plugin := volumetesting.GetTestVolumePluginMgr(t)
volumePluginMgr, plugin := volumetesting.GetTestKubeletVolumePluginMgr(t)
asw := NewActualStateOfWorld("mynode" /* nodeName */, volumePluginMgr)
devicePath := "fake/device/path"
@ -329,7 +329,7 @@ func Test_AddPodToVolume_Positive_ExistingVolumeExistingNode(t *testing.T) {
// did not fail.
func Test_AddTwoPodsToVolume_Positive(t *testing.T) {
// Arrange
volumePluginMgr, plugin := volumetesting.GetTestVolumePluginMgr(t)
volumePluginMgr, plugin := volumetesting.GetTestKubeletVolumePluginMgr(t)
asw := NewActualStateOfWorld("mynode" /* nodeName */, volumePluginMgr)
devicePath := "fake/device/path"
@ -457,7 +457,7 @@ func Test_AddTwoPodsToVolume_Positive(t *testing.T) {
// Verifies call fails with "volume does not exist" error.
func Test_AddPodToVolume_Negative_VolumeDoesntExist(t *testing.T) {
// Arrange
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
volumePluginMgr, _ := volumetesting.GetTestKubeletVolumePluginMgr(t)
asw := NewActualStateOfWorld("mynode" /* nodeName */, volumePluginMgr)
pod := &v1.Pod{
@ -546,7 +546,7 @@ func Test_AddPodToVolume_Negative_VolumeDoesntExist(t *testing.T) {
// Verifies newly added volume exists in GetGloballyMountedVolumes()
func Test_MarkDeviceAsMounted_Positive_NewVolume(t *testing.T) {
// Arrange
volumePluginMgr, plugin := volumetesting.GetTestVolumePluginMgr(t)
volumePluginMgr, plugin := volumetesting.GetTestKubeletVolumePluginMgr(t)
asw := NewActualStateOfWorld("mynode" /* nodeName */, volumePluginMgr)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
@ -594,7 +594,7 @@ func Test_MarkDeviceAsMounted_Positive_NewVolume(t *testing.T) {
func TestUncertainVolumeMounts(t *testing.T) {
// Arrange
volumePluginMgr, plugin := volumetesting.GetTestVolumePluginMgr(t)
volumePluginMgr, plugin := volumetesting.GetTestKubeletVolumePluginMgr(t)
asw := NewActualStateOfWorld("mynode" /* nodeName */, volumePluginMgr)
devicePath := "fake/device/path"

View File

@ -33,7 +33,7 @@ import (
// PodExistsInVolume() VolumeExists() and GetVolumesToMount()
func Test_AddPodToVolume_Positive_NewPodNewVolume(t *testing.T) {
// Arrange
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
volumePluginMgr, _ := volumetesting.GetTestKubeletVolumePluginMgr(t)
dsw := NewDesiredStateOfWorld(volumePluginMgr)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
@ -78,7 +78,7 @@ func Test_AddPodToVolume_Positive_NewPodNewVolume(t *testing.T) {
// PodExistsInVolume() VolumeExists() and GetVolumesToMount() and no errors.
func Test_AddPodToVolume_Positive_ExistingPodExistingVolume(t *testing.T) {
// Arrange
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
volumePluginMgr, _ := volumetesting.GetTestKubeletVolumePluginMgr(t)
dsw := NewDesiredStateOfWorld(volumePluginMgr)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
@ -285,7 +285,7 @@ func Test_AddPodToVolume_Positive_NamesForDifferentPodsAndDifferentVolumes(t *te
// Verifies newly added pod/volume are deleted
func Test_DeletePodFromVolume_Positive_PodExistsVolumeExists(t *testing.T) {
// Arrange
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
volumePluginMgr, _ := volumetesting.GetTestKubeletVolumePluginMgr(t)
dsw := NewDesiredStateOfWorld(volumePluginMgr)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
@ -337,7 +337,7 @@ func Test_DeletePodFromVolume_Positive_PodExistsVolumeExists(t *testing.T) {
// Verifies only that volume is marked reported in use
func Test_MarkVolumesReportedInUse_Positive_NewPodNewVolume(t *testing.T) {
// Arrange
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
volumePluginMgr, _ := volumetesting.GetTestKubeletVolumePluginMgr(t)
dsw := NewDesiredStateOfWorld(volumePluginMgr)
pod1 := &v1.Pod{
@ -462,7 +462,7 @@ func Test_MarkVolumesReportedInUse_Positive_NewPodNewVolume(t *testing.T) {
}
func Test_AddPodToVolume_WithEmptyDirSizeLimit(t *testing.T) {
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
volumePluginMgr, _ := volumetesting.GetTestKubeletVolumePluginMgr(t)
dsw := NewDesiredStateOfWorld(volumePluginMgr)
quantity1Gi := resource.MustParse("1Gi")
quantity2Gi := resource.MustParse("2Gi")

View File

@ -31,7 +31,7 @@ import (
)
func TestMetricCollection(t *testing.T) {
volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
volumePluginMgr, fakePlugin := volumetesting.GetTestKubeletVolumePluginMgr(t)
dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
asw := cache.NewActualStateOfWorld(k8stypes.NodeName("node-name"), volumePluginMgr)
pod := &v1.Pod{

View File

@ -395,7 +395,7 @@ func TestFindAndRemoveNonattachableVolumes(t *testing.T) {
},
}
fakeVolumePluginMgr, fakeVolumePlugin := volumetesting.GetTestVolumePluginMgr(t)
fakeVolumePluginMgr, fakeVolumePlugin := volumetesting.GetTestKubeletVolumePluginMgr(t)
dswp, fakePodManager, fakesDSW := createDswpWithVolumeWithCustomPluginMgr(t, pv, pvc, fakeVolumePluginMgr)
// create pod
@ -1077,7 +1077,7 @@ func createPodWithVolume(pod, pv, pvc string, containers []v1.Container) *v1.Pod
}
func createDswpWithVolume(t *testing.T, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) (*desiredStateOfWorldPopulator, kubepod.Manager, cache.DesiredStateOfWorld) {
fakeVolumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
fakeVolumePluginMgr, _ := volumetesting.GetTestKubeletVolumePluginMgr(t)
dswp, fakePodManager, fakesDSW := createDswpWithVolumeWithCustomPluginMgr(t, pv, pvc, fakeVolumePluginMgr)
return dswp, fakePodManager, fakesDSW
}

View File

@ -64,7 +64,7 @@ func hasAddedPods() bool { return true }
// Verifies there are no calls to attach, detach, mount, unmount, etc.
func Test_Run_Positive_DoNothing(t *testing.T) {
// Arrange
volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
volumePluginMgr, fakePlugin := volumetesting.GetTestKubeletVolumePluginMgr(t)
dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr)
kubeClient := createTestClient()
@ -109,7 +109,7 @@ func Test_Run_Positive_DoNothing(t *testing.T) {
// Verifies there are attach/mount/etc calls and no detach/unmount calls.
func Test_Run_Positive_VolumeAttachAndMount(t *testing.T) {
// Arrange
volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
volumePluginMgr, fakePlugin := volumetesting.GetTestKubeletVolumePluginMgr(t)
dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr)
kubeClient := createTestClient()
@ -187,7 +187,7 @@ func Test_Run_Positive_VolumeAttachAndMount(t *testing.T) {
// Verifies there are no attach/detach calls.
func Test_Run_Positive_VolumeMountControllerAttachEnabled(t *testing.T) {
// Arrange
volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
volumePluginMgr, fakePlugin := volumetesting.GetTestKubeletVolumePluginMgr(t)
dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr)
kubeClient := createTestClient()
@ -266,7 +266,7 @@ func Test_Run_Positive_VolumeMountControllerAttachEnabled(t *testing.T) {
// Verifies detach/unmount calls are issued.
func Test_Run_Positive_VolumeAttachMountUnmountDetach(t *testing.T) {
// Arrange
volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
volumePluginMgr, fakePlugin := volumetesting.GetTestKubeletVolumePluginMgr(t)
dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr)
kubeClient := createTestClient()
@ -356,7 +356,7 @@ func Test_Run_Positive_VolumeAttachMountUnmountDetach(t *testing.T) {
// Verifies there are no attach/detach calls made.
func Test_Run_Positive_VolumeUnmountControllerAttachEnabled(t *testing.T) {
// Arrange
volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
volumePluginMgr, fakePlugin := volumetesting.GetTestKubeletVolumePluginMgr(t)
dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr)
kubeClient := createTestClient()
@ -480,7 +480,7 @@ func Test_Run_Positive_VolumeAttachAndMap(t *testing.T) {
}
// Arrange
volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
volumePluginMgr, fakePlugin := volumetesting.GetTestKubeletVolumePluginMgr(t)
dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr)
kubeClient := createtestClientWithPVPVC(gcepv, gcepvc)
@ -580,7 +580,7 @@ func Test_Run_Positive_BlockVolumeMapControllerAttachEnabled(t *testing.T) {
}
// Arrange
volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
volumePluginMgr, fakePlugin := volumetesting.GetTestKubeletVolumePluginMgr(t)
dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr)
kubeClient := createtestClientWithPVPVC(gcepv, gcepvc, v1.AttachedVolume{
@ -681,7 +681,7 @@ func Test_Run_Positive_BlockVolumeAttachMapUnmapDetach(t *testing.T) {
}
// Arrange
volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
volumePluginMgr, fakePlugin := volumetesting.GetTestKubeletVolumePluginMgr(t)
dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr)
kubeClient := createtestClientWithPVPVC(gcepv, gcepvc)
@ -789,7 +789,7 @@ func Test_Run_Positive_VolumeUnmapControllerAttachEnabled(t *testing.T) {
}
// Arrange
volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
volumePluginMgr, fakePlugin := volumetesting.GetTestKubeletVolumePluginMgr(t)
dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr)
kubeClient := createtestClientWithPVPVC(gcepv, gcepvc, v1.AttachedVolume{
@ -1094,7 +1094,7 @@ func Test_Run_Positive_VolumeFSResizeControllerAttachEnabled(t *testing.T) {
},
}
volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
volumePluginMgr, fakePlugin := volumetesting.GetTestKubeletVolumePluginMgr(t)
dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr)
kubeClient := createtestClientWithPVPVC(pv, pvc, v1.AttachedVolume{
@ -1266,7 +1266,7 @@ func Test_UncertainDeviceGlobalMounts(t *testing.T) {
},
}
volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
volumePluginMgr, fakePlugin := volumetesting.GetTestKubeletVolumePluginMgr(t)
fakePlugin.SupportsRemount = tc.supportRemount
dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
@ -1446,7 +1446,7 @@ func Test_UncertainVolumeMountState(t *testing.T) {
},
}
volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
volumePluginMgr, fakePlugin := volumetesting.GetTestKubeletVolumePluginMgr(t)
fakePlugin.SupportsRemount = tc.supportRemount
dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr)
@ -1721,7 +1721,7 @@ func createtestClientWithPVPVC(pv *v1.PersistentVolume, pvc *v1.PersistentVolume
func Test_Run_Positive_VolumeMountControllerAttachEnabledRace(t *testing.T) {
// Arrange
volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
volumePluginMgr, fakePlugin := volumetesting.GetTestKubeletVolumePluginMgr(t)
dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr)

View File

@ -274,7 +274,7 @@ func newTestVolumeManager(t *testing.T, tmpDir string, podManager kubepod.Manage
fakeRecorder := &record.FakeRecorder{}
plugMgr := &volume.VolumePluginMgr{}
// TODO (#51147) inject mock prober
plugMgr.InitPlugins([]volume.VolumePlugin{plug}, nil /* prober */, volumetest.NewFakeVolumeHost(t, tmpDir, kubeClient, nil))
plugMgr.InitPlugins([]volume.VolumePlugin{plug}, nil /* prober */, volumetest.NewFakeKubeletVolumeHost(t, tmpDir, kubeClient, nil))
statusManager := status.NewManager(kubeClient, podManager, &statustest.FakePodDeletionSafetyProvider{})
fakePathHandler := volumetest.NewBlockVolumePathHandler()
vm := NewVolumeManager(

View File

@ -46,7 +46,7 @@ func TestCanSupport(t *testing.T) {
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(t, tmpDir, nil, nil))
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeKubeletVolumeHost(t, tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/aws-ebs")
if err != nil {
@ -70,7 +70,7 @@ func TestGetAccessModes(t *testing.T) {
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(t, tmpDir, nil, nil))
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeKubeletVolumeHost(t, tmpDir, nil, nil))
plug, err := plugMgr.FindPersistentPluginByName("kubernetes.io/aws-ebs")
if err != nil {
@ -110,7 +110,7 @@ func TestPlugin(t *testing.T) {
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(t, tmpDir, nil, nil))
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeKubeletVolumeHost(t, tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/aws-ebs")
if err != nil {
@ -279,7 +279,7 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(t, tmpDir, clientset, nil))
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeKubeletVolumeHost(t, tmpDir, clientset, nil))
plug, _ := plugMgr.FindPluginByName(awsElasticBlockStorePluginName)
// readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
@ -299,7 +299,7 @@ func TestMounterAndUnmounterTypeAssert(t *testing.T) {
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(t, tmpDir, nil, nil))
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeKubeletVolumeHost(t, tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/aws-ebs")
if err != nil {
@ -339,7 +339,7 @@ func TestMountOptions(t *testing.T) {
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(t, tmpDir, nil, nil))
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeKubeletVolumeHost(t, tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/aws-ebs")
if err != nil {
@ -448,3 +448,23 @@ func TestFormatVolumeID(t *testing.T) {
assert.Equal(t, test.expectedVolumeID, volumeID, test.volumeIDFromPath)
}
}
func TestUnsupportedVolumeHost(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("awsebsTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(t, tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/aws-ebs")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
_, err = plug.ConstructVolumeSpec("", "")
if err == nil {
t.Errorf("Expected failure constructing volume spec with unsupported VolumeHost")
}
}

View File

@ -37,7 +37,7 @@ func TestCanSupport(t *testing.T) {
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(t, tmpDir, nil, nil))
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeKubeletVolumeHost(t, tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName(azureDataDiskPluginName)
if err != nil {
@ -86,3 +86,23 @@ func TestGetMaxDataDiskCount(t *testing.T) {
assert.Equal(t, test.expectResult, result)
}
}
func TestUnsupportedVolumeHost(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("azure_dd")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(t, tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName(azureDataDiskPluginName)
if err != nil {
t.Errorf("Can't find the plugin by name")
}
_, err = plug.ConstructVolumeSpec("", "")
if err == nil {
t.Errorf("Expected failure constructing volume spec with unsupported VolumeHost")
}
}

View File

@ -43,7 +43,7 @@ func TestCanSupport(t *testing.T) {
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(t, tmpDir, nil, nil))
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeKubeletVolumeHost(t, tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/cinder")
if err != nil {
@ -141,7 +141,7 @@ func TestPlugin(t *testing.T) {
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(t, tmpDir, nil, nil))
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeKubeletVolumeHost(t, tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/cinder")
if err != nil {
@ -280,7 +280,7 @@ func TestGetVolumeLimit(t *testing.T) {
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
volumeHost := volumetest.NewFakeVolumeHostWithCloudProvider(t, tmpDir, nil, nil, cloud)
volumeHost := volumetest.NewFakeKubeletVolumeHostWithCloudProvider(t, tmpDir, nil, nil, cloud)
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumeHost)
plug, err := plugMgr.FindPluginByName("kubernetes.io/cinder")
@ -342,3 +342,23 @@ func getOpenstackConfig() openstack.Config {
}
return cfg
}
func TestUnsupportedVolumeHost(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("cinderTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(t, tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/cinder")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
_, err = plug.ConstructVolumeSpec("", "")
if err == nil {
t.Errorf("Expected failure constructing volume spec with unsupported VolumeHost")
}
}

View File

@ -1609,7 +1609,7 @@ func newTestWatchPlugin(t *testing.T, fakeClient *fakeclient.Clientset, setupInf
}
}
host := volumetest.NewFakeVolumeHostWithCSINodeName(t,
host := volumetest.NewFakeKubeletVolumeHostWithCSINodeName(t,
tmpDir,
fakeClient,
ProbeVolumePlugins(),

View File

@ -66,7 +66,7 @@ func newTestPlugin(t *testing.T, client *fakeclient.Clientset) (*csiPlugin, stri
volumeAttachmentLister := volumeAttachmentInformer.Lister()
go factory.Start(wait.NeverStop)
host := volumetest.NewFakeVolumeHostWithCSINodeName(t,
host := volumetest.NewFakeKubeletVolumeHostWithCSINodeName(t,
tmpDir,
client,
ProbeVolumePlugins(),
@ -1018,7 +1018,7 @@ func TestPluginFindAttachablePlugin(t *testing.T) {
},
)
factory := informers.NewSharedInformerFactory(client, CsiResyncPeriod)
host := volumetest.NewFakeVolumeHostWithCSINodeName(t,
host := volumetest.NewFakeKubeletVolumeHostWithCSINodeName(t,
tmpDir,
client,
ProbeVolumePlugins(),

View File

@ -285,7 +285,7 @@ func TestCSI_VolumeAll(t *testing.T) {
factory.Start(wait.NeverStop)
factory.WaitForCacheSync(wait.NeverStop)
host := volumetest.NewFakeVolumeHostWithCSINodeName(t,
host := volumetest.NewFakeKubeletVolumeHostWithCSINodeName(t,
tmpDir,
client,
ProbeVolumePlugins(),

View File

@ -45,7 +45,7 @@ func TestCanSupport(t *testing.T) {
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(t, tmpDir, nil, nil))
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeKubeletVolumeHost(t, tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/gce-pd")
if err != nil {
@ -69,7 +69,7 @@ func TestGetAccessModes(t *testing.T) {
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(t, tmpDir, nil, nil))
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeKubeletVolumeHost(t, tmpDir, nil, nil))
plug, err := plugMgr.FindPersistentPluginByName("kubernetes.io/gce-pd")
if err != nil {
@ -113,7 +113,7 @@ func TestPlugin(t *testing.T) {
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(t, tmpDir, nil, nil))
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeKubeletVolumeHost(t, tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/gce-pd")
if err != nil {
@ -250,7 +250,7 @@ func TestMountOptions(t *testing.T) {
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(t, tmpDir, nil, nil))
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeKubeletVolumeHost(t, tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/gce-pd")
if err != nil {
@ -329,7 +329,7 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(t, tmpDir, client, nil))
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeKubeletVolumeHost(t, tmpDir, client, nil))
plug, _ := plugMgr.FindPluginByName(gcePersistentDiskPluginName)
// readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
@ -344,3 +344,23 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
t.Errorf("Expected true for mounter.IsReadOnly")
}
}
func TestUnsupportedVolumeHost(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("gcepdTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(t, tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/gce-pd")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
_, err = plug.ConstructVolumeSpec("", "")
if err == nil {
t.Errorf("Expected failure constructing volume spec with unsupported VolumeHost")
}
}

View File

@ -50,7 +50,7 @@ func newHostPathTypeList(pathType ...string) []*v1.HostPathType {
func TestCanSupport(t *testing.T) {
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), nil /* prober */, volumetest.NewFakeVolumeHost(t, "fake", nil, nil))
plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), nil /* prober */, volumetest.NewFakeKubeletVolumeHost(t, "fake", nil, nil))
plug, err := plugMgr.FindPluginByName(hostPathPluginName)
if err != nil {
@ -72,7 +72,7 @@ func TestCanSupport(t *testing.T) {
func TestGetAccessModes(t *testing.T) {
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), nil /* prober */, volumetest.NewFakeVolumeHost(t, "/tmp/fake", nil, nil))
plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), nil /* prober */, volumetest.NewFakeKubeletVolumeHost(t, "/tmp/fake", nil, nil))
plug, err := plugMgr.FindPersistentPluginByName(hostPathPluginName)
if err != nil {
@ -85,7 +85,7 @@ func TestGetAccessModes(t *testing.T) {
func TestRecycler(t *testing.T) {
plugMgr := volume.VolumePluginMgr{}
pluginHost := volumetest.NewFakeVolumeHost(t, "/tmp/fake", nil, nil)
pluginHost := volumetest.NewFakeKubeletVolumeHost(t, "/tmp/fake", nil, nil)
plugMgr.InitPlugins([]volume.VolumePlugin{&hostPathPlugin{nil, volume.VolumeConfig{}}}, nil, pluginHost)
spec := &volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{HostPath: &v1.HostPathVolumeSource{Path: "/foo"}}}}}
@ -104,7 +104,7 @@ func TestDeleter(t *testing.T) {
}
defer os.RemoveAll(tempPath)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), nil /* prober */, volumetest.NewFakeVolumeHost(t, "/tmp/fake", nil, nil))
plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), nil /* prober */, volumetest.NewFakeKubeletVolumeHost(t, "/tmp/fake", nil, nil))
spec := &volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{HostPath: &v1.HostPathVolumeSource{Path: tempPath}}}}}
plug, err := plugMgr.FindDeletablePluginBySpec(spec)
@ -138,7 +138,7 @@ func TestDeleterTempDir(t *testing.T) {
for name, test := range tests {
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), nil /* prober */, volumetest.NewFakeVolumeHost(t, "/tmp/fake", nil, nil))
plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), nil /* prober */, volumetest.NewFakeKubeletVolumeHost(t, "/tmp/fake", nil, nil))
spec := &volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{HostPath: &v1.HostPathVolumeSource{Path: test.path}}}}}
plug, _ := plugMgr.FindDeletablePluginBySpec(spec)
deleter, _ := plug.NewDeleter(spec)
@ -156,7 +156,7 @@ func TestProvisioner(t *testing.T) {
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{ProvisioningEnabled: true}),
nil,
volumetest.NewFakeVolumeHost(t, "/tmp/fake", nil, nil))
volumetest.NewFakeKubeletVolumeHost(t, "/tmp/fake", nil, nil))
spec := &volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{HostPath: &v1.HostPathVolumeSource{Path: fmt.Sprintf("/tmp/hostpath.%s", uuid.NewUUID())}}}}}
plug, err := plugMgr.FindCreatablePluginBySpec(spec)
@ -203,7 +203,7 @@ func TestProvisioner(t *testing.T) {
func TestInvalidHostPath(t *testing.T) {
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), nil /* prober */, volumetest.NewFakeVolumeHost(t, "fake", nil, nil))
plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), nil /* prober */, volumetest.NewFakeKubeletVolumeHost(t, "fake", nil, nil))
plug, err := plugMgr.FindPluginByName(hostPathPluginName)
if err != nil {
@ -228,7 +228,7 @@ func TestInvalidHostPath(t *testing.T) {
func TestPlugin(t *testing.T) {
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), nil /* prober */, volumetest.NewFakeVolumeHost(t, "fake", nil, nil))
plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), nil /* prober */, volumetest.NewFakeKubeletVolumeHost(t, "fake", nil, nil))
plug, err := plugMgr.FindPluginByName(hostPathPluginName)
if err != nil {
@ -304,7 +304,7 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
client := fake.NewSimpleClientset(pv, claim)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), nil /* prober */, volumetest.NewFakeVolumeHost(t, "/tmp/fake", client, nil))
plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), nil /* prober */, volumetest.NewFakeKubeletVolumeHost(t, "/tmp/fake", client, nil))
plug, _ := plugMgr.FindPluginByName(hostPathPluginName)
// readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes

View File

@ -51,7 +51,7 @@ func getPlugin(t *testing.T) (string, volume.VolumePlugin) {
}
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(t, tmpDir, nil, nil))
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeKubeletVolumeHost(t, tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName(localVolumePluginName)
if err != nil {
@ -71,7 +71,7 @@ func getBlockPlugin(t *testing.T) (string, volume.BlockVolumePlugin) {
}
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(t, tmpDir, nil, nil))
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeKubeletVolumeHost(t, tmpDir, nil, nil))
plug, err := plugMgr.FindMapperPluginByName(localVolumePluginName)
if err != nil {
os.RemoveAll(tmpDir)
@ -90,7 +90,7 @@ func getPersistentPlugin(t *testing.T) (string, volume.PersistentVolumePlugin) {
}
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(t, tmpDir, nil, nil))
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeKubeletVolumeHost(t, tmpDir, nil, nil))
plug, err := plugMgr.FindPersistentPluginByName(localVolumePluginName)
if err != nil {
@ -117,7 +117,7 @@ func getDeviceMountablePluginWithBlockPath(t *testing.T, isBlockDevice bool) (st
}
}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHostWithMounterFSType(t, tmpDir, nil, nil, pathToFSType))
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeKubeletVolumeHostWithMounterFSType(t, tmpDir, nil, nil, pathToFSType))
plug, err := plugMgr.FindDeviceMountablePluginByName(localVolumePluginName)
if err != nil {
@ -475,7 +475,7 @@ func TestConstructVolumeSpec(t *testing.T) {
}
defer os.RemoveAll(tmpDir)
plug := &localVolumePlugin{
host: volumetest.NewFakeVolumeHost(t, tmpDir, nil, nil),
host: volumetest.NewFakeKubeletVolumeHost(t, tmpDir, nil, nil),
}
mounter := plug.host.GetMounter(plug.GetPluginName())
fakeMountPoints := []mount.MountPoint{}
@ -635,7 +635,7 @@ func TestUnsupportedPlugins(t *testing.T) {
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(t, tmpDir, nil, nil))
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeKubeletVolumeHost(t, tmpDir, nil, nil))
spec := getTestVolume(false, tmpDir, false, nil)
recyclePlug, err := plugMgr.FindRecyclablePluginBySpec(spec)
@ -678,7 +678,7 @@ func TestFilterPodMounts(t *testing.T) {
t.Fatal("mounter is not localVolumeMounter")
}
host := volumetest.NewFakeVolumeHost(t, tmpDir, nil, nil)
host := volumetest.NewFakeKubeletVolumeHost(t, tmpDir, nil, nil)
podsDir := host.GetPodsDir()
cases := map[string]struct {

View File

@ -102,7 +102,7 @@ func TestCanSupport(t *testing.T) {
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(t, tmpDir, nil, nil))
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeKubeletVolumeHost(t, tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/rbd")
if err != nil {
@ -247,7 +247,7 @@ func checkMounterLog(t *testing.T, fakeMounter *mount.FakeMounter, expected int,
}
func doTestPlugin(t *testing.T, c *testcase) {
fakeVolumeHost := volumetest.NewFakeVolumeHost(t, c.root, nil, nil)
fakeVolumeHost := volumetest.NewFakeKubeletVolumeHost(t, c.root, nil, nil)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, fakeVolumeHost)
plug, err := plugMgr.FindPluginByName("kubernetes.io/rbd")
@ -476,7 +476,7 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
client := fake.NewSimpleClientset(pv, claim)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(t, tmpDir, client, nil))
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeKubeletVolumeHost(t, tmpDir, client, nil))
plug, _ := plugMgr.FindPluginByName(rbdPluginName)
// readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
@ -532,7 +532,7 @@ func TestGetDeviceMountPath(t *testing.T) {
}
defer os.RemoveAll(tmpDir)
fakeVolumeHost := volumetest.NewFakeVolumeHost(t, tmpDir, nil, nil)
fakeVolumeHost := volumetest.NewFakeKubeletVolumeHost(t, tmpDir, nil, nil)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, fakeVolumeHost)
plug, err := plugMgr.FindPluginByName("kubernetes.io/rbd")
@ -599,7 +599,7 @@ func TestConstructVolumeSpec(t *testing.T) {
}
defer os.RemoveAll(tmpDir)
fakeVolumeHost := volumetest.NewFakeVolumeHost(t, tmpDir, nil, nil)
fakeVolumeHost := volumetest.NewFakeKubeletVolumeHost(t, tmpDir, nil, nil)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, fakeVolumeHost)
plug, err := plugMgr.FindPluginByName("kubernetes.io/rbd")
@ -659,7 +659,7 @@ func TestGetAccessModes(t *testing.T) {
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(t, tmpDir, nil, nil))
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeKubeletVolumeHost(t, tmpDir, nil, nil))
plug, err := plugMgr.FindPersistentPluginByName("kubernetes.io/rbd")
if err != nil {
@ -677,7 +677,7 @@ func TestRequiresRemount(t *testing.T) {
tmpDir, _ := utiltesting.MkTmpdir("rbd_test")
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(t, tmpDir, nil, nil))
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeKubeletVolumeHost(t, tmpDir, nil, nil))
plug, _ := plugMgr.FindPluginByName("kubernetes.io/rbd")
has := plug.RequiresRemount(nil)
if has {
@ -740,3 +740,24 @@ func TestGetRbdImageInfo(t *testing.T) {
}
}
}
func TestUnsupportedVolumeHost(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("rbd_test")
if err != nil {
t.Fatalf("error creating temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(t, tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/rbd")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
_, err = plug.ConstructVolumeSpec("", "")
if err == nil {
t.Errorf("Expected failure constructing volume spec with unsupported VolumeHost")
}
}

View File

@ -53,7 +53,7 @@ func newPluginMgr(t *testing.T, apiObject runtime.Object) (*volume.VolumePluginM
}
fakeClient := fakeclient.NewSimpleClientset(apiObject)
host := volumetest.NewFakeVolumeHostWithNodeLabels(t,
host := volumetest.NewFakeKubeletVolumeHostWithNodeLabels(t,
tmpDir,
fakeClient,
ProbeVolumePlugins(),

View File

@ -10,6 +10,7 @@ go_library(
srcs = [
"mock_volume.go",
"testing.go",
"volume_host.go",
],
importpath = "k8s.io/kubernetes/pkg/volume/testing",
deps = [

View File

@ -17,9 +17,7 @@ limitations under the License.
package testing
import (
"context"
"fmt"
"net"
"os"
"path/filepath"
"strings"
@ -28,32 +26,20 @@ import (
"time"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/mount-utils"
"k8s.io/utils/exec"
testingexec "k8s.io/utils/exec/testing"
utilstrings "k8s.io/utils/strings"
authenticationv1 "k8s.io/api/authentication/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
storagelistersv1 "k8s.io/client-go/listers/storage/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
utiltesting "k8s.io/client-go/util/testing"
cloudprovider "k8s.io/cloud-provider"
proxyutil "k8s.io/kubernetes/pkg/proxy/util"
"k8s.io/kubernetes/pkg/volume"
. "k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/hostutil"
"k8s.io/kubernetes/pkg/volume/util/recyclerclient"
"k8s.io/kubernetes/pkg/volume/util/subpath"
volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
)
@ -104,206 +90,6 @@ const (
volumeMounted = "volumeMounted"
)
// fakeVolumeHost is useful for testing volume plugins.
type fakeVolumeHost struct {
rootDir string
kubeClient clientset.Interface
pluginMgr *VolumePluginMgr
cloud cloudprovider.Interface
mounter mount.Interface
hostUtil hostutil.HostUtils
exec *testingexec.FakeExec
nodeLabels map[string]string
nodeName string
subpather subpath.Interface
csiDriverLister storagelistersv1.CSIDriverLister
volumeAttachmentLister storagelistersv1.VolumeAttachmentLister
informerFactory informers.SharedInformerFactory
kubeletErr error
mux sync.Mutex
filteredDialOptions *proxyutil.FilteredDialOptions
}
var _ VolumeHost = &fakeVolumeHost{}
var _ AttachDetachVolumeHost = &fakeVolumeHost{}
func NewFakeVolumeHost(t *testing.T, rootDir string, kubeClient clientset.Interface, plugins []VolumePlugin) *fakeVolumeHost {
return newFakeVolumeHost(t, rootDir, kubeClient, plugins, nil, nil, "", nil, nil)
}
func NewFakeVolumeHostWithCloudProvider(t *testing.T, rootDir string, kubeClient clientset.Interface, plugins []VolumePlugin, cloud cloudprovider.Interface) *fakeVolumeHost {
return newFakeVolumeHost(t, rootDir, kubeClient, plugins, cloud, nil, "", nil, nil)
}
func NewFakeVolumeHostWithNodeLabels(t *testing.T, rootDir string, kubeClient clientset.Interface, plugins []VolumePlugin, labels map[string]string) *fakeVolumeHost {
volHost := newFakeVolumeHost(t, rootDir, kubeClient, plugins, nil, nil, "", nil, nil)
volHost.nodeLabels = labels
return volHost
}
func NewFakeVolumeHostWithCSINodeName(t *testing.T, rootDir string, kubeClient clientset.Interface, plugins []VolumePlugin, nodeName string, driverLister storagelistersv1.CSIDriverLister, volumeAttachLister storagelistersv1.VolumeAttachmentLister) *fakeVolumeHost {
return newFakeVolumeHost(t, rootDir, kubeClient, plugins, nil, nil, nodeName, driverLister, volumeAttachLister)
}
func NewFakeVolumeHostWithMounterFSType(t *testing.T, rootDir string, kubeClient clientset.Interface, plugins []VolumePlugin, pathToTypeMap map[string]hostutil.FileType) *fakeVolumeHost {
return newFakeVolumeHost(t, rootDir, kubeClient, plugins, nil, pathToTypeMap, "", nil, nil)
}
func newFakeVolumeHost(t *testing.T, rootDir string, kubeClient clientset.Interface, plugins []VolumePlugin, cloud cloudprovider.Interface, pathToTypeMap map[string]hostutil.FileType, nodeName string, driverLister storagelistersv1.CSIDriverLister, volumeAttachLister storagelistersv1.VolumeAttachmentLister) *fakeVolumeHost {
host := &fakeVolumeHost{rootDir: rootDir, kubeClient: kubeClient, cloud: cloud, nodeName: nodeName, csiDriverLister: driverLister, volumeAttachmentLister: volumeAttachLister}
host.mounter = mount.NewFakeMounter(nil)
host.hostUtil = hostutil.NewFakeHostUtil(pathToTypeMap)
host.exec = &testingexec.FakeExec{DisableScripts: true}
host.pluginMgr = &VolumePluginMgr{}
if err := host.pluginMgr.InitPlugins(plugins, nil /* prober */, host); err != nil {
t.Fatalf("Failed to init plugins while creating fake volume host: %v", err)
}
host.subpather = &subpath.FakeSubpath{}
host.informerFactory = informers.NewSharedInformerFactory(kubeClient, time.Minute)
// Wait until the InitPlugins setup is finished before returning from this setup func
if err := host.WaitForKubeletErrNil(); err != nil {
t.Fatalf("Failed to wait for kubelet err to be nil while creating fake volume host: %v", err)
}
return host
}
func (f *fakeVolumeHost) GetPluginDir(podUID string) string {
return filepath.Join(f.rootDir, "plugins", podUID)
}
func (f *fakeVolumeHost) GetVolumeDevicePluginDir(pluginName string) string {
return filepath.Join(f.rootDir, "plugins", pluginName, "volumeDevices")
}
func (f *fakeVolumeHost) GetPodsDir() string {
return filepath.Join(f.rootDir, "pods")
}
func (f *fakeVolumeHost) GetPodVolumeDir(podUID types.UID, pluginName, volumeName string) string {
return filepath.Join(f.rootDir, "pods", string(podUID), "volumes", pluginName, volumeName)
}
func (f *fakeVolumeHost) GetPodVolumeDeviceDir(podUID types.UID, pluginName string) string {
return filepath.Join(f.rootDir, "pods", string(podUID), "volumeDevices", pluginName)
}
func (f *fakeVolumeHost) GetPodPluginDir(podUID types.UID, pluginName string) string {
return filepath.Join(f.rootDir, "pods", string(podUID), "plugins", pluginName)
}
func (f *fakeVolumeHost) GetKubeClient() clientset.Interface {
return f.kubeClient
}
func (f *fakeVolumeHost) GetCloudProvider() cloudprovider.Interface {
return f.cloud
}
func (f *fakeVolumeHost) GetMounter(pluginName string) mount.Interface {
return f.mounter
}
func (f *fakeVolumeHost) GetHostUtil() hostutil.HostUtils {
return f.hostUtil
}
func (f *fakeVolumeHost) GetSubpather() subpath.Interface {
return f.subpather
}
func (f *fakeVolumeHost) GetFilteredDialOptions() *proxyutil.FilteredDialOptions {
return f.filteredDialOptions
}
func (f *fakeVolumeHost) GetPluginMgr() *VolumePluginMgr {
return f.pluginMgr
}
func (f *fakeVolumeHost) NewWrapperMounter(volName string, spec Spec, pod *v1.Pod, opts VolumeOptions) (Mounter, error) {
// The name of wrapper volume is set to "wrapped_{wrapped_volume_name}"
wrapperVolumeName := "wrapped_" + volName
if spec.Volume != nil {
spec.Volume.Name = wrapperVolumeName
}
plug, err := f.pluginMgr.FindPluginBySpec(&spec)
if err != nil {
return nil, err
}
return plug.NewMounter(&spec, pod, opts)
}
func (f *fakeVolumeHost) NewWrapperUnmounter(volName string, spec Spec, podUID types.UID) (Unmounter, error) {
// The name of wrapper volume is set to "wrapped_{wrapped_volume_name}"
wrapperVolumeName := "wrapped_" + volName
if spec.Volume != nil {
spec.Volume.Name = wrapperVolumeName
}
plug, err := f.pluginMgr.FindPluginBySpec(&spec)
if err != nil {
return nil, err
}
return plug.NewUnmounter(spec.Name(), podUID)
}
// Returns the hostname of the host kubelet is running on
func (f *fakeVolumeHost) GetHostName() string {
return "fakeHostName"
}
// Returns host IP or nil in the case of error.
func (f *fakeVolumeHost) GetHostIP() (net.IP, error) {
return nil, fmt.Errorf("GetHostIP() not implemented")
}
func (f *fakeVolumeHost) GetNodeAllocatable() (v1.ResourceList, error) {
return v1.ResourceList{}, nil
}
func (f *fakeVolumeHost) GetSecretFunc() func(namespace, name string) (*v1.Secret, error) {
return func(namespace, name string) (*v1.Secret, error) {
return f.kubeClient.CoreV1().Secrets(namespace).Get(context.TODO(), name, metav1.GetOptions{})
}
}
func (f *fakeVolumeHost) GetExec(pluginName string) exec.Interface {
return f.exec
}
func (f *fakeVolumeHost) GetConfigMapFunc() func(namespace, name string) (*v1.ConfigMap, error) {
return func(namespace, name string) (*v1.ConfigMap, error) {
return f.kubeClient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), name, metav1.GetOptions{})
}
}
func (f *fakeVolumeHost) GetServiceAccountTokenFunc() func(string, string, *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
return func(namespace, name string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
return f.kubeClient.CoreV1().ServiceAccounts(namespace).CreateToken(context.TODO(), name, tr, metav1.CreateOptions{})
}
}
func (f *fakeVolumeHost) DeleteServiceAccountTokenFunc() func(types.UID) {
return func(types.UID) {}
}
func (f *fakeVolumeHost) GetNodeLabels() (map[string]string, error) {
if f.nodeLabels == nil {
f.nodeLabels = map[string]string{"test-label": "test-value"}
}
return f.nodeLabels, nil
}
func (f *fakeVolumeHost) GetNodeName() types.NodeName {
return types.NodeName(f.nodeName)
}
func (f *fakeVolumeHost) GetEventRecorder() record.EventRecorder {
return nil
}
func (f *fakeVolumeHost) ScriptCommands(scripts []CommandScript) {
ScriptCommands(f.exec, scripts)
}
// CommandScript is used to pre-configure a command that will be executed and
// optionally set its output (stdout and stderr combined) and return code.
type CommandScript struct {
@ -1820,8 +1606,7 @@ func VerifyGetMapPodDeviceCallCount(
// GetTestVolumePluginMgr creates, initializes, and returns a test volume plugin
// manager and fake volume plugin using a fake volume host.
func GetTestVolumePluginMgr(
t *testing.T) (*VolumePluginMgr, *FakeVolumePlugin) {
func GetTestVolumePluginMgr(t *testing.T) (*VolumePluginMgr, *FakeVolumePlugin) {
plugins := ProbeVolumePlugins(VolumeConfig{})
v := NewFakeVolumeHost(
t,
@ -1832,6 +1617,17 @@ func GetTestVolumePluginMgr(
return v.pluginMgr, plugins[0].(*FakeVolumePlugin)
}
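// GetTestKubeletVolumePluginMgr is the kubelet-side counterpart: it creates,
// initializes, and returns a test volume plugin manager and fake volume plugin
// backed by a fake kubelet volume host.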
func GetTestKubeletVolumePluginMgr(t *testing.T) (*VolumePluginMgr, *FakeVolumePlugin) {
plugins := ProbeVolumePlugins(VolumeConfig{})
v := NewFakeKubeletVolumeHost(
t,
"", /* rootDir */
nil, /* kubeClient */
plugins, /* plugins */
)
return v.pluginMgr, plugins[0].(*FakeVolumePlugin)
}
// CreateTestPVC returns a provisionable PVC for tests
func CreateTestPVC(capacity string, accessModes []v1.PersistentVolumeAccessMode) *v1.PersistentVolumeClaim {
claim := v1.PersistentVolumeClaim{
@ -1869,48 +1665,3 @@ func ContainsAccessMode(modes []v1.PersistentVolumeAccessMode, mode v1.Persisten
}
return false
}
func (f *fakeVolumeHost) CSIDriverLister() storagelistersv1.CSIDriverLister {
return f.csiDriverLister
}
func (f *fakeVolumeHost) VolumeAttachmentLister() storagelistersv1.VolumeAttachmentLister {
return f.volumeAttachmentLister
}
func (f *fakeVolumeHost) CSIDriversSynced() cache.InformerSynced {
// not needed for testing
return nil
}
func (f *fakeVolumeHost) CSINodeLister() storagelistersv1.CSINodeLister {
// not needed for testing
return nil
}
func (f *fakeVolumeHost) GetInformerFactory() informers.SharedInformerFactory {
return f.informerFactory
}
func (f *fakeVolumeHost) IsAttachDetachController() bool {
return true
}
func (f *fakeVolumeHost) SetKubeletError(err error) {
f.mux.Lock()
defer f.mux.Unlock()
f.kubeletErr = err
return
}
func (f *fakeVolumeHost) WaitForCacheSync() error {
return nil
}
func (f *fakeVolumeHost) WaitForKubeletErrNil() error {
return wait.PollImmediate(10*time.Millisecond, 10*time.Second, func() (bool, error) {
f.mux.Lock()
defer f.mux.Unlock()
return f.kubeletErr == nil, nil
})
}

View File

@ -0,0 +1,345 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testing
import (
"context"
"fmt"
"net"
"path/filepath"
"sync"
"testing"
"time"
authenticationv1 "k8s.io/api/authentication/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
storagelistersv1 "k8s.io/client-go/listers/storage/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
cloudprovider "k8s.io/cloud-provider"
proxyutil "k8s.io/kubernetes/pkg/proxy/util"
. "k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util/hostutil"
"k8s.io/kubernetes/pkg/volume/util/subpath"
"k8s.io/mount-utils"
"k8s.io/utils/exec"
testingexec "k8s.io/utils/exec/testing"
)
// fakeVolumeHost is useful for testing volume plugins.
// TODO: Extract fields specific to fakeKubeletVolumeHost and fakeAttachDetachVolumeHost.
type fakeVolumeHost struct {
rootDir string
kubeClient clientset.Interface
pluginMgr *VolumePluginMgr
cloud cloudprovider.Interface
mounter mount.Interface
hostUtil hostutil.HostUtils
exec *testingexec.FakeExec
nodeLabels map[string]string
nodeName string
subpather subpath.Interface
csiDriverLister storagelistersv1.CSIDriverLister
volumeAttachmentLister storagelistersv1.VolumeAttachmentLister
informerFactory informers.SharedInformerFactory
kubeletErr error
mux sync.Mutex
filteredDialOptions *proxyutil.FilteredDialOptions
}
var _ VolumeHost = &fakeVolumeHost{}
func NewFakeVolumeHost(t *testing.T, rootDir string, kubeClient clientset.Interface, plugins []VolumePlugin) *fakeVolumeHost {
return newFakeVolumeHost(t, rootDir, kubeClient, plugins, nil, nil, "", nil, nil)
}
func NewFakeVolumeHostWithCloudProvider(t *testing.T, rootDir string, kubeClient clientset.Interface, plugins []VolumePlugin, cloud cloudprovider.Interface) *fakeVolumeHost {
return newFakeVolumeHost(t, rootDir, kubeClient, plugins, cloud, nil, "", nil, nil)
}
func NewFakeVolumeHostWithCSINodeName(t *testing.T, rootDir string, kubeClient clientset.Interface, plugins []VolumePlugin, nodeName string, driverLister storagelistersv1.CSIDriverLister, volumeAttachLister storagelistersv1.VolumeAttachmentLister) *fakeVolumeHost {
return newFakeVolumeHost(t, rootDir, kubeClient, plugins, nil, nil, nodeName, driverLister, volumeAttachLister)
}
func newFakeVolumeHost(t *testing.T, rootDir string, kubeClient clientset.Interface, plugins []VolumePlugin, cloud cloudprovider.Interface, pathToTypeMap map[string]hostutil.FileType, nodeName string, driverLister storagelistersv1.CSIDriverLister, volumeAttachLister storagelistersv1.VolumeAttachmentLister) *fakeVolumeHost {
host := &fakeVolumeHost{rootDir: rootDir, kubeClient: kubeClient, cloud: cloud, nodeName: nodeName, csiDriverLister: driverLister, volumeAttachmentLister: volumeAttachLister}
host.mounter = mount.NewFakeMounter(nil)
host.hostUtil = hostutil.NewFakeHostUtil(pathToTypeMap)
host.exec = &testingexec.FakeExec{DisableScripts: true}
host.pluginMgr = &VolumePluginMgr{}
if err := host.pluginMgr.InitPlugins(plugins, nil /* prober */, host); err != nil {
t.Fatalf("Failed to init plugins while creating fake volume host: %v", err)
}
host.subpather = &subpath.FakeSubpath{}
host.informerFactory = informers.NewSharedInformerFactory(kubeClient, time.Minute)
// Wait until the InitPlugins setup is finished before returning from this setup func
if err := host.WaitForKubeletErrNil(); err != nil {
t.Fatalf("Failed to wait for kubelet err to be nil while creating fake volume host: %v", err)
}
return host
}
func (f *fakeVolumeHost) GetPluginDir(podUID string) string {
return filepath.Join(f.rootDir, "plugins", podUID)
}
func (f *fakeVolumeHost) GetVolumeDevicePluginDir(pluginName string) string {
return filepath.Join(f.rootDir, "plugins", pluginName, "volumeDevices")
}
func (f *fakeVolumeHost) GetPodsDir() string {
return filepath.Join(f.rootDir, "pods")
}
func (f *fakeVolumeHost) GetPodVolumeDir(podUID types.UID, pluginName, volumeName string) string {
return filepath.Join(f.rootDir, "pods", string(podUID), "volumes", pluginName, volumeName)
}
func (f *fakeVolumeHost) GetPodVolumeDeviceDir(podUID types.UID, pluginName string) string {
return filepath.Join(f.rootDir, "pods", string(podUID), "volumeDevices", pluginName)
}
func (f *fakeVolumeHost) GetPodPluginDir(podUID types.UID, pluginName string) string {
return filepath.Join(f.rootDir, "pods", string(podUID), "plugins", pluginName)
}
func (f *fakeVolumeHost) GetKubeClient() clientset.Interface {
return f.kubeClient
}
func (f *fakeVolumeHost) GetCloudProvider() cloudprovider.Interface {
return f.cloud
}
func (f *fakeVolumeHost) GetMounter(pluginName string) mount.Interface {
return f.mounter
}
func (f *fakeVolumeHost) GetSubpather() subpath.Interface {
return f.subpather
}
func (f *fakeVolumeHost) GetFilteredDialOptions() *proxyutil.FilteredDialOptions {
return f.filteredDialOptions
}
func (f *fakeVolumeHost) GetPluginMgr() *VolumePluginMgr {
return f.pluginMgr
}
func (f *fakeVolumeHost) NewWrapperMounter(volName string, spec Spec, pod *v1.Pod, opts VolumeOptions) (Mounter, error) {
// The name of wrapper volume is set to "wrapped_{wrapped_volume_name}"
wrapperVolumeName := "wrapped_" + volName
if spec.Volume != nil {
spec.Volume.Name = wrapperVolumeName
}
plug, err := f.pluginMgr.FindPluginBySpec(&spec)
if err != nil {
return nil, err
}
return plug.NewMounter(&spec, pod, opts)
}
func (f *fakeVolumeHost) NewWrapperUnmounter(volName string, spec Spec, podUID types.UID) (Unmounter, error) {
// The name of wrapper volume is set to "wrapped_{wrapped_volume_name}"
wrapperVolumeName := "wrapped_" + volName
if spec.Volume != nil {
spec.Volume.Name = wrapperVolumeName
}
plug, err := f.pluginMgr.FindPluginBySpec(&spec)
if err != nil {
return nil, err
}
return plug.NewUnmounter(spec.Name(), podUID)
}
// Returns the hostname of the host kubelet is running on
func (f *fakeVolumeHost) GetHostName() string {
return "fakeHostName"
}
// Returns host IP or nil in the case of error.
func (f *fakeVolumeHost) GetHostIP() (net.IP, error) {
return nil, fmt.Errorf("GetHostIP() not implemented")
}
func (f *fakeVolumeHost) GetNodeAllocatable() (v1.ResourceList, error) {
return v1.ResourceList{}, nil
}
func (f *fakeVolumeHost) GetSecretFunc() func(namespace, name string) (*v1.Secret, error) {
return func(namespace, name string) (*v1.Secret, error) {
return f.kubeClient.CoreV1().Secrets(namespace).Get(context.TODO(), name, metav1.GetOptions{})
}
}
func (f *fakeVolumeHost) GetExec(pluginName string) exec.Interface {
return f.exec
}
func (f *fakeVolumeHost) GetConfigMapFunc() func(namespace, name string) (*v1.ConfigMap, error) {
return func(namespace, name string) (*v1.ConfigMap, error) {
return f.kubeClient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), name, metav1.GetOptions{})
}
}
func (f *fakeVolumeHost) GetServiceAccountTokenFunc() func(string, string, *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
return func(namespace, name string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
return f.kubeClient.CoreV1().ServiceAccounts(namespace).CreateToken(context.TODO(), name, tr, metav1.CreateOptions{})
}
}
func (f *fakeVolumeHost) DeleteServiceAccountTokenFunc() func(types.UID) {
return func(types.UID) {}
}
func (f *fakeVolumeHost) GetNodeLabels() (map[string]string, error) {
if f.nodeLabels == nil {
f.nodeLabels = map[string]string{"test-label": "test-value"}
}
return f.nodeLabels, nil
}
func (f *fakeVolumeHost) GetNodeName() types.NodeName {
return types.NodeName(f.nodeName)
}
func (f *fakeVolumeHost) GetEventRecorder() record.EventRecorder {
return nil
}
func (f *fakeVolumeHost) ScriptCommands(scripts []CommandScript) {
ScriptCommands(f.exec, scripts)
}
func (f *fakeVolumeHost) WaitForKubeletErrNil() error {
return wait.PollImmediate(10*time.Millisecond, 10*time.Second, func() (bool, error) {
f.mux.Lock()
defer f.mux.Unlock()
return f.kubeletErr == nil, nil
})
}
type fakeAttachDetachVolumeHost struct {
fakeVolumeHost
}
var _ AttachDetachVolumeHost = &fakeAttachDetachVolumeHost{}
// TODO: Create constructors for AttachDetachVolumeHost once it's consumed in tests.
func (f *fakeAttachDetachVolumeHost) CSINodeLister() storagelistersv1.CSINodeLister {
// not needed for testing
return nil
}
func (f *fakeAttachDetachVolumeHost) CSIDriverLister() storagelistersv1.CSIDriverLister {
return f.csiDriverLister
}
func (f *fakeAttachDetachVolumeHost) VolumeAttachmentLister() storagelistersv1.VolumeAttachmentLister {
return f.volumeAttachmentLister
}
func (f *fakeAttachDetachVolumeHost) IsAttachDetachController() bool {
return true
}
type fakeKubeletVolumeHost struct {
fakeVolumeHost
}
var _ KubeletVolumeHost = &fakeKubeletVolumeHost{}
func NewFakeKubeletVolumeHost(t *testing.T, rootDir string, kubeClient clientset.Interface, plugins []VolumePlugin) *fakeKubeletVolumeHost {
return newFakeKubeletVolumeHost(t, rootDir, kubeClient, plugins, nil, nil, "", nil, nil)
}
func NewFakeKubeletVolumeHostWithCloudProvider(t *testing.T, rootDir string, kubeClient clientset.Interface, plugins []VolumePlugin, cloud cloudprovider.Interface) *fakeKubeletVolumeHost {
return newFakeKubeletVolumeHost(t, rootDir, kubeClient, plugins, cloud, nil, "", nil, nil)
}
func NewFakeKubeletVolumeHostWithNodeLabels(t *testing.T, rootDir string, kubeClient clientset.Interface, plugins []VolumePlugin, labels map[string]string) *fakeKubeletVolumeHost {
volHost := newFakeKubeletVolumeHost(t, rootDir, kubeClient, plugins, nil, nil, "", nil, nil)
volHost.nodeLabels = labels
return volHost
}
func NewFakeKubeletVolumeHostWithCSINodeName(t *testing.T, rootDir string, kubeClient clientset.Interface, plugins []VolumePlugin, nodeName string, driverLister storagelistersv1.CSIDriverLister, volumeAttachLister storagelistersv1.VolumeAttachmentLister) *fakeKubeletVolumeHost {
return newFakeKubeletVolumeHost(t, rootDir, kubeClient, plugins, nil, nil, nodeName, driverLister, volumeAttachLister)
}
func NewFakeKubeletVolumeHostWithMounterFSType(t *testing.T, rootDir string, kubeClient clientset.Interface, plugins []VolumePlugin, pathToTypeMap map[string]hostutil.FileType) *fakeKubeletVolumeHost {
return newFakeKubeletVolumeHost(t, rootDir, kubeClient, plugins, nil, pathToTypeMap, "", nil, nil)
}
func newFakeKubeletVolumeHost(t *testing.T, rootDir string, kubeClient clientset.Interface, plugins []VolumePlugin, cloud cloudprovider.Interface, pathToTypeMap map[string]hostutil.FileType, nodeName string, driverLister storagelistersv1.CSIDriverLister, volumeAttachLister storagelistersv1.VolumeAttachmentLister) *fakeKubeletVolumeHost {
host := &fakeKubeletVolumeHost{}
host.rootDir = rootDir
host.kubeClient = kubeClient
host.cloud = cloud
host.nodeName = nodeName
host.csiDriverLister = driverLister
host.volumeAttachmentLister = volumeAttachLister
host.mounter = mount.NewFakeMounter(nil)
host.hostUtil = hostutil.NewFakeHostUtil(pathToTypeMap)
host.exec = &testingexec.FakeExec{DisableScripts: true}
host.pluginMgr = &VolumePluginMgr{}
if err := host.pluginMgr.InitPlugins(plugins, nil /* prober */, host); err != nil {
t.Fatalf("Failed to init plugins while creating fake volume host: %v", err)
}
host.subpather = &subpath.FakeSubpath{}
host.informerFactory = informers.NewSharedInformerFactory(kubeClient, time.Minute)
// Wait until the InitPlugins setup is finished before returning from this setup func
if err := host.WaitForKubeletErrNil(); err != nil {
t.Fatalf("Failed to wait for kubelet err to be nil while creating fake volume host: %v", err)
}
return host
}
func (f *fakeKubeletVolumeHost) SetKubeletError(err error) {
f.mux.Lock()
defer f.mux.Unlock()
f.kubeletErr = err
return
}
func (f *fakeKubeletVolumeHost) GetInformerFactory() informers.SharedInformerFactory {
return f.informerFactory
}
func (f *fakeKubeletVolumeHost) CSIDriverLister() storagelistersv1.CSIDriverLister {
return f.csiDriverLister
}
func (f *fakeKubeletVolumeHost) CSIDriversSynced() cache.InformerSynced {
// not needed for testing
return nil
}
func (f *fakeKubeletVolumeHost) WaitForCacheSync() error {
return nil
}
func (f *fakeKubeletVolumeHost) GetHostUtil() hostutil.HostUtils {
return f.hostUtil
}

View File

@ -43,7 +43,7 @@ func TestCanSupport(t *testing.T) {
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(t, tmpDir, nil, nil))
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeKubeletVolumeHost(t, tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/vsphere-volume")
if err != nil {
@ -92,7 +92,7 @@ func TestPlugin(t *testing.T) {
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(t, tmpDir, nil, nil))
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeKubeletVolumeHost(t, tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/vsphere-volume")
if err != nil {
@ -214,7 +214,7 @@ func TestUnsupportedCloudProvider(t *testing.T) {
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil, /* prober */
volumetest.NewFakeVolumeHostWithCloudProvider(t, tmpDir, nil, nil, tc.cloudProvider))
volumetest.NewFakeKubeletVolumeHostWithCloudProvider(t, tmpDir, nil, nil, tc.cloudProvider))
plug, err := plugMgr.FindAttachablePluginByName("kubernetes.io/vsphere-volume")
if err != nil {
@ -236,3 +236,23 @@ func TestUnsupportedCloudProvider(t *testing.T) {
}
}
}
func TestUnsupportedVolumeHost(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("vsphereVolumeTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(t, tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/vsphere-volume")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
_, err = plug.ConstructVolumeSpec("", "")
if err == nil {
t.Errorf("Expected failure constructing volume spec with unsupported VolumeHost")
}
}