diff --git a/pkg/controller/volume/attachdetach/populator/desired_state_of_world_populator_test.go b/pkg/controller/volume/attachdetach/populator/desired_state_of_world_populator_test.go index 9e07d29c60c..275f6d29081 100644 --- a/pkg/controller/volume/attachdetach/populator/desired_state_of_world_populator_test.go +++ b/pkg/controller/volume/attachdetach/populator/desired_state_of_world_populator_test.go @@ -68,7 +68,7 @@ func TestFindAndAddActivePods_FindAndRemoveDeletedPods(t *testing.T) { podName := util.GetUniquePodName(pod) - generatedVolumeName := "fake-plugin/" + pod.Spec.Volumes[0].Name + generatedVolumeName := "fake-plugin/" + pod.Spec.Volumes[0].GCEPersistentDisk.PDName pvcLister := fakeInformerFactory.Core().V1().PersistentVolumeClaims().Lister() pvLister := fakeInformerFactory.Core().V1().PersistentVolumes().Lister() diff --git a/pkg/kubelet/kubelet_volumes_test.go b/pkg/kubelet/kubelet_volumes_test.go index dc2c89a660d..140431393bd 100644 --- a/pkg/kubelet/kubelet_volumes_test.go +++ b/pkg/kubelet/kubelet_volumes_test.go @@ -261,7 +261,7 @@ func TestVolumeUnmountAndDetachControllerDisabled(t *testing.T) { 1 /* expectedTearDownCallCount */, testKubelet.volumePlugin)) // Verify volumes detached and no longer reported as in use - assert.NoError(t, waitForVolumeDetach(v1.UniqueVolumeName("fake/vol1"), kubelet.volumeManager)) + assert.NoError(t, waitForVolumeDetach(v1.UniqueVolumeName("fake/fake-device"), kubelet.volumeManager)) assert.True(t, testKubelet.volumePlugin.GetNewAttacherCallCount() >= 1, "Expected plugin NewAttacher to be called at least once") assert.NoError(t, volumetest.VerifyDetachCallCount( 1 /* expectedDetachCallCount */, testKubelet.volumePlugin)) @@ -279,7 +279,7 @@ func TestVolumeAttachAndMountControllerEnabled(t *testing.T) { Status: v1.NodeStatus{ VolumesAttached: []v1.AttachedVolume{ { - Name: "fake/vol1", + Name: "fake/fake-device", DevicePath: "fake/path", }, }}, @@ -310,7 +310,7 @@ func TestVolumeAttachAndMountControllerEnabled(t 
*testing.T) { // Fake node status update go simulateVolumeInUseUpdate( - v1.UniqueVolumeName("fake/vol1"), + v1.UniqueVolumeName("fake/fake-device"), stopCh, kubelet.volumeManager) @@ -346,7 +346,7 @@ func TestVolumeUnmountAndDetachControllerEnabled(t *testing.T) { Status: v1.NodeStatus{ VolumesAttached: []v1.AttachedVolume{ { - Name: "fake/vol1", + Name: "fake/fake-device", DevicePath: "fake/path", }, }}, @@ -378,7 +378,7 @@ func TestVolumeUnmountAndDetachControllerEnabled(t *testing.T) { // Fake node status update go simulateVolumeInUseUpdate( - v1.UniqueVolumeName("fake/vol1"), + v1.UniqueVolumeName("fake/fake-device"), stopCh, kubelet.volumeManager) @@ -419,7 +419,7 @@ func TestVolumeUnmountAndDetachControllerEnabled(t *testing.T) { 1 /* expectedTearDownCallCount */, testKubelet.volumePlugin)) // Verify volumes detached and no longer reported as in use - assert.NoError(t, waitForVolumeDetach(v1.UniqueVolumeName("fake/vol1"), kubelet.volumeManager)) + assert.NoError(t, waitForVolumeDetach(v1.UniqueVolumeName("fake/fake-device"), kubelet.volumeManager)) assert.True(t, testKubelet.volumePlugin.GetNewAttacherCallCount() >= 1, "Expected plugin NewAttacher to be called at least once") assert.NoError(t, volumetest.VerifyZeroDetachCallCount(testKubelet.volumePlugin)) } diff --git a/pkg/kubelet/volumemanager/cache/actual_state_of_world.go b/pkg/kubelet/volumemanager/cache/actual_state_of_world.go index 140c434fe6c..6ba3cd0dfda 100644 --- a/pkg/kubelet/volumemanager/cache/actual_state_of_world.go +++ b/pkg/kubelet/volumemanager/cache/actual_state_of_world.go @@ -58,7 +58,7 @@ type ActualStateOfWorld interface { // volume, reset the pod's remountRequired value. // If a volume with the name volumeName does not exist in the list of // attached volumes, an error is returned. 
- AddPodToVolume(podName volumetypes.UniquePodName, podUID types.UID, volumeName v1.UniqueVolumeName, mounter volume.Mounter, blockVolumeMapper volume.BlockVolumeMapper, outerVolumeSpecName string, volumeGidValue string) error + AddPodToVolume(podName volumetypes.UniquePodName, podUID types.UID, volumeName v1.UniqueVolumeName, mounter volume.Mounter, blockVolumeMapper volume.BlockVolumeMapper, outerVolumeSpecName string, volumeGidValue string, volumeSpec *volume.Spec) error // MarkRemountRequired marks each volume that is successfully attached and // mounted for the specified pod as requiring remount (if the plugin for the @@ -268,6 +268,13 @@ type mountedPod struct { // mapper used to block volumes support blockVolumeMapper volume.BlockVolumeMapper + // spec is the volume spec containing the specification for this volume. + // Used to generate the volume plugin object, and passed to plugin methods. + // In particular, the Unmount method uses spec.Name() as the volumeSpecName + // in the mount path: + // /var/lib/kubelet/pods/{podUID}/volumes/{escapeQualifiedPluginName}/{volumeSpecName}/ + volumeSpec *volume.Spec + // outerVolumeSpecName is the volume.Spec.Name() of the volume as referenced // directly in the pod. 
If the volume was referenced through a persistent // volume claim, this contains the volume.Spec.Name() of the persistent @@ -303,7 +310,8 @@ func (asw *actualStateOfWorld) MarkVolumeAsMounted( mounter volume.Mounter, blockVolumeMapper volume.BlockVolumeMapper, outerVolumeSpecName string, - volumeGidValue string) error { + volumeGidValue string, + volumeSpec *volume.Spec) error { return asw.AddPodToVolume( podName, podUID, @@ -311,7 +319,8 @@ func (asw *actualStateOfWorld) MarkVolumeAsMounted( mounter, blockVolumeMapper, outerVolumeSpecName, - volumeGidValue) + volumeGidValue, + volumeSpec) } func (asw *actualStateOfWorld) AddVolumeToReportAsAttached(volumeName v1.UniqueVolumeName, nodeName types.NodeName) { @@ -403,7 +412,8 @@ func (asw *actualStateOfWorld) AddPodToVolume( mounter volume.Mounter, blockVolumeMapper volume.BlockVolumeMapper, outerVolumeSpecName string, - volumeGidValue string) error { + volumeGidValue string, + volumeSpec *volume.Spec) error { asw.Lock() defer asw.Unlock() @@ -423,6 +433,7 @@ func (asw *actualStateOfWorld) AddPodToVolume( blockVolumeMapper: blockVolumeMapper, outerVolumeSpecName: outerVolumeSpecName, volumeGidValue: volumeGidValue, + volumeSpec: volumeSpec, } } @@ -444,7 +455,7 @@ func (asw *actualStateOfWorld) MarkRemountRequired( } volumePlugin, err := - asw.volumePluginMgr.FindPluginBySpec(volumeObj.spec) + asw.volumePluginMgr.FindPluginBySpec(podObj.volumeSpec) if err != nil || volumePlugin == nil { // Log and continue processing glog.Errorf( @@ -452,7 +463,7 @@ func (asw *actualStateOfWorld) MarkRemountRequired( podObj.podName, podObj.podUID, volumeObj.volumeName, - volumeObj.spec.Name()) + podObj.volumeSpec.Name()) continue } @@ -546,8 +557,8 @@ func (asw *actualStateOfWorld) VolumeExistsWithSpecName(podName volumetypes.Uniq asw.RLock() defer asw.RUnlock() for _, volumeObj := range asw.attachedVolumes { - for name := range volumeObj.mountedPods { - if podName == name && volumeObj.spec.Name() == volumeSpecName { + for name, 
podObj := range volumeObj.mountedPods { + if podName == name && podObj.volumeSpec.Name() == volumeSpecName { return true } } @@ -713,13 +724,13 @@ func getMountedVolume( MountedVolume: operationexecutor.MountedVolume{ PodName: mountedPod.podName, VolumeName: attachedVolume.volumeName, - InnerVolumeSpecName: attachedVolume.spec.Name(), + InnerVolumeSpecName: mountedPod.volumeSpec.Name(), OuterVolumeSpecName: mountedPod.outerVolumeSpecName, PluginName: attachedVolume.pluginName, PodUID: mountedPod.podUID, Mounter: mountedPod.mounter, BlockVolumeMapper: mountedPod.blockVolumeMapper, VolumeGidValue: mountedPod.volumeGidValue, - VolumeSpec: attachedVolume.spec, + VolumeSpec: mountedPod.volumeSpec, DeviceMountPath: attachedVolume.deviceMountPath}} } diff --git a/pkg/kubelet/volumemanager/cache/actual_state_of_world_test.go b/pkg/kubelet/volumemanager/cache/actual_state_of_world_test.go index 634a3328c86..8cc7a8eb848 100644 --- a/pkg/kubelet/volumemanager/cache/actual_state_of_world_test.go +++ b/pkg/kubelet/volumemanager/cache/actual_state_of_world_test.go @@ -211,7 +211,7 @@ func Test_AddPodToVolume_Positive_ExistingVolumeNewNode(t *testing.T) { // Act err = asw.AddPodToVolume( - podName, pod.UID, generatedVolumeName, mounter, mapper, volumeSpec.Name(), "" /* volumeGidValue */) + podName, pod.UID, generatedVolumeName, mounter, mapper, volumeSpec.Name(), "" /* volumeGidValue */, volumeSpec) // Assert if err != nil { @@ -275,14 +275,14 @@ func Test_AddPodToVolume_Positive_ExistingVolumeExistingNode(t *testing.T) { } err = asw.AddPodToVolume( - podName, pod.UID, generatedVolumeName, mounter, mapper, volumeSpec.Name(), "" /* volumeGidValue */) + podName, pod.UID, generatedVolumeName, mounter, mapper, volumeSpec.Name(), "" /* volumeGidValue */, volumeSpec) if err != nil { t.Fatalf("AddPodToVolume failed. 
Expected: Actual: <%v>", err) } // Act err = asw.AddPodToVolume( - podName, pod.UID, generatedVolumeName, mounter, mapper, volumeSpec.Name(), "" /* volumeGidValue */) + podName, pod.UID, generatedVolumeName, mounter, mapper, volumeSpec.Name(), "" /* volumeGidValue */, volumeSpec) // Assert if err != nil { @@ -296,6 +296,119 @@ func Test_AddPodToVolume_Positive_ExistingVolumeExistingNode(t *testing.T) { verifyVolumeExistsWithSpecNameInVolumeAsw(t, podName, volumeSpec.Name(), asw) } +// Populates data struct with a volume +// Calls AddPodToVolume() twice to add two different pods to the same volume +// Verifies both volume/pod combos exist using PodExistsInVolume() and the second +// call did not fail. +func Test_AddTwoPodsToVolume_Positive(t *testing.T) { + // Arrange + volumePluginMgr, plugin := volumetesting.GetTestVolumePluginMgr(t) + asw := NewActualStateOfWorld("mynode" /* nodeName */, volumePluginMgr) + devicePath := "fake/device/path" + + pod1 := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod1", + UID: "pod1uid", + }, + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ + { + Name: "volume-name-1", + VolumeSource: v1.VolumeSource{ + GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ + PDName: "fake-device1", + }, + }, + }, + }, + }, + } + + pod2 := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pod2", + UID: "pod2uid", + }, + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ + { + Name: "volume-name-2", + VolumeSource: v1.VolumeSource{ + GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ + PDName: "fake-device1", + }, + }, + }, + }, + }, + } + volumeSpec1 := &volume.Spec{Volume: &pod1.Spec.Volumes[0]} + volumeSpec2 := &volume.Spec{Volume: &pod2.Spec.Volumes[0]} + generatedVolumeName1, err := util.GetUniqueVolumeNameFromSpec( + plugin, volumeSpec1) + generatedVolumeName2, err := util.GetUniqueVolumeNameFromSpec( + plugin, volumeSpec2) + + if generatedVolumeName1 != generatedVolumeName2 { + t.Fatalf( + "Unique volume names should be the same. 
unique volume name 1: <%q> unique volume name 2: <%q>, spec1 %v, spec2 %v", + generatedVolumeName1, + generatedVolumeName2, volumeSpec1, volumeSpec2) + } + + err = asw.MarkVolumeAsAttached(generatedVolumeName1, volumeSpec1, "" /* nodeName */, devicePath) + if err != nil { + t.Fatalf("MarkVolumeAsAttached failed. Expected: Actual: <%v>", err) + } + podName1 := util.GetUniquePodName(pod1) + + mounter1, err := plugin.NewMounter(volumeSpec1, pod1, volume.VolumeOptions{}) + if err != nil { + t.Fatalf("NewMounter failed. Expected: Actual: <%v>", err) + } + + mapper1, err := plugin.NewBlockVolumeMapper(volumeSpec1, pod1, volume.VolumeOptions{}) + if err != nil { + t.Fatalf("NewBlockVolumeMapper failed. Expected: Actual: <%v>", err) + } + + err = asw.AddPodToVolume( + podName1, pod1.UID, generatedVolumeName1, mounter1, mapper1, volumeSpec1.Name(), "" /* volumeGidValue */, volumeSpec1) + if err != nil { + t.Fatalf("AddPodToVolume failed. Expected: Actual: <%v>", err) + } + + podName2 := util.GetUniquePodName(pod2) + + mounter2, err := plugin.NewMounter(volumeSpec2, pod2, volume.VolumeOptions{}) + if err != nil { + t.Fatalf("NewMounter failed. Expected: Actual: <%v>", err) + } + + mapper2, err := plugin.NewBlockVolumeMapper(volumeSpec2, pod2, volume.VolumeOptions{}) + if err != nil { + t.Fatalf("NewBlockVolumeMapper failed. Expected: Actual: <%v>", err) + } + + err = asw.AddPodToVolume( + podName2, pod2.UID, generatedVolumeName1, mounter2, mapper2, volumeSpec2.Name(), "" /* volumeGidValue */, volumeSpec2) + if err != nil { + t.Fatalf("AddPodToVolume failed. 
Expected: Actual: <%v>", err) + } + + verifyVolumeExistsAsw(t, generatedVolumeName1, true /* shouldExist */, asw) + verifyVolumeDoesntExistInUnmountedVolumes(t, generatedVolumeName1, asw) + verifyVolumeDoesntExistInGloballyMountedVolumes(t, generatedVolumeName1, asw) + verifyPodExistsInVolumeAsw(t, podName1, generatedVolumeName1, "fake/device/path" /* expectedDevicePath */, asw) + verifyVolumeExistsWithSpecNameInVolumeAsw(t, podName1, volumeSpec1.Name(), asw) + verifyPodExistsInVolumeAsw(t, podName2, generatedVolumeName2, "fake/device/path" /* expectedDevicePath */, asw) + verifyVolumeExistsWithSpecNameInVolumeAsw(t, podName2, volumeSpec2.Name(), asw) + verifyVolumeSpecNameInVolumeAsw(t, podName1, []*volume.Spec{volumeSpec1}, asw) + verifyVolumeSpecNameInVolumeAsw(t, podName2, []*volume.Spec{volumeSpec2}, asw) + +} + // Calls AddPodToVolume() to add pod to empty data struct // Verifies call fails with "volume does not exist" error. func Test_AddPodToVolume_Negative_VolumeDoesntExist(t *testing.T) { @@ -356,7 +469,7 @@ func Test_AddPodToVolume_Negative_VolumeDoesntExist(t *testing.T) { // Act err = asw.AddPodToVolume( - podName, pod.UID, volumeName, mounter, mapper, volumeSpec.Name(), "" /* volumeGidValue */) + podName, pod.UID, volumeName, mounter, mapper, volumeSpec.Name(), "" /* volumeGidValue */, volumeSpec) // Assert if err == nil { @@ -580,3 +693,18 @@ func verifyVolumeDoesntExistWithSpecNameInVolumeAsw( podExistsInVolume) } } + +func verifyVolumeSpecNameInVolumeAsw( + t *testing.T, + podToCheck volumetypes.UniquePodName, + volumeSpecs []*volume.Spec, + asw ActualStateOfWorld) { + mountedVolumes := + asw.GetMountedVolumesForPod(podToCheck) + + for i, volume := range mountedVolumes { + if volume.InnerVolumeSpecName != volumeSpecs[i].Name() { + t.Fatalf("Volume spec name does not match Expected: <%q> Actual: <%q>", volumeSpecs[i].Name(), volume.InnerVolumeSpecName) + } + } +} diff --git a/pkg/kubelet/volumemanager/cache/desired_state_of_world.go 
b/pkg/kubelet/volumemanager/cache/desired_state_of_world.go index a61752d8c69..2fb06223e7c 100644 --- a/pkg/kubelet/volumemanager/cache/desired_state_of_world.go +++ b/pkg/kubelet/volumemanager/cache/desired_state_of_world.go @@ -171,7 +171,7 @@ type podToMount struct { // generate the volume plugin object, and passed to plugin methods. // For non-PVC volumes this is the same as defined in the pod object. For // PVC volumes it is from the dereferenced PV object. - spec *volume.Spec + volumeSpec *volume.Spec // outerVolumeSpecName is the volume.Spec.Name() of the volume as referenced // directly in the pod. If the volume was referenced through a persistent @@ -238,7 +238,7 @@ func (dsw *desiredStateOfWorld) AddPodToVolume( dsw.volumesToMount[volumeName].podsToMount[podName] = podToMount{ podName: podName, pod: pod, - spec: volumeSpec, + volumeSpec: volumeSpec, outerVolumeSpecName: outerVolumeSpecName, } return volumeName, nil @@ -314,7 +314,7 @@ func (dsw *desiredStateOfWorld) VolumeExistsWithSpecName(podName types.UniquePod defer dsw.RUnlock() for _, volumeObj := range dsw.volumesToMount { for name, podObj := range volumeObj.podsToMount { - if podName == name && podObj.spec.Name() == volumeSpecName { + if podName == name && podObj.volumeSpec.Name() == volumeSpecName { return true } } @@ -351,7 +351,7 @@ func (dsw *desiredStateOfWorld) GetVolumesToMount() []VolumeToMount { VolumeName: volumeName, PodName: podName, Pod: podObj.pod, - VolumeSpec: podObj.spec, + VolumeSpec: podObj.volumeSpec, PluginIsAttachable: volumeObj.pluginIsAttachable, OuterVolumeSpecName: podObj.outerVolumeSpecName, VolumeGidValue: volumeObj.volumeGidValue, diff --git a/pkg/kubelet/volumemanager/reconciler/reconciler.go b/pkg/kubelet/volumemanager/reconciler/reconciler.go index 6fb3b63abd5..5d6619d5b32 100644 --- a/pkg/kubelet/volumemanager/reconciler/reconciler.go +++ b/pkg/kubelet/volumemanager/reconciler/reconciler.go @@ -593,7 +593,8 @@ func (rc *reconciler) updateStates(volumesNeedUpdate 
map[v1.UniqueVolumeName]*re volume.mounter, volume.blockVolumeMapper, volume.outerVolumeSpecName, - volume.volumeGidValue) + volume.volumeGidValue, + volume.volumeSpec) if err != nil { glog.Errorf("Could not add pod to volume information to actual state of world: %v", err) continue diff --git a/pkg/kubelet/volumemanager/reconciler/reconciler_test.go b/pkg/kubelet/volumemanager/reconciler/reconciler_test.go index 4e49d2ba7e1..9d7e4207b4a 100644 --- a/pkg/kubelet/volumemanager/reconciler/reconciler_test.go +++ b/pkg/kubelet/volumemanager/reconciler/reconciler_test.go @@ -479,7 +479,7 @@ func Test_Run_Positive_VolumeAttachAndMap(t *testing.T) { ObjectMeta: metav1.ObjectMeta{UID: "001", Name: "volume-name"}, Spec: v1.PersistentVolumeSpec{ Capacity: v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse("10G")}, - PersistentVolumeSource: v1.PersistentVolumeSource{GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{}}, + PersistentVolumeSource: v1.PersistentVolumeSource{GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{PDName: "fake-device1"}}, AccessModes: []v1.PersistentVolumeAccessMode{ v1.ReadWriteOnce, v1.ReadOnlyMany, @@ -570,7 +570,7 @@ func Test_Run_Positive_BlockVolumeMapControllerAttachEnabled(t *testing.T) { ObjectMeta: metav1.ObjectMeta{UID: "001", Name: "volume-name"}, Spec: v1.PersistentVolumeSpec{ Capacity: v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse("10G")}, - PersistentVolumeSource: v1.PersistentVolumeSource{GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{}}, + PersistentVolumeSource: v1.PersistentVolumeSource{GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{PDName: "fake-device1"}}, AccessModes: []v1.PersistentVolumeAccessMode{ v1.ReadWriteOnce, v1.ReadOnlyMany, @@ -662,7 +662,7 @@ func Test_Run_Positive_BlockVolumeAttachMapUnmapDetach(t *testing.T) { ObjectMeta: metav1.ObjectMeta{UID: "001", Name: "volume-name"}, Spec: v1.PersistentVolumeSpec{ Capacity: 
v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse("10G")}, - PersistentVolumeSource: v1.PersistentVolumeSource{GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{}}, + PersistentVolumeSource: v1.PersistentVolumeSource{GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{PDName: "fake-device1"}}, AccessModes: []v1.PersistentVolumeAccessMode{ v1.ReadWriteOnce, v1.ReadOnlyMany, @@ -764,7 +764,7 @@ func Test_Run_Positive_VolumeUnmapControllerAttachEnabled(t *testing.T) { ObjectMeta: metav1.ObjectMeta{UID: "001", Name: "volume-name"}, Spec: v1.PersistentVolumeSpec{ Capacity: v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse("10G")}, - PersistentVolumeSource: v1.PersistentVolumeSource{GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{}}, + PersistentVolumeSource: v1.PersistentVolumeSource{GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{PDName: "fake-device1"}}, AccessModes: []v1.PersistentVolumeAccessMode{ v1.ReadWriteOnce, v1.ReadOnlyMany, @@ -1029,7 +1029,7 @@ func createTestClient() *fake.Clientset { Status: v1.NodeStatus{ VolumesAttached: []v1.AttachedVolume{ { - Name: "fake-plugin/volume-name", + Name: "fake-plugin/fake-device1", DevicePath: "fake/path", }, }}, diff --git a/pkg/kubelet/volumemanager/volume_manager_test.go b/pkg/kubelet/volumemanager/volume_manager_test.go index bb9b156b52c..6e9b01fdb03 100644 --- a/pkg/kubelet/volumemanager/volume_manager_test.go +++ b/pkg/kubelet/volumemanager/volume_manager_test.go @@ -243,7 +243,7 @@ func createObjects() (*v1.Node, *v1.Pod, *v1.PersistentVolume, *v1.PersistentVol Status: v1.NodeStatus{ VolumesAttached: []v1.AttachedVolume{ { - Name: "fake/pvA", + Name: "fake/fake-device", DevicePath: "fake/path", }, }}, diff --git a/pkg/volume/testing/testing.go b/pkg/volume/testing/testing.go index 9e5ae2cd12a..d3441bf87cd 100644 --- a/pkg/volume/testing/testing.go +++ b/pkg/volume/testing/testing.go @@ -250,7 +250,17 @@ func (plugin *FakeVolumePlugin) GetPluginName() string 
{ } func (plugin *FakeVolumePlugin) GetVolumeName(spec *Spec) (string, error) { - return spec.Name(), nil + var volumeName string + if spec.Volume != nil && spec.Volume.GCEPersistentDisk != nil { + volumeName = spec.Volume.GCEPersistentDisk.PDName + } else if spec.PersistentVolume != nil && + spec.PersistentVolume.Spec.GCEPersistentDisk != nil { + volumeName = spec.PersistentVolume.Spec.GCEPersistentDisk.PDName + } + if volumeName == "" { + volumeName = spec.Name() + } + return volumeName, nil } func (plugin *FakeVolumePlugin) CanSupport(spec *Spec) bool { diff --git a/pkg/volume/util/operationexecutor/operation_executor.go b/pkg/volume/util/operationexecutor/operation_executor.go index c586b959ade..c065ace25e1 100644 --- a/pkg/volume/util/operationexecutor/operation_executor.go +++ b/pkg/volume/util/operationexecutor/operation_executor.go @@ -163,7 +163,7 @@ func NewOperationExecutor( // state of the world cache after successful mount/unmount. type ActualStateOfWorldMounterUpdater interface { // Marks the specified volume as mounted to the specified pod - MarkVolumeAsMounted(podName volumetypes.UniquePodName, podUID types.UID, volumeName v1.UniqueVolumeName, mounter volume.Mounter, blockVolumeMapper volume.BlockVolumeMapper, outerVolumeSpecName string, volumeGidValue string) error + MarkVolumeAsMounted(podName volumetypes.UniquePodName, podUID types.UID, volumeName v1.UniqueVolumeName, mounter volume.Mounter, blockVolumeMapper volume.BlockVolumeMapper, outerVolumeSpecName string, volumeGidValue string, volumeSpec *volume.Spec) error // Marks the specified volume as unmounted from the specified pod MarkVolumeAsUnmounted(podName volumetypes.UniquePodName, volumeName v1.UniqueVolumeName) error diff --git a/pkg/volume/util/operationexecutor/operation_generator.go b/pkg/volume/util/operationexecutor/operation_generator.go index f985055fcdd..83ee448a11a 100644 --- a/pkg/volume/util/operationexecutor/operation_generator.go +++ 
b/pkg/volume/util/operationexecutor/operation_generator.go @@ -564,7 +564,8 @@ func (og *operationGenerator) GenerateMountVolumeFunc( volumeMounter, nil, volumeToMount.OuterVolumeSpecName, - volumeToMount.VolumeGidValue) + volumeToMount.VolumeGidValue, + volumeToMount.VolumeSpec) if markVolMountedErr != nil { // On failure, return error. Caller will log and retry. return volumeToMount.GenerateError("MountVolume.MarkVolumeAsMounted failed", markVolMountedErr) @@ -913,7 +914,8 @@ func (og *operationGenerator) GenerateMapVolumeFunc( nil, blockVolumeMapper, volumeToMount.OuterVolumeSpecName, - volumeToMount.VolumeGidValue) + volumeToMount.VolumeGidValue, + volumeToMount.VolumeSpec) if markVolMountedErr != nil { // On failure, return error. Caller will log and retry. return volumeToMount.GenerateError("MapVolume.MarkVolumeAsMounted failed", markVolMountedErr)