Merge pull request #50334 from mtanino/issue/48953

Automatic merge from submit-queue (batch tested with PRs 49849, 50334, 51414)

Support iSCSI volume attach and detach

**What this PR does / why we need it**:

Add attach/detach support to the iSCSI volume plugin so that ReadWriteOnce (RWO) access can be enforced by the attach/detach controller.
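
For readers skimming the diff, the core API change is the extra `*v1.Pod` argument on `volume.Attacher.WaitForAttach`: the iSCSI attacher needs the pod to look up CHAP secrets, while every other attacher adopts the new signature with a blank `_ *v1.Pod` parameter. A minimal illustrative sketch of that shape change, using simplified stand-in types rather than the real `k8s.io` packages:

```go
package main

import (
	"fmt"
	"time"
)

// Simplified stand-ins for volume.Spec and v1.Pod (illustration only).
type Spec struct{ Name string }
type Pod struct{ Namespace, Name string }

// New shape introduced by this PR; the old shape had no pod argument.
type Attacher interface {
	WaitForAttach(spec *Spec, devicePath string, pod *Pod, timeout time.Duration) (string, error)
}

// A toy attacher that ignores the pod, mirroring how the cloud-provider
// plugins (EBS, GCE PD, Cinder, FC, ...) adopt the new signature.
type toyAttacher struct{}

func (a *toyAttacher) WaitForAttach(spec *Spec, devicePath string, _ *Pod, timeout time.Duration) (string, error) {
	// Real attachers poll here until the device appears or the timeout fires.
	return devicePath, nil
}

func main() {
	var att Attacher = &toyAttacher{}
	path, err := att.WaitForAttach(&Spec{Name: "vol1"}, "/dev/sdx", &Pod{Namespace: "default", Name: "pod-a"}, time.Minute)
	fmt.Println(path, err)
}
```

The other plugin-facing change follows from this: `diskManager.AttachDisk` now returns the attached device path so the iSCSI `WaitForAttach` can surface it to the caller.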

**Which issue this PR fixes** : fixes #48953

**Special notes for your reviewer**:

**Release note**:

```
NONE
```
Kubernetes Submit Queue 2017-08-26 18:09:30 -07:00 committed by GitHub
commit 496e71ef98
22 changed files with 274 additions and 61 deletions

View File

@ -91,6 +91,7 @@ go_library(
"//pkg/volume/gce_pd:go_default_library",
"//pkg/volume/glusterfs:go_default_library",
"//pkg/volume/host_path:go_default_library",
"//pkg/volume/iscsi:go_default_library",
"//pkg/volume/local:go_default_library",
"//pkg/volume/nfs:go_default_library",
"//pkg/volume/photon_pd:go_default_library",

View File

@ -47,6 +47,7 @@ import (
"k8s.io/kubernetes/pkg/volume/gce_pd"
"k8s.io/kubernetes/pkg/volume/glusterfs"
"k8s.io/kubernetes/pkg/volume/host_path"
"k8s.io/kubernetes/pkg/volume/iscsi"
"k8s.io/kubernetes/pkg/volume/local"
"k8s.io/kubernetes/pkg/volume/nfs"
"k8s.io/kubernetes/pkg/volume/photon_pd"
@ -76,6 +77,7 @@ func ProbeAttachableVolumePlugins() []volume.VolumePlugin {
allPlugins = append(allPlugins, scaleio.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, storageos.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, fc.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, iscsi.ProbeVolumePlugins()...)
return allPlugins
}

View File

@ -377,7 +377,7 @@ func (attacher *testPluginAttacher) VolumesAreAttached(specs []*volume.Spec, nod
return nil, nil
}
func (attacher *testPluginAttacher) WaitForAttach(spec *volume.Spec, devicePath string, timeout time.Duration) (string, error) {
func (attacher *testPluginAttacher) WaitForAttach(spec *volume.Spec, devicePath string, pod *v1.Pod, timeout time.Duration) (string, error) {
attacher.pluginLock.Lock()
defer attacher.pluginLock.Unlock()
if spec == nil {

View File

@ -24,6 +24,7 @@ import (
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
"k8s.io/kubernetes/pkg/util/mount"
@ -142,7 +143,7 @@ func (attacher *awsElasticBlockStoreAttacher) BulkVerifyVolumes(volumesByNode ma
return volumesAttachedCheck, nil
}
func (attacher *awsElasticBlockStoreAttacher) WaitForAttach(spec *volume.Spec, devicePath string, timeout time.Duration) (string, error) {
func (attacher *awsElasticBlockStoreAttacher) WaitForAttach(spec *volume.Spec, devicePath string, _ *v1.Pod, timeout time.Duration) (string, error) {
volumeSource, _, err := getVolumeSource(spec)
if err != nil {
return "", err

View File

@ -150,7 +150,7 @@ func (a *azureDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName ty
return volumesAttachedCheck, nil
}
func (a *azureDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string, timeout time.Duration) (string, error) {
func (a *azureDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string, _ *v1.Pod, timeout time.Duration) (string, error) {
var err error
lun, err := strconv.Atoi(devicePath)
if err != nil {

View File

@ -24,6 +24,7 @@ import (
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/cloudprovider"
@ -220,7 +221,7 @@ func (attacher *cinderDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nod
return volumesAttachedCheck, nil
}
func (attacher *cinderDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string, timeout time.Duration) (string, error) {
func (attacher *cinderDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string, _ *v1.Pod, timeout time.Duration) (string, error) {
// NOTE: devicePath is the path as reported by Cinder, which may be incorrect and should not be used. See Issue #33128
volumeSource, _, err := getVolumeSource(spec)
if err != nil {

View File

@ -24,6 +24,7 @@ import (
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
@ -65,7 +66,7 @@ func (attacher *fcAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName ty
return volumesAttachedCheck, nil
}
func (attacher *fcAttacher) WaitForAttach(spec *volume.Spec, devicePath string, timeout time.Duration) (string, error) {
func (attacher *fcAttacher) WaitForAttach(spec *volume.Spec, devicePath string, _ *v1.Pod, timeout time.Duration) (string, error) {
mounter, err := volumeSpecToMounter(spec, attacher.host)
if err != nil {
glog.Warningf("failed to get fc mounter: %v", err)

View File

@ -20,6 +20,7 @@ import (
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/volume"
)
@ -47,7 +48,7 @@ func (a *flexVolumeAttacher) Attach(spec *volume.Spec, hostName types.NodeName)
}
// WaitForAttach is part of the volume.Attacher interface
func (a *flexVolumeAttacher) WaitForAttach(spec *volume.Spec, devicePath string, timeout time.Duration) (string, error) {
func (a *flexVolumeAttacher) WaitForAttach(spec *volume.Spec, devicePath string, _ *v1.Pod, timeout time.Duration) (string, error) {
call := a.plugin.NewDriverCallWithTimeout(waitForAttachCmd, timeout)
call.Append(devicePath)
call.AppendSpec(spec, a.plugin.host, nil)

View File

@ -17,9 +17,11 @@ limitations under the License.
package flexvolume
import (
"k8s.io/kubernetes/pkg/volume"
"testing"
"time"
"k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/volume"
)
func TestAttach(t *testing.T) {
@ -37,7 +39,7 @@ func TestAttach(t *testing.T) {
func TestWaitForAttach(t *testing.T) {
spec := fakeVolumeSpec()
var pod *v1.Pod
plugin, _ := testPlugin()
plugin.runner = fakeRunner(
assertDriverCall(t, notSupportedOutput(), waitForAttachCmd, "/dev/sdx",
@ -45,7 +47,7 @@ func TestWaitForAttach(t *testing.T) {
)
a, _ := plugin.NewAttacher()
a.WaitForAttach(spec, "/dev/sdx", 1*time.Second)
a.WaitForAttach(spec, "/dev/sdx", pod, 1*time.Second)
}
func TestMountDevice(t *testing.T) {

View File

@ -25,6 +25,7 @@ import (
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
@ -130,7 +131,7 @@ func (attacher *gcePersistentDiskAttacher) VolumesAreAttached(specs []*volume.Sp
return volumesAttachedCheck, nil
}
func (attacher *gcePersistentDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string, timeout time.Duration) (string, error) {
func (attacher *gcePersistentDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string, _ *v1.Pod, timeout time.Duration) (string, error) {
ticker := time.NewTicker(checkSleepDuration)
defer ticker.Stop()
timer := time.NewTimer(timeout)

View File

@ -9,6 +9,7 @@ load(
go_library(
name = "go_default_library",
srcs = [
"attacher.go",
"disk_manager.go",
"doc.go",
"iscsi.go",

View File

@ -0,0 +1,213 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package iscsi
import (
"fmt"
"os"
"strconv"
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
)
type iscsiAttacher struct {
host volume.VolumeHost
manager diskManager
}
var _ volume.Attacher = &iscsiAttacher{}
var _ volume.AttachableVolumePlugin = &iscsiPlugin{}
func (plugin *iscsiPlugin) NewAttacher() (volume.Attacher, error) {
return &iscsiAttacher{
host: plugin.host,
manager: &ISCSIUtil{},
}, nil
}
func (plugin *iscsiPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
mounter := plugin.host.GetMounter(iscsiPluginName)
return mount.GetMountRefs(mounter, deviceMountPath)
}
func (attacher *iscsiAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
return "", nil
}
func (attacher *iscsiAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) {
volumesAttachedCheck := make(map[*volume.Spec]bool)
for _, spec := range specs {
volumesAttachedCheck[spec] = true
}
return volumesAttachedCheck, nil
}
func (attacher *iscsiAttacher) WaitForAttach(spec *volume.Spec, devicePath string, pod *v1.Pod, timeout time.Duration) (string, error) {
mounter, err := attacher.volumeSpecToMounter(spec, attacher.host, pod)
if err != nil {
glog.Warningf("failed to get iscsi mounter: %v", err)
return "", err
}
return attacher.manager.AttachDisk(*mounter)
}
func (attacher *iscsiAttacher) GetDeviceMountPath(
spec *volume.Spec) (string, error) {
mounter, err := attacher.volumeSpecToMounter(spec, attacher.host, nil)
if err != nil {
glog.Warningf("failed to get iscsi mounter: %v", err)
return "", err
}
if mounter.InitiatorName != "" {
// new iface name is <target portal>:<volume name>
mounter.Iface = mounter.Portals[0] + ":" + mounter.VolName
}
return attacher.manager.MakeGlobalPDName(*mounter.iscsiDisk), nil
}
func (attacher *iscsiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
mounter := attacher.host.GetMounter(iscsiPluginName)
notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath)
if err != nil {
if os.IsNotExist(err) {
if err := os.MkdirAll(deviceMountPath, 0750); err != nil {
return err
}
notMnt = true
} else {
return err
}
}
volumeSource, readOnly, err := getVolumeSource(spec)
if err != nil {
return err
}
options := []string{}
if readOnly {
options = append(options, "ro")
}
if notMnt {
diskMounter := &mount.SafeFormatAndMount{Interface: mounter, Exec: attacher.host.GetExec(iscsiPluginName)}
mountOptions := volume.MountOptionFromSpec(spec, options...)
err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeSource.FSType, mountOptions)
if err != nil {
os.Remove(deviceMountPath)
return err
}
}
return nil
}
type iscsiDetacher struct {
host volume.VolumeHost
mounter mount.Interface
manager diskManager
}
var _ volume.Detacher = &iscsiDetacher{}
func (plugin *iscsiPlugin) NewDetacher() (volume.Detacher, error) {
return &iscsiDetacher{
host: plugin.host,
mounter: plugin.host.GetMounter(iscsiPluginName),
manager: &ISCSIUtil{},
}, nil
}
func (detacher *iscsiDetacher) Detach(deviceMountPath string, nodeName types.NodeName) error {
return nil
}
func (detacher *iscsiDetacher) UnmountDevice(deviceMountPath string) error {
unMounter := detacher.volumeSpecToUnmounter(detacher.mounter)
err := detacher.manager.DetachDisk(*unMounter, deviceMountPath)
if err != nil {
return fmt.Errorf("iscsi: failed to detach disk: %s\nError: %v", deviceMountPath, err)
}
glog.V(4).Infof("iscsi: successfully detached disk: %s", deviceMountPath)
return nil
}
func (attacher *iscsiAttacher) volumeSpecToMounter(spec *volume.Spec, host volume.VolumeHost, pod *v1.Pod) (*iscsiDiskMounter, error) {
var secret map[string]string
var bkportal []string
iscsi, readOnly, err := getVolumeSource(spec)
if err != nil {
return nil, err
}
// Obtain secret for AttachDisk
if iscsi.SecretRef != nil && pod != nil {
if secret, err = volumeutil.GetSecretForPod(pod, iscsi.SecretRef.Name, host.GetKubeClient()); err != nil {
glog.Errorf("Couldn't get secret from %v/%v", pod.Namespace, iscsi.SecretRef)
return nil, err
}
}
lun := strconv.Itoa(int(iscsi.Lun))
portal := portalMounter(iscsi.TargetPortal)
bkportal = append(bkportal, portal)
for _, tp := range iscsi.Portals {
bkportal = append(bkportal, portalMounter(string(tp)))
}
iface := iscsi.ISCSIInterface
exec := attacher.host.GetExec(iscsiPluginName)
var initiatorName string
if iscsi.InitiatorName != nil {
initiatorName = *iscsi.InitiatorName
}
return &iscsiDiskMounter{
iscsiDisk: &iscsiDisk{
plugin: &iscsiPlugin{
host: host,
},
VolName: spec.Name(),
Portals: bkportal,
Iqn: iscsi.IQN,
lun: lun,
Iface: iface,
chap_discovery: iscsi.DiscoveryCHAPAuth,
chap_session: iscsi.SessionCHAPAuth,
secret: secret,
InitiatorName: initiatorName,
manager: &ISCSIUtil{}},
fsType: iscsi.FSType,
readOnly: readOnly,
mounter: &mount.SafeFormatAndMount{Interface: host.GetMounter(iscsiPluginName), Exec: exec},
exec: exec,
deviceUtil: volumeutil.NewDeviceHandler(volumeutil.NewIOHandler()),
}, nil
}
func (detacher *iscsiDetacher) volumeSpecToUnmounter(mounter mount.Interface) *iscsiDiskUnmounter {
exec := detacher.host.GetExec(iscsiPluginName)
return &iscsiDiskUnmounter{
iscsiDisk: &iscsiDisk{
plugin: &iscsiPlugin{},
},
mounter: mounter,
exec: exec,
}
}

View File

@ -28,7 +28,7 @@ import (
type diskManager interface {
MakeGlobalPDName(disk iscsiDisk) string
// Attaches the disk to the kubelet's host machine.
AttachDisk(b iscsiDiskMounter) error
AttachDisk(b iscsiDiskMounter) (string, error)
// Detaches the disk from the kubelet's host machine.
DetachDisk(disk iscsiDiskUnmounter, mntPath string) error
}
@ -37,7 +37,6 @@ type diskManager interface {
func diskSetUp(manager diskManager, b iscsiDiskMounter, volPath string, mounter mount.Interface, fsGroup *int64) error {
// TODO: handle failed mounts here.
notMnt, err := mounter.IsLikelyNotMountPoint(volPath)
if err != nil && !os.IsNotExist(err) {
glog.Errorf("cannot validate mountpoint: %s", volPath)
return err
@ -45,10 +44,6 @@ func diskSetUp(manager diskManager, b iscsiDiskMounter, volPath string, mounter
if !notMnt {
return nil
}
if err := manager.AttachDisk(b); err != nil {
glog.Errorf("failed to attach disk")
return err
}
if err := os.MkdirAll(volPath, 0750); err != nil {
glog.Errorf("failed to mkdir:%s", volPath)
@ -59,6 +54,10 @@ func diskSetUp(manager diskManager, b iscsiDiskMounter, volPath string, mounter
if b.readOnly {
options = append(options, "ro")
}
if b.iscsiDisk.InitiatorName != "" {
// new iface name is <target portal>:<volume name>
b.iscsiDisk.Iface = b.iscsiDisk.Portals[0] + ":" + b.iscsiDisk.VolName
}
globalPDPath := manager.MakeGlobalPDName(*b.iscsiDisk)
mountOptions := volume.JoinMountOptions(b.mountOptions, options)
err = mounter.Mount(globalPDPath, volPath, "", mountOptions)
@ -84,8 +83,7 @@ func diskTearDown(manager diskManager, c iscsiDiskUnmounter, volPath string, mou
if notMnt {
return os.Remove(volPath)
}
refs, err := mount.GetMountRefs(mounter, volPath)
_, err = mount.GetMountRefs(mounter, volPath)
if err != nil {
glog.Errorf("failed to get reference count %s", volPath)
return err
@ -94,16 +92,6 @@ func diskTearDown(manager diskManager, c iscsiDiskUnmounter, volPath string, mou
glog.Errorf("failed to unmount %s", volPath)
return err
}
// If len(refs) is 1, then all bind mounts have been removed, and the
// remaining reference is the global mount. It is safe to detach.
if len(refs) == 1 {
mntPath := refs[0]
if err := manager.DetachDisk(c, mntPath); err != nil {
glog.Errorf("failed to detach disk from %s", mntPath)
return err
}
}
notMnt, mntErr := mounter.IsLikelyNotMountPoint(volPath)
if mntErr != nil {
glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)

View File

@ -138,7 +138,7 @@ func (plugin *iscsiPlugin) newMounterInternal(spec *volume.Spec, podUID types.UI
return &iscsiDiskMounter{
iscsiDisk: &iscsiDisk{
podUID: podUID,
volName: spec.Name(),
VolName: spec.Name(),
Portals: bkportal,
Iqn: iscsi.IQN,
lun: lun,
@ -167,7 +167,7 @@ func (plugin *iscsiPlugin) newUnmounterInternal(volName string, podUID types.UID
return &iscsiDiskUnmounter{
iscsiDisk: &iscsiDisk{
podUID: podUID,
volName: volName,
VolName: volName,
manager: manager,
plugin: plugin,
},
@ -190,7 +190,7 @@ func (plugin *iscsiPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*v
}
type iscsiDisk struct {
volName string
VolName string
podUID types.UID
Portals []string
Iqn string
@ -209,7 +209,7 @@ type iscsiDisk struct {
func (iscsi *iscsiDisk) GetPath() string {
name := iscsiPluginName
// safe to use PodVolumeDir now: volume teardown occurs before pod is cleaned up
return iscsi.plugin.host.GetPodVolumeDir(iscsi.podUID, utilstrings.EscapeQualifiedNameForDisk(name), iscsi.volName)
return iscsi.plugin.host.GetPodVolumeDir(iscsi.podUID, utilstrings.EscapeQualifiedNameForDisk(name), iscsi.VolName)
}
type iscsiDiskMounter struct {

View File

@ -100,18 +100,17 @@ func (fake *fakeDiskManager) Cleanup() {
func (fake *fakeDiskManager) MakeGlobalPDName(disk iscsiDisk) string {
return fake.tmpDir
}
func (fake *fakeDiskManager) AttachDisk(b iscsiDiskMounter) error {
func (fake *fakeDiskManager) AttachDisk(b iscsiDiskMounter) (string, error) {
globalPath := b.manager.MakeGlobalPDName(*b.iscsiDisk)
err := os.MkdirAll(globalPath, 0750)
if err != nil {
return err
return "", err
}
// Simulate the global mount so that the fakeMounter returns the
// expected number of mounts for the attached disk.
b.mounter.Mount(globalPath, globalPath, b.fsType, nil)
fake.attachCalled = true
return nil
return "/dev/sdb", nil
}
func (fake *fakeDiskManager) DetachDisk(c iscsiDiskUnmounter, mntPath string) error {
@ -120,7 +119,6 @@ func (fake *fakeDiskManager) DetachDisk(c iscsiDiskUnmounter, mntPath string) er
if err != nil {
return err
}
fake.detachCalled = true
return nil
}
@ -173,9 +171,6 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
t.Errorf("SetUp() failed: %v", err)
}
}
if !fakeManager.attachCalled {
t.Errorf("Attach was not called")
}
fakeManager2 := NewFakeDiskManager()
defer fakeManager2.Cleanup()
@ -195,9 +190,6 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
} else if !os.IsNotExist(err) {
t.Errorf("SetUp() failed: %v", err)
}
if !fakeManager2.detachCalled {
t.Errorf("Detach was not called")
}
}
func TestPluginVolume(t *testing.T) {

View File

@ -29,6 +29,7 @@ import (
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
)
var (
@ -190,7 +191,7 @@ func (util *ISCSIUtil) loadISCSI(conf *iscsiDisk, mnt string) error {
return nil
}
func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) error {
func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) (string, error) {
var devicePath string
var devicePaths []string
var iscsiTransport string
@ -199,7 +200,7 @@ func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) error {
out, err := b.exec.Run("iscsiadm", "-m", "iface", "-I", b.Iface, "-o", "show")
if err != nil {
glog.Errorf("iscsi: could not read iface %s error: %s", b.Iface, string(out))
return err
return "", err
}
iscsiTransport = extractTransportname(string(out))
@ -209,11 +210,11 @@ func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) error {
// create new iface and copy parameters from pre-configured iface to the created iface
if b.InitiatorName != "" {
// new iface name is <target portal>:<volume name>
newIface := bkpPortal[0] + ":" + b.volName
newIface := bkpPortal[0] + ":" + b.VolName
err = cloneIface(b, newIface)
if err != nil {
glog.Errorf("iscsi: failed to clone iface: %s error: %v", b.Iface, err)
return err
return "", err
}
// update iface name
b.Iface = newIface
@ -229,7 +230,7 @@ func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) error {
if iscsiTransport == "" {
glog.Errorf("iscsi: could not find transport name in iface %s", b.Iface)
return fmt.Errorf("Could not parse iface file for %s", b.Iface)
return "", fmt.Errorf("Could not parse iface file for %s", b.Iface)
} else if iscsiTransport == "tcp" {
devicePath = strings.Join([]string{"/dev/disk/by-path/ip", tp, "iscsi", b.Iqn, "lun", b.lun}, "-")
} else {
@ -285,7 +286,7 @@ func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) error {
// delete cloned iface
b.exec.Run("iscsiadm", "-m", "iface", "-I", b.Iface, "-o", "delete")
glog.Errorf("iscsi: failed to get any path for iscsi disk, last err seen:\n%v", lastErr)
return fmt.Errorf("failed to get any path for iscsi disk, last err seen:\n%v", lastErr)
return "", fmt.Errorf("failed to get any path for iscsi disk, last err seen:\n%v", lastErr)
}
//Make sure we use a valid devicepath to find mpio device.
@ -295,16 +296,16 @@ func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) error {
globalPDPath := b.manager.MakeGlobalPDName(*b.iscsiDisk)
notMnt, err := b.mounter.IsLikelyNotMountPoint(globalPDPath)
if err != nil {
return fmt.Errorf("Heuristic determination of mount point failed:%v", err)
return "", fmt.Errorf("Heuristic determination of mount point failed:%v", err)
}
if !notMnt {
glog.Infof("iscsi: %s already mounted", globalPDPath)
return nil
return "", nil
}
if err := os.MkdirAll(globalPDPath, 0750); err != nil {
glog.Errorf("iscsi: failed to mkdir %s, error", globalPDPath)
return err
return "", err
}
// Persist iscsi disk config to json file for DetachDisk path
@ -327,7 +328,7 @@ func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) error {
glog.Errorf("iscsi: failed to mount iscsi volume %s [%s] to %s, error %v", devicePath, b.fsType, globalPDPath, err)
}
return err
return devicePath, err
}
func (util *ISCSIUtil) DetachDisk(c iscsiDiskUnmounter, mntPath string) error {
@ -336,6 +337,12 @@ func (util *ISCSIUtil) DetachDisk(c iscsiDiskUnmounter, mntPath string) error {
glog.Errorf("iscsi detach disk: failed to get device from mnt: %s\nError: %v", mntPath, err)
return err
}
if pathExists, pathErr := volumeutil.PathExists(mntPath); pathErr != nil {
return fmt.Errorf("Error checking if path exists: %v", pathErr)
} else if !pathExists {
glog.Warningf("Warning: Unmount skipped because path does not exist: %v", mntPath)
return nil
}
if err = c.mounter.Unmount(mntPath); err != nil {
glog.Errorf("iscsi detach disk: failed to unmount: %s\nError: %v", mntPath, err)
return err
@ -350,12 +357,12 @@ func (util *ISCSIUtil) DetachDisk(c iscsiDiskUnmounter, mntPath string) error {
refCount, err := getDevicePrefixRefCount(c.mounter, prefix)
if err == nil && refCount == 0 {
var bkpPortal []string
var iqn, iface, initiatorName string
var volName, iqn, iface, initiatorName string
found := true
// load iscsi disk config from json file
if err := util.loadISCSI(c.iscsiDisk, mntPath); err == nil {
bkpPortal, iqn, iface = c.iscsiDisk.Portals, c.iscsiDisk.Iqn, c.iscsiDisk.Iface
bkpPortal, iqn, iface, volName = c.iscsiDisk.Portals, c.iscsiDisk.Iqn, c.iscsiDisk.Iface, c.iscsiDisk.VolName
initiatorName = c.iscsiDisk.InitiatorName
} else {
// If the iscsi disk config is not found, fall back to the original behavior.
@ -397,7 +404,7 @@ func (util *ISCSIUtil) DetachDisk(c iscsiDiskUnmounter, mntPath string) error {
}
// Delete the iface after all sessions have logged out
// If the iface is not created via iscsi plugin, skip to delete
if initiatorName != "" && found && iface == (portals[0]+":"+c.volName) {
if initiatorName != "" && found && iface == (portals[0]+":"+volName) {
delete := []string{"-m", "iface", "-I", iface, "-o", "delete"}
out, err := c.exec.Run("iscsiadm", delete...)
if err != nil {

pkg/volume/iscsi/iscsi_util_test.go Executable file → Normal file
View File

View File

@ -25,6 +25,7 @@ import (
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/cloudprovider/providers/photon"
"k8s.io/kubernetes/pkg/util/mount"
@ -121,7 +122,7 @@ func (attacher *photonPersistentDiskAttacher) VolumesAreAttached(specs []*volume
return volumesAttachedCheck, nil
}
func (attacher *photonPersistentDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string, timeout time.Duration) (string, error) {
func (attacher *photonPersistentDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string, _ *v1.Pod, timeout time.Duration) (string, error) {
volumeSource, _, err := getVolumeSource(spec)
if err != nil {
glog.Errorf("Photon Controller attacher: WaitForAttach failed to get volume source")

View File

@ -435,7 +435,7 @@ func (fv *FakeVolume) GetAttachCallCount() int {
return fv.AttachCallCount
}
func (fv *FakeVolume) WaitForAttach(spec *Spec, devicePath string, spectimeout time.Duration) (string, error) {
func (fv *FakeVolume) WaitForAttach(spec *Spec, devicePath string, pod *v1.Pod, spectimeout time.Duration) (string, error) {
fv.Lock()
defer fv.Unlock()
fv.WaitForAttachCallCount++

View File

@ -408,7 +408,7 @@ func (og *operationGenerator) GenerateMountVolumeFunc(
glog.Infof(volumeToMount.GenerateMsgDetailed("MountVolume.WaitForAttach entering", fmt.Sprintf("DevicePath %q", volumeToMount.DevicePath)))
devicePath, err := volumeAttacher.WaitForAttach(
volumeToMount.VolumeSpec, volumeToMount.DevicePath, waitForAttachTimeout)
volumeToMount.VolumeSpec, volumeToMount.DevicePath, volumeToMount.Pod, waitForAttachTimeout)
if err != nil {
// On failure, return error. Caller will log and retry.
return volumeToMount.GenerateErrorDetailed("MountVolume.WaitForAttach failed", err)

View File

@ -173,7 +173,7 @@ type Attacher interface {
// node. If it successfully attaches, the path to the device
// is returned. Otherwise, if the device does not attach after
// the given timeout period, an error will be returned.
WaitForAttach(spec *Spec, devicePath string, timeout time.Duration) (string, error)
WaitForAttach(spec *Spec, devicePath string, pod *v1.Pod, timeout time.Duration) (string, error)
// GetDeviceMountPath returns a path where the device should
// be mounted after it is attached. This is a global mount
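
The timeout contract described in the comment above is typically implemented as a poll-until-deadline loop, as in the gce_pd hunk earlier in this diff (a ticker paired with a timer). A minimal sketch of that pattern, where `checkAttached` is a hypothetical probe supplied by the caller rather than a real Kubernetes helper:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// waitForAttach polls until checkAttached reports the device is present,
// or the timeout elapses.
func waitForAttach(devicePath string, timeout time.Duration, checkAttached func(string) bool) (string, error) {
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	timer := time.NewTimer(timeout)
	defer timer.Stop()

	for {
		select {
		case <-ticker.C:
			// Return the device path as soon as the device shows up.
			if checkAttached(devicePath) {
				return devicePath, nil
			}
		case <-timer.C:
			// Give up after the caller-supplied timeout.
			return "", errors.New("timed out waiting for the device to attach")
		}
	}
}

func main() {
	path, err := waitForAttach("/dev/sdx", 3*time.Second, func(p string) bool { return p == "/dev/sdx" })
	fmt.Println(path, err)
}
```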

View File

@ -23,6 +23,7 @@ import (
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
"k8s.io/kubernetes/pkg/util/keymutex"
@ -118,7 +119,7 @@ func (attacher *vsphereVMDKAttacher) VolumesAreAttached(specs []*volume.Spec, no
return volumesAttachedCheck, nil
}
func (attacher *vsphereVMDKAttacher) WaitForAttach(spec *volume.Spec, devicePath string, timeout time.Duration) (string, error) {
func (attacher *vsphereVMDKAttacher) WaitForAttach(spec *volume.Spec, devicePath string, _ *v1.Pod, timeout time.Duration) (string, error) {
volumeSource, _, err := getVolumeSource(spec)
if err != nil {
return "", err