Merge pull request #91738 from jsafrane/fix-iscsi-metadata

iscsi: don't write json metadata file when the volume is already mounted.
Kubernetes Prow Robot 2020-07-04 16:06:48 -07:00 committed by GitHub
commit 9e70d6f805
3 changed files with 84 additions and 4 deletions


@@ -491,6 +491,22 @@ func (util *ISCSIUtil) persistISCSI(b iscsiDiskMounter) error {
         klog.Errorf("iscsi: failed to mkdir %s, error", globalPDPath)
         return err
     }
+
+    if b.volumeMode == v1.PersistentVolumeFilesystem {
+        notMnt, err := b.mounter.IsLikelyNotMountPoint(globalPDPath)
+        if err != nil {
+            return err
+        }
+        if !notMnt {
+            // The volume is already mounted, therefore the previous WaitForAttach must have
+            // persisted the volume metadata. In addition, the metadata is actually *inside*
+            // globalPDPath and we can't write it here, because it was shadowed by the volume
+            // mount.
+            klog.V(4).Infof("Skipping persistISCSI, the volume is already mounted at %s", globalPDPath)
+            return nil
+        }
+    }
+
     // Persist iscsi disk config to json file for DetachDisk path
     return util.persistISCSIFile(*(b.iscsiDisk), globalPDPath)
 }
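For reference, a minimal sketch (not part of this commit) of the pattern the guard above relies on: if the target directory is already a mount point, the metadata JSON was necessarily persisted by the earlier WaitForAttach, and writing it now would only land on the mounted filesystem that shadows the directory, or fail outright on a read-only mount. The sketch uses k8s.io/mount-utils directly; the metadata type, its fields and the file name are illustrative, not the plugin's actual persistISCSIFile format.

// Sketch only: skip persisting metadata when the directory is already a mount point.
// diskMetadata and the "iscsi.json" file name are made up for this example.
package main

import (
	"encoding/json"
	"os"
	"path/filepath"

	mount "k8s.io/mount-utils"
)

type diskMetadata struct {
	Portals []string `json:"portals"`
	IQN     string   `json:"iqn"`
	Lun     string   `json:"lun"`
}

// persistIfNotMounted writes meta as JSON into dir, unless dir is already a
// mount point. In that case the file was written before the mount happened,
// and a new write would only touch the mounted volume.
func persistIfNotMounted(mounter mount.Interface, dir string, meta diskMetadata) error {
	notMnt, err := mounter.IsLikelyNotMountPoint(dir)
	if err != nil {
		return err
	}
	if !notMnt {
		// Already mounted: the metadata file is shadowed by the mount; skip.
		return nil
	}
	data, err := json.Marshal(meta)
	if err != nil {
		return err
	}
	return os.WriteFile(filepath.Join(dir, "iscsi.json"), data, 0600)
}

func main() {
	m := mount.New("") // default mounter for the host OS
	_ = persistIfNotMounted(m, "/tmp/example-global-pd-path", diskMetadata{
		Portals: []string{"10.0.0.1:3260"},
		IQN:     "iqn.2003-01.io.example:target",
		Lun:     "0",
	})
}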


@@ -38,6 +38,7 @@ var (
 type Config struct {
     NS string
     PVCs []*v1.PersistentVolumeClaim
+    PVCsReadOnly bool
     InlineVolumeSources []*v1.VolumeSource
     IsPrivileged bool
     Command string
@@ -224,7 +225,7 @@ func MakeSecPod(podConfig *Config) (*v1.Pod, error) {
             volumeMounts = append(volumeMounts, v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename})
         }
-        volumes[volumeIndex] = v1.Volume{Name: volumename, VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvclaim.Name, ReadOnly: false}}}
+        volumes[volumeIndex] = v1.Volume{Name: volumename, VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvclaim.Name, ReadOnly: podConfig.PVCsReadOnly}}}
         volumeIndex++
     }
     for _, src := range podConfig.InlineVolumeSources {
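For reference, a short sketch (not part of this commit) of how a test might use the new PVCsReadOnly field; cs, ns, pvc and node are assumed to be supplied by the surrounding e2e test, and the import aliases follow the ones used elsewhere in this diff.

// Sketch: create a pod whose PVCs are all mounted read-only via PVCsReadOnly.
package example

import (
	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"

	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
)

func createReadOnlyPod(cs clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, node e2epod.NodeSelection) (*v1.Pod, error) {
	podConfig := e2epod.Config{
		NS:            ns,
		PVCs:          []*v1.PersistentVolumeClaim{pvc},
		SeLinuxLabel:  e2epv.SELinuxLabel,
		NodeSelection: node,
		PVCsReadOnly:  true, // every PVC above is mounted with ReadOnly: true
	}
	return e2epod.CreateSecPodWithNodeSelection(cs, &podConfig, framework.PodStartTimeout)
}

MakeSecPod, modified above, is what copies PVCsReadOnly into the ReadOnly field of each generated PersistentVolumeClaimVolumeSource.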


@@ -322,7 +322,35 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
         // Test access to the volume from pods on different node
         TestConcurrentAccessToSingleVolume(l.config.Framework, l.cs, l.ns.Name,
-            l.config.ClientNodeSelection, resource.Pvc, numPods, true /* sameNode */)
+            l.config.ClientNodeSelection, resource.Pvc, numPods, true /* sameNode */, false /* readOnly */)
     })
+
+    // This tests below configuration:
+    //          [pod1] [pod2]
+    //              [   node1   ]
+    //                   \      /     <- same volume mode (read only)
+    //                   [volume1]
+    ginkgo.It("should concurrently access the single read-only volume from pods on the same node", func() {
+        init()
+        defer cleanup()
+
+        numPods := 2
+
+        if !l.driver.GetDriverInfo().Capabilities[CapMultiPODs] {
+            e2eskipper.Skipf("Driver %q does not support multiple concurrent pods - skipping", dInfo.Name)
+        }
+
+        // Create volume
+        testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
+        resource := CreateVolumeResource(l.driver, l.config, pattern, testVolumeSizeRange)
+        l.resources = append(l.resources, resource)
+
+        // Initialize the volume with a filesystem - it's going to be mounted as read-only below.
+        initializeVolume(l.cs, l.ns.Name, resource.Pvc, l.config.ClientNodeSelection)
+
+        // Test access to the volume from pods on a single node
+        TestConcurrentAccessToSingleVolume(l.config.Framework, l.cs, l.ns.Name,
+            l.config.ClientNodeSelection, resource.Pvc, numPods, true /* sameNode */, true /* readOnly */)
+    })
     // This tests below configuration:
@@ -364,7 +392,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
         // Test access to the volume from pods on different node
         TestConcurrentAccessToSingleVolume(l.config.Framework, l.cs, l.ns.Name,
-            l.config.ClientNodeSelection, resource.Pvc, numPods, false /* sameNode */)
+            l.config.ClientNodeSelection, resource.Pvc, numPods, false /* sameNode */, false /* readOnly */)
     })
 }
@@ -442,7 +470,8 @@ func TestAccessMultipleVolumesAcrossPodRecreation(f *framework.Framework, cs cli
 // pod deletion doesn't affect. Pods are deployed on the same node or different nodes depending on requiresSameNode.
 // Read/write check are done across pod, by check reading both what pod{n-1} and pod{n} wrote from pod{n}.
 func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Interface, ns string,
-    node e2epod.NodeSelection, pvc *v1.PersistentVolumeClaim, numPods int, requiresSameNode bool) {
+    node e2epod.NodeSelection, pvc *v1.PersistentVolumeClaim, numPods int, requiresSameNode bool,
+    readOnly bool) {
     var pods []*v1.Pod
@@ -455,6 +484,7 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
             PVCs: []*v1.PersistentVolumeClaim{pvc},
             SeLinuxLabel: e2epv.SELinuxLabel,
             NodeSelection: node,
+            PVCsReadOnly: readOnly,
         }
         pod, err := e2epod.CreateSecPodWithNodeSelection(cs, &podConfig, framework.PodStartTimeout)
         defer func() {
@@ -483,6 +513,11 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
         ginkgo.By(fmt.Sprintf("Checking if the volume in pod%d exists as expected volume mode (%s)", index, *pvc.Spec.VolumeMode))
         utils.CheckVolumeModeOfPath(f, pod, *pvc.Spec.VolumeMode, path)
+
+        if readOnly {
+            ginkgo.By("Skipping volume content checks, volume is read-only")
+            continue
+        }
         if i != 0 {
             ginkgo.By(fmt.Sprintf("From pod%d, checking if reading the data that pod%d write works properly", index, index-1))
             // For 1st pod, no one has written data yet, so pass the read check
@@ -514,6 +549,11 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
         ginkgo.By(fmt.Sprintf("Rechecking if the volume in pod%d exists as expected volume mode (%s)", index, *pvc.Spec.VolumeMode))
         utils.CheckVolumeModeOfPath(f, pod, *pvc.Spec.VolumeMode, "/mnt/volume1")
+
+        if readOnly {
+            ginkgo.By("Skipping volume content checks, volume is read-only")
+            continue
+        }
         if i == 0 {
             // This time there should be data that last pod wrote, for 1st pod
             ginkgo.By(fmt.Sprintf("From pod%d, rechecking if reading the data that last pod write works properly", index))
@@ -585,3 +625,26 @@ func ensureTopologyRequirements(nodeSelection *e2epod.NodeSelection, nodes *v1.N
     return nil
 }
+
+// initializeVolume creates a filesystem on given volume, so it can be used as read-only later
+func initializeVolume(cs clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, node e2epod.NodeSelection) {
+    if pvc.Spec.VolumeMode != nil && *pvc.Spec.VolumeMode == v1.PersistentVolumeBlock {
+        // Block volumes do not need to be initialized.
+        return
+    }
+
+    ginkgo.By(fmt.Sprintf("Initializing a filesystem on PVC %s", pvc.Name))
+    // Just create a pod with the volume as read-write. Kubernetes will create a filesystem there
+    // if it does not exist yet.
+    podConfig := e2epod.Config{
+        NS: ns,
+        PVCs: []*v1.PersistentVolumeClaim{pvc},
+        SeLinuxLabel: e2epv.SELinuxLabel,
+        NodeSelection: node,
+    }
+    pod, err := e2epod.CreateSecPod(cs, &podConfig, framework.PodStartTimeout)
+    defer func() {
+        framework.ExpectNoError(e2epod.DeletePodWithWait(cs, pod))
+    }()
+    framework.ExpectNoError(err)
+}
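For reference, the new read-only test first runs initializeVolume read-write so a filesystem exists on the claim, then creates two pods whose claims are mounted read-only; with the persistISCSI guard earlier in this diff, the later mounts no longer attempt to rewrite the JSON metadata under the already-mounted global path. A small sketch (not part of this commit) of the v1.Volume that MakeSecPod now produces when PVCsReadOnly is set; the claim and volume names are illustrative.

// Sketch: the per-claim volume built by MakeSecPod when PVCsReadOnly is true.
package example

import v1 "k8s.io/api/core/v1"

func readOnlyPVCVolume(claimName, volumeName string) v1.Volume {
	return v1.Volume{
		Name: volumeName,
		VolumeSource: v1.VolumeSource{
			PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
				ClaimName: claimName,
				ReadOnly:  true, // previously hard-coded to false
			},
		},
	}
}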