Merge pull request #107065 from saikat-royc/fix-node-stage-path

change node staging path for csi driver to PV agnostic
Kubernetes Prow Robot 2022-01-21 01:31:58 -08:00 committed by GitHub
commit c175418281
3 changed files with 200 additions and 48 deletions
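For reference, here is a minimal, self-contained sketch (not code from this PR) of the PV-agnostic staging-path derivation introduced by the diff below; the plugin directory, driver name, and volume handle values are illustrative placeholders, and the helper name is invented for this example. Keying the path by sha256 of the volume handle instead of the PV name is what lets two PVs that wrap the same underlying volume share one staging directory, which the new e2e test at the end of this commit exercises.

package main

import (
	"crypto/sha256"
	"fmt"
	"path/filepath"
)

// pvAgnosticStagingPath mirrors the new makeDeviceMountPath logic in the diff below.
// Old layout: <pluginDir>/pv/<pvName>/globalmount (keyed by PV name)
// New layout: <pluginDir>/<driver>/<sha256(volumeHandle)>/globalmount (PV agnostic)
func pvAgnosticStagingPath(pluginDir, driverName, volumeHandle string) string {
	volSha := fmt.Sprintf("%x", sha256.Sum256([]byte(volumeHandle)))
	return filepath.Join(pluginDir, driverName, volSha, "globalmount")
}

func main() {
	fmt.Println(pvAgnosticStagingPath(
		"/var/lib/kubelet/plugins/kubernetes.io/csi", // assumed kubelet CSI plugin dir
		"hostpath.csi.k8s.io",                        // placeholder driver name
		"project/zone/test-vol1",                     // volume handle used in the tests below
	))
}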


@@ -629,15 +629,29 @@ func getAttachmentName(volName, csiDriverName, nodeName string) string {
func makeDeviceMountPath(plugin *csiPlugin, spec *volume.Spec) (string, error) {
if spec == nil {
return "", errors.New(log("makeDeviceMountPath failed, spec is nil"))
return "", errors.New("makeDeviceMountPath failed, spec is nil")
}
pvName := spec.PersistentVolume.Name
if pvName == "" {
return "", errors.New(log("makeDeviceMountPath failed, pv name empty"))
driver, err := GetCSIDriverName(spec)
if err != nil {
return "", err
}
if driver == "" {
return "", errors.New("makeDeviceMountPath failed, csi source driver name is empty")
}
return filepath.Join(plugin.host.GetPluginDir(plugin.GetPluginName()), persistentVolumeInGlobalPath, pvName, globalMountInGlobalPath), nil
csiSource, err := getPVSourceFromSpec(spec)
if err != nil {
return "", errors.New(log("makeDeviceMountPath failed to get CSIPersistentVolumeSource: %v", err))
}
if csiSource.VolumeHandle == "" {
return "", errors.New("makeDeviceMountPath failed, CSIPersistentVolumeSource volume handle is empty")
}
result := sha256.Sum256([]byte(fmt.Sprintf("%s", csiSource.VolumeHandle)))
volSha := fmt.Sprintf("%x", result)
return filepath.Join(plugin.host.GetPluginDir(plugin.GetPluginName()), driver, volSha, globalMountInGlobalPath), nil
}
func getDriverAndVolNameFromDeviceMountPath(k8s kubernetes.Interface, deviceMountPath string) (string, string, error) {


@@ -18,6 +18,7 @@ package csi
import (
"context"
"crypto/sha256"
"fmt"
"io/ioutil"
"os"
@@ -1022,23 +1023,40 @@ func TestAttacherGetDeviceMountPath(t *testing.T) {
csiAttacher := getCsiAttacherFromVolumeAttacher(attacher, testWatchTimeout)
pluginDir := csiAttacher.plugin.host.GetPluginDir(plug.GetPluginName())
testCases := []struct {
testName string
pvName string
expectedMountPath string
shouldFail bool
testName string
pvName string
volumeId string
skipPVCSISource bool // The test clears PV.Spec.CSI
shouldFail bool
addVolSource bool // The test adds a Volume.VolumeSource.CSI.
removeVolumeHandle bool // The test force removes CSI volume handle.
}{
{
testName: "normal test",
pvName: "test-pv1",
expectedMountPath: pluginDir + "/pv/test-pv1/globalmount",
testName: "success test",
pvName: "test-pv1",
volumeId: "test-vol1",
},
{
testName: "no pv name",
pvName: "",
expectedMountPath: pluginDir + "/pv/test-pv1/globalmount",
shouldFail: true,
testName: "fail test, failed to create device mount path due to missing CSI source",
pvName: "test-pv1",
volumeId: "test-vol1",
skipPVCSISource: true,
shouldFail: true,
},
{
testName: "fail test, failed to create device mount path, CSIVolumeSource found",
pvName: "test-pv1",
volumeId: "test-vol1",
addVolSource: true,
shouldFail: true,
},
{
testName: "fail test, failed to create device mount path, missing CSI volume handle",
pvName: "test-pv1",
volumeId: "test-vol1",
shouldFail: true,
removeVolumeHandle: true,
},
}
@@ -1047,9 +1065,18 @@ func TestAttacherGetDeviceMountPath(t *testing.T) {
var spec *volume.Spec
// Create spec
pv := makeTestPV(tc.pvName, 10, testDriver, "testvol")
spec = volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly)
pv := makeTestPV(tc.pvName, 10, testDriver, tc.volumeId)
if tc.removeVolumeHandle {
pv.Spec.PersistentVolumeSource.CSI.VolumeHandle = ""
}
if tc.addVolSource {
spec = volume.NewSpecFromVolume(makeTestVol(tc.pvName, testDriver))
} else {
spec = volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly)
if tc.skipPVCSISource {
spec.PersistentVolume.Spec.CSI = nil
}
}
// Run
mountPath, err := csiAttacher.GetDeviceMountPath(spec)
@@ -1057,10 +1084,11 @@ func TestAttacherGetDeviceMountPath(t *testing.T) {
if err != nil && !tc.shouldFail {
t.Errorf("test should not fail, but error occurred: %v", err)
} else if err == nil {
expectedMountPath := filepath.Join(pluginDir, testDriver, generateSha(tc.volumeId), globalMountInGlobalPath)
if tc.shouldFail {
t.Errorf("test should fail, but no error occurred")
} else if mountPath != tc.expectedMountPath {
t.Errorf("mountPath does not equal expectedMountPath. Got: %s. Expected: %s", mountPath, tc.expectedMountPath)
} else if mountPath != expectedMountPath {
t.Errorf("mountPath does not equal expectedMountPath. Got: %s. Expected: %s", mountPath, expectedMountPath)
}
}
}
@@ -1572,53 +1600,76 @@ func TestAttacherUnmountDevice(t *testing.T) {
shouldFail bool
watchTimeout time.Duration
}{
// PV agnostic path positive test cases
{
testName: "normal, json file exists",
testName: "success, json file exists",
volID: "project/zone/test-vol1",
deviceMountPath: "plugins/csi/pv/test-pv-name/globalmount",
deviceMountPath: "plugins/csi/" + generateSha("project/zone/test-vol1") + "/globalmount",
jsonFile: `{"driverName": "csi", "volumeHandle":"project/zone/test-vol1"}`,
createPV: false,
stageUnstageSet: true,
},
{
testName: "normal, json file doesn't exist -> use PV",
testName: "stage_unstage not set, PV agnostic path, unmount device is skipped",
deviceMountPath: "plugins/csi/" + generateSha("project/zone/test-vol1") + "/globalmount",
jsonFile: `{"driverName":"test-driver","volumeHandle":"test-vol1"}`,
stageUnstageSet: false,
},
// PV agnostic path negative test cases
{
testName: "fail: missing json, fail to retrieve driver and volumeID from globalpath",
volID: "project/zone/test-vol1",
deviceMountPath: "plugins/csi/" + generateSha("project/zone/test-vol1") + "/globalmount",
jsonFile: "",
stageUnstageSet: true,
shouldFail: true,
},
{
testName: "fail: invalid json, fail to retrieve driver and volumeID from globalpath",
volID: "project/zone/test-vol1",
deviceMountPath: "plugins/csi/" + generateSha("project/zone/test-vol1") + "/globalmount",
jsonFile: `{"driverName"}}`,
stageUnstageSet: true,
shouldFail: true,
},
// Old style PV based path positive test cases
{
testName: "success: json file doesn't exist, old style pv based global path -> use PV",
volID: "project/zone/test-vol1",
deviceMountPath: "plugins/csi/pv/test-pv-name/globalmount",
jsonFile: "",
createPV: true,
stageUnstageSet: true,
createPV: true,
},
{
testName: "invalid json -> use PV",
testName: "success: invalid json file, old style pv based global path -> use PV",
volID: "project/zone/test-vol1",
deviceMountPath: "plugins/csi/pv/test-pv-name/globalmount",
jsonFile: `{"driverName"}}`,
createPV: true,
stageUnstageSet: true,
},
{
testName: "no json, no PV.volID",
volID: "",
deviceMountPath: "plugins/csi/pv/test-pv-name/globalmount",
jsonFile: "",
createPV: true,
shouldFail: true,
},
{
testName: "no json, no PV",
volID: "project/zone/test-vol1",
deviceMountPath: "plugins/csi/pv/test-pv-name/globalmount",
jsonFile: "",
createPV: false,
stageUnstageSet: true,
shouldFail: true,
},
{
testName: "stage_unstage not set no vars should not fail",
testName: "stage_unstage not set, PV based path, unmount device is skipped",
deviceMountPath: "plugins/csi/pv/test-pv-name/globalmount",
jsonFile: `{"driverName":"test-driver","volumeHandle":"test-vol1"}`,
stageUnstageSet: false,
},
// Old style PV based path negative test cases
{
testName: "fail: json file doesn't exist, old style pv based device path, missing PV",
volID: "project/zone/test-vol1",
deviceMountPath: "plugins/csi/pv/test-pv-name/globalmount",
jsonFile: "",
stageUnstageSet: true,
shouldFail: true,
},
{
testName: "fail: no json, no PV.volID, old style pv based global path",
volID: "",
deviceMountPath: "plugins/csi/pv/test-pv-name/globalmount",
jsonFile: "",
shouldFail: true,
},
}
for _, tc := range testCases {
@@ -1639,18 +1690,18 @@ func TestAttacherUnmountDevice(t *testing.T) {
if tc.deviceMountPath != "" {
tc.deviceMountPath = filepath.Join(tmpDir, tc.deviceMountPath)
}
// Add the volume to NodeStagedVolumes
cdc := csiAttacher.csiClient.(*fakeCsiDriverClient)
cdc.nodeClient.AddNodeStagedVolume(tc.volID, tc.deviceMountPath, nil)
// Make JSON for this object
// Make the device staged path
if tc.deviceMountPath != "" {
if err := os.MkdirAll(tc.deviceMountPath, 0755); err != nil {
t.Fatalf("error creating directory %s: %s", tc.deviceMountPath, err)
}
}
dir := filepath.Dir(tc.deviceMountPath)
// Make JSON for this object
if tc.jsonFile != "" {
dataPath := filepath.Join(dir, volDataFileName)
if err := ioutil.WriteFile(dataPath, []byte(tc.jsonFile), 0644); err != nil {
@@ -1748,3 +1799,8 @@ func getCsiAttacherFromDeviceUnmounter(deviceUnmounter volume.DeviceUnmounter, w
csiAttacher.watchTimeout = watchTimeout
return csiAttacher
}
func generateSha(handle string) string {
result := sha256.Sum256([]byte(fmt.Sprintf("%s", handle)))
return fmt.Sprintf("%x", result)
}
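The UnmountDevice test cases above effectively specify a lookup order for recovering the driver name and volume handle at unstage time: prefer the vol_data JSON written next to the staging directory; if it is missing or invalid, fall back to parsing the old-style pv-based path and looking up the PV; otherwise fail. The following is a hypothetical, self-contained sketch of that order only; resolveStagedVolume, volData, and the pvLookup map are illustrative stand-ins, not the kubelet implementation.

package main

import (
	"encoding/json"
	"errors"
	"fmt"
	"path/filepath"
	"strings"
)

// volData mirrors the two JSON fields the test fixtures above use.
type volData struct {
	DriverName   string `json:"driverName"`
	VolumeHandle string `json:"volumeHandle"`
}

// resolveStagedVolume returns (driver, volumeHandle) for a staging path.
// jsonContents stands in for the vol_data file; pvLookup stands in for an
// API-server lookup of PV name -> (driver, volume handle).
func resolveStagedVolume(deviceMountPath, jsonContents string, pvLookup map[string][2]string) (string, string, error) {
	// Preferred source: the JSON stored alongside the staging directory.
	if jsonContents != "" {
		var d volData
		if err := json.Unmarshal([]byte(jsonContents), &d); err == nil && d.DriverName != "" {
			return d.DriverName, d.VolumeHandle, nil
		}
	}
	// Fallback: old-style path plugins/csi/pv/<pvName>/globalmount -> look up the PV.
	parts := strings.Split(filepath.ToSlash(deviceMountPath), "/")
	if len(parts) >= 3 && parts[len(parts)-3] == "pv" {
		pvName := parts[len(parts)-2]
		if v, ok := pvLookup[pvName]; ok && v[1] != "" {
			return v[0], v[1], nil
		}
	}
	return "", "", errors.New("could not determine driver and volume handle for " + deviceMountPath)
}

func main() {
	pvs := map[string][2]string{"test-pv-name": {"csi", "project/zone/test-vol1"}}
	// Old-style path with no JSON: resolved via the PV lookup.
	fmt.Println(resolveStagedVolume("plugins/csi/pv/test-pv-name/globalmount", "", pvs))
	// PV-agnostic path with JSON present: resolved from the JSON.
	fmt.Println(resolveStagedVolume("plugins/csi/abc123/globalmount", `{"driverName": "csi", "volumeHandle":"project/zone/test-vol1"}`, pvs))
}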


@@ -333,6 +333,26 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
}
wg.Wait()
})
ginkgo.It("should mount multiple PV pointing to the same storage on the same node", func() {
// The csi-hostpath driver does not support this test case: with two PVs pointing to the same underlying storage,
// it fails the NodeStage call for the second volume because it considers the volume already staged at a different path.
// Note: this is not an issue for drivers like PD CSI, where NodeStage is a no-op for block mode.
if pattern.VolMode == v1.PersistentVolumeBlock {
e2eskipper.Skipf("skipping multiple PV mount test for block mode")
}
init()
defer cleanup()
l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
MultiplePVMountSingleNodeCheck(l.cs, f.Timeouts, claim, l.config.ClientNodeSelection)
}
_, clearProvisionedStorageClass := SetupStorageClass(l.testCase.Client, l.testCase.Class)
defer clearProvisionedStorageClass()
l.testCase.TestDynamicProvisioning()
})
}
// SetupStorageClass ensures that a StorageClass from a spec exists, if the StorageClass already exists
@@ -951,3 +971,65 @@ func preparePVCDataSourceForProvisioning(
return dataSourceRef, cleanupFunc
}
// MultiplePVMountSingleNodeCheck checks that multiple PVs pointing to the same underlying storage can be mounted simultaneously on a single node.
//
// Steps:
// - Start Pod1 using PVC1, PV1 (which points to an underlying volume v) on node N1.
// - Create PVC2, PV2 and prebind them. PV2 points to the same underlying volume v.
// - Start Pod2 using PVC2, PV2 (which points to an underlying volume v) on node N1.
func MultiplePVMountSingleNodeCheck(client clientset.Interface, timeouts *framework.TimeoutContext, claim *v1.PersistentVolumeClaim, node e2epod.NodeSelection) {
pod1Config := e2epod.Config{
NS: claim.Namespace,
NodeSelection: node,
PVCs: []*v1.PersistentVolumeClaim{claim},
}
pod1, err := e2epod.CreateSecPodWithNodeSelection(client, &pod1Config, timeouts.PodStart)
framework.ExpectNoError(err)
defer func() {
ginkgo.By(fmt.Sprintf("Deleting Pod %s/%s", pod1.Namespace, pod1.Name))
framework.ExpectNoError(e2epod.DeletePodWithWait(client, pod1))
}()
ginkgo.By(fmt.Sprintf("Created Pod %s/%s on node %s", pod1.Namespace, pod1.Name, pod1.Spec.NodeName))
// Create a new PV which points to the same underlying storage. The Retain policy is used so that deletion of the second PVC does not trigger deletion of its bound PV and underlying storage.
e2evolume, err := getBoundPV(client, claim)
framework.ExpectNoError(err)
pv2Config := e2epv.PersistentVolumeConfig{
NamePrefix: fmt.Sprintf("%s-", "pv"),
StorageClassName: *claim.Spec.StorageClassName,
PVSource: e2evolume.Spec.PersistentVolumeSource,
AccessModes: e2evolume.Spec.AccessModes,
VolumeMode: e2evolume.Spec.VolumeMode,
ReclaimPolicy: v1.PersistentVolumeReclaimRetain,
}
pvc2Config := e2epv.PersistentVolumeClaimConfig{
NamePrefix: fmt.Sprintf("%s-", "pvc"),
StorageClassName: &claim.Namespace,
AccessModes: e2evolume.Spec.AccessModes,
VolumeMode: e2evolume.Spec.VolumeMode,
}
pv2, pvc2, err := e2epv.CreatePVCPV(client, timeouts, pv2Config, pvc2Config, claim.Namespace, true)
framework.ExpectNoError(err, "PVC, PV creation failed")
framework.Logf("Created PVC %s/%s and PV %s in namespace %s", pvc2.Namespace, pvc2.Name, pv2.Name)
pod2Config := e2epod.Config{
NS: pvc2.Namespace,
NodeSelection: e2epod.NodeSelection{Name: pod1.Spec.NodeName},
PVCs: []*v1.PersistentVolumeClaim{pvc2},
}
pod2, err := e2epod.CreateSecPodWithNodeSelection(client, &pod2Config, timeouts.PodStart)
framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("Created Pod %s/%s on node %s", pod2.Namespace, pod2.Name, pod2.Spec.NodeName))
ginkgo.By(fmt.Sprintf("Deleting Pod %s/%s", pod2.Namespace, pod2.Name))
framework.ExpectNoError(e2epod.DeletePodWithWait(client, pod2))
err = e2epv.DeletePersistentVolumeClaim(client, pvc2.Name, pvc2.Namespace)
framework.ExpectNoError(err, "Failed to delete PVC: %s/%s", pvc2.Namespace, pvc2.Name)
err = e2epv.DeletePersistentVolume(client, pv2.Name)
framework.ExpectNoError(err, "Failed to delete PV: %s", pv2.Name)
}