Add e2e tests for CSI PVCDataSources

Address unit test comments
j-griffith 2019-07-11 21:26:27 -06:00
parent 32dc42ed34
commit 47facf91b9
4 changed files with 85 additions and 17 deletions


@@ -87,8 +87,9 @@ var _ testsuites.SnapshottableTestDriver = &hostpathCSIDriver{}
// InitHostPathCSIDriver returns hostpathCSIDriver that implements TestDriver interface
func InitHostPathCSIDriver() testsuites.TestDriver {
return initHostPathCSIDriver("csi-hostpath",
map[testsuites.Capability]bool{testsuites.CapPersistence: true, testsuites.CapDataSource: true,
testsuites.CapMultiPODs: true, testsuites.CapBlock: true},
map[testsuites.Capability]bool{testsuites.CapPersistence: true, testsuites.CapSnapshotDataSource: true,
testsuites.CapMultiPODs: true, testsuites.CapBlock: true,
testsuites.CapPVCDataSource: true},
"test/e2e/testing-manifests/storage-csi/driver-registrar/rbac.yaml",
"test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml",
"test/e2e/testing-manifests/storage-csi/external-provisioner/rbac.yaml",
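
For comparison, an out-of-tree CSI test driver opts in to the new clone test the same way: by advertising the renamed capabilities in its capability map. A minimal, illustrative sketch (the driver name and init helper below are hypothetical placeholders, not part of this change):

// Hypothetical external driver: only drivers that set CapPVCDataSource run the new
// "provision storage with pvc data source" test, while CapSnapshotDataSource
// (formerly CapDataSource) keeps the snapshot-based provisioning test enabled.
func InitExampleCSIDriver() testsuites.TestDriver {
	return initExampleCSIDriver("example-csi-driver",
		map[testsuites.Capability]bool{
			testsuites.CapPersistence:        true,
			testsuites.CapSnapshotDataSource: true,
			testsuites.CapPVCDataSource:      true,
		})
}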


@@ -44,6 +44,7 @@ import (
type StorageClassTest struct {
Client clientset.Interface
Claim *v1.PersistentVolumeClaim
SourceClaim *v1.PersistentVolumeClaim
Class *storagev1.StorageClass
Name string
CloudProviders []string
@@ -86,10 +87,11 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte
config *PerTestConfig
testCleanup func()
testCase *StorageClassTest
cs clientset.Interface
pvc *v1.PersistentVolumeClaim
sc *storagev1.StorageClass
testCase *StorageClassTest
cs clientset.Interface
pvc *v1.PersistentVolumeClaim
sourcePVC *v1.PersistentVolumeClaim
sc *storagev1.StorageClass
intreeOps opCounts
migratedOps opCounts
@@ -134,10 +136,15 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte
ClaimSize: claimSize,
StorageClassName: &(l.sc.Name),
}, l.config.Framework.Namespace.Name)
e2elog.Logf("In creating storage class object and pvc object for driver - sc: %v, pvc: %v", l.sc, l.pvc)
l.sourcePVC = framework.MakePersistentVolumeClaim(framework.PersistentVolumeClaimConfig{
ClaimSize: claimSize,
StorageClassName: &(l.sc.Name),
}, l.config.Framework.Namespace.Name)
e2elog.Logf("In creating storage class object and pvc objects for driver - sc: %v, pvc: %v, src-pvc: %v", l.sc, l.pvc, l.sourcePVC)
l.testCase = &StorageClassTest{
Client: l.config.Framework.ClientSet,
Claim: l.pvc,
SourceClaim: l.sourcePVC,
Class: l.sc,
ClaimSize: claimSize,
ExpectedSize: claimSize,
@@ -169,13 +176,13 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte
})
ginkgo.It("should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]", func() {
if !dInfo.Capabilities[CapDataSource] {
if !dInfo.Capabilities[CapSnapshotDataSource] {
framework.Skipf("Driver %q does not support populate data from snapshot - skipping", dInfo.Name)
}
sDriver, ok := driver.(SnapshottableTestDriver)
if !ok {
e2elog.Failf("Driver %q has CapDataSource but does not implement SnapshottableTestDriver", dInfo.Name)
e2elog.Failf("Driver %q has CapSnapshotDataSource but does not implement SnapshottableTestDriver", dInfo.Name)
}
init()
@@ -183,7 +190,7 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte
dc := l.config.Framework.DynamicClient
vsc := sDriver.GetSnapshotClass(l.config)
dataSource, cleanupFunc := prepareDataSourceForProvisioning(framework.NodeSelection{Name: l.config.ClientNodeName}, l.cs, dc, l.pvc, l.sc, vsc)
dataSource, cleanupFunc := prepareSnapshotDataSourceForProvisioning(framework.NodeSelection{Name: l.config.ClientNodeName}, l.cs, dc, l.pvc, l.sc, vsc)
defer cleanupFunc()
l.pvc.Spec.DataSource = dataSource
@@ -194,6 +201,26 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte
}
l.testCase.TestDynamicProvisioning()
})
ginkgo.It("should provision storage with pvc data source [Feature:VolumePVCDataSource]", func() {
if !dInfo.Capabilities[CapPVCDataSource] {
framework.Skipf("Driver %q does not support cloning - skipping", dInfo.Name)
}
init()
defer cleanup()
dc := l.config.Framework.DynamicClient
dataSource, dataSourceCleanup := preparePVCDataSourceForProvisioning(framework.NodeSelection{Name: l.config.ClientNodeName}, l.cs, dc, l.sourcePVC, l.sc)
defer dataSourceCleanup()
l.pvc.Spec.DataSource = dataSource
l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
ginkgo.By("checking whether the created volume has the pre-populated data")
command := fmt.Sprintf("grep '%s' /mnt/test/initialData", claim.Namespace)
RunInPodWithVolume(l.cs, claim.Namespace, claim.Name, "pvc-datasource-tester", command, framework.NodeSelection{Name: l.config.ClientNodeName})
}
l.testCase.TestDynamicProvisioning()
})
}
// TestDynamicProvisioning tests dynamic provisioning with specified StorageClassTest
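
The claim the new test submits is, in effect, the object an end user would create to clone a volume: a PVC whose spec.dataSource names another PVC in the same namespace. A rough sketch of that object, assuming the usual core/v1, metav1, and resource imports (the size and variable names are illustrative, not taken from the test):

// Hedged sketch: a clone PVC as the dynamic-provisioning test would submit it.
clonePVC := &v1.PersistentVolumeClaim{
	ObjectMeta: metav1.ObjectMeta{
		GenerateName: "pvc-",
		Namespace:    sourcePVC.Namespace, // clone source and destination share a namespace
	},
	Spec: v1.PersistentVolumeClaimSpec{
		StorageClassName: &scName,
		AccessModes:      []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
		Resources: v1.ResourceRequirements{
			Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Gi")},
		},
		// This is what l.pvc.Spec.DataSource is set to above; APIGroup stays nil
		// because PersistentVolumeClaim is in the core API group.
		DataSource: &v1.TypedLocalObjectReference{
			Kind: "PersistentVolumeClaim",
			Name: sourcePVC.Name,
		},
	},
}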
@@ -581,7 +608,7 @@ func verifyPVCsPending(client clientset.Interface, pvcs []*v1.PersistentVolumeCl
}
}
func prepareDataSourceForProvisioning(
func prepareSnapshotDataSourceForProvisioning(
node framework.NodeSelection,
client clientset.Interface,
dynamicClient dynamic.Interface,
@@ -653,3 +680,42 @@ func prepareDataSourceForProvisioning(
return dataSourceRef, cleanupFunc
}
func preparePVCDataSourceForProvisioning(
node framework.NodeSelection,
client clientset.Interface,
dynamicClient dynamic.Interface,
source *v1.PersistentVolumeClaim,
class *storagev1.StorageClass,
) (*v1.TypedLocalObjectReference, func()) {
var err error
if class != nil {
ginkgo.By("[Initialize dataSource]creating a StorageClass " + class.Name)
_, err = client.StorageV1().StorageClasses().Create(class)
framework.ExpectNoError(err)
}
ginkgo.By("[Initialize dataSource]creating a source PVC")
sourcePVC, err := client.CoreV1().PersistentVolumeClaims(source.Namespace).Create(source)
framework.ExpectNoError(err)
// write namespace to the /mnt/test (= the volume).
ginkgo.By("[Initialize dataSource]write data to volume")
command := fmt.Sprintf("echo '%s' > /mnt/test/initialData", sourcePVC.GetNamespace())
RunInPodWithVolume(client, sourcePVC.Namespace, sourcePVC.Name, "pvc-datasource-writer", command, node)
dataSourceRef := &v1.TypedLocalObjectReference{
Kind: "PersistentVolumeClaim",
Name: sourcePVC.GetName(),
}
cleanupFunc := func() {
e2elog.Logf("deleting source PVC %q/%q", sourcePVC.Namespace, sourcePVC.Name)
err = client.CoreV1().PersistentVolumeClaims(sourcePVC.Namespace).Delete(sourcePVC.Name, nil)
if err != nil && !apierrs.IsNotFound(err) {
e2elog.Failf("Error deleting source PVC %q. Error: %v", sourcePVC.Name, err)
}
}
return dataSourceRef, cleanupFunc
}
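
Taken together with the new ginkgo.It block above, the helper gives a simple round trip: create a source PVC, write a marker into it, clone it through spec.dataSource, and grep for the marker on the clone. A condensed, illustrative sketch of that flow using only the pieces added in this commit (nodeName, cs, dc, sourcePVC, sc, claim, and testCase are placeholders):

// Create the source PVC and write the test namespace to /mnt/test/initialData;
// the helper returns a TypedLocalObjectReference to clone from plus a cleanup closure.
dataSource, cleanup := preparePVCDataSourceForProvisioning(
	framework.NodeSelection{Name: nodeName}, cs, dc, sourcePVC, sc)
defer cleanup()

// Point the claim under test at the source PVC; the test namespace doubles as the
// marker because the source and clone PVCs live in the same namespace.
claim.Spec.DataSource = dataSource
testCase.PvCheck = func(c *v1.PersistentVolumeClaim) {
	command := fmt.Sprintf("grep '%s' /mnt/test/initialData", c.Namespace)
	RunInPodWithVolume(cs, c.Namespace, c.Name, "pvc-datasource-tester", command,
		framework.NodeSelection{Name: nodeName})
}
testCase.TestDynamicProvisioning()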


@@ -78,7 +78,7 @@ func (s *snapshottableTestSuite) defineTests(driver TestDriver, pattern testpatt
dInfo := driver.GetDriverInfo()
ok := false
sDriver, ok = driver.(SnapshottableTestDriver)
if !dInfo.Capabilities[CapDataSource] || !ok {
if !dInfo.Capabilities[CapSnapshotDataSource] || !ok {
framework.Skipf("Driver %q does not support snapshots - skipping", dInfo.Name)
}
dDriver, ok = driver.(DynamicPVTestDriver)


@@ -131,11 +131,12 @@ type Capability string
// Constants related to capability
const (
CapPersistence Capability = "persistence" // data is persisted across pod restarts
CapBlock Capability = "block" // raw block mode
CapFsGroup Capability = "fsGroup" // volume ownership via fsGroup
CapExec Capability = "exec" // exec a file in the volume
CapDataSource Capability = "dataSource" // support populate data from snapshot
CapPersistence Capability = "persistence" // data is persisted across pod restarts
CapBlock Capability = "block" // raw block mode
CapFsGroup Capability = "fsGroup" // volume ownership via fsGroup
CapExec Capability = "exec" // exec a file in the volume
CapSnapshotDataSource Capability = "snapshotDataSource" // support populate data from snapshot
CapPVCDataSource Capability = "pvcDataSource" // support populate data from pvc
// multiple pods on a node can use the same volume concurrently;
// for CSI, see: