Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-25 20:53:33 +00:00
Merge pull request #102775 from jsafrane/cleanup-multivolume

Cleanup multivolume tests

commit b57263b324
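In short: the related-volume tests no longer take requiresSameNode and readOnly flags (every caller passed true /* sameNode */ and false /* readOnly */ anyway) and instead pass an expectedContent string. TestConcurrentAccessToRelatedVolumes now always co-locates the pods on one node, mounts the PVCs read-write, and verifies that each pod's volume contains a per-run unique payload rather than merely counting pods. Block-volume variants of the snapshot and clone tests are skipped with a TODO until the data-source helpers stop relying on InjectContent().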
@@ -345,6 +345,12 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p
 		if pattern.SnapshotType == "" {
 			e2eskipper.Skipf("Driver %q does not support snapshots - skipping", dInfo.Name)
 		}
+		if pattern.VolMode == v1.PersistentVolumeBlock {
+			// TODO: refactor prepareSnapshotDataSourceForProvisioning() below to use
+			// utils.CheckWriteToPath / utils.CheckReadFromPath and remove
+			// redundant InjectContent(). This will enable block volume tests.
+			e2eskipper.Skipf("This test does not support block volumes -- skipping")
+		}
 
 		// Create a volume
 		testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
@@ -353,13 +359,14 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p
 		pvcs := []*v1.PersistentVolumeClaim{resource.Pvc}
 
 		// Create snapshot of it
+		expectedContent := fmt.Sprintf("volume content %d", time.Now().UTC().UnixNano())
 		sDriver, ok := driver.(storageframework.SnapshottableTestDriver)
 		if !ok {
 			framework.Failf("Driver %q has CapSnapshotDataSource but does not implement SnapshottableTestDriver", dInfo.Name)
 		}
 		testConfig := storageframework.ConvertTestConfig(l.config)
 		dc := l.config.Framework.DynamicClient
-		dataSource, cleanupFunc := prepareSnapshotDataSourceForProvisioning(f, testConfig, l.config, pattern, l.cs, dc, resource.Pvc, resource.Sc, sDriver, pattern.VolMode, "injected content")
+		dataSource, cleanupFunc := prepareSnapshotDataSourceForProvisioning(f, testConfig, l.config, pattern, l.cs, dc, resource.Pvc, resource.Sc, sDriver, pattern.VolMode, expectedContent)
 		defer cleanupFunc()
 
 		// Create 2nd PVC for testing
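Note on the injected payload: stamping the content with the UTC nanosecond time makes it unique to each run, so the later read-back check cannot be satisfied by data left behind by an earlier test.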
@@ -381,8 +388,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p
 		}()
 
 		// Test access to both volumes on the same node.
-		TestConcurrentAccessToRelatedVolumes(l.config.Framework, l.cs, l.ns.Name,
-			l.config.ClientNodeSelection, pvcs, true /* sameNode */, false /* readOnly */)
+		TestConcurrentAccessToRelatedVolumes(l.config.Framework, l.cs, l.ns.Name, l.config.ClientNodeSelection, pvcs, expectedContent)
 	})
 
 	// This tests below configuration:
@@ -397,14 +403,21 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p
 		if !l.driver.GetDriverInfo().Capabilities[storageframework.CapPVCDataSource] {
 			e2eskipper.Skipf("Driver %q does not support volume clone - skipping", dInfo.Name)
 		}
+		if pattern.VolMode == v1.PersistentVolumeBlock {
+			// TODO: refactor preparePVCDataSourceForProvisioning() below to use
+			// utils.CheckWriteToPath / utils.CheckReadFromPath and remove
+			// redundant InjectContent(). This will enable block volume tests.
+			e2eskipper.Skipf("This test does not support block volumes -- skipping")
+		}
 
 		// Create a volume
+		expectedContent := fmt.Sprintf("volume content %d", time.Now().UTC().UnixNano())
 		testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
 		resource := storageframework.CreateVolumeResource(l.driver, l.config, pattern, testVolumeSizeRange)
 		l.resources = append(l.resources, resource)
 		pvcs := []*v1.PersistentVolumeClaim{resource.Pvc}
 		testConfig := storageframework.ConvertTestConfig(l.config)
-		dataSource, cleanupFunc := preparePVCDataSourceForProvisioning(f, testConfig, l.cs, resource.Pvc, resource.Sc, pattern.VolMode, "injected content")
+		dataSource, cleanupFunc := preparePVCDataSourceForProvisioning(f, testConfig, l.cs, resource.Pvc, resource.Sc, pattern.VolMode, expectedContent)
 		defer cleanupFunc()
 
 		// Create 2nd PVC for testing
@@ -426,8 +439,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p
 		}()
 
 		// Test access to both volumes on the same node.
-		TestConcurrentAccessToRelatedVolumes(l.config.Framework, l.cs, l.ns.Name,
-			l.config.ClientNodeSelection, pvcs, true /* sameNode */, false /* readOnly */)
+		TestConcurrentAccessToRelatedVolumes(l.config.Framework, l.cs, l.ns.Name, l.config.ClientNodeSelection, pvcs, expectedContent)
 	})
 
 	// This tests below configuration:
@@ -696,8 +708,7 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
 // Each provided PVC is used by a single pod. The test ensures that volumes created from
 // another volume (=clone) or volume snapshot can be used together with the original volume.
 func TestConcurrentAccessToRelatedVolumes(f *framework.Framework, cs clientset.Interface, ns string,
-	node e2epod.NodeSelection, pvcs []*v1.PersistentVolumeClaim, requiresSameNode bool,
-	readOnly bool) {
+	node e2epod.NodeSelection, pvcs []*v1.PersistentVolumeClaim, expectedContent string) {
 
 	var pods []*v1.Pod
 
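For reference, a call under the new signature only picks the PVCs and the payload to verify; node co-location and read-write mounting are now fixed inside the helper. A minimal sketch mirroring the two updated call sites above (the l fixture and pvcs come from the surrounding suite):

	expectedContent := fmt.Sprintf("volume content %d", time.Now().UTC().UnixNano())
	TestConcurrentAccessToRelatedVolumes(l.config.Framework, l.cs, l.ns.Name,
		l.config.ClientNodeSelection, pvcs, expectedContent)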
@@ -710,7 +721,7 @@ func TestConcurrentAccessToRelatedVolumes(f *framework.Framework, cs clientset.I
 			PVCs:          []*v1.PersistentVolumeClaim{pvcs[i]},
 			SeLinuxLabel:  e2epod.GetLinuxLabel(),
 			NodeSelection: node,
-			PVCsReadOnly:  readOnly,
+			PVCsReadOnly:  false,
 			ImageID:       e2epod.GetTestImageID(imageutils.JessieDnsutils),
 		}
 		pod, err := e2epod.CreateSecPodWithNodeSelection(cs, &podConfig, f.Timeouts.PodStart)
@@ -721,17 +732,19 @@ func TestConcurrentAccessToRelatedVolumes(f *framework.Framework, cs clientset.I
 		pods = append(pods, pod)
 		actualNodeName := pod.Spec.NodeName
 
-		// Set affinity depending on requiresSameNode
-		if requiresSameNode {
-			e2epod.SetAffinity(&node, actualNodeName)
-		} else {
-			e2epod.SetAntiAffinity(&node, actualNodeName)
-		}
+		// Always run the subsequent pods on the same node.
+		e2epod.SetAffinity(&node, actualNodeName)
 	}
 
-	// Delete the last pod and remove from slice of pods
-	if len(pods) < len(pvcs) {
-		framework.Failf("Number of pods shouldn't be less than %d, but got %d", len(pvcs), len(pods))
+	// Check that all pods have the same content
+	for i, pod := range pods {
+		fileName := "/mnt/volume1/index.html"
+		index := i + 1
+
+		ginkgo.By(fmt.Sprintf("Checking if the volume in pod%d has expected initial content", index))
+		commands := e2evolume.GenerateReadFileCmd(fileName)
+		_, err := framework.LookForStringInPodExec(pod.Namespace, pod.Name, commands, expectedContent, time.Minute)
+		framework.ExpectNoError(err, "failed: finding the contents of the mounted file %s.", fileName)
 	}
 }
 
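The verification leans on two framework helpers: e2evolume.GenerateReadFileCmd builds the in-pod command that reads the file, and framework.LookForStringInPodExec keeps exec'ing that command until the expected string appears or the timeout runs out. A self-contained sketch of that poll-until-match pattern, as an assumption about the helper's behavior rather than a copy of its source:

package main

import (
	"fmt"
	"strings"
	"time"
)

// lookForString polls fn until its output contains expected, or gives up
// after timeout; the e2e helper retries an in-pod exec the same way.
func lookForString(expected string, timeout time.Duration, fn func() string) (string, error) {
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(2 * time.Second) {
		if out := fn(); strings.Contains(out, expected) {
			return out, nil
		}
	}
	return "", fmt.Errorf("did not find %q within %v", expected, timeout)
}

func main() {
	// Stand-in for "kubectl exec <pod> -- cat /mnt/volume1/index.html".
	readFile := func() string { return "volume content 1624368400000000000" }
	out, err := lookForString("volume content", time.Minute, readFile)
	fmt.Println(out, err)
}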