Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-25 04:33:26 +00:00)
Merge pull request #105217 from dbgoytia/refactor/102787-enable-block-tests

Refactor TestConcurrentAccessToRelatedVolumes to enable Block Volume tests

Commit 4af19756bd: exports generateReadBlockCmd as GenerateReadBlockCmd, drops the block-volume skips from the multi-volume snapshot and clone tests, and reworks the content check in TestConcurrentAccessToRelatedVolumes to handle both Block and Filesystem volume modes.
@@ -454,7 +454,7 @@ func testVolumeContent(f *framework.Framework, pod *v1.Pod, fsGroup *int64, fsTy
         if test.Mode == v1.PersistentVolumeBlock {
             // Block: check content
             deviceName := fmt.Sprintf("/opt/%d", i)
-            commands := generateReadBlockCmd(deviceName, len(test.ExpectedContent))
+            commands := GenerateReadBlockCmd(deviceName, len(test.ExpectedContent))
             _, err := framework.LookForStringInPodExec(pod.Namespace, pod.Name, commands, test.ExpectedContent, time.Minute)
             framework.ExpectNoError(err, "failed: finding the contents of the block device %s.", deviceName)
 
@@ -573,7 +573,7 @@ func generateWriteCmd(content, path string) []string {
 }
 
 // generateReadBlockCmd generates the corresponding command lines to read from a block device with the given file path.
-func generateReadBlockCmd(fullPath string, numberOfCharacters int) []string {
+func GenerateReadBlockCmd(fullPath string, numberOfCharacters int) []string {
     var commands []string
     commands = []string{"head", "-c", strconv.Itoa(numberOfCharacters), fullPath}
     return commands
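The helper being renamed is tiny, and the rename is the whole point: exporting it lets tests outside this package call it. Below is a compile-ready sketch of the function as it stands after this hunk, reconstructed from the + lines above; the package clause comment and the simplified single-line return are my additions, the command itself is verbatim.

package volume // the e2e framework volume helpers, imported elsewhere as e2evolume

import "strconv"

// GenerateReadBlockCmd builds the command line that reads the first
// numberOfCharacters bytes from the block device at fullPath,
// i.e. the equivalent of: head -c <numberOfCharacters> <fullPath>.
// Exporting it (generateReadBlockCmd -> GenerateReadBlockCmd) is what allows
// callers in other packages to use it as e2evolume.GenerateReadBlockCmd.
func GenerateReadBlockCmd(fullPath string, numberOfCharacters int) []string {
    return []string{"head", "-c", strconv.Itoa(numberOfCharacters), fullPath}
}

The multivolume hunk further down shows the new cross-package call site: commands = e2evolume.GenerateReadBlockCmd(fileName, len(expectedContent)).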
@@ -328,12 +328,6 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p
         if pattern.SnapshotType == "" {
             e2eskipper.Skipf("Driver %q does not support snapshots - skipping", dInfo.Name)
         }
-        if pattern.VolMode == v1.PersistentVolumeBlock {
-            // TODO: refactor prepareSnapshotDataSourceForProvisioning() below to use
-            // utils.CheckWriteToPath / utils.CheckReadFromPath and remove
-            // redundant InjectContent(). This will enable block volume tests.
-            e2eskipper.Skipf("This test does not support block volumes -- skipping")
-        }
 
         // Create a volume
         testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
@@ -386,12 +380,6 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p
         if !l.driver.GetDriverInfo().Capabilities[storageframework.CapPVCDataSource] {
             e2eskipper.Skipf("Driver %q does not support volume clone - skipping", dInfo.Name)
         }
-        if pattern.VolMode == v1.PersistentVolumeBlock {
-            // TODO: refactor preparePVCDataSourceForProvisioning() below to use
-            // utils.CheckWriteToPath / utils.CheckReadFromPath and remove
-            // redundant InjectContent(). This will enable block volume tests.
-            e2eskipper.Skipf("This test does not support block volumes -- skipping")
-        }
 
         // Create a volume
         expectedContent := fmt.Sprintf("volume content %d", time.Now().UTC().UnixNano())
@@ -711,17 +699,28 @@ func TestConcurrentAccessToRelatedVolumes(f *framework.Framework, cs clientset.I
         e2epod.SetAffinity(&node, actualNodeName)
     }
 
-    // Check that all pods the same content
-    for i, pod := range pods {
-        fileName := "/mnt/volume1/index.html"
-        index := i + 1
+    for i, pvc := range pvcs {
+        var commands []string
 
-        ginkgo.By(fmt.Sprintf("Checking if the volume in pod%d has expected initial content", index))
-        commands := e2evolume.GenerateReadFileCmd(fileName)
-        _, err := framework.LookForStringInPodExec(pod.Namespace, pod.Name, commands, expectedContent, time.Minute)
-        framework.ExpectNoError(err, "failed: finding the contents of the mounted file %s.", fileName)
-    }
-}
+        if *pvc.Spec.VolumeMode == v1.PersistentVolumeBlock {
+            fileName := "/mnt/volume1"
+            commands = e2evolume.GenerateReadBlockCmd(fileName, len(expectedContent))
+            // Check that all pods have the same content
+            index := i + 1
+            ginkgo.By(fmt.Sprintf("Checking if the volume in pod%d has expected initial content", index))
+            _, err := framework.LookForStringInPodExec(pods[i].Namespace, pods[i].Name, commands, expectedContent, time.Minute)
+            framework.ExpectNoError(err, "failed: finding the contents of the block volume %s.", fileName)
+        } else {
+            fileName := "/mnt/volume1/index.html"
+            commands = e2evolume.GenerateReadFileCmd(fileName)
+            // Check that all pods have the same content
+            index := i + 1
+            ginkgo.By(fmt.Sprintf("Checking if the volume in pod%d has expected initial content", index))
+            _, err := framework.LookForStringInPodExec(pods[i].Namespace, pods[i].Name, commands, expectedContent, time.Minute)
+            framework.ExpectNoError(err, "failed: finding the contents of the mounted file %s.", fileName)
+        }
+    }
+}
 
 // getCurrentTopologies() goes through all Nodes and returns unique driver topologies and count of Nodes per topology
 func getCurrentTopologiesNumber(cs clientset.Interface, nodes *v1.NodeList, keys []string) ([]topology, []int, error) {
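Stripped of the Ginkgo and framework plumbing, the decision the new loop makes per PVC is simply which command reads the content back. Here is a minimal, framework-free sketch of that branching, with the paths and the head -c command taken from the diff; the function name readContentCmd, the string-typed mode parameter, and the plain cat standing in for e2evolume.GenerateReadFileCmd are illustrative assumptions.

package main

import (
    "fmt"
    "strconv"
)

// readContentCmd mirrors the branching added to TestConcurrentAccessToRelatedVolumes:
// block volumes are read raw from the device path, filesystem volumes from index.html.
func readContentCmd(volumeMode string, expectedContent string) []string {
    if volumeMode == "Block" {
        // The raw device is attached at /mnt/volume1; read only as many bytes
        // as the expected content, since the rest of the device is not compared.
        return []string{"head", "-c", strconv.Itoa(len(expectedContent)), "/mnt/volume1"}
    }
    // Filesystem volumes are mounted at /mnt/volume1 and the test data lives in
    // index.html; `cat` stands in here for whatever e2evolume.GenerateReadFileCmd builds.
    return []string{"cat", "/mnt/volume1/index.html"}
}

func main() {
    fmt.Println(readContentCmd("Block", "volume content 1234"))
    fmt.Println(readContentCmd("Filesystem", "volume content 1234"))
}

In the test itself, the chosen command's output is then matched against expectedContent via framework.LookForStringInPodExec, exactly as in the filesystem-only loop this hunk replaces.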