Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-22 19:31:44 +00:00
Run volume cloning tests in the same topology segment
Some CSI drivers can't clone a volume into another topology segment (e.g. a cloud availability zone). The scheduler does not know about these restrictions and schedules pods with PVCs that clone a volume more or less at random. Run all volume cloning tests in the same topology segment, if such a segment is available and has at least one schedulable node.
parent 3e0269ce6e
commit 057422504a
@@ -60,6 +60,7 @@ type StorageClassTest struct {
     PvCheck func(claim *v1.PersistentVolumeClaim)
     VolumeMode v1.PersistentVolumeMode
     AllowVolumeExpansion bool
+    NodeSelection e2epod.NodeSelection
 }
 
 type provisioningTestSuite struct {
@@ -242,12 +243,20 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
         init()
         defer cleanup()
 
+        if l.config.ClientNodeSelection.Name == "" {
+            // Schedule all pods to the same topology segment (e.g. a cloud availability zone), some
+            // drivers don't support cloning across them.
+            if err := ensureTopologyRequirements(&l.config.ClientNodeSelection, l.cs, dInfo, 1); err != nil {
+                framework.Failf("Error setting topology requirements: %v", err)
+            }
+        }
         testConfig := storageframework.ConvertTestConfig(l.config)
         expectedContent := fmt.Sprintf("Hello from namespace %s", f.Namespace.Name)
         dataSource, dataSourceCleanup := preparePVCDataSourceForProvisioning(f, testConfig, l.cs, l.sourcePVC, l.sc, pattern.VolMode, expectedContent)
         defer dataSourceCleanup()
 
         l.pvc.Spec.DataSource = dataSource
+        l.testCase.NodeSelection = testConfig.ClientNodeSelection
         l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
             ginkgo.By("checking whether the created volume has the pre-populated data")
             tests := []e2evolume.Test{
@@ -275,6 +284,13 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
         init()
         defer cleanup()
 
+        if l.config.ClientNodeSelection.Name == "" {
+            // Schedule all pods to the same topology segment (e.g. a cloud availability zone), some
+            // drivers don't support cloning across them.
+            if err := ensureTopologyRequirements(&l.config.ClientNodeSelection, l.cs, dInfo, 1); err != nil {
+                framework.Failf("Error setting topology requirements: %v", err)
+            }
+        }
         testConfig := storageframework.ConvertTestConfig(l.config)
         expectedContent := fmt.Sprintf("Hello from namespace %s", f.Namespace.Name)
         dataSource, dataSourceCleanup := preparePVCDataSourceForProvisioning(f, testConfig, l.cs, l.sourcePVC, l.sc, pattern.VolMode, expectedContent)
@@ -293,6 +309,7 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
             myTestConfig.Prefix = fmt.Sprintf("%s-%d", myTestConfig.Prefix, i)
 
             t := *l.testCase
+            t.NodeSelection = testConfig.ClientNodeSelection
             t.PvCheck = func(claim *v1.PersistentVolumeClaim) {
                 ginkgo.By(fmt.Sprintf("checking whether the created volume %d has the pre-populated data", i))
                 tests := []e2evolume.Test{
@@ -390,8 +407,9 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume {
     if *class.VolumeBindingMode == storagev1.VolumeBindingWaitForFirstConsumer {
         ginkgo.By(fmt.Sprintf("creating a pod referring to the class=%+v claim=%+v", class, claim))
         var podConfig *e2epod.Config = &e2epod.Config{
-            NS:   claim.Namespace,
-            PVCs: []*v1.PersistentVolumeClaim{claim},
+            NS:            claim.Namespace,
+            PVCs:          []*v1.PersistentVolumeClaim{claim},
+            NodeSelection: t.NodeSelection,
         }
 
         var pod *v1.Pod
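The hunks above pin l.config.ClientNodeSelection to a single topology segment via ensureTopologyRequirements before any pods or data-source PVCs are created; the helper itself is not part of this diff. Below is a minimal sketch of the idea, assuming the standard topology.kubernetes.io/zone node label and a simplified NodeSelection type; the helper name pickSingleZone and its exact behavior are illustrative, not the framework's actual implementation.

// Minimal sketch only: pin a NodeSelection to the zone of one schedulable node,
// so that the source volume, the clone, and all test pods stay in the same
// topology segment. The helper name and the simplified NodeSelection struct are
// assumptions for illustration; the real e2e framework uses e2epod.NodeSelection
// and ensureTopologyRequirements.
package example

import (
    "context"
    "fmt"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
)

// NodeSelection is a simplified stand-in for e2epod.NodeSelection.
type NodeSelection struct {
    Name     string
    Selector map[string]string
}

// pickSingleZone lists the nodes, finds the first schedulable one with a zone
// label, and restricts the selection to that zone.
func pickSingleZone(ctx context.Context, cs kubernetes.Interface, sel *NodeSelection) error {
    nodes, err := cs.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
    if err != nil {
        return err
    }
    for _, node := range nodes.Items {
        if node.Spec.Unschedulable {
            continue
        }
        zone, ok := node.Labels[v1.LabelTopologyZone]
        if !ok || zone == "" {
            continue
        }
        if sel.Selector == nil {
            sel.Selector = map[string]string{}
        }
        // Every pod created with this selection lands in the same zone,
        // so cloning never has to cross a topology segment.
        sel.Selector[v1.LabelTopologyZone] = zone
        return nil
    }
    return fmt.Errorf("no schedulable node with a %q label found", v1.LabelTopologyZone)
}

With the selection pinned this way, the source PVC, the cloned PVC, and every test pod are scheduled into the same segment, so the driver never has to clone across zones.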