Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-21 10:51:29 +00:00)

Merge pull request #102918 from jsafrane/cloning-topology

Run volume cloning tests in the same topology segment

This commit is contained in: commit cda98f20b2
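
What the patch does, in brief: the ensureTopologyRequirements helper now looks up the schedulable nodes and the driver's topology keys itself, so each call site in the multi-volume suite shrinks from a dozen lines to three; and the provisioning suite gains a NodeSelection hook so that volume cloning tests can pin all pods to a single topology segment. A condensed before/after sketch of one call site, assembled from the hunks below (not a verbatim excerpt):

    // Before: each call site fetched nodes and topology keys on its own.
    nodes, err := e2enode.GetReadySchedulableNodes(l.cs)
    framework.ExpectNoError(err)
    topologyKeys := dInfo.TopologyKeys
    if len(topologyKeys) != 0 {
        if err = ensureTopologyRequirements(&l.config.ClientNodeSelection, nodes, l.cs, topologyKeys, 2); err != nil {
            framework.Failf("Error setting topology requirements: %v", err)
        }
    }

    // After: the helper encapsulates the node lookup and the topology check.
    if err := ensureTopologyRequirements(&l.config.ClientNodeSelection, l.cs, dInfo, 2); err != nil {
        framework.Failf("Error setting topology requirements: %v", err)
    }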
@@ -177,20 +177,11 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p
     if l.driver.GetDriverInfo().Capabilities[storageframework.CapSingleNodeVolume] {
         e2eskipper.Skipf("Driver %s only supports %v -- skipping", l.driver.GetDriverInfo().Name, storageframework.CapSingleNodeVolume)
     }
-    nodes, err := e2enode.GetReadySchedulableNodes(l.cs)
-    framework.ExpectNoError(err)
-    if len(nodes.Items) < 2 {
-        e2eskipper.Skipf("Number of available nodes is less than 2 - skipping")
-    }
     if l.config.ClientNodeSelection.Name != "" {
         e2eskipper.Skipf("Driver %q requires to deploy on a specific node - skipping", l.driver.GetDriverInfo().Name)
     }
-    // For multi-node tests there must be enough nodes with the same toopology to schedule the pods
-    topologyKeys := dInfo.TopologyKeys
-    if len(topologyKeys) != 0 {
-        if err = ensureTopologyRequirements(&l.config.ClientNodeSelection, nodes, l.cs, topologyKeys, 2); err != nil {
-            framework.Failf("Error setting topology requirements: %v", err)
-        }
+    if err := ensureTopologyRequirements(&l.config.ClientNodeSelection, l.cs, dInfo, 2); err != nil {
+        framework.Failf("Error setting topology requirements: %v", err)
     }

     var pvcs []*v1.PersistentVolumeClaim
@@ -270,20 +261,11 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p
     if l.driver.GetDriverInfo().Capabilities[storageframework.CapSingleNodeVolume] {
         e2eskipper.Skipf("Driver %s only supports %v -- skipping", l.driver.GetDriverInfo().Name, storageframework.CapSingleNodeVolume)
     }
-    nodes, err := e2enode.GetReadySchedulableNodes(l.cs)
-    framework.ExpectNoError(err)
-    if len(nodes.Items) < 2 {
-        e2eskipper.Skipf("Number of available nodes is less than 2 - skipping")
-    }
     if l.config.ClientNodeSelection.Name != "" {
         e2eskipper.Skipf("Driver %q requires to deploy on a specific node - skipping", l.driver.GetDriverInfo().Name)
     }
-    // For multi-node tests there must be enough nodes with the same toopology to schedule the pods
-    topologyKeys := dInfo.TopologyKeys
-    if len(topologyKeys) != 0 {
-        if err = ensureTopologyRequirements(&l.config.ClientNodeSelection, nodes, l.cs, topologyKeys, 2); err != nil {
-            framework.Failf("Error setting topology requirements: %v", err)
-        }
+    if err := ensureTopologyRequirements(&l.config.ClientNodeSelection, l.cs, dInfo, 2); err != nil {
+        framework.Failf("Error setting topology requirements: %v", err)
     }

     var pvcs []*v1.PersistentVolumeClaim
@@ -486,20 +468,12 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p
     }

     // Check different-node test requirement
-    nodes, err := e2enode.GetReadySchedulableNodes(l.cs)
-    framework.ExpectNoError(err)
-    if len(nodes.Items) < numPods {
-        e2eskipper.Skipf(fmt.Sprintf("Number of available nodes is less than %d - skipping", numPods))
-    }
     if l.config.ClientNodeSelection.Name != "" {
         e2eskipper.Skipf("Driver %q requires to deploy on a specific node - skipping", l.driver.GetDriverInfo().Name)
     }
     // For multi-node tests there must be enough nodes with the same toopology to schedule the pods
-    topologyKeys := dInfo.TopologyKeys
-    if len(topologyKeys) != 0 {
-        if err = ensureTopologyRequirements(&l.config.ClientNodeSelection, nodes, l.cs, topologyKeys, 2); err != nil {
-            framework.Failf("Error setting topology requirements: %v", err)
-        }
+    if err := ensureTopologyRequirements(&l.config.ClientNodeSelection, l.cs, dInfo, 2); err != nil {
+        framework.Failf("Error setting topology requirements: %v", err)
     }

     // Create volume
@@ -780,8 +754,21 @@ func getCurrentTopologiesNumber(cs clientset.Interface, nodes *v1.NodeList, keys
     return topos, topoCount, nil
 }

-// ensureTopologyRequirements sets nodeSelection affinity according to given topology keys for drivers that provide them
-func ensureTopologyRequirements(nodeSelection *e2epod.NodeSelection, nodes *v1.NodeList, cs clientset.Interface, topologyKeys []string, minCount int) error {
+// ensureTopologyRequirements check that there are enough nodes in the cluster for a test and
+// sets nodeSelection affinity according to given topology keys for drivers that provide them.
+func ensureTopologyRequirements(nodeSelection *e2epod.NodeSelection, cs clientset.Interface, driverInfo *storageframework.DriverInfo, minCount int) error {
+    nodes, err := e2enode.GetReadySchedulableNodes(cs)
+    framework.ExpectNoError(err)
+    if len(nodes.Items) < minCount {
+        e2eskipper.Skipf(fmt.Sprintf("Number of available nodes is less than %d - skipping", minCount))
+    }
+
+    topologyKeys := driverInfo.TopologyKeys
+    if len(topologyKeys) == 0 {
+        // The driver does not have any topology restrictions
+        return nil
+    }
+
     topologyList, topologyCount, err := getCurrentTopologiesNumber(cs, nodes, topologyKeys)
     if err != nil {
         return err
@@ -797,7 +784,6 @@ func ensureTopologyRequirements(nodeSelection *e2epod.NodeSelection, nodes *v1.N
     }
     // Take the first suitable topology
     e2epod.SetNodeAffinityTopologyRequirement(nodeSelection, suitableTopologies[0])
-
     return nil
 }
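
Read together with the elided middle of the function (unchanged by this patch), the rewritten helper now looks roughly like this; suitableTopologies is defined in the part the diff does not show:

    // ensureTopologyRequirements check that there are enough nodes in the cluster for a test and
    // sets nodeSelection affinity according to given topology keys for drivers that provide them.
    func ensureTopologyRequirements(nodeSelection *e2epod.NodeSelection, cs clientset.Interface, driverInfo *storageframework.DriverInfo, minCount int) error {
        nodes, err := e2enode.GetReadySchedulableNodes(cs)
        framework.ExpectNoError(err)
        if len(nodes.Items) < minCount {
            e2eskipper.Skipf(fmt.Sprintf("Number of available nodes is less than %d - skipping", minCount))
        }

        topologyKeys := driverInfo.TopologyKeys
        if len(topologyKeys) == 0 {
            // The driver does not have any topology restrictions
            return nil
        }

        topologyList, topologyCount, err := getCurrentTopologiesNumber(cs, nodes, topologyKeys)
        if err != nil {
            return err
        }
        // ... elided by the diff: build suitableTopologies from topologyList and
        // topologyCount, keeping segments with at least minCount nodes ...

        // Take the first suitable topology
        e2epod.SetNodeAffinityTopologyRequirement(nodeSelection, suitableTopologies[0])
        return nil
    }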
@@ -60,6 +60,7 @@ type StorageClassTest struct {
     PvCheck              func(claim *v1.PersistentVolumeClaim)
     VolumeMode           v1.PersistentVolumeMode
     AllowVolumeExpansion bool
+    NodeSelection        e2epod.NodeSelection
 }

 type provisioningTestSuite struct {
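
The new NodeSelection field is the channel through which the topology pin reaches the pods that TestDynamicProvisioning creates. Pieced together from the hunks below (same identifiers, abridged):

    // The cloning test copies the pinned selection into the test case...
    l.testCase.NodeSelection = testConfig.ClientNodeSelection

    // ...and TestDynamicProvisioning passes it to the check pod's config:
    var podConfig *e2epod.Config = &e2epod.Config{
        NS:            claim.Namespace,
        PVCs:          []*v1.PersistentVolumeClaim{claim},
        NodeSelection: t.NodeSelection,
    }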
@@ -242,12 +243,20 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
     init()
     defer cleanup()

+    if l.config.ClientNodeSelection.Name == "" {
+        // Schedule all pods to the same topology segment (e.g. a cloud availability zone), some
+        // drivers don't support cloning across them.
+        if err := ensureTopologyRequirements(&l.config.ClientNodeSelection, l.cs, dInfo, 1); err != nil {
+            framework.Failf("Error setting topology requirements: %v", err)
+        }
+    }
     testConfig := storageframework.ConvertTestConfig(l.config)
     expectedContent := fmt.Sprintf("Hello from namespace %s", f.Namespace.Name)
     dataSource, dataSourceCleanup := preparePVCDataSourceForProvisioning(f, testConfig, l.cs, l.sourcePVC, l.sc, pattern.VolMode, expectedContent)
     defer dataSourceCleanup()

     l.pvc.Spec.DataSource = dataSource
+    l.testCase.NodeSelection = testConfig.ClientNodeSelection
     l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
         ginkgo.By("checking whether the created volume has the pre-populated data")
         tests := []e2evolume.Test{
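
For context on what the pin means at the scheduling level: e2epod.SetNodeAffinityTopologyRequirement constrains the pods with required node affinity on the chosen segment's labels. A self-contained sketch of such a constraint, assuming a zone-style key; buildZoneAffinity and the example key/value are illustrative, not part of this patch:

    package sketch

    import (
        v1 "k8s.io/api/core/v1"
    )

    // buildZoneAffinity returns a node-affinity term that only matches nodes in
    // one topology segment, e.g. topology.kubernetes.io/zone=zone-a.
    func buildZoneAffinity(key, value string) *v1.Affinity {
        return &v1.Affinity{
            NodeAffinity: &v1.NodeAffinity{
                RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
                    NodeSelectorTerms: []v1.NodeSelectorTerm{{
                        MatchExpressions: []v1.NodeSelectorRequirement{{
                            Key:      key,
                            Operator: v1.NodeSelectorOpIn,
                            Values:   []string{value},
                        }},
                    }},
                },
            },
        }
    }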
@@ -275,6 +284,13 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
     init()
     defer cleanup()

+    if l.config.ClientNodeSelection.Name == "" {
+        // Schedule all pods to the same topology segment (e.g. a cloud availability zone), some
+        // drivers don't support cloning across them.
+        if err := ensureTopologyRequirements(&l.config.ClientNodeSelection, l.cs, dInfo, 1); err != nil {
+            framework.Failf("Error setting topology requirements: %v", err)
+        }
+    }
     testConfig := storageframework.ConvertTestConfig(l.config)
     expectedContent := fmt.Sprintf("Hello from namespace %s", f.Namespace.Name)
     dataSource, dataSourceCleanup := preparePVCDataSourceForProvisioning(f, testConfig, l.cs, l.sourcePVC, l.sc, pattern.VolMode, expectedContent)
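
Note the minCount difference between the suites: the multi-volume hunks above pass 2 because those tests schedule pods on two different nodes within one segment, while both cloning call sites pass 1 because the source and the clone only need to share a segment, not run on distinct nodes:

    // Multi-volume tests: require two schedulable nodes in the same segment.
    if err := ensureTopologyRequirements(&l.config.ClientNodeSelection, l.cs, dInfo, 2); err != nil {
        framework.Failf("Error setting topology requirements: %v", err)
    }

    // Volume cloning tests: one node is enough; pods merely share the segment.
    if err := ensureTopologyRequirements(&l.config.ClientNodeSelection, l.cs, dInfo, 1); err != nil {
        framework.Failf("Error setting topology requirements: %v", err)
    }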
@@ -293,6 +309,7 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
     myTestConfig.Prefix = fmt.Sprintf("%s-%d", myTestConfig.Prefix, i)

     t := *l.testCase
+    t.NodeSelection = testConfig.ClientNodeSelection
     t.PvCheck = func(claim *v1.PersistentVolumeClaim) {
         ginkgo.By(fmt.Sprintf("checking whether the created volume %d has the pre-populated data", i))
         tests := []e2evolume.Test{
@@ -390,8 +407,9 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume {
     if *class.VolumeBindingMode == storagev1.VolumeBindingWaitForFirstConsumer {
         ginkgo.By(fmt.Sprintf("creating a pod referring to the class=%+v claim=%+v", class, claim))
         var podConfig *e2epod.Config = &e2epod.Config{
-            NS:   claim.Namespace,
-            PVCs: []*v1.PersistentVolumeClaim{claim},
+            NS:            claim.Namespace,
+            PVCs:          []*v1.PersistentVolumeClaim{claim},
+            NodeSelection: t.NodeSelection,
         }

         var pod *v1.Pod