mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-27 13:37:30 +00:00)

Fix multinode storage e2e tests for multizone clusters

parent 718714a935
commit c42e815fb5
@@ -48,6 +48,28 @@ func setNodeAffinityRequirement(nodeSelection *NodeSelection, operator v1.NodeSe
 	})
 }
 
+// SetNodeAffinityTopologyRequirement sets node affinity to a specified topology
+func SetNodeAffinityTopologyRequirement(nodeSelection *NodeSelection, topology map[string]string) {
+	if nodeSelection.Affinity == nil {
+		nodeSelection.Affinity = &v1.Affinity{}
+	}
+	if nodeSelection.Affinity.NodeAffinity == nil {
+		nodeSelection.Affinity.NodeAffinity = &v1.NodeAffinity{}
+	}
+	if nodeSelection.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil {
+		nodeSelection.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution = &v1.NodeSelector{}
+	}
+	for k, v := range topology {
+		nodeSelection.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms = append(nodeSelection.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms,
+			v1.NodeSelectorTerm{
+				MatchExpressions: []v1.NodeSelectorRequirement{
+					{Key: k, Operator: v1.NodeSelectorOpIn, Values: []string{v}},
+				},
+			})
+
+	}
+}
+
 // SetAffinity sets affinity to nodeName to nodeSelection
 func SetAffinity(nodeSelection *NodeSelection, nodeName string) {
 	setNodeAffinityRequirement(nodeSelection, v1.NodeSelectorOpIn, nodeName)
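For context, here is a minimal standalone sketch (not part of this commit) of the kind of required node affinity that SetNodeAffinityTopologyRequirement appends, written against the public k8s.io/api/core/v1 types. The topology map value is an example. Note that the scheduler ORs NodeSelectorTerms and ANDs the MatchExpressions within a term, so with a single topology key, as this commit uses, the pods are pinned to exactly that topology value.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// requiredAffinityForTopology builds a required node affinity with one
// NodeSelectorTerm per topology key/value pair, mirroring the helper above.
func requiredAffinityForTopology(topology map[string]string) *v1.Affinity {
	selector := &v1.NodeSelector{}
	for k, val := range topology {
		selector.NodeSelectorTerms = append(selector.NodeSelectorTerms, v1.NodeSelectorTerm{
			MatchExpressions: []v1.NodeSelectorRequirement{
				{Key: k, Operator: v1.NodeSelectorOpIn, Values: []string{val}},
			},
		})
	}
	return &v1.Affinity{NodeAffinity: &v1.NodeAffinity{
		RequiredDuringSchedulingIgnoredDuringExecution: selector,
	}}
}

func main() {
	// Hypothetical zone label/value, for illustration only.
	affinity := requiredAffinityForTopology(map[string]string{
		"failure-domain.beta.kubernetes.io/zone": "zone-a",
	})
	fmt.Printf("%+v\n", affinity)
}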
@@ -370,12 +370,14 @@ func InitISCSIDriver() testsuites.TestDriver {
 			//"ext3",
 			"ext4",
 		),
+		TopologyKeys: []string{v1.LabelHostname},
 		Capabilities: map[testsuites.Capability]bool{
 			testsuites.CapPersistence: true,
 			testsuites.CapFsGroup:     true,
 			testsuites.CapBlock:       true,
 			testsuites.CapExec:        true,
 			testsuites.CapMultiPODs:   true,
+			testsuites.CapTopology:    true,
 		},
 	},
 }
@@ -772,10 +774,12 @@ func InitHostPathDriver() testsuites.TestDriver {
 		SupportedFsType: sets.NewString(
 			"", // Default fsType
 		),
+		TopologyKeys: []string{v1.LabelHostname},
 		Capabilities: map[testsuites.Capability]bool{
 			testsuites.CapPersistence:      true,
 			testsuites.CapMultiPODs:        true,
 			testsuites.CapSingleNodeVolume: true,
+			testsuites.CapTopology:         true,
 		},
 	},
 }
@@ -845,10 +849,12 @@ func InitHostPathSymlinkDriver() testsuites.TestDriver {
 		SupportedFsType: sets.NewString(
 			"", // Default fsType
 		),
+		TopologyKeys: []string{v1.LabelHostname},
 		Capabilities: map[testsuites.Capability]bool{
 			testsuites.CapPersistence:      true,
 			testsuites.CapMultiPODs:        true,
 			testsuites.CapSingleNodeVolume: true,
+			testsuites.CapTopology:         true,
 		},
 	},
 }
@@ -1059,6 +1065,7 @@ func InitCinderDriver() testsuites.TestDriver {
 			"", // Default fsType
 			"ext3",
 		),
+		TopologyKeys: []string{v1.LabelZoneFailureDomain},
 		Capabilities: map[testsuites.Capability]bool{
 			testsuites.CapPersistence: true,
 			testsuites.CapFsGroup:     true,
@@ -1067,6 +1074,7 @@ func InitCinderDriver() testsuites.TestDriver {
 			// Cinder supports volume limits, but the test creates large
 			// number of volumes and times out test suites.
 			testsuites.CapVolumeLimits: false,
+			testsuites.CapTopology:     true,
 		},
 	},
 }
@@ -1366,11 +1374,13 @@ func InitVSphereDriver() testsuites.TestDriver {
 			"", // Default fsType
 			"ext4",
 		),
+		TopologyKeys: []string{v1.LabelZoneFailureDomain},
 		Capabilities: map[testsuites.Capability]bool{
 			testsuites.CapPersistence: true,
 			testsuites.CapFsGroup:     true,
 			testsuites.CapExec:        true,
 			testsuites.CapMultiPODs:   true,
+			testsuites.CapTopology:    true,
 		},
 	},
 }
@@ -1490,6 +1500,7 @@ func InitAzureDiskDriver() testsuites.TestDriver {
 			"ext4",
 			"xfs",
 		),
+		TopologyKeys: []string{v1.LabelZoneFailureDomain},
 		Capabilities: map[testsuites.Capability]bool{
 			testsuites.CapPersistence: true,
 			testsuites.CapFsGroup:     true,
@@ -1499,6 +1510,7 @@ func InitAzureDiskDriver() testsuites.TestDriver {
 			// Azure supports volume limits, but the test creates large
 			// number of volumes and times out test suites.
 			testsuites.CapVolumeLimits: false,
+			testsuites.CapTopology:     true,
 		},
 	},
 }
@@ -1621,6 +1633,7 @@ func InitAwsDriver() testsuites.TestDriver {
 			"ntfs",
 		),
 		SupportedMountOption: sets.NewString("debug", "nouid32"),
+		TopologyKeys:         []string{v1.LabelZoneFailureDomain},
 		Capabilities: map[testsuites.Capability]bool{
 			testsuites.CapPersistence: true,
 			testsuites.CapFsGroup:     true,
@@ -1632,6 +1645,7 @@ func InitAwsDriver() testsuites.TestDriver {
 			// AWS supports volume limits, but the test creates large
 			// number of volumes and times out test suites.
 			testsuites.CapVolumeLimits: false,
+			testsuites.CapTopology:     true,
 		},
 	},
 }
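The TopologyKeys added above reference the well-known node label constants from k8s.io/api/core/v1: v1.LabelHostname for the node-local drivers (iscsi, hostPath, hostPathSymlink) and v1.LabelZoneFailureDomain for the zonal cloud drivers (cinder, vsphere, azureDisk, aws). The sketch below is for reference only; the literal label strings are my assumption of what those constants resolved to around this release (the beta zone label has since been superseded by topology.kubernetes.io/zone) and are not part of this diff.

package main

import "fmt"

func main() {
	// Assumed values of the referenced constants (not taken from this diff):
	labelHostname := "kubernetes.io/hostname"                 // v1.LabelHostname
	labelZoneFailureDomain := "failure-domain.beta.kubernetes.io/zone" // v1.LabelZoneFailureDomain

	fmt.Println("node-local drivers use:", labelHostname)
	fmt.Println("zonal drivers use:     ", labelZoneFailureDomain)
}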
@@ -180,6 +180,14 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
 		if l.config.ClientNodeName != "" {
 			e2eskipper.Skipf("Driver %q requires to deploy on a specific node - skipping", l.driver.GetDriverInfo().Name)
 		}
+		// For multi-node tests there must be enough nodes with the same topology to schedule the pods
+		nodeSelection := e2epod.NodeSelection{Name: l.config.ClientNodeName}
+		topologyKeys := dInfo.TopologyKeys
+		if len(topologyKeys) != 0 {
+			if err = ensureTopologyRequirements(&nodeSelection, nodes, l.cs, topologyKeys, 2); err != nil {
+				framework.Failf("Error setting topology requirements: %v", err)
+			}
+		}
 
 		var pvcs []*v1.PersistentVolumeClaim
 		numVols := 2
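The block added above only pins the test pods when the driver advertises TopologyKeys and some topology has at least two nodes; otherwise the test either runs unpinned or skips. The standalone sketch below (hypothetical names, simplified to a single label key) illustrates the selection rule that ensureTopologyRequirements applies; it is not the framework code itself.

package main

import "fmt"

// pickTopology returns the first label value shared by at least minCount nodes,
// or false if none qualifies (single-key simplification for illustration).
func pickTopology(nodeZones []string, minCount int) (string, bool) {
	counts := map[string]int{}
	for _, z := range nodeZones {
		counts[z]++
	}
	for _, z := range nodeZones { // preserve first-seen order
		if counts[z] >= minCount {
			return z, true
		}
	}
	return "", false
}

func main() {
	zones := []string{"zone-a", "zone-b", "zone-a", "zone-c"}
	if z, ok := pickTopology(zones, 2); ok {
		fmt.Println("schedule both pods into:", z) // zone-a
	} else {
		fmt.Println("skip: no topology with enough nodes")
	}
}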
@@ -192,7 +200,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
 		}
 
 		TestAccessMultipleVolumesAcrossPodRecreation(l.config.Framework, l.cs, l.ns.Name,
-			e2epod.NodeSelection{Name: l.config.ClientNodeName}, pvcs, false /* sameNode */)
+			nodeSelection, pvcs, false /* sameNode */)
 	})
 
 	// This tests below configuration (only <block, filesystem> pattern is tested):
@@ -266,6 +274,14 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
 		if l.config.ClientNodeName != "" {
 			e2eskipper.Skipf("Driver %q requires to deploy on a specific node - skipping", l.driver.GetDriverInfo().Name)
 		}
+		// For multi-node tests there must be enough nodes with the same topology to schedule the pods
+		nodeSelection := e2epod.NodeSelection{Name: l.config.ClientNodeName}
+		topologyKeys := dInfo.TopologyKeys
+		if len(topologyKeys) != 0 {
+			if err = ensureTopologyRequirements(&nodeSelection, nodes, l.cs, topologyKeys, 2); err != nil {
+				framework.Failf("Error setting topology requirements: %v", err)
+			}
+		}
 
 		var pvcs []*v1.PersistentVolumeClaim
 		numVols := 2
@@ -283,7 +299,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
 		}
 
 		TestAccessMultipleVolumesAcrossPodRecreation(l.config.Framework, l.cs, l.ns.Name,
-			e2epod.NodeSelection{Name: l.config.ClientNodeName}, pvcs, false /* sameNode */)
+			nodeSelection, pvcs, false /* sameNode */)
 	})
 
 	// This tests below configuration:
@@ -335,6 +351,14 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
 		if l.config.ClientNodeName != "" {
 			e2eskipper.Skipf("Driver %q requires to deploy on a specific node - skipping", l.driver.GetDriverInfo().Name)
 		}
+		// For multi-node tests there must be enough nodes with the same topology to schedule the pods
+		nodeSelection := e2epod.NodeSelection{Name: l.config.ClientNodeName}
+		topologyKeys := dInfo.TopologyKeys
+		if len(topologyKeys) != 0 {
+			if err = ensureTopologyRequirements(&nodeSelection, nodes, l.cs, topologyKeys, 2); err != nil {
+				framework.Failf("Error setting topology requirements: %v", err)
+			}
+		}
 
 		// Create volume
 		testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
@@ -343,7 +367,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
 
 		// Test access to the volume from pods on different node
 		TestConcurrentAccessToSingleVolume(l.config.Framework, l.cs, l.ns.Name,
-			e2epod.NodeSelection{Name: l.config.ClientNodeName}, resource.Pvc, numPods, false /* sameNode */)
+			nodeSelection, resource.Pvc, numPods, false /* sameNode */)
 	})
 }
 
@@ -504,3 +528,56 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
 		utils.CheckReadFromPath(f, pod, *pvc.Spec.VolumeMode, path, byteLen, seed)
 	}
 }
+
+// getCurrentTopologiesNumber goes through all Nodes and returns unique driver topologies and count of Nodes per topology
+func getCurrentTopologiesNumber(cs clientset.Interface, nodes *v1.NodeList, keys []string) ([]topology, []int, error) {
+	topos := []topology{}
+	topoCount := []int{}
+
+	// TODO: scale?
+	for _, n := range nodes.Items {
+		topo := map[string]string{}
+		for _, k := range keys {
+			v, ok := n.Labels[k]
+			if ok {
+				topo[k] = v
+			}
+		}
+
+		found := false
+		for i, existingTopo := range topos {
+			if topologyEqual(existingTopo, topo) {
+				found = true
+				topoCount[i]++
+				break
+			}
+		}
+		if !found {
+			framework.Logf("found topology %v", topo)
+			topos = append(topos, topo)
+			topoCount = append(topoCount, 1)
+		}
+	}
+	return topos, topoCount, nil
+}
+
+// ensureTopologyRequirements sets nodeSelection affinity according to given topology keys for drivers that provide them
+func ensureTopologyRequirements(nodeSelection *e2epod.NodeSelection, nodes *v1.NodeList, cs clientset.Interface, topologyKeys []string, minCount int) error {
+	topologyList, topologyCount, err := getCurrentTopologiesNumber(cs, nodes, topologyKeys)
+	if err != nil {
+		return err
+	}
+	suitableTopologies := []topology{}
+	for i, topo := range topologyList {
+		if topologyCount[i] >= minCount {
+			suitableTopologies = append(suitableTopologies, topo)
+		}
+	}
+	if len(suitableTopologies) == 0 {
+		framework.Skipf("No topology with at least %d nodes found - skipping", minCount)
+	}
+	// Take the first suitable topology
+	e2epod.SetNodeAffinityTopologyRequirement(nodeSelection, suitableTopologies[0])
+
+	return nil
+}
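topologyEqual is referenced by getCurrentTopologiesNumber above but is not included in this diff. Since a topology here is just a map of label key/value pairs, a plausible implementation is plain map equality; the sketch below is an assumption for illustration, not the actual upstream helper.

package main

import "fmt"

type topology map[string]string

// topologyEqual reports whether two topologies contain exactly the same
// label key/value pairs (hypothetical sketch; not part of this commit).
func topologyEqual(a, b topology) bool {
	if len(a) != len(b) {
		return false
	}
	for k, v := range a {
		if bv, ok := b[k]; !ok || bv != v {
			return false
		}
	}
	return true
}

func main() {
	a := topology{"kubernetes.io/hostname": "node-1"}
	b := topology{"kubernetes.io/hostname": "node-1"}
	fmt.Println(topologyEqual(a, b)) // true
}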