Merge pull request #78697 from tsmetana/fix-multinode-e2e

e2e: Skip multi-node PV test when pods scheduled on the same node
commit 4b5dc0a06d by Kubernetes Prow Robot, 2019-07-09 03:10:16 -07:00 (committed by GitHub)
3 changed files with 60 additions and 5 deletions


@@ -950,6 +950,36 @@ func SetAntiAffinity(nodeSelection *NodeSelection, nodeName string) {
SetNodeAffinityRequirement(nodeSelection, v1.NodeSelectorOpNotIn, nodeName)
}
// SetNodeAffinityPreference adds a preferred (soft) node affinity term with the specified operator for nodeName to nodeSelection
func SetNodeAffinityPreference(nodeSelection *NodeSelection, operator v1.NodeSelectorOperator, nodeName string) {
// Add a preferred scheduling term matching nodeName with the given operator.
if nodeSelection.Affinity == nil {
nodeSelection.Affinity = &v1.Affinity{}
}
if nodeSelection.Affinity.NodeAffinity == nil {
nodeSelection.Affinity.NodeAffinity = &v1.NodeAffinity{}
}
nodeSelection.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution = append(nodeSelection.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution,
v1.PreferredSchedulingTerm{
Weight: int32(100),
Preference: v1.NodeSelectorTerm{
MatchFields: []v1.NodeSelectorRequirement{
{Key: "metadata.name", Operator: operator, Values: []string{nodeName}},
},
},
})
}
// SetAffinityPreference adds a soft affinity preference for nodeName to nodeSelection
func SetAffinityPreference(nodeSelection *NodeSelection, nodeName string) {
SetNodeAffinityPreference(nodeSelection, v1.NodeSelectorOpIn, nodeName)
}
// SetAntiAffinityPreference adds a soft anti-affinity preference against nodeName to nodeSelection
func SetAntiAffinityPreference(nodeSelection *NodeSelection, nodeName string) {
SetNodeAffinityPreference(nodeSelection, v1.NodeSelectorOpNotIn, nodeName)
}
// CreateClientPod defines and creates a pod with a mounted PV. Pod runs infinite loop until killed.
func CreateClientPod(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) (*v1.Pod, error) {
return CreatePod(c, ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, "")

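For illustration only (not part of this change): a minimal sketch of how the new helper could be called and what it generates, assuming the framework package path k8s.io/kubernetes/test/e2e/framework and the NodeSelection type shown above.

package main

import (
	"fmt"

	"k8s.io/kubernetes/test/e2e/framework"
)

func main() {
	node := framework.NodeSelection{}
	// Prefer (but do not require) scheduling away from "node-1". With only one
	// schedulable node the pod can still land there; the tests detect that and skip.
	framework.SetAntiAffinityPreference(&node, "node-1")

	term := node.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution[0]
	fmt.Printf("weight=%d operator=%s values=%v\n",
		term.Weight, term.Preference.MatchFields[0].Operator, term.Preference.MatchFields[0].Values)
	// Expected: weight=100 operator=NotIn values=[node-1]
}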

@@ -375,7 +375,9 @@ func TestAccessMultipleVolumesAcrossPodRecreation(f *framework.Framework, cs cli
if requiresSameNode {
framework.SetAffinity(&node, nodeName)
} else {
framework.SetAntiAffinity(&node, nodeName)
// Do not put a hard requirement on the anti-affinity: in some cases there may be only
// one node available for scheduling the pod (e.g. a multi-AZ cluster with one node per AZ).
framework.SetAntiAffinityPreference(&node, nodeName)
}
// Test access to multiple volumes again on the node updated above
@@ -383,7 +385,13 @@ func TestAccessMultipleVolumesAcrossPodRecreation(f *framework.Framework, cs cli
readSeedBase = writeSeedBase
// Update writeSeed with new value
writeSeedBase = time.Now().UTC().UnixNano()
_ = testAccessMultipleVolumes(f, cs, ns, node, pvcs, readSeedBase, writeSeedBase)
secondNodeName := testAccessMultipleVolumes(f, cs, ns, node, pvcs, readSeedBase, writeSeedBase)
if !requiresSameNode && (secondNodeName == nodeName) {
// The pod was created on the same node: presumably there was no other node available
// for scheduling the second pod -- this does not mean the test should fail. Skip it instead.
e2elog.Logf("Warning: The pod got scheduled on the same node despite requesting otherwise: skipping test")
framework.Skipf("No node available for the second pod found")
}
}
// TestConcurrentAccessToSingleVolume tests access to a single volume from multiple pods,
@@ -395,6 +403,7 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
var pods []*v1.Pod
firstNodeName := ""
// Create each pod with pvc
for i := 0; i < numPods; i++ {
index := i + 1
@@ -411,12 +420,22 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
pods = append(pods, pod)
framework.ExpectNoError(err, fmt.Sprintf("get pod%d", index))
actualNodeName := pod.Spec.NodeName
// Remember where the first pod was scheduled
if i == 0 {
firstNodeName = actualNodeName
}
// If the second pod got scheduled on the same node as the first one, it means
// there was no other node available for the second pod, and the test should be skipped.
if !requiresSameNode && i == 1 && (actualNodeName == firstNodeName) {
e2elog.Logf("Warning: The pod got scheduled on the same node as the previous one: skipping test")
framework.Skipf("No node available for the second pod found")
}
// Set affinity depending on requiresSameNode
if requiresSameNode {
framework.SetAffinity(&node, actualNodeName)
} else {
framework.SetAntiAffinity(&node, actualNodeName)
framework.SetAntiAffinityPreference(&node, actualNodeName)
}
}


@@ -424,7 +424,9 @@ func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClai
// Add node-anti-affinity.
secondNode := node
framework.SetAntiAffinity(&secondNode, actualNodeName)
// Set an anti-affinity preference: if there is no other node in the same AZ, the second pod may get
// scheduled on the same node as the first one. In that case the test needs to be skipped.
framework.SetAntiAffinityPreference(&secondNode, actualNodeName)
ginkgo.By(fmt.Sprintf("checking the created volume is readable and retains data on another node %+v", secondNode))
command = "grep 'hello world' /mnt/test/data"
if framework.NodeOSDistroIs("windows") {
@@ -434,9 +436,13 @@ func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClai
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace))
runningPod, err = client.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "get pod")
gomega.Expect(runningPod.Spec.NodeName).NotTo(gomega.Equal(actualNodeName), "second pod should have run on a different node")
StopPod(client, pod)
pod = nil
// The second pod got scheduled on the same node as the first one: skip the test.
if runningPod.Spec.NodeName == actualNodeName {
e2elog.Logf("Warning: The reader pod got scheduled on the same node as the writer pod: skipping test")
framework.Skipf("No node available for the second pod found")
}
}
// TestBindingWaitForFirstConsumer tests the binding with WaitForFirstConsumer mode
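
All three files now apply the same pattern: prefer a different node via soft anti-affinity, then compare the scheduled node names and skip rather than fail when the scheduler had no alternative. A hypothetical helper sketching that pattern (not part of this PR; the skipIfSameNode name is invented here, and the import paths are assumed from the Kubernetes tree of that time):

package volumesketch

import (
	"k8s.io/kubernetes/test/e2e/framework"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)

// skipIfSameNode is a hypothetical helper, not in the PR: when the second pod
// lands on the node the test preferred to avoid, skip instead of failing,
// since there was presumably no other schedulable node.
func skipIfSameNode(firstNodeName, secondNodeName string) {
	if secondNodeName == firstNodeName {
		e2elog.Logf("Warning: The pod got scheduled on the same node despite requesting otherwise: skipping test")
		framework.Skipf("No node available for the second pod found")
	}
}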