Merge pull request #80578 from tsmetana/revert-pr78697

Revert "e2e: Skip multi-node PV test when pods scheduled on the same node
Kubernetes Prow Robot authored on 2019-07-26 16:52:10 -07:00, committed by GitHub
commit a300104f0a
3 changed files with 5 additions and 60 deletions
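
The substance of the revert is a switch from a soft scheduling preference back to a hard scheduling requirement. As a rough illustration (not the framework code itself), the two NodeAffinity shapes involved look like this: the preferred term mirrors the SetNodeAffinityPreference helper removed in the first hunk below, while the required term is an assumption about what SetNodeAffinityRequirement builds, since its body is not part of this diff.

// affinitysketch illustrates the difference between the soft anti-affinity the
// reverted change used and the hard anti-affinity this commit restores.
// It assumes only the k8s.io/api/core/v1 types; the helper names are not the framework's own.
package affinitysketch

import v1 "k8s.io/api/core/v1"

// preferredAntiAffinity mirrors the removed SetNodeAffinityPreference helper:
// the scheduler tries to avoid nodeName, but may still place the pod there
// if no other node fits (e.g. a multi-AZ cluster with one node per AZ).
func preferredAntiAffinity(nodeName string) *v1.NodeAffinity {
	return &v1.NodeAffinity{
		PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{{
			Weight: 100,
			Preference: v1.NodeSelectorTerm{
				MatchFields: []v1.NodeSelectorRequirement{
					{Key: "metadata.name", Operator: v1.NodeSelectorOpNotIn, Values: []string{nodeName}},
				},
			},
		}},
	}
}

// requiredAntiAffinity is what SetAntiAffinity presumably produces via
// SetNodeAffinityRequirement: the pod must not land on nodeName, and if no
// other node is available it stays Pending, so the test eventually fails on a
// timeout instead of being skipped.
func requiredAntiAffinity(nodeName string) *v1.NodeAffinity {
	return &v1.NodeAffinity{
		RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
			NodeSelectorTerms: []v1.NodeSelectorTerm{{
				MatchFields: []v1.NodeSelectorRequirement{
					{Key: "metadata.name", Operator: v1.NodeSelectorOpNotIn, Values: []string{nodeName}},
				},
			}},
		},
	}
}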

@@ -993,36 +993,6 @@ func SetAntiAffinity(nodeSelection *NodeSelection, nodeName string) {
 	SetNodeAffinityRequirement(nodeSelection, v1.NodeSelectorOpNotIn, nodeName)
 }
 
-// SetNodeAffinityPreference sets affinity preference with specified operator to nodeName to nodeSelection
-func SetNodeAffinityPreference(nodeSelection *NodeSelection, operator v1.NodeSelectorOperator, nodeName string) {
-	// Add node-anti-affinity.
-	if nodeSelection.Affinity == nil {
-		nodeSelection.Affinity = &v1.Affinity{}
-	}
-	if nodeSelection.Affinity.NodeAffinity == nil {
-		nodeSelection.Affinity.NodeAffinity = &v1.NodeAffinity{}
-	}
-	nodeSelection.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution = append(nodeSelection.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution,
-		v1.PreferredSchedulingTerm{
-			Weight: int32(100),
-			Preference: v1.NodeSelectorTerm{
-				MatchFields: []v1.NodeSelectorRequirement{
-					{Key: "metadata.name", Operator: operator, Values: []string{nodeName}},
-				},
-			},
-		})
-}
-
-// SetAffinityPreference sets affinity preference to nodeName to nodeSelection
-func SetAffinityPreference(nodeSelection *NodeSelection, nodeName string) {
-	SetNodeAffinityPreference(nodeSelection, v1.NodeSelectorOpIn, nodeName)
-}
-
-// SetAntiAffinityPreference sets anti-affinity preference to nodeName to nodeSelection
-func SetAntiAffinityPreference(nodeSelection *NodeSelection, nodeName string) {
-	SetNodeAffinityPreference(nodeSelection, v1.NodeSelectorOpNotIn, nodeName)
-}
-
 // CreateClientPod defines and creates a pod with a mounted PV. Pod runs infinite loop until killed.
 func CreateClientPod(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) (*v1.Pod, error) {
 	return CreatePod(c, ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, "")
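
These helpers only mutate a NodeSelection value; how that value reaches the scheduler is outside this hunk. A hedged sketch of the hand-off, assuming the framework's NodeSelection carries Name, Selector and Affinity fields (only Affinity is visible in this diff), might look like:

// applyNodeSelection is an illustrative, hypothetical helper showing how a
// NodeSelection could be copied onto a pod spec before the pod is created.
// Name and Selector are assumed fields; Affinity is the field the
// Set*Affinity helpers above populate.
func applyNodeSelection(pod *v1.Pod, node NodeSelection) {
	pod.Spec.NodeName = node.Name         // assumed field: pin to an explicit node if set
	pod.Spec.NodeSelector = node.Selector // assumed field: label-based selection
	pod.Spec.Affinity = node.Affinity     // the affinity built by SetAffinity / SetAntiAffinity
}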

@@ -375,9 +375,7 @@ func TestAccessMultipleVolumesAcrossPodRecreation(f *framework.Framework, cs cli
 	if requiresSameNode {
 		framework.SetAffinity(&node, nodeName)
 	} else {
-		// Do not put hard requirement on the anti-affinity. In some occasions there might exist only
-		// one available node for the pod scheduling (e.g., multi-AZ cluster with one node per AZ)
-		framework.SetAntiAffinityPreference(&node, nodeName)
+		framework.SetAntiAffinity(&node, nodeName)
 	}
 
 	// Test access to multiple volumes again on the node updated above
@@ -385,13 +383,7 @@ func TestAccessMultipleVolumesAcrossPodRecreation(f *framework.Framework, cs cli
 	readSeedBase = writeSeedBase
 	// Update writeSeed with new value
 	writeSeedBase = time.Now().UTC().UnixNano()
-	secondNodeName := testAccessMultipleVolumes(f, cs, ns, node, pvcs, readSeedBase, writeSeedBase)
-	if !requiresSameNode && (secondNodeName == nodeName) {
-		// The pod was created on the same node: presumably there was no other node available
-		// for the second pod scheduling -- this does not mean the test should fail. Skip it instead/
-		e2elog.Logf("Warning: The pod got scheduled on the same node despite requesting otherwise: skipping test")
-		framework.Skipf("No node available for the second pod found")
-	}
+	_ = testAccessMultipleVolumes(f, cs, ns, node, pvcs, readSeedBase, writeSeedBase)
 }
 
 // TestConcurrentAccessToSingleVolume tests access to a single volume from multiple pods,
@@ -403,7 +395,6 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
 	var pods []*v1.Pod
-	firstNodeName := ""
 
 	// Create each pod with pvc
 	for i := 0; i < numPods; i++ {
 		index := i + 1
@@ -420,22 +411,12 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
 		pods = append(pods, pod)
 		framework.ExpectNoError(err, fmt.Sprintf("get pod%d", index))
 		actualNodeName := pod.Spec.NodeName
 
-		// Remember where the first pod was scheduled
-		if i == 0 {
-			firstNodeName = actualNodeName
-		}
-		// If the second pod got scheduled on the same node as the first one it means
-		// there was no available node for the second pod and the test should be skipped.
-		if !requiresSameNode && i == 1 && (actualNodeName == firstNodeName) {
-			e2elog.Logf("Warning: The pod got scheduled on the same node as the previous one: skipping test")
-			framework.Skipf("No node available for the second pod found")
-		}
-
 		// Set affinity depending on requiresSameNode
 		if requiresSameNode {
 			framework.SetAffinity(&node, actualNodeName)
 		} else {
-			framework.SetAntiAffinityPreference(&node, actualNodeName)
+			framework.SetAntiAffinity(&node, actualNodeName)
 		}
 	}
}

@@ -393,9 +393,7 @@ func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClai
 	// Add node-anti-affinity.
 	secondNode := node
-	// Set anti-affinity preference: in case there are no nodes in the same AZ it may happen the second pod gets
-	// scheduled on the same node as the first one. In such a case the test needs to be skipped.
-	framework.SetAntiAffinityPreference(&secondNode, actualNodeName)
+	framework.SetAntiAffinity(&secondNode, actualNodeName)
 	ginkgo.By(fmt.Sprintf("checking the created volume is readable and retains data on another node %+v", secondNode))
 	command = "grep 'hello world' /mnt/test/data"
 	if framework.NodeOSDistroIs("windows") {
@@ -405,13 +403,9 @@ func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClai
 	framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace))
 	runningPod, err = client.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
 	framework.ExpectNoError(err, "get pod")
+	gomega.Expect(runningPod.Spec.NodeName).NotTo(gomega.Equal(actualNodeName), "second pod should have run on a different node")
 	StopPod(client, pod)
 	pod = nil
-	// The second pod got scheduled on the same node as the first one: skip the test.
-	if runningPod.Spec.NodeName == actualNodeName {
-		e2elog.Logf("Warning: The reader pod got scheduled on the same node as the writer pod: skipping test")
-		framework.Skipf("No node available for the second pod found")
-	}
 }
 
 // TestBindingWaitForFirstConsumer tests the binding with WaitForFirstConsumer mode
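
After the revert, PVMultiNodeCheck therefore pins the reader pod away from the writer's node with a hard requirement, waits for it, and asserts it really ran elsewhere. The fragment below is a condensed sketch built only from the calls visible in the hunk above; startReaderPod is a hypothetical stand-in for the test's own pod-creation step and is not a framework function.

	// Condensed sketch of the restored PVMultiNodeCheck flow (not the verbatim test code).
	secondNode := node
	framework.SetAntiAffinity(&secondNode, actualNodeName) // hard NotIn requirement on the writer's node
	pod := startReaderPod(client, claim, secondNode)        // hypothetical helper: starts the grep pod with secondNode's selection
	framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace))
	runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
	framework.ExpectNoError(err, "get pod")
	gomega.Expect(runningPod.Spec.NodeName).NotTo(gomega.Equal(actualNodeName),
		"second pod should have run on a different node")
	StopPod(client, pod)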