use a random node to avoid node0 being overloaded

This commit is contained in:
pacoxu 2021-02-24 14:42:48 +08:00
parent 16b909ce14
commit 9488decd5e
2 changed files with 40 additions and 28 deletions

View File

@ -19,6 +19,7 @@ package network
import ( import (
"context" "context"
"fmt" "fmt"
"math/rand"
"net" "net"
"strconv" "strconv"
@ -71,24 +72,24 @@ var _ = SIGDescribe("HostPort", func() {
framework.Failf("No nodes available") framework.Failf("No nodes available")
} }
nodeName := nodes.Items[0].Name randomNode := &nodes.Items[rand.Intn(len(nodes.Items))]
ips := e2enode.GetAddressesByTypeAndFamily(&nodes.Items[0], v1.NodeInternalIP, family) ips := e2enode.GetAddressesByTypeAndFamily(randomNode, v1.NodeInternalIP, family)
if len(ips) == 0 { if len(ips) == 0 {
framework.Failf("Failed to get NodeIP") framework.Failf("Failed to get NodeIP")
} }
hostIP := ips[0] hostIP := ips[0]
port := int32(54321) port := int32(54323)
// Create pods with the same HostPort // Create pods with the same HostPort
ginkgo.By(fmt.Sprintf("Trying to create a pod(pod1) with hostport %v and hostIP %s and expect scheduled", port, localhost)) ginkgo.By(fmt.Sprintf("Trying to create a pod(pod1) with hostport %v and hostIP %s and expect scheduled", port, localhost))
createHostPortPodOnNode(f, "pod1", ns, localhost, port, v1.ProtocolTCP, nodeName) createHostPortPodOnNode(f, "pod1", ns, localhost, port, v1.ProtocolTCP, randomNode.Name)
ginkgo.By(fmt.Sprintf("Trying to create another pod(pod2) with hostport %v but hostIP %s on the node which pod1 resides and expect scheduled", port, hostIP)) ginkgo.By(fmt.Sprintf("Trying to create another pod(pod2) with hostport %v but hostIP %s on the node which pod1 resides and expect scheduled", port, hostIP))
createHostPortPodOnNode(f, "pod2", ns, hostIP, port, v1.ProtocolTCP, nodeName) createHostPortPodOnNode(f, "pod2", ns, hostIP, port, v1.ProtocolTCP, randomNode.Name)
ginkgo.By(fmt.Sprintf("Trying to create a third pod(pod3) with hostport %v, hostIP %s but use UDP protocol on the node which pod2 resides", port, hostIP)) ginkgo.By(fmt.Sprintf("Trying to create a third pod(pod3) with hostport %v, hostIP %s but use UDP protocol on the node which pod2 resides", port, hostIP))
createHostPortPodOnNode(f, "pod3", ns, hostIP, port, v1.ProtocolUDP, nodeName) createHostPortPodOnNode(f, "pod3", ns, hostIP, port, v1.ProtocolUDP, randomNode.Name)
// check that the port is being actually exposed to each container // check that the port is being actually exposed to each container
// create a pod on the host network in the same node // create a pod on the host network in the same node
@ -99,7 +100,7 @@ var _ = SIGDescribe("HostPort", func() {
}, },
Spec: v1.PodSpec{ Spec: v1.PodSpec{
HostNetwork: true, HostNetwork: true,
NodeName: nodeName, NodeName: randomNode.Name,
Containers: []v1.Container{ Containers: []v1.Container{
{ {
Name: "e2e-host-exec", Name: "e2e-host-exec",

View File

@ -19,6 +19,7 @@ package storage
import ( import (
"context" "context"
"fmt" "fmt"
"math/rand"
"path/filepath" "path/filepath"
"strconv" "strconv"
"strings" "strings"
@ -53,7 +54,7 @@ import (
type localTestConfig struct { type localTestConfig struct {
ns string ns string
nodes []v1.Node nodes []v1.Node
node0 *v1.Node randomNode *v1.Node
client clientset.Interface client clientset.Interface
timeouts *framework.TimeoutContext timeouts *framework.TimeoutContext
scName string scName string
@ -159,8 +160,8 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
framework.ExpectNoError(err) framework.ExpectNoError(err)
scName = fmt.Sprintf("%v-%v", testSCPrefix, f.Namespace.Name) scName = fmt.Sprintf("%v-%v", testSCPrefix, f.Namespace.Name)
// Choose the first node // Choose a random node
node0 := &nodes.Items[0] randomNode := &nodes.Items[rand.Intn(len(nodes.Items))]
hostExec := utils.NewHostExec(f) hostExec := utils.NewHostExec(f)
ltrMgr := utils.NewLocalResourceManager("local-volume-test", hostExec, hostBase) ltrMgr := utils.NewLocalResourceManager("local-volume-test", hostExec, hostBase)
@ -169,7 +170,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
client: f.ClientSet, client: f.ClientSet,
timeouts: f.Timeouts, timeouts: f.Timeouts,
nodes: nodes.Items, nodes: nodes.Items,
node0: node0, randomNode: randomNode,
scName: scName, scName: scName,
discoveryDir: filepath.Join(hostBase, f.Namespace.Name), discoveryDir: filepath.Join(hostBase, f.Namespace.Name),
hostExec: hostExec, hostExec: hostExec,
@ -193,10 +194,10 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
ginkgo.BeforeEach(func() { ginkgo.BeforeEach(func() {
if testVolType == GCELocalSSDVolumeType { if testVolType == GCELocalSSDVolumeType {
SkipUnlessLocalSSDExists(config, "scsi", "fs", config.node0) SkipUnlessLocalSSDExists(config, "scsi", "fs", config.randomNode)
} }
setupStorageClass(config, &testMode) setupStorageClass(config, &testMode)
testVols := setupLocalVolumesPVCsPVs(config, testVolType, config.node0, 1, testMode) testVols := setupLocalVolumesPVCsPVs(config, testVolType, config.randomNode, 1, testMode)
testVol = testVols[0] testVol = testVols[0]
}) })
@ -215,7 +216,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
ginkgo.By("Creating pod1") ginkgo.By("Creating pod1")
pod1, pod1Err = createLocalPod(config, testVol, nil) pod1, pod1Err = createLocalPod(config, testVol, nil)
framework.ExpectNoError(pod1Err) framework.ExpectNoError(pod1Err)
verifyLocalPod(config, testVol, pod1, config.node0.Name) verifyLocalPod(config, testVol, pod1, config.randomNode.Name)
writeCmd := createWriteCmd(volumeDir, testFile, testFileContent, testVol.localVolumeType) writeCmd := createWriteCmd(volumeDir, testFile, testFileContent, testVol.localVolumeType)
@ -306,7 +307,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
ginkgo.It("should fail due to non-existent path", func() { ginkgo.It("should fail due to non-existent path", func() {
testVol := &localTestVolume{ testVol := &localTestVolume{
ltr: &utils.LocalTestResource{ ltr: &utils.LocalTestResource{
Node: config.node0, Node: config.randomNode,
Path: "/non-existent/location/nowhere", Path: "/non-existent/location/nowhere",
}, },
localVolumeType: DirectoryLocalVolumeType, localVolumeType: DirectoryLocalVolumeType,
@ -325,10 +326,14 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
e2eskipper.Skipf("Runs only when number of nodes >= 2") e2eskipper.Skipf("Runs only when number of nodes >= 2")
} }
testVols := setupLocalVolumesPVCsPVs(config, DirectoryLocalVolumeType, config.node0, 1, immediateMode) testVols := setupLocalVolumesPVCsPVs(config, DirectoryLocalVolumeType, config.randomNode, 1, immediateMode)
testVol := testVols[0] testVol := testVols[0]
pod := makeLocalPodWithNodeName(config, testVol, config.nodes[1].Name) conflictNodeName := config.nodes[0].Name
if conflictNodeName == config.randomNode.Name {
conflictNodeName = config.nodes[1].Name
}
pod := makeLocalPodWithNodeName(config, testVol, conflictNodeName)
pod, err := config.client.CoreV1().Pods(config.ns).Create(context.TODO(), pod, metav1.CreateOptions{}) pod, err := config.client.CoreV1().Pods(config.ns).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
@ -341,8 +346,9 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
ginkgo.Context("Pod with node different from PV's NodeAffinity", func() { ginkgo.Context("Pod with node different from PV's NodeAffinity", func() {
var ( var (
testVol *localTestVolume testVol *localTestVolume
volumeType localVolumeType volumeType localVolumeType
conflictNodeName string
) )
ginkgo.BeforeEach(func() { ginkgo.BeforeEach(func() {
@ -352,7 +358,12 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
volumeType = DirectoryLocalVolumeType volumeType = DirectoryLocalVolumeType
setupStorageClass(config, &immediateMode) setupStorageClass(config, &immediateMode)
testVols := setupLocalVolumesPVCsPVs(config, volumeType, config.node0, 1, immediateMode) testVols := setupLocalVolumesPVCsPVs(config, volumeType, config.randomNode, 1, immediateMode)
conflictNodeName = config.nodes[0].Name
if conflictNodeName == config.randomNode.Name {
conflictNodeName = config.nodes[1].Name
}
testVol = testVols[0] testVol = testVols[0]
}) })
@ -362,11 +373,11 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
}) })
ginkgo.It("should fail scheduling due to different NodeAffinity", func() { ginkgo.It("should fail scheduling due to different NodeAffinity", func() {
testPodWithNodeConflict(config, volumeType, config.nodes[1].Name, makeLocalPodWithNodeAffinity, immediateMode) testPodWithNodeConflict(config, volumeType, conflictNodeName, makeLocalPodWithNodeAffinity, immediateMode)
}) })
ginkgo.It("should fail scheduling due to different NodeSelector", func() { ginkgo.It("should fail scheduling due to different NodeSelector", func() {
testPodWithNodeConflict(config, volumeType, config.nodes[1].Name, makeLocalPodWithNodeSelector, immediateMode) testPodWithNodeConflict(config, volumeType, conflictNodeName, makeLocalPodWithNodeSelector, immediateMode)
}) })
}) })
@ -623,7 +634,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
ginkgo.BeforeEach(func() { ginkgo.BeforeEach(func() {
localVolume := &localTestVolume{ localVolume := &localTestVolume{
ltr: &utils.LocalTestResource{ ltr: &utils.LocalTestResource{
Node: config.node0, Node: config.randomNode,
Path: "/tmp", Path: "/tmp",
}, },
localVolumeType: DirectoryLocalVolumeType, localVolumeType: DirectoryLocalVolumeType,
@ -711,7 +722,7 @@ type makeLocalPodWith func(config *localTestConfig, volume *localTestVolume, nod
func testPodWithNodeConflict(config *localTestConfig, testVolType localVolumeType, nodeName string, makeLocalPodFunc makeLocalPodWith, bindingMode storagev1.VolumeBindingMode) { func testPodWithNodeConflict(config *localTestConfig, testVolType localVolumeType, nodeName string, makeLocalPodFunc makeLocalPodWith, bindingMode storagev1.VolumeBindingMode) {
ginkgo.By(fmt.Sprintf("local-volume-type: %s", testVolType)) ginkgo.By(fmt.Sprintf("local-volume-type: %s", testVolType))
testVols := setupLocalVolumesPVCsPVs(config, testVolType, config.node0, 1, bindingMode) testVols := setupLocalVolumesPVCsPVs(config, testVolType, config.randomNode, 1, bindingMode)
testVol := testVols[0] testVol := testVols[0]
pod := makeLocalPodFunc(config, testVol, nodeName) pod := makeLocalPodFunc(config, testVol, nodeName)
@ -729,7 +740,7 @@ func twoPodsReadWriteTest(f *framework.Framework, config *localTestConfig, testV
ginkgo.By("Creating pod1 to write to the PV") ginkgo.By("Creating pod1 to write to the PV")
pod1, pod1Err := createLocalPod(config, testVol, nil) pod1, pod1Err := createLocalPod(config, testVol, nil)
framework.ExpectNoError(pod1Err) framework.ExpectNoError(pod1Err)
verifyLocalPod(config, testVol, pod1, config.node0.Name) verifyLocalPod(config, testVol, pod1, config.randomNode.Name)
writeCmd := createWriteCmd(volumeDir, testFile, testFileContent, testVol.localVolumeType) writeCmd := createWriteCmd(volumeDir, testFile, testFileContent, testVol.localVolumeType)
@ -742,7 +753,7 @@ func twoPodsReadWriteTest(f *framework.Framework, config *localTestConfig, testV
ginkgo.By("Creating pod2 to read from the PV") ginkgo.By("Creating pod2 to read from the PV")
pod2, pod2Err := createLocalPod(config, testVol, nil) pod2, pod2Err := createLocalPod(config, testVol, nil)
framework.ExpectNoError(pod2Err) framework.ExpectNoError(pod2Err)
verifyLocalPod(config, testVol, pod2, config.node0.Name) verifyLocalPod(config, testVol, pod2, config.randomNode.Name)
// testFileContent was written after creating pod1 // testFileContent was written after creating pod1
testReadFileContent(f, volumeDir, testFile, testFileContent, pod2, testVol.localVolumeType) testReadFileContent(f, volumeDir, testFile, testFileContent, pod2, testVol.localVolumeType)
@ -766,7 +777,7 @@ func twoPodsReadWriteSerialTest(f *framework.Framework, config *localTestConfig,
ginkgo.By("Creating pod1") ginkgo.By("Creating pod1")
pod1, pod1Err := createLocalPod(config, testVol, nil) pod1, pod1Err := createLocalPod(config, testVol, nil)
framework.ExpectNoError(pod1Err) framework.ExpectNoError(pod1Err)
verifyLocalPod(config, testVol, pod1, config.node0.Name) verifyLocalPod(config, testVol, pod1, config.randomNode.Name)
writeCmd := createWriteCmd(volumeDir, testFile, testFileContent, testVol.localVolumeType) writeCmd := createWriteCmd(volumeDir, testFile, testFileContent, testVol.localVolumeType)
@ -782,7 +793,7 @@ func twoPodsReadWriteSerialTest(f *framework.Framework, config *localTestConfig,
ginkgo.By("Creating pod2") ginkgo.By("Creating pod2")
pod2, pod2Err := createLocalPod(config, testVol, nil) pod2, pod2Err := createLocalPod(config, testVol, nil)
framework.ExpectNoError(pod2Err) framework.ExpectNoError(pod2Err)
verifyLocalPod(config, testVol, pod2, config.node0.Name) verifyLocalPod(config, testVol, pod2, config.randomNode.Name)
ginkgo.By("Reading in pod2") ginkgo.By("Reading in pod2")
testReadFileContent(f, volumeDir, testFile, testFileContent, pod2, testVol.localVolumeType) testReadFileContent(f, volumeDir, testFile, testFileContent, pod2, testVol.localVolumeType)