Updated symbol 'framework.GetAllMasterAddresses' to not use the word 'master'

Vishwanath Sangale 2020-09-29 21:43:10 -07:00
parent 4b24dca228
commit b8e733fb94
4 changed files with 28 additions and 28 deletions
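
In short, every call site swaps framework.GetAllMasterAddresses for framework.GetControlPlaneAddresses and renames the masterAddress loop variable to instanceAddress; behavior is unchanged. A minimal sketch of a migrated call site (the function blockControlPlane and its arguments are hypothetical, not from this diff):

	package example

	import (
		clientset "k8s.io/client-go/kubernetes"

		"k8s.io/kubernetes/test/e2e/framework"
		e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
	)

	// blockControlPlane blocks traffic from host to every control plane address.
	func blockControlPlane(c clientset.Interface, host string) {
		// Before this commit: framework.GetAllMasterAddresses(c)
		addrs := framework.GetControlPlaneAddresses(c)
		for _, addr := range addrs {
			e2enetwork.BlockNetwork(host, addr)
		}
	}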

View File

@@ -204,14 +204,14 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
 			}()
 			go controller.Run(stopCh)
-			ginkgo.By(fmt.Sprintf("Block traffic from node %s to the master", node.Name))
+			ginkgo.By(fmt.Sprintf("Block traffic from node %s to the control plane", node.Name))
 			host, err := e2enode.GetExternalIP(&node)
 			framework.ExpectNoError(err)
-			masterAddresses := framework.GetAllMasterAddresses(c)
+			controlPlaneAddresses := framework.GetControlPlaneAddresses(c)
 			defer func() {
-				ginkgo.By(fmt.Sprintf("Unblock traffic from node %s to the master", node.Name))
-				for _, masterAddress := range masterAddresses {
-					e2enetwork.UnblockNetwork(host, masterAddress)
+				ginkgo.By(fmt.Sprintf("Unblock traffic from node %s to the control plane", node.Name))
+				for _, instanceAddress := range controlPlaneAddresses {
+					e2enetwork.UnblockNetwork(host, instanceAddress)
 				}
 				if ginkgo.CurrentGinkgoTestDescription().Failed {
@@ -225,8 +225,8 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
 				}
 			}()
-			for _, masterAddress := range masterAddresses {
-				e2enetwork.BlockNetwork(host, masterAddress)
+			for _, instanceAddress := range controlPlaneAddresses {
+				e2enetwork.BlockNetwork(host, instanceAddress)
 			}
 			ginkgo.By("Expect to observe node and pod status change from Ready to NotReady after network partition")
@@ -592,14 +592,14 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
 			}()
 			go controller.Run(stopCh)
-			ginkgo.By(fmt.Sprintf("Block traffic from node %s to the master", node.Name))
+			ginkgo.By(fmt.Sprintf("Block traffic from node %s to the control plane", node.Name))
 			host, err := e2enode.GetExternalIP(&node)
 			framework.ExpectNoError(err)
-			masterAddresses := framework.GetAllMasterAddresses(c)
+			controlPlaneAddresses := framework.GetControlPlaneAddresses(c)
 			defer func() {
-				ginkgo.By(fmt.Sprintf("Unblock traffic from node %s to the master", node.Name))
-				for _, masterAddress := range masterAddresses {
-					e2enetwork.UnblockNetwork(host, masterAddress)
+				ginkgo.By(fmt.Sprintf("Unblock traffic from node %s to the control plane", node.Name))
+				for _, instanceAddress := range controlPlaneAddresses {
+					e2enetwork.UnblockNetwork(host, instanceAddress)
 				}
 				if ginkgo.CurrentGinkgoTestDescription().Failed {
@@ -610,8 +610,8 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
 				expectNodeReadiness(true, newNode)
 			}()
-			for _, masterAddress := range masterAddresses {
-				e2enetwork.BlockNetwork(host, masterAddress)
+			for _, instanceAddress := range controlPlaneAddresses {
+				e2enetwork.BlockNetwork(host, instanceAddress)
 			}
			ginkgo.By("Expect to observe node and pod status change from Ready to NotReady after network partition")

View File

@@ -947,16 +947,16 @@ func TestUnderTemporaryNetworkFailure(c clientset.Interface, ns string, node *v1
 	if err != nil {
 		framework.Failf("Error getting node external ip : %v", err)
 	}
-	masterAddresses := framework.GetAllMasterAddresses(c)
-	ginkgo.By(fmt.Sprintf("block network traffic from node %s to the master", node.Name))
+	controlPlaneAddresses := framework.GetControlPlaneAddresses(c)
+	ginkgo.By(fmt.Sprintf("block network traffic from node %s to the control plane", node.Name))
 	defer func() {
 		// This code will execute even if setting the iptables rule failed.
 		// It is on purpose because we may have an error even if the new rule
 		// had been inserted. (yes, we could look at the error code and ssh error
 		// separately, but I prefer to stay on the safe side).
-		ginkgo.By(fmt.Sprintf("Unblock network traffic from node %s to the master", node.Name))
-		for _, masterAddress := range masterAddresses {
-			UnblockNetwork(host, masterAddress)
+		ginkgo.By(fmt.Sprintf("Unblock network traffic from node %s to the control plane", node.Name))
+		for _, instanceAddress := range controlPlaneAddresses {
+			UnblockNetwork(host, instanceAddress)
 		}
 	}()
@@ -964,8 +964,8 @@ func TestUnderTemporaryNetworkFailure(c clientset.Interface, ns string, node *v1
 	if !e2enode.WaitConditionToBe(c, node.Name, v1.NodeReady, true, resizeNodeReadyTimeout) {
 		framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
 	}
-	for _, masterAddress := range masterAddresses {
-		BlockNetwork(host, masterAddress)
+	for _, instanceAddress := range controlPlaneAddresses {
+		BlockNetwork(host, instanceAddress)
 	}
 	framework.Logf("Waiting %v for node %s to be not ready after simulated network failure", resizeNodeNotReadyTimeout, node.Name)

View File

@@ -1247,11 +1247,11 @@ func getMasterAddresses(c clientset.Interface) (string, string, string) {
 	return externalIP, internalIP, hostname
 }
-// GetAllMasterAddresses returns all IP addresses on which the kubelet can reach the master.
+// GetControlPlaneAddresses returns all IP addresses on which the kubelet can reach the control plane.
 // It may return internal and external IPs, even if we expect for
 // e.g. internal IPs to be used (issue #56787), so that we can be
-// sure to block the master fully during tests.
-func GetAllMasterAddresses(c clientset.Interface) []string {
+// sure to block the control plane fully during tests.
+func GetControlPlaneAddresses(c clientset.Interface) []string {
 	externalIP, internalIP, _ := getMasterAddresses(c)
 	ips := sets.NewString()
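
The hunk is truncated after ips := sets.NewString(); the rest of the body is not shown in this commit. A plausible continuation (an assumption, sketched from the doc comment above) would deduplicate the non-empty IPs via the string set and return them:

		// Assumed continuation; not part of the diff shown above.
		if externalIP != "" {
			ips.Insert(externalIP)
		}
		if internalIP != "" {
			ips.Insert(internalIP)
		}
		return ips.List()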

View File

@@ -216,10 +216,10 @@ var _ = SIGDescribe("Firewall rule", func() {
 			framework.Failf("did not find any node addresses")
 		}
-		masterAddresses := framework.GetAllMasterAddresses(cs)
-		for _, masterAddress := range masterAddresses {
-			assertNotReachableHTTPTimeout(masterAddress, ports.InsecureKubeControllerManagerPort, firewallTestTCPTimeout)
-			assertNotReachableHTTPTimeout(masterAddress, kubeschedulerconfig.DefaultInsecureSchedulerPort, firewallTestTCPTimeout)
+		controlPlaneAddresses := framework.GetControlPlaneAddresses(cs)
+		for _, instanceAddress := range controlPlaneAddresses {
+			assertNotReachableHTTPTimeout(instanceAddress, ports.InsecureKubeControllerManagerPort, firewallTestTCPTimeout)
+			assertNotReachableHTTPTimeout(instanceAddress, kubeschedulerconfig.DefaultInsecureSchedulerPort, firewallTestTCPTimeout)
 		}
 		assertNotReachableHTTPTimeout(nodeAddr, ports.KubeletPort, firewallTestTCPTimeout)
 		assertNotReachableHTTPTimeout(nodeAddr, ports.KubeletReadOnlyPort, firewallTestTCPTimeout)
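
assertNotReachableHTTPTimeout is a helper local to this firewall test; the diff does not show its body. A hedged sketch of its contract using only the standard library (name and details assumed, not the upstream helper):

	import (
		"fmt"
		"net/http"
		"time"
	)

	// assertNotReachableSketch returns an error if an HTTP request to ip:port
	// succeeds before the timeout; the firewall is expected to block it.
	func assertNotReachableSketch(ip string, port int, timeout time.Duration) error {
		client := &http.Client{Timeout: timeout}
		resp, err := client.Get(fmt.Sprintf("http://%s:%d/", ip, port))
		if err == nil {
			resp.Body.Close()
			return fmt.Errorf("was unexpectedly able to reach %s:%d", ip, port)
		}
		return nil // unreachable, as the firewall rules require
	}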