diff --git a/test/e2e/apps/network_partition.go b/test/e2e/apps/network_partition.go index c4737eb9513..c5d7800e580 100644 --- a/test/e2e/apps/network_partition.go +++ b/test/e2e/apps/network_partition.go @@ -204,14 +204,14 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { }() go controller.Run(stopCh) - ginkgo.By(fmt.Sprintf("Block traffic from node %s to the master", node.Name)) + ginkgo.By(fmt.Sprintf("Block traffic from node %s to the control plane", node.Name)) host, err := e2enode.GetExternalIP(&node) framework.ExpectNoError(err) - masterAddresses := framework.GetAllMasterAddresses(c) + controlPlaneAddresses := framework.GetControlPlaneAddresses(c) defer func() { - ginkgo.By(fmt.Sprintf("Unblock traffic from node %s to the master", node.Name)) - for _, masterAddress := range masterAddresses { - e2enetwork.UnblockNetwork(host, masterAddress) + ginkgo.By(fmt.Sprintf("Unblock traffic from node %s to the control plane", node.Name)) + for _, instanceAddress := range controlPlaneAddresses { + e2enetwork.UnblockNetwork(host, instanceAddress) } if ginkgo.CurrentGinkgoTestDescription().Failed { @@ -225,8 +225,8 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { } }() - for _, masterAddress := range masterAddresses { - e2enetwork.BlockNetwork(host, masterAddress) + for _, instanceAddress := range controlPlaneAddresses { + e2enetwork.BlockNetwork(host, instanceAddress) } ginkgo.By("Expect to observe node and pod status change from Ready to NotReady after network partition") @@ -592,14 +592,14 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { }() go controller.Run(stopCh) - ginkgo.By(fmt.Sprintf("Block traffic from node %s to the master", node.Name)) + ginkgo.By(fmt.Sprintf("Block traffic from node %s to the control plane", node.Name)) host, err := e2enode.GetExternalIP(&node) framework.ExpectNoError(err) - masterAddresses := framework.GetAllMasterAddresses(c) + controlPlaneAddresses := 
framework.GetControlPlaneAddresses(c) defer func() { - ginkgo.By(fmt.Sprintf("Unblock traffic from node %s to the master", node.Name)) - for _, masterAddress := range masterAddresses { - e2enetwork.UnblockNetwork(host, masterAddress) + ginkgo.By(fmt.Sprintf("Unblock traffic from node %s to the control plane", node.Name)) + for _, instanceAddress := range controlPlaneAddresses { + e2enetwork.UnblockNetwork(host, instanceAddress) } if ginkgo.CurrentGinkgoTestDescription().Failed { @@ -610,8 +610,8 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { expectNodeReadiness(true, newNode) }() - for _, masterAddress := range masterAddresses { - e2enetwork.BlockNetwork(host, masterAddress) + for _, instanceAddress := range controlPlaneAddresses { + e2enetwork.BlockNetwork(host, instanceAddress) } ginkgo.By("Expect to observe node and pod status change from Ready to NotReady after network partition") diff --git a/test/e2e/framework/network/utils.go b/test/e2e/framework/network/utils.go index aec7386b2fa..18a475d03d2 100644 --- a/test/e2e/framework/network/utils.go +++ b/test/e2e/framework/network/utils.go @@ -947,16 +947,16 @@ func TestUnderTemporaryNetworkFailure(c clientset.Interface, ns string, node *v1 if err != nil { framework.Failf("Error getting node external ip : %v", err) } - masterAddresses := framework.GetAllMasterAddresses(c) - ginkgo.By(fmt.Sprintf("block network traffic from node %s to the master", node.Name)) + controlPlaneAddresses := framework.GetControlPlaneAddresses(c) + ginkgo.By(fmt.Sprintf("block network traffic from node %s to the control plane", node.Name)) defer func() { // This code will execute even if setting the iptables rule failed. // It is on purpose because we may have an error even if the new rule // had been inserted. (yes, we could look at the error code and ssh error // separately, but I prefer to stay on the safe side). 
- ginkgo.By(fmt.Sprintf("Unblock network traffic from node %s to the master", node.Name)) - for _, masterAddress := range masterAddresses { - UnblockNetwork(host, masterAddress) + ginkgo.By(fmt.Sprintf("Unblock network traffic from node %s to the control plane", node.Name)) + for _, instanceAddress := range controlPlaneAddresses { + UnblockNetwork(host, instanceAddress) } }() @@ -964,8 +964,8 @@ func TestUnderTemporaryNetworkFailure(c clientset.Interface, ns string, node *v1 if !e2enode.WaitConditionToBe(c, node.Name, v1.NodeReady, true, resizeNodeReadyTimeout) { framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout) } - for _, masterAddress := range masterAddresses { - BlockNetwork(host, masterAddress) + for _, instanceAddress := range controlPlaneAddresses { + BlockNetwork(host, instanceAddress) } framework.Logf("Waiting %v for node %s to be not ready after simulated network failure", resizeNodeNotReadyTimeout, node.Name) diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go index b8c493dd08b..26155cbba8a 100644 --- a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -1256,11 +1256,11 @@ func getMasterAddresses(c clientset.Interface) (string, string, string) { return externalIP, internalIP, hostname } -// GetAllMasterAddresses returns all IP addresses on which the kubelet can reach the master. +// GetControlPlaneAddresses returns all IP addresses on which the kubelet can reach the control plane. // It may return internal and external IPs, even if we expect for // e.g. internal IPs to be used (issue #56787), so that we can be -// sure to block the master fully during tests. -func GetAllMasterAddresses(c clientset.Interface) []string { +// sure to block the control plane fully during tests. 
+func GetControlPlaneAddresses(c clientset.Interface) []string { externalIP, internalIP, _ := getMasterAddresses(c) ips := sets.NewString() diff --git a/test/e2e/network/firewall.go b/test/e2e/network/firewall.go index 039e5bdd196..699e7680d43 100644 --- a/test/e2e/network/firewall.go +++ b/test/e2e/network/firewall.go @@ -216,10 +216,10 @@ var _ = SIGDescribe("Firewall rule", func() { framework.Failf("did not find any node addresses") } - masterAddresses := framework.GetAllMasterAddresses(cs) - for _, masterAddress := range masterAddresses { - assertNotReachableHTTPTimeout(masterAddress, ports.InsecureKubeControllerManagerPort, firewallTestTCPTimeout) - assertNotReachableHTTPTimeout(masterAddress, kubeschedulerconfig.DefaultInsecureSchedulerPort, firewallTestTCPTimeout) + controlPlaneAddresses := framework.GetControlPlaneAddresses(cs) + for _, instanceAddress := range controlPlaneAddresses { + assertNotReachableHTTPTimeout(instanceAddress, ports.InsecureKubeControllerManagerPort, firewallTestTCPTimeout) + assertNotReachableHTTPTimeout(instanceAddress, kubeschedulerconfig.DefaultInsecureSchedulerPort, firewallTestTCPTimeout) } assertNotReachableHTTPTimeout(nodeAddr, ports.KubeletPort, firewallTestTCPTimeout) assertNotReachableHTTPTimeout(nodeAddr, ports.KubeletReadOnlyPort, firewallTestTCPTimeout)