Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-23 11:50:44 +00:00)
Updated symbol 'framework.GetAllMasterAddresses' to not use word 'master'
parent: 4b24dca228
commit: b8e733fb94
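
For callers, the change is a drop-in rename: the helper takes the same client and returns the same addresses, only the symbol and the surrounding "master" wording move to "control plane" terminology. A minimal before/after sketch, using the variable names from the diff below:

	// Before the rename:
	masterAddresses := framework.GetAllMasterAddresses(c)

	// After the rename (same client, same result, new name):
	controlPlaneAddresses := framework.GetControlPlaneAddresses(c)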
@@ -204,14 +204,14 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
 		}()
 		go controller.Run(stopCh)
 
-		ginkgo.By(fmt.Sprintf("Block traffic from node %s to the master", node.Name))
+		ginkgo.By(fmt.Sprintf("Block traffic from node %s to the control plane", node.Name))
 		host, err := e2enode.GetExternalIP(&node)
 		framework.ExpectNoError(err)
-		masterAddresses := framework.GetAllMasterAddresses(c)
+		controlPlaneAddresses := framework.GetControlPlaneAddresses(c)
 		defer func() {
-			ginkgo.By(fmt.Sprintf("Unblock traffic from node %s to the master", node.Name))
-			for _, masterAddress := range masterAddresses {
-				e2enetwork.UnblockNetwork(host, masterAddress)
+			ginkgo.By(fmt.Sprintf("Unblock traffic from node %s to the control plane", node.Name))
+			for _, instanceAddress := range controlPlaneAddresses {
+				e2enetwork.UnblockNetwork(host, instanceAddress)
 			}
 
 			if ginkgo.CurrentGinkgoTestDescription().Failed {
@@ -225,8 +225,8 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
 			}
 		}()
 
-		for _, masterAddress := range masterAddresses {
-			e2enetwork.BlockNetwork(host, masterAddress)
+		for _, instanceAddress := range controlPlaneAddresses {
+			e2enetwork.BlockNetwork(host, instanceAddress)
 		}
 
 		ginkgo.By("Expect to observe node and pod status change from Ready to NotReady after network partition")
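
Both network-partition tests wrap these hunks in the same pattern: resolve the control-plane addresses once, block traffic from the node under test to each of them, and register a deferred unblock so the partition is always healed, even when the test fails. Condensed from the lines visible in the diff (assuming the test file's existing imports):

	controlPlaneAddresses := framework.GetControlPlaneAddresses(c)
	defer func() {
		// Heal the partition no matter how the test exits.
		for _, instanceAddress := range controlPlaneAddresses {
			e2enetwork.UnblockNetwork(host, instanceAddress)
		}
	}()
	for _, instanceAddress := range controlPlaneAddresses {
		e2enetwork.BlockNetwork(host, instanceAddress)
	}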
@@ -592,14 +592,14 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
 		}()
 		go controller.Run(stopCh)
 
-		ginkgo.By(fmt.Sprintf("Block traffic from node %s to the master", node.Name))
+		ginkgo.By(fmt.Sprintf("Block traffic from node %s to the control plane", node.Name))
 		host, err := e2enode.GetExternalIP(&node)
 		framework.ExpectNoError(err)
-		masterAddresses := framework.GetAllMasterAddresses(c)
+		controlPlaneAddresses := framework.GetControlPlaneAddresses(c)
 		defer func() {
-			ginkgo.By(fmt.Sprintf("Unblock traffic from node %s to the master", node.Name))
-			for _, masterAddress := range masterAddresses {
-				e2enetwork.UnblockNetwork(host, masterAddress)
+			ginkgo.By(fmt.Sprintf("Unblock traffic from node %s to the control plane", node.Name))
+			for _, instanceAddress := range controlPlaneAddresses {
+				e2enetwork.UnblockNetwork(host, instanceAddress)
 			}
 
 			if ginkgo.CurrentGinkgoTestDescription().Failed {
@@ -610,8 +610,8 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
 			expectNodeReadiness(true, newNode)
 		}()
 
-		for _, masterAddress := range masterAddresses {
-			e2enetwork.BlockNetwork(host, masterAddress)
+		for _, instanceAddress := range controlPlaneAddresses {
+			e2enetwork.BlockNetwork(host, instanceAddress)
 		}
 
 		ginkgo.By("Expect to observe node and pod status change from Ready to NotReady after network partition")
|
@@ -947,16 +947,16 @@ func TestUnderTemporaryNetworkFailure(c clientset.Interface, ns string, node *v1
 	if err != nil {
 		framework.Failf("Error getting node external ip : %v", err)
 	}
-	masterAddresses := framework.GetAllMasterAddresses(c)
-	ginkgo.By(fmt.Sprintf("block network traffic from node %s to the master", node.Name))
+	controlPlaneAddresses := framework.GetControlPlaneAddresses(c)
+	ginkgo.By(fmt.Sprintf("block network traffic from node %s to the control plane", node.Name))
 	defer func() {
 		// This code will execute even if setting the iptables rule failed.
 		// It is on purpose because we may have an error even if the new rule
 		// had been inserted. (yes, we could look at the error code and ssh error
 		// separately, but I prefer to stay on the safe side).
-		ginkgo.By(fmt.Sprintf("Unblock network traffic from node %s to the master", node.Name))
-		for _, masterAddress := range masterAddresses {
-			UnblockNetwork(host, masterAddress)
+		ginkgo.By(fmt.Sprintf("Unblock network traffic from node %s to the control plane", node.Name))
+		for _, instanceAddress := range controlPlaneAddresses {
+			UnblockNetwork(host, instanceAddress)
 		}
 	}()
 
@@ -964,8 +964,8 @@ func TestUnderTemporaryNetworkFailure(c clientset.Interface, ns string, node *v1
 	if !e2enode.WaitConditionToBe(c, node.Name, v1.NodeReady, true, resizeNodeReadyTimeout) {
 		framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
 	}
-	for _, masterAddress := range masterAddresses {
-		BlockNetwork(host, masterAddress)
+	for _, instanceAddress := range controlPlaneAddresses {
+		BlockNetwork(host, instanceAddress)
 	}
 
 	framework.Logf("Waiting %v for node %s to be not ready after simulated network failure", resizeNodeNotReadyTimeout, node.Name)
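
TestUnderTemporaryNetworkFailure brackets the partition with readiness checks: it requires the node to be Ready before cutting it off, then waits for the NotReady transition after blocking. The matching wait on the NotReady side sits below the fold of this hunk; a sketch of what it plausibly looks like, assuming the same e2enode helper is used in both directions:

	if !e2enode.WaitConditionToBe(c, node.Name, v1.NodeReady, false, resizeNodeNotReadyTimeout) {
		framework.Failf("Node %s did not become not-ready within %v", node.Name, resizeNodeNotReadyTimeout)
	}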
|
@@ -1247,11 +1247,11 @@ func getMasterAddresses(c clientset.Interface) (string, string, string) {
 	return externalIP, internalIP, hostname
 }
 
-// GetAllMasterAddresses returns all IP addresses on which the kubelet can reach the master.
+// GetControlPlaneAddresses returns all IP addresses on which the kubelet can reach the control plane.
 // It may return internal and external IPs, even if we expect for
 // e.g. internal IPs to be used (issue #56787), so that we can be
-// sure to block the master fully during tests.
-func GetAllMasterAddresses(c clientset.Interface) []string {
+// sure to block the control plane fully during tests.
+func GetControlPlaneAddresses(c clientset.Interface) []string {
 	externalIP, internalIP, _ := getMasterAddresses(c)
 
 	ips := sets.NewString()
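
This hunk ends right after ips := sets.NewString(), so the rest of the function body is not shown. A plausible sketch of the renamed helper's continuation, assuming it simply collects whichever of the two addresses are set (any provider-specific branches in the real function are not visible here):

	func GetControlPlaneAddresses(c clientset.Interface) []string {
		externalIP, internalIP, _ := getMasterAddresses(c)

		ips := sets.NewString()
		// Assumed continuation: keep only the addresses that are non-empty,
		// deduplicated via the string set.
		if externalIP != "" {
			ips.Insert(externalIP)
		}
		if internalIP != "" {
			ips.Insert(internalIP)
		}
		return ips.List()
	}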
|
@@ -216,10 +216,10 @@ var _ = SIGDescribe("Firewall rule", func() {
 			framework.Failf("did not find any node addresses")
 		}
 
-		masterAddresses := framework.GetAllMasterAddresses(cs)
-		for _, masterAddress := range masterAddresses {
-			assertNotReachableHTTPTimeout(masterAddress, ports.InsecureKubeControllerManagerPort, firewallTestTCPTimeout)
-			assertNotReachableHTTPTimeout(masterAddress, kubeschedulerconfig.DefaultInsecureSchedulerPort, firewallTestTCPTimeout)
+		controlPlaneAddresses := framework.GetControlPlaneAddresses(cs)
+		for _, instanceAddress := range controlPlaneAddresses {
+			assertNotReachableHTTPTimeout(instanceAddress, ports.InsecureKubeControllerManagerPort, firewallTestTCPTimeout)
+			assertNotReachableHTTPTimeout(instanceAddress, kubeschedulerconfig.DefaultInsecureSchedulerPort, firewallTestTCPTimeout)
 		}
 		assertNotReachableHTTPTimeout(nodeAddr, ports.KubeletPort, firewallTestTCPTimeout)
 		assertNotReachableHTTPTimeout(nodeAddr, ports.KubeletReadOnlyPort, firewallTestTCPTimeout)
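
assertNotReachableHTTPTimeout is a helper local to the firewall test; its body is not part of this diff. As a rough, hypothetical stand-in for that kind of assertion (TCP-level rather than HTTP, and not the actual helper), it could look like:

	// Hypothetical stand-in; needs "net", "strconv", "time" plus the e2e framework package.
	func assertNotReachable(addr string, port int, timeout time.Duration) {
		conn, err := net.DialTimeout("tcp", net.JoinHostPort(addr, strconv.Itoa(port)), timeout)
		if err == nil {
			conn.Close()
			framework.Failf("%s:%d was unexpectedly reachable", addr, port)
		}
	}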
|