Merge pull request #121588 from vlasebian/fix-issue-121209

Fix flaky test for services that shouldn't be available when PublishNotReadyAddresses is false
This commit is contained in:
Kubernetes Prow Robot 2023-10-31 13:14:06 +01:00 committed by GitHub
commit d337523d62
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23

View File

@ -2118,23 +2118,35 @@ var _ = common.SIGDescribe("Services", func() {
if err != nil { if err != nil {
framework.Failf("error waiting for pod %s to be unready %v", webserverPod0.Name, err) framework.Failf("error waiting for pod %s to be unready %v", webserverPod0.Name, err)
} }
// Wait until the change has been propagated and the service starts to fail
clusterIPAddress := net.JoinHostPort(svc.Spec.ClusterIP, strconv.Itoa(servicePort)) nodeIPs0 := e2enode.GetAddresses(&node0, v1.NodeInternalIP)
cmd := fmt.Sprintf(`curl -q -s --connect-timeout 5 %s/hostname`, clusterIPAddress) nodeIPs1 := e2enode.GetAddresses(&node1, v1.NodeInternalIP)
if pollErr := wait.PollImmediate(framework.Poll, e2eservice.KubeProxyEndpointLagTimeout, func() (bool, error) { nodePortAddress0 := net.JoinHostPort(nodeIPs0[0], strconv.Itoa(int(svc.Spec.Ports[0].NodePort)))
nodePortAddress1 := net.JoinHostPort(nodeIPs1[0], strconv.Itoa(int(svc.Spec.Ports[0].NodePort)))
// Wait until the change has been propagated to both nodes.
cmd := fmt.Sprintf(`curl -q -s --connect-timeout 5 %s/hostname`, nodePortAddress0)
if pollErr := wait.PollUntilContextTimeout(ctx, framework.Poll, e2eservice.KubeProxyEndpointLagTimeout, true, func(_ context.Context) (bool, error) {
_, err := e2eoutput.RunHostCmd(pausePod1.Namespace, pausePod1.Name, cmd) _, err := e2eoutput.RunHostCmd(pausePod1.Namespace, pausePod1.Name, cmd)
if err != nil { if err != nil {
return true, nil return true, nil
} }
return false, nil return false, nil
}); pollErr != nil { }); pollErr != nil {
framework.ExpectNoError(pollErr, "service still serves traffic") framework.ExpectNoError(pollErr, "pod on node0 still serves traffic")
}
cmd = fmt.Sprintf(`curl -q -s --connect-timeout 5 %s/hostname`, nodePortAddress1)
if pollErr := wait.PollUntilContextTimeout(ctx, framework.Poll, e2eservice.KubeProxyEndpointLagTimeout, true, func(_ context.Context) (bool, error) {
_, err := e2eoutput.RunHostCmd(pausePod1.Namespace, pausePod1.Name, cmd)
if err != nil {
return true, nil
}
return false, nil
}); pollErr != nil {
framework.ExpectNoError(pollErr, "pod on node1 still serves traffic")
} }
nodeIPs0 := e2enode.GetAddresses(&node0, v1.NodeInternalIP) clusterIPAddress := net.JoinHostPort(svc.Spec.ClusterIP, strconv.Itoa(servicePort))
nodeIPs1 := e2enode.GetAddresses(&node1, v1.NodeInternalIP)
nodePortAddress0 := net.JoinHostPort(nodeIPs0[0], strconv.Itoa(int(svc.Spec.Ports[0].NodePort)))
nodePortAddress1 := net.JoinHostPort(nodeIPs1[0], strconv.Itoa(int(svc.Spec.Ports[0].NodePort)))
// connect 3 times every 5 seconds to the Service and expect a failure // connect 3 times every 5 seconds to the Service and expect a failure
for i := 0; i < 5; i++ { for i := 0; i < 5; i++ {
cmd = fmt.Sprintf(`curl -q -s --connect-timeout 5 %s/hostname`, clusterIPAddress) cmd = fmt.Sprintf(`curl -q -s --connect-timeout 5 %s/hostname`, clusterIPAddress)