test/e2e/network: add e2e tests for ProxyTerminatingEndpoints
Signed-off-by: Andrew Sy Kim <andrewsy@google.com>
commit 01c178c9de (parent 53439020a4)
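For context (not part of the diff below): with ProxyTerminatingEndpoints, kube-proxy falls back to terminating endpoints only when a Service has no ready endpoints left. A terminating pod is surfaced in EndpointSlices with ready=false, serving=true, terminating=true. A minimal sketch of what such an endpoint looks like when built with the discovery/v1 API; the slice name and pod IP are made up:

package main

import (
    "fmt"

    discoveryv1 "k8s.io/api/discovery/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    utilpointer "k8s.io/utils/pointer"
)

func main() {
    // A slice for a Service whose only endpoint is a pod that is shutting down:
    // kube-proxy with ProxyTerminatingEndpoints may still route to it because it
    // is serving, even though it is no longer ready.
    slice := discoveryv1.EndpointSlice{
        ObjectMeta:  metav1.ObjectMeta{Name: "svc-proxy-terminating-abc12"}, // hypothetical name
        AddressType: discoveryv1.AddressTypeIPv4,
        Endpoints: []discoveryv1.Endpoint{{
            Addresses: []string{"10.0.0.10"}, // hypothetical pod IP
            Conditions: discoveryv1.EndpointConditions{
                Ready:       utilpointer.Bool(false), // terminating pods are never ready
                Serving:     utilpointer.Bool(true),  // but may still pass their readiness probe
                Terminating: utilpointer.Bool(true),
            },
        }},
        Ports: []discoveryv1.EndpointPort{{
            Name: utilpointer.String("http"),
            Port: utilpointer.Int32(80),
        }},
    }
    fmt.Printf("endpoint conditions: %+v\n", slice.Endpoints[0].Conditions)
}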
@@ -52,6 +52,7 @@ import (

    cloudprovider "k8s.io/cloud-provider"
    netutils "k8s.io/utils/net"
    utilpointer "k8s.io/utils/pointer"

    "k8s.io/kubernetes/test/e2e/framework"
    e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
@@ -2309,6 +2310,456 @@ var _ = common.SIGDescribe("Services", func() {
        }
    })

    ginkgo.It("should fail health check node port if there are only terminating endpoints [Feature:ProxyTerminatingEndpoints]", func() {
        // windows kube-proxy does not support this feature yet
        e2eskipper.SkipIfNodeOSDistroIs("windows")

        // This behavior is not supported if Kube-proxy is in "userspace" mode.
        // So we check the kube-proxy mode and skip this test if that's the case.
        if proxyMode, err := proxyMode(f); err == nil {
            if proxyMode == "userspace" {
                e2eskipper.Skipf("The test doesn't work with kube-proxy in userspace mode")
            }
        } else {
            framework.Logf("Couldn't detect KubeProxy mode - test failure may be expected: %v", err)
        }

        nodes, err := e2enode.GetBoundedReadySchedulableNodes(cs, 2)
        framework.ExpectNoError(err)
        nodeCounts := len(nodes.Items)
        if nodeCounts < 2 {
            e2eskipper.Skipf("The test requires at least two ready nodes on %s, but found %v", framework.TestContext.Provider, nodeCounts)
        }
        node0 := nodes.Items[0]

        serviceName := "svc-proxy-terminating"
        ns := f.Namespace.Name
        servicePort := 80

        ginkgo.By("creating a TCP service " + serviceName + " in namespace " + ns + " where all pods are terminating")
        jig := e2eservice.NewTestJig(cs, ns, serviceName)
        svc, err := jig.CreateTCPService(func(svc *v1.Service) {
            svc.Spec.Ports = []v1.ServicePort{
                {Port: 80, Name: "http", Protocol: v1.ProtocolTCP, TargetPort: intstr.FromInt(80)},
            }
            svc.Spec.Type = v1.ServiceTypeLoadBalancer
            svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
        })
        framework.ExpectNoError(err)

        ginkgo.By("Creating 1 webserver pod to be part of the TCP service")
        webserverPod0 := e2epod.NewAgnhostPod(ns, "echo-hostname-0", nil, nil, nil, "netexec", "--http-port", strconv.Itoa(servicePort), "--delay-shutdown", "600")
        webserverPod0.Labels = jig.Labels
        webserverPod0.Spec.TerminationGracePeriodSeconds = utilpointer.Int64(600)
        e2epod.SetNodeSelection(&webserverPod0.Spec, e2epod.NodeSelection{Name: node0.Name})

        _, err = cs.CoreV1().Pods(ns).Create(context.TODO(), webserverPod0, metav1.CreateOptions{})
        framework.ExpectNoError(err)
        framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, webserverPod0.Name, f.Namespace.Name, framework.PodStartTimeout))
        validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{webserverPod0.Name: {servicePort}})

        pausePod0 := e2epod.NewAgnhostPod(ns, "pause-pod-0", nil, nil, nil)
        e2epod.SetNodeSelection(&pausePod0.Spec, e2epod.NodeSelection{Name: node0.Name})

        pausePod0, err = cs.CoreV1().Pods(ns).Create(context.TODO(), pausePod0, metav1.CreateOptions{})
        framework.ExpectNoError(err)
        framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, pausePod0.Name, f.Namespace.Name, framework.PodStartTimeout))

        nodeIPs := e2enode.GetAddresses(&node0, v1.NodeInternalIP)
        healthCheckNodePortAddr := net.JoinHostPort(nodeIPs[0], strconv.Itoa(int(svc.Spec.HealthCheckNodePort)))
        // validate that the health check node port from kube-proxy returns 200 when there are ready endpoints
        err = wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
            cmd := fmt.Sprintf(`curl -s -o /dev/null -w "%%{http_code}" --connect-timeout 5 http://%s/healthz`, healthCheckNodePortAddr)
            out, err := framework.RunHostCmd(pausePod0.Namespace, pausePod0.Name, cmd)
            if err != nil {
                return false, err
            }

            expectedOut := "200"
            if out != expectedOut {
                return false, nil
            }
            return true, nil
        })
        framework.ExpectNoError(err)

        // webserver should continue to serve traffic through the Service after deletion, even though the health check node port should return 503
        ginkgo.By("Terminating the webserver pod")
        err = cs.CoreV1().Pods(ns).Delete(context.TODO(), webserverPod0.Name, metav1.DeleteOptions{})
        framework.ExpectNoError(err)

        // validate that the health check node port from kube-proxy returns 503 when there are no ready endpoints
        err = wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
            cmd := fmt.Sprintf(`curl -s -o /dev/null -w "%%{http_code}" --connect-timeout 5 http://%s/healthz`, healthCheckNodePortAddr)
            out, err := framework.RunHostCmd(pausePod0.Namespace, pausePod0.Name, cmd)
            if err != nil {
                return false, err
            }

            expectedOut := "503"
            if out != expectedOut {
                return false, nil
            }
            return true, nil
        })
        framework.ExpectNoError(err)

        // also verify that while health check node port indicates 0 endpoints and returns 503, the endpoint still serves traffic.
        nodePortAddress := net.JoinHostPort(nodeIPs[0], strconv.Itoa(int(svc.Spec.Ports[0].NodePort)))
        execHostnameTest(*pausePod0, nodePortAddress, webserverPod0.Name)
    })
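The polling above shells out to curl from a pause pod. The same probe written directly against net/http looks like the sketch below; it is standalone, with a placeholder address, whereas in the test the address comes from the node IP and svc.Spec.HealthCheckNodePort:

package main

import (
    "fmt"
    "net/http"
    "time"
)

// probeHealthCheckNodePort returns the HTTP status code served by kube-proxy's
// per-service health check endpoint, e.g. "http://<nodeIP>:<healthCheckNodePort>/healthz".
// 200 means the node has at least one ready local endpoint; 503 means it has none.
func probeHealthCheckNodePort(addr string) (int, error) {
    client := &http.Client{Timeout: 5 * time.Second}
    resp, err := client.Get("http://" + addr + "/healthz")
    if err != nil {
        return 0, err
    }
    defer resp.Body.Close()
    return resp.StatusCode, nil
}

func main() {
    // Placeholder address; in the e2e test this is nodeIP:svc.Spec.HealthCheckNodePort.
    code, err := probeHealthCheckNodePort("192.0.2.10:32000")
    if err != nil {
        fmt.Println("probe failed:", err)
        return
    }
    fmt.Println("health check status:", code)
}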

    ginkgo.It("should fallback to terminating endpoints when there are no ready endpoints with internalTrafficPolicy=Cluster [Feature:ProxyTerminatingEndpoints]", func() {
        // windows kube-proxy does not support this feature yet
        e2eskipper.SkipIfNodeOSDistroIs("windows")

        // This behavior is not supported if Kube-proxy is in "userspace" mode.
        // So we check the kube-proxy mode and skip this test if that's the case.
        if proxyMode, err := proxyMode(f); err == nil {
            if proxyMode == "userspace" {
                e2eskipper.Skipf("The test doesn't work with kube-proxy in userspace mode")
            }
        } else {
            framework.Logf("Couldn't detect KubeProxy mode - test failure may be expected: %v", err)
        }

        nodes, err := e2enode.GetBoundedReadySchedulableNodes(cs, 2)
        framework.ExpectNoError(err)
        nodeCounts := len(nodes.Items)
        if nodeCounts < 2 {
            e2eskipper.Skipf("The test requires at least two ready nodes on %s, but found %v", framework.TestContext.Provider, nodeCounts)
        }
        node0 := nodes.Items[0]
        node1 := nodes.Items[1]

        serviceName := "svc-proxy-terminating"
        ns := f.Namespace.Name
        servicePort := 80

        ginkgo.By("creating a TCP service " + serviceName + " in namespace " + ns + " where all pods are terminating")
        jig := e2eservice.NewTestJig(cs, ns, serviceName)
        svc, err := jig.CreateTCPService(func(svc *v1.Service) {
            svc.Spec.Ports = []v1.ServicePort{
                {Port: 80, Name: "http", Protocol: v1.ProtocolTCP, TargetPort: intstr.FromInt(80)},
            }
        })
        framework.ExpectNoError(err)

        ginkgo.By("Creating 1 webserver pod to be part of the TCP service")
        webserverPod0 := e2epod.NewAgnhostPod(ns, "echo-hostname-0", nil, nil, nil, "netexec", "--http-port", strconv.Itoa(servicePort), "--delay-shutdown", "600")
        webserverPod0.Labels = jig.Labels
        webserverPod0.Spec.TerminationGracePeriodSeconds = utilpointer.Int64(600)
        e2epod.SetNodeSelection(&webserverPod0.Spec, e2epod.NodeSelection{Name: node0.Name})

        _, err = cs.CoreV1().Pods(ns).Create(context.TODO(), webserverPod0, metav1.CreateOptions{})
        framework.ExpectNoError(err)
        framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, webserverPod0.Name, f.Namespace.Name, framework.PodStartTimeout))
        validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{webserverPod0.Name: {servicePort}})

        ginkgo.By("Creating 2 pause pods that will try to connect to the webservers")
        pausePod0 := e2epod.NewAgnhostPod(ns, "pause-pod-0", nil, nil, nil)
        e2epod.SetNodeSelection(&pausePod0.Spec, e2epod.NodeSelection{Name: node0.Name})

        pausePod0, err = cs.CoreV1().Pods(ns).Create(context.TODO(), pausePod0, metav1.CreateOptions{})
        framework.ExpectNoError(err)
        framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, pausePod0.Name, f.Namespace.Name, framework.PodStartTimeout))

        pausePod1 := e2epod.NewAgnhostPod(ns, "pause-pod-1", nil, nil, nil)
        e2epod.SetNodeSelection(&pausePod1.Spec, e2epod.NodeSelection{Name: node1.Name})

        pausePod1, err = cs.CoreV1().Pods(ns).Create(context.TODO(), pausePod1, metav1.CreateOptions{})
        framework.ExpectNoError(err)
        framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, pausePod1.Name, f.Namespace.Name, framework.PodStartTimeout))

        // webserver should continue to serve traffic through the Service after delete since:
        // - it has a 600s termination grace period
        // - it is the only ready endpoint
        err = cs.CoreV1().Pods(ns).Delete(context.TODO(), webserverPod0.Name, metav1.DeleteOptions{})
        framework.ExpectNoError(err)

        // assert 5 times that both the local and remote pod can connect to the Service while all endpoints are terminating
        serviceAddress := net.JoinHostPort(svc.Spec.ClusterIP, strconv.Itoa(servicePort))
        for i := 0; i < 5; i++ {
            // There's a Service with internalTrafficPolicy=Cluster,
            // with a single endpoint (which is terminating) called webserver0 running on node0.
            // pausePod0 and pausePod1 are on node0 and node1 respectively.
            // pausePod0 -> Service clusterIP succeeds because traffic policy is "Cluster"
            // pausePod1 -> Service clusterIP succeeds because traffic policy is "Cluster"
            execHostnameTest(*pausePod0, serviceAddress, webserverPod0.Name)
            execHostnameTest(*pausePod1, serviceAddress, webserverPod0.Name)

            time.Sleep(5 * time.Second)
        }
    })
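The fallback window in these tests exists because the agnhost container is started with --delay-shutdown 600 and a matching 600s terminationGracePeriodSeconds, so the pod keeps serving after it receives SIGTERM. A minimal sketch of that pattern in plain Go; the port and duration are illustrative, not the agnhost implementation:

package main

import (
    "log"
    "net/http"
    "os"
    "os/signal"
    "syscall"
    "time"
)

func main() {
    http.HandleFunc("/hostname", func(w http.ResponseWriter, r *http.Request) {
        host, _ := os.Hostname()
        w.Write([]byte(host))
    })

    go func() {
        log.Fatal(http.ListenAndServe(":80", nil))
    }()

    // Keep serving for a while after SIGTERM so terminating endpoints stay usable,
    // mirroring what `--delay-shutdown 600` provides in the tests above.
    sigCh := make(chan os.Signal, 1)
    signal.Notify(sigCh, syscall.SIGTERM)
    <-sigCh
    log.Println("SIGTERM received; delaying shutdown")
    time.Sleep(600 * time.Second)
}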

    ginkgo.It("should fallback to local terminating endpoints when there are no ready endpoints with internalTrafficPolicy=Local [Feature:ProxyTerminatingEndpoints]", func() {
        // windows kube-proxy does not support this feature yet
        e2eskipper.SkipIfNodeOSDistroIs("windows")

        // This behavior is not supported if Kube-proxy is in "userspace" mode.
        // So we check the kube-proxy mode and skip this test if that's the case.
        if proxyMode, err := proxyMode(f); err == nil {
            if proxyMode == "userspace" {
                e2eskipper.Skipf("The test doesn't work with kube-proxy in userspace mode")
            }
        } else {
            framework.Logf("Couldn't detect KubeProxy mode - test failure may be expected: %v", err)
        }

        nodes, err := e2enode.GetBoundedReadySchedulableNodes(cs, 2)
        framework.ExpectNoError(err)
        nodeCounts := len(nodes.Items)
        if nodeCounts < 2 {
            e2eskipper.Skipf("The test requires at least two ready nodes on %s, but found %v", framework.TestContext.Provider, nodeCounts)
        }
        node0 := nodes.Items[0]
        node1 := nodes.Items[1]

        serviceName := "svc-proxy-terminating"
        ns := f.Namespace.Name
        servicePort := 80

        ginkgo.By("creating a TCP service " + serviceName + " in namespace " + ns + " where all pods are terminating")
        jig := e2eservice.NewTestJig(cs, ns, serviceName)
        local := v1.ServiceInternalTrafficPolicyLocal
        svc, err := jig.CreateTCPService(func(svc *v1.Service) {
            svc.Spec.Ports = []v1.ServicePort{
                {Port: 80, Name: "http", Protocol: v1.ProtocolTCP, TargetPort: intstr.FromInt(80)},
            }
            svc.Spec.InternalTrafficPolicy = &local
        })
        framework.ExpectNoError(err)

        ginkgo.By("Creating 1 webserver pod to be part of the TCP service")
        webserverPod0 := e2epod.NewAgnhostPod(ns, "echo-hostname-0", nil, nil, nil, "netexec", "--http-port", strconv.Itoa(servicePort), "--delay-shutdown", "600")
        webserverPod0.Labels = jig.Labels
        webserverPod0.Spec.TerminationGracePeriodSeconds = utilpointer.Int64(600)
        e2epod.SetNodeSelection(&webserverPod0.Spec, e2epod.NodeSelection{Name: node0.Name})

        _, err = cs.CoreV1().Pods(ns).Create(context.TODO(), webserverPod0, metav1.CreateOptions{})
        framework.ExpectNoError(err)
        framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, webserverPod0.Name, f.Namespace.Name, framework.PodStartTimeout))
        validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{webserverPod0.Name: {servicePort}})

        ginkgo.By("Creating 2 pause pods that will try to connect to the webservers")
        pausePod0 := e2epod.NewAgnhostPod(ns, "pause-pod-0", nil, nil, nil)
        e2epod.SetNodeSelection(&pausePod0.Spec, e2epod.NodeSelection{Name: node0.Name})

        pausePod0, err = cs.CoreV1().Pods(ns).Create(context.TODO(), pausePod0, metav1.CreateOptions{})
        framework.ExpectNoError(err)
        framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, pausePod0.Name, f.Namespace.Name, framework.PodStartTimeout))

        pausePod1 := e2epod.NewAgnhostPod(ns, "pause-pod-1", nil, nil, nil)
        e2epod.SetNodeSelection(&pausePod1.Spec, e2epod.NodeSelection{Name: node1.Name})

        pausePod1, err = cs.CoreV1().Pods(ns).Create(context.TODO(), pausePod1, metav1.CreateOptions{})
        framework.ExpectNoError(err)
        framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, pausePod1.Name, f.Namespace.Name, framework.PodStartTimeout))

        // webserver should continue to serve traffic through the Service after delete since:
        // - it has a 600s termination grace period
        // - it is the only ready endpoint
        err = cs.CoreV1().Pods(ns).Delete(context.TODO(), webserverPod0.Name, metav1.DeleteOptions{})
        framework.ExpectNoError(err)

        // assert 5 times that the first pause pod can connect to the Service locally and the second one errors with a timeout
        serviceAddress := net.JoinHostPort(svc.Spec.ClusterIP, strconv.Itoa(servicePort))
        for i := 0; i < 5; i++ {
            // There's a Service with internalTrafficPolicy=Local,
            // with a single endpoint (which is terminating) called webserver0 running on node0.
            // pausePod0 and pausePod1 are on node0 and node1 respectively.
            // pausePod0 -> Service clusterIP succeeds because webserver0 is running on node0 and traffic policy is "Local"
            // pausePod1 -> Service clusterIP fails because webserver0 is on a different node and traffic policy is "Local"
            execHostnameTest(*pausePod0, serviceAddress, webserverPod0.Name)

            cmd := fmt.Sprintf(`curl -q -s --connect-timeout 5 %s/hostname`, serviceAddress)
            _, err := framework.RunHostCmd(pausePod1.Namespace, pausePod1.Name, cmd)
            framework.ExpectError(err, "expected error when trying to connect to cluster IP")

            time.Sleep(5 * time.Second)
        }
    })
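The ExpectError above relies on curl's --connect-timeout: with internalTrafficPolicy=Local and no endpoint on pausePod1's node, kube-proxy (iptables/ipvs modes) typically drops the traffic, so the client sees a connect timeout rather than a refused connection. The same expectation expressed with net.DialTimeout, using a placeholder cluster IP and port:

package main

import (
    "fmt"
    "net"
    "time"
)

// expectNoRoute dials the given address and treats a dial failure (usually a
// timeout when traffic is dropped) as the expected outcome, mirroring
// `curl --connect-timeout 5` in the test above.
func expectNoRoute(addr string) error {
    conn, err := net.DialTimeout("tcp", addr, 5*time.Second)
    if err == nil {
        conn.Close()
        return fmt.Errorf("unexpectedly connected to %s", addr)
    }
    return nil
}

func main() {
    // Placeholder cluster IP and port; in the test this is svc.Spec.ClusterIP:80
    // dialed from a pod on a node with no local endpoint.
    if err := expectNoRoute("10.96.123.45:80"); err != nil {
        fmt.Println(err)
        return
    }
    fmt.Println("connection failed as expected")
}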

    ginkgo.It("should fallback to terminating endpoints when there are no ready endpoints with externalTrafficPolicy=Cluster [Feature:ProxyTerminatingEndpoints]", func() {
        // windows kube-proxy does not support this feature yet
        e2eskipper.SkipIfNodeOSDistroIs("windows")

        // This behavior is not supported if Kube-proxy is in "userspace" mode.
        // So we check the kube-proxy mode and skip this test if that's the case.
        if proxyMode, err := proxyMode(f); err == nil {
            if proxyMode == "userspace" {
                e2eskipper.Skipf("The test doesn't work with kube-proxy in userspace mode")
            }
        } else {
            framework.Logf("Couldn't detect KubeProxy mode - test failure may be expected: %v", err)
        }

        nodes, err := e2enode.GetBoundedReadySchedulableNodes(cs, 2)
        framework.ExpectNoError(err)
        nodeCounts := len(nodes.Items)
        if nodeCounts < 2 {
            e2eskipper.Skipf("The test requires at least two ready nodes on %s, but found %v", framework.TestContext.Provider, nodeCounts)
        }
        node0 := nodes.Items[0]
        node1 := nodes.Items[1]

        serviceName := "svc-proxy-terminating"
        ns := f.Namespace.Name
        servicePort := 80

        ginkgo.By("creating a TCP service " + serviceName + " in namespace " + ns + " where all pods are terminating")
        jig := e2eservice.NewTestJig(cs, ns, serviceName)
        svc, err := jig.CreateTCPService(func(svc *v1.Service) {
            svc.Spec.Ports = []v1.ServicePort{
                {Port: 80, Name: "http", Protocol: v1.ProtocolTCP, TargetPort: intstr.FromInt(80)},
            }
            svc.Spec.Type = v1.ServiceTypeNodePort
        })
        framework.ExpectNoError(err)

        ginkgo.By("Creating 1 webserver pod to be part of the TCP service")
        webserverPod0 := e2epod.NewAgnhostPod(ns, "echo-hostname-0", nil, nil, nil, "netexec", "--http-port", strconv.Itoa(servicePort), "--delay-shutdown", "600")
        webserverPod0.Labels = jig.Labels
        webserverPod0.Spec.TerminationGracePeriodSeconds = utilpointer.Int64(600)
        e2epod.SetNodeSelection(&webserverPod0.Spec, e2epod.NodeSelection{Name: node0.Name})

        _, err = cs.CoreV1().Pods(ns).Create(context.TODO(), webserverPod0, metav1.CreateOptions{})
        framework.ExpectNoError(err)
        framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, webserverPod0.Name, f.Namespace.Name, framework.PodStartTimeout))
        validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{webserverPod0.Name: {servicePort}})

        ginkgo.By("Creating 2 pause pods that will try to connect to the webservers")
        pausePod0 := e2epod.NewAgnhostPod(ns, "pause-pod-0", nil, nil, nil)
        e2epod.SetNodeSelection(&pausePod0.Spec, e2epod.NodeSelection{Name: node0.Name})

        pausePod0, err = cs.CoreV1().Pods(ns).Create(context.TODO(), pausePod0, metav1.CreateOptions{})
        framework.ExpectNoError(err)
        framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, pausePod0.Name, f.Namespace.Name, framework.PodStartTimeout))

        pausePod1 := e2epod.NewAgnhostPod(ns, "pause-pod-1", nil, nil, nil)
        e2epod.SetNodeSelection(&pausePod1.Spec, e2epod.NodeSelection{Name: node1.Name})

        pausePod1, err = cs.CoreV1().Pods(ns).Create(context.TODO(), pausePod1, metav1.CreateOptions{})
        framework.ExpectNoError(err)
        framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, pausePod1.Name, f.Namespace.Name, framework.PodStartTimeout))

        // webserver should continue to serve traffic through the Service after delete since:
        // - it has a 600s termination grace period
        // - it is the only ready endpoint
        err = cs.CoreV1().Pods(ns).Delete(context.TODO(), webserverPod0.Name, metav1.DeleteOptions{})
        framework.ExpectNoError(err)

        // assert 5 times that both the local and remote pod can connect to the Service NodePort while all endpoints are terminating
        nodeIPs := e2enode.GetAddresses(&node0, v1.NodeInternalIP)
        nodePortAddress := net.JoinHostPort(nodeIPs[0], strconv.Itoa(int(svc.Spec.Ports[0].NodePort)))
        for i := 0; i < 5; i++ {
            // There's a Service Type=NodePort with externalTrafficPolicy=Cluster,
            // with a single endpoint (which is terminating) called webserver0 running on node0.
            // pausePod0 and pausePod1 are on node0 and node1 respectively.
            // pausePod0 -> node0 node port succeeds because webserver0 is running on node0 and traffic policy is "Cluster"
            // pausePod1 -> node0 node port succeeds because webserver0 is running on node0 and traffic policy is "Cluster"
            execHostnameTest(*pausePod0, nodePortAddress, webserverPod0.Name)
            execHostnameTest(*pausePod1, nodePortAddress, webserverPod0.Name)

            time.Sleep(5 * time.Second)
        }
    })

    ginkgo.It("should fallback to local terminating endpoints when there are no ready endpoints with externalTrafficPolicy=Local [Feature:ProxyTerminatingEndpoints]", func() {
        // windows kube-proxy does not support this feature yet
        e2eskipper.SkipIfNodeOSDistroIs("windows")

        // This behavior is not supported if Kube-proxy is in "userspace" mode.
        // So we check the kube-proxy mode and skip this test if that's the case.
        if proxyMode, err := proxyMode(f); err == nil {
            if proxyMode == "userspace" {
                e2eskipper.Skipf("The test doesn't work with kube-proxy in userspace mode")
            }
        } else {
            framework.Logf("Couldn't detect KubeProxy mode - test failure may be expected: %v", err)
        }

        nodes, err := e2enode.GetBoundedReadySchedulableNodes(cs, 2)
        framework.ExpectNoError(err)
        nodeCounts := len(nodes.Items)
        if nodeCounts < 2 {
            e2eskipper.Skipf("The test requires at least two ready nodes on %s, but found %v", framework.TestContext.Provider, nodeCounts)
        }
        node0 := nodes.Items[0]
        node1 := nodes.Items[1]

        serviceName := "svc-proxy-terminating"
        ns := f.Namespace.Name
        servicePort := 80

        ginkgo.By("creating a TCP service " + serviceName + " in namespace " + ns + " where all pods are terminating")
        jig := e2eservice.NewTestJig(cs, ns, serviceName)
        svc, err := jig.CreateTCPService(func(svc *v1.Service) {
            svc.Spec.Ports = []v1.ServicePort{
                {Port: 80, Name: "http", Protocol: v1.ProtocolTCP, TargetPort: intstr.FromInt(80)},
            }
            svc.Spec.Type = v1.ServiceTypeNodePort
            svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
        })
        framework.ExpectNoError(err)

        ginkgo.By("Creating 1 webserver pod to be part of the TCP service")
        webserverPod0 := e2epod.NewAgnhostPod(ns, "echo-hostname-0", nil, nil, nil, "netexec", "--http-port", strconv.Itoa(servicePort), "--delay-shutdown", "600")
        webserverPod0.Labels = jig.Labels
        webserverPod0.Spec.TerminationGracePeriodSeconds = utilpointer.Int64(600)
        e2epod.SetNodeSelection(&webserverPod0.Spec, e2epod.NodeSelection{Name: node0.Name})

        _, err = cs.CoreV1().Pods(ns).Create(context.TODO(), webserverPod0, metav1.CreateOptions{})
        framework.ExpectNoError(err)
        framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, webserverPod0.Name, f.Namespace.Name, framework.PodStartTimeout))
        validateEndpointsPortsOrFail(cs, ns, serviceName, portsByPodName{webserverPod0.Name: {servicePort}})

        ginkgo.By("Creating 2 pause pods that will try to connect to the webservers")
        pausePod0 := e2epod.NewAgnhostPod(ns, "pause-pod-0", nil, nil, nil)
        e2epod.SetNodeSelection(&pausePod0.Spec, e2epod.NodeSelection{Name: node0.Name})

        pausePod0, err = cs.CoreV1().Pods(ns).Create(context.TODO(), pausePod0, metav1.CreateOptions{})
        framework.ExpectNoError(err)
        framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, pausePod0.Name, f.Namespace.Name, framework.PodStartTimeout))

        pausePod1 := e2epod.NewAgnhostPod(ns, "pause-pod-1", nil, nil, nil)
        e2epod.SetNodeSelection(&pausePod1.Spec, e2epod.NodeSelection{Name: node1.Name})

        pausePod1, err = cs.CoreV1().Pods(ns).Create(context.TODO(), pausePod1, metav1.CreateOptions{})
        framework.ExpectNoError(err)
        framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(f.ClientSet, pausePod1.Name, f.Namespace.Name, framework.PodStartTimeout))

        // webserver should continue to serve traffic through the Service after delete since:
        // - it has a 600s termination grace period
        // - it is the only ready endpoint
        err = cs.CoreV1().Pods(ns).Delete(context.TODO(), webserverPod0.Name, metav1.DeleteOptions{})
        framework.ExpectNoError(err)

        // assert 5 times that the first pause pod can connect to the Service locally and the second one errors with a timeout
        nodeIPs0 := e2enode.GetAddresses(&node0, v1.NodeInternalIP)
        nodeIPs1 := e2enode.GetAddresses(&node1, v1.NodeInternalIP)
        nodePortAddress0 := net.JoinHostPort(nodeIPs0[0], strconv.Itoa(int(svc.Spec.Ports[0].NodePort)))
        nodePortAddress1 := net.JoinHostPort(nodeIPs1[0], strconv.Itoa(int(svc.Spec.Ports[0].NodePort)))
        for i := 0; i < 5; i++ {
            // There's a Service Type=NodePort with externalTrafficPolicy=Local,
            // with a single endpoint (which is terminating) called webserver0 running on node0.
            // pausePod0 and pausePod1 are on node0 and node1 respectively.
            // pausePod0 -> node1 node port fails because it's "external" and there are no local endpoints
            // pausePod1 -> node0 node port succeeds because webserver0 is running on node0
            // pausePod0 -> node0 and pausePod1 -> node1 both succeed because pod-to-same-node-NodePort
            // connections are neither internal nor external and always get Cluster traffic policy.
            cmd := fmt.Sprintf(`curl -q -s --connect-timeout 5 %s/hostname`, nodePortAddress1)
            _, err := framework.RunHostCmd(pausePod0.Namespace, pausePod0.Name, cmd)
            framework.ExpectError(err, "expected error when trying to connect to node port for pausePod0")

            execHostnameTest(*pausePod0, nodePortAddress0, webserverPod0.Name)
            execHostnameTest(*pausePod1, nodePortAddress0, webserverPod0.Name)
            execHostnameTest(*pausePod1, nodePortAddress1, webserverPod0.Name)

            time.Sleep(5 * time.Second)
        }
    })
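A companion check these tests could make (not part of the diff) is to confirm via the discovery API that the only remaining endpoint really is in the serving-but-terminating state. A sketch using client-go, assuming a clientset, the test namespace, and the service name from above are passed in:

package endpointscheck

import (
    "context"
    "fmt"

    discoveryv1 "k8s.io/api/discovery/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
)

// hasOnlyTerminatingEndpoints returns true if every endpoint currently published
// for the service is serving but terminating (ready=false, serving=true, terminating=true).
func hasOnlyTerminatingEndpoints(ctx context.Context, cs kubernetes.Interface, ns, serviceName string) (bool, error) {
    slices, err := cs.DiscoveryV1().EndpointSlices(ns).List(ctx, metav1.ListOptions{
        LabelSelector: discoveryv1.LabelServiceName + "=" + serviceName,
    })
    if err != nil {
        return false, fmt.Errorf("listing EndpointSlices for %s/%s: %w", ns, serviceName, err)
    }
    found := false
    for _, slice := range slices.Items {
        for _, ep := range slice.Endpoints {
            found = true
            c := ep.Conditions
            if c.Ready == nil || *c.Ready ||
                c.Serving == nil || !*c.Serving ||
                c.Terminating == nil || !*c.Terminating {
                return false, nil
            }
        }
    }
    return found, nil
}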

    /*
        Release: v1.18
        Testname: Find Kubernetes Service in default Namespace