Merge pull request #96627 from aojea/hostport

fix e2e conformance test predicates conflict hostport
Kubernetes Prow Robot 2020-11-20 15:03:34 -08:00 committed by GitHub
commit 8095565176
2 changed files with 128 additions and 34 deletions

test/e2e/scheduling/BUILD

@@ -63,7 +63,6 @@ go_library(
         "//vendor/github.com/onsi/gomega:go_default_library",
         "//vendor/github.com/stretchr/testify/assert:go_default_library",
         "//vendor/google.golang.org/api/compute/v1:go_default_library",
-        "//vendor/k8s.io/utils/net:go_default_library",
     ],
 )

test/e2e/scheduling/predicates.go

@@ -19,12 +19,15 @@ package scheduling
 import (
     "context"
     "fmt"
+    "net"
+    "strconv"
     "time"

     v1 "k8s.io/api/core/v1"
     nodev1 "k8s.io/api/node/v1"
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/util/intstr"
     "k8s.io/apimachinery/pkg/util/sets"
     "k8s.io/apimachinery/pkg/util/uuid"
     utilversion "k8s.io/apimachinery/pkg/util/version"
@@ -37,7 +40,6 @@ import (
     e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
     testutils "k8s.io/kubernetes/test/utils"
     imageutils "k8s.io/kubernetes/test/utils/image"
-    k8utilnet "k8s.io/utils/net"

     "github.com/onsi/ginkgo"
@@ -660,6 +662,11 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
     framework.ConformanceIt("validates that there is no conflict between pods with same hostPort but different hostIP and protocol", func() {
         nodeName := GetNodeThatCanRunPod(f)
+        localhost := "127.0.0.1"
+        if framework.TestContext.ClusterIsIPv6() {
+            localhost = "::1"
+        }
+        hostIP := getNodeHostIP(f, nodeName)

         // use nodeSelector to make sure the testing pods get assigned on the same node to explicitly verify there exists conflict or not
         ginkgo.By("Trying to apply a random label on the found node.")
@@ -674,14 +681,75 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
         defer framework.RemoveLabelOffNode(cs, nodeName, k)

         port := int32(54321)
-        ginkgo.By(fmt.Sprintf("Trying to create a pod(pod1) with hostport %v and hostIP 127.0.0.1 and expect scheduled", port))
-        createHostPortPodOnNode(f, "pod1", ns, "127.0.0.1", port, v1.ProtocolTCP, nodeSelector, true)
+        ginkgo.By(fmt.Sprintf("Trying to create a pod(pod1) with hostport %v and hostIP %s and expect scheduled", port, localhost))
+        createHostPortPodOnNode(f, "pod1", ns, localhost, port, v1.ProtocolTCP, nodeSelector, true)

-        ginkgo.By(fmt.Sprintf("Trying to create another pod(pod2) with hostport %v but hostIP 127.0.0.2 on the node which pod1 resides and expect scheduled", port))
-        createHostPortPodOnNode(f, "pod2", ns, "127.0.0.2", port, v1.ProtocolTCP, nodeSelector, true)
+        ginkgo.By(fmt.Sprintf("Trying to create another pod(pod2) with hostport %v but hostIP %s on the node which pod1 resides and expect scheduled", port, hostIP))
+        createHostPortPodOnNode(f, "pod2", ns, hostIP, port, v1.ProtocolTCP, nodeSelector, true)

-        ginkgo.By(fmt.Sprintf("Trying to create a third pod(pod3) with hostport %v, hostIP 127.0.0.2 but use UDP protocol on the node which pod2 resides", port))
-        createHostPortPodOnNode(f, "pod3", ns, "127.0.0.2", port, v1.ProtocolUDP, nodeSelector, true)
+        ginkgo.By(fmt.Sprintf("Trying to create a third pod(pod3) with hostport %v, hostIP %s but use UDP protocol on the node which pod2 resides", port, hostIP))
+        createHostPortPodOnNode(f, "pod3", ns, hostIP, port, v1.ProtocolUDP, nodeSelector, true)
+        // check that the port is being actually exposed to each container
+        // create a pod on the host network in the same node
+        hostExecPod := &v1.Pod{
+            ObjectMeta: metav1.ObjectMeta{
+                Name:      "e2e-host-exec",
+                Namespace: f.Namespace.Name,
+            },
+            Spec: v1.PodSpec{
+                HostNetwork:  true,
+                NodeSelector: nodeSelector,
+                Containers: []v1.Container{
+                    {
+                        Name:  "e2e-host-exec",
+                        Image: imageutils.GetE2EImage(imageutils.Agnhost),
+                    },
+                },
+            },
+        }
+        f.PodClient().CreateSync(hostExecPod)
+
+        // use a 5 seconds timeout per connection
+        timeout := 5
+        // IPv6 doesn't NAT from localhost -> localhost, it doesn't have the route_localnet kernel hack, so we need to specify the source IP
+        cmdPod1 := []string{"/bin/sh", "-c", fmt.Sprintf("curl -g --connect-timeout %v --interface %s http://%s/hostname", timeout, hostIP, net.JoinHostPort(localhost, strconv.Itoa(int(port))))}
+        cmdPod2 := []string{"/bin/sh", "-c", fmt.Sprintf("curl -g --connect-timeout %v http://%s/hostname", timeout, net.JoinHostPort(hostIP, strconv.Itoa(int(port))))}
+        cmdPod3 := []string{"/bin/sh", "-c", fmt.Sprintf("nc -vuz -w %v %s %d", timeout, hostIP, port)}
+        // try 5 times to connect to the exposed ports
+        success := false
+        for i := 0; i < 5; i++ {
+            // check pod1
+            ginkgo.By(fmt.Sprintf("checking connectivity from pod %s to serverIP: %s, port: %d", hostExecPod.Name, localhost, port))
+            hostname1, _, err := f.ExecCommandInContainerWithFullOutput(hostExecPod.Name, "e2e-host-exec", cmdPod1...)
+            if err != nil {
+                framework.Logf("Can not connect from %s to pod(pod1) to serverIP: %s, port: %d", hostExecPod.Name, localhost, port)
+                continue
+            }
+            // check pod2
+            ginkgo.By(fmt.Sprintf("checking connectivity from pod %s to serverIP: %s, port: %d", hostExecPod.Name, hostIP, port))
+            hostname2, _, err := f.ExecCommandInContainerWithFullOutput(hostExecPod.Name, "e2e-host-exec", cmdPod2...)
+            if err != nil {
+                framework.Logf("Can not connect from %s to pod(pod2) to serverIP: %s, port: %d", hostExecPod.Name, hostIP, port)
+                continue
+            }
+            // the hostname returned has to be different because we are exposing the same port to two different pods
+            if hostname1 == hostname2 {
+                framework.Logf("pods must have different hostname: pod1 has hostname %s, pod2 has hostname %s", hostname1, hostname2)
+                continue
+            }
+            // check pod3
+            ginkgo.By(fmt.Sprintf("checking connectivity from pod %s to serverIP: %s, port: %d UDP", hostExecPod.Name, hostIP, port))
+            _, _, err = f.ExecCommandInContainerWithFullOutput(hostExecPod.Name, "e2e-host-exec", cmdPod3...)
+            if err != nil {
framework.Logf("Can not connect from %s to pod(pod2) to serverIP: %s, port: %d", hostExecPod.Name, hostIP, port)
+                continue
+            }
+            success = true
+        }
+        if !success {
+            framework.Failf("Failed to connect to exposed host ports")
+        }
     })

     /*
@@ -692,7 +760,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
     */
     framework.ConformanceIt("validates that there exists conflict between pods with same hostPort and protocol but one using 0.0.0.0 hostIP", func() {
         nodeName := GetNodeThatCanRunPod(f)
+        hostIP := getNodeHostIP(f, nodeName)
         // use nodeSelector to make sure the testing pods get assigned on the same node to explicitly verify there exists conflict or not
         ginkgo.By("Trying to apply a random label on the found node.")
         k := fmt.Sprintf("kubernetes.io/e2e-%s", string(uuid.NewUUID()))
@@ -709,8 +777,8 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
         ginkgo.By(fmt.Sprintf("Trying to create a pod(pod4) with hostport %v and hostIP 0.0.0.0(empty string here) and expect scheduled", port))
         createHostPortPodOnNode(f, "pod4", ns, "", port, v1.ProtocolTCP, nodeSelector, true)

-        ginkgo.By(fmt.Sprintf("Trying to create another pod(pod5) with hostport %v but hostIP 127.0.0.1 on the node which pod4 resides and expect not scheduled", port))
-        createHostPortPodOnNode(f, "pod5", ns, "127.0.0.1", port, v1.ProtocolTCP, nodeSelector, false)
+        ginkgo.By(fmt.Sprintf("Trying to create another pod(pod5) with hostport %v but hostIP %s on the node which pod4 resides and expect not scheduled", port, hostIP))
+        createHostPortPodOnNode(f, "pod5", ns, hostIP, port, v1.ProtocolTCP, nodeSelector, false)
     })

     ginkgo.Context("PodTopologySpread Filtering", func() {
@@ -1011,37 +1079,51 @@ func CreateNodeSelectorPods(f *framework.Framework, id string, replicas int, nod
 }

 // create pod which using hostport on the specified node according to the nodeSelector
+// it starts an http server on the exposed port
 func createHostPortPodOnNode(f *framework.Framework, podName, ns, hostIP string, port int32, protocol v1.Protocol, nodeSelector map[string]string, expectScheduled bool) {
-    hostIP = translateIPv4ToIPv6(hostIP)
-    createPausePod(f, pausePodConfig{
-        Name: podName,
-        Ports: []v1.ContainerPort{
-            {
-                HostPort:      port,
-                ContainerPort: 80,
-                Protocol:      protocol,
-                HostIP:        hostIP,
-            },
-        },
-        NodeSelector: nodeSelector,
-    })
+    hostPortPod := &v1.Pod{
+        ObjectMeta: metav1.ObjectMeta{
+            Name: podName,
+        },
+        Spec: v1.PodSpec{
+            Containers: []v1.Container{
+                {
+                    Name:  "agnhost",
+                    Image: imageutils.GetE2EImage(imageutils.Agnhost),
+                    Args:  []string{"netexec", "--http-port=80", "--udp-port=80"},
+                    Ports: []v1.ContainerPort{
+                        {
+                            HostPort:      port,
+                            ContainerPort: 80,
+                            Protocol:      protocol,
+                            HostIP:        hostIP,
+                        },
+                    },
+                    ReadinessProbe: &v1.Probe{
+                        Handler: v1.Handler{
+                            HTTPGet: &v1.HTTPGetAction{
+                                Path: "/hostname",
+                                Port: intstr.IntOrString{
+                                    IntVal: int32(80),
+                                },
+                                Scheme: v1.URISchemeHTTP,
+                            },
+                        },
+                    },
+                },
+            },
+            NodeSelector: nodeSelector,
+        },
+    }
+    _, err := f.ClientSet.CoreV1().Pods(ns).Create(context.TODO(), hostPortPod, metav1.CreateOptions{})
+    framework.ExpectNoError(err)

-    err := e2epod.WaitForPodNotPending(f.ClientSet, ns, podName)
+    err = e2epod.WaitForPodNotPending(f.ClientSet, ns, podName)
     if expectScheduled {
         framework.ExpectNoError(err)
     }
 }
-// translateIPv4ToIPv6 maps an IPv4 address into a valid IPv6 address
-// adding the well known prefix "0::ffff:" https://tools.ietf.org/html/rfc2765
-// if the ip is IPv4 and the cluster IPFamily is IPv6, otherwise returns the same ip
-func translateIPv4ToIPv6(ip string) string {
-    if framework.TestContext.IPFamily == "ipv6" && ip != "" && !k8utilnet.IsIPv6String(ip) {
-        ip = "0::ffff:" + ip
-    }
-    return ip
-}
 // GetPodsScheduled returns a number of currently scheduled and not scheduled Pods on worker nodes.
 func GetPodsScheduled(workerNodes sets.String, pods *v1.PodList) (scheduledPods, notScheduledPods []v1.Pod) {
     for _, pod := range pods.Items {
@@ -1058,3 +1140,16 @@ func GetPodsScheduled(workerNodes sets.String, pods *v1.PodList) (scheduledPods,
     }
     return
 }
+
+// getNodeHostIP returns the first internal IP on the node matching the main Cluster IP family
+func getNodeHostIP(f *framework.Framework, nodeName string) string {
+    // Get the internal HostIP of the node
+    family := v1.IPv4Protocol
+    if framework.TestContext.ClusterIsIPv6() {
+        family = v1.IPv6Protocol
+    }
+    node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
+    framework.ExpectNoError(err)
+    ips := e2enode.GetAddressesByTypeAndFamily(node, v1.NodeInternalIP, family)
+    return ips[0]
+}