diff --git a/test/e2e/scheduling/BUILD b/test/e2e/scheduling/BUILD
index 8d45ca50e1b..40fe5e3e0e2 100644
--- a/test/e2e/scheduling/BUILD
+++ b/test/e2e/scheduling/BUILD
@@ -63,7 +63,6 @@ go_library(
         "//vendor/github.com/onsi/gomega:go_default_library",
         "//vendor/github.com/stretchr/testify/assert:go_default_library",
         "//vendor/google.golang.org/api/compute/v1:go_default_library",
-        "//vendor/k8s.io/utils/net:go_default_library",
     ],
 )
diff --git a/test/e2e/scheduling/predicates.go b/test/e2e/scheduling/predicates.go
index 8ee72fd2f4a..6cadb51f0c6 100644
--- a/test/e2e/scheduling/predicates.go
+++ b/test/e2e/scheduling/predicates.go
@@ -19,12 +19,15 @@ package scheduling
 import (
 	"context"
 	"fmt"
+	"net"
+	"strconv"
 	"time"
 
 	v1 "k8s.io/api/core/v1"
 	nodev1 "k8s.io/api/node/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/uuid"
 	utilversion "k8s.io/apimachinery/pkg/util/version"
@@ -37,7 +40,6 @@ import (
 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 	testutils "k8s.io/kubernetes/test/utils"
 	imageutils "k8s.io/kubernetes/test/utils/image"
-	k8utilnet "k8s.io/utils/net"
 
 	"github.com/onsi/ginkgo"
@@ -660,6 +662,11 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 	framework.ConformanceIt("validates that there is no conflict between pods with same hostPort but different hostIP and protocol", func() {
 		nodeName := GetNodeThatCanRunPod(f)
 
+		localhost := "127.0.0.1"
+		if framework.TestContext.ClusterIsIPv6() {
+			localhost = "::1"
+		}
+		hostIP := getNodeHostIP(f, nodeName)
 		// use nodeSelector to make sure the testing pods get assigned on the same node to explicitly verify there exists conflict or not
 		ginkgo.By("Trying to apply a random label on the found node.")
@@ -674,14 +681,75 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 		defer framework.RemoveLabelOffNode(cs, nodeName, k)
 
 		port := int32(54321)
-		ginkgo.By(fmt.Sprintf("Trying to create a pod(pod1) with hostport %v and hostIP 127.0.0.1 and expect scheduled", port))
-		createHostPortPodOnNode(f, "pod1", ns, "127.0.0.1", port, v1.ProtocolTCP, nodeSelector, true)
+		ginkgo.By(fmt.Sprintf("Trying to create a pod(pod1) with hostport %v and hostIP %s and expect scheduled", port, localhost))
+		createHostPortPodOnNode(f, "pod1", ns, localhost, port, v1.ProtocolTCP, nodeSelector, true)
 
-		ginkgo.By(fmt.Sprintf("Trying to create another pod(pod2) with hostport %v but hostIP 127.0.0.2 on the node which pod1 resides and expect scheduled", port))
-		createHostPortPodOnNode(f, "pod2", ns, "127.0.0.2", port, v1.ProtocolTCP, nodeSelector, true)
+		ginkgo.By(fmt.Sprintf("Trying to create another pod(pod2) with hostport %v but hostIP %s on the node which pod1 resides and expect scheduled", port, hostIP))
+		createHostPortPodOnNode(f, "pod2", ns, hostIP, port, v1.ProtocolTCP, nodeSelector, true)
 
-		ginkgo.By(fmt.Sprintf("Trying to create a third pod(pod3) with hostport %v, hostIP 127.0.0.2 but use UDP protocol on the node which pod2 resides", port))
-		createHostPortPodOnNode(f, "pod3", ns, "127.0.0.2", port, v1.ProtocolUDP, nodeSelector, true)
+		ginkgo.By(fmt.Sprintf("Trying to create a third pod(pod3) with hostport %v, hostIP %s but use UDP protocol on the node which pod2 resides", port, hostIP))
+		createHostPortPodOnNode(f, "pod3", ns, hostIP, port, v1.ProtocolUDP, nodeSelector, true)
+
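+		// all three pods bind hostPort 54321 on the same node without conflict:
+		// pod1 and pod2 differ by hostIP, pod2 and pod3 differ by protocol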
+		// check that the port is actually exposed to each container
+		// create a pod on the host network on the same node
+		hostExecPod := &v1.Pod{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      "e2e-host-exec",
+				Namespace: f.Namespace.Name,
+			},
+			Spec: v1.PodSpec{
+				HostNetwork:  true,
+				NodeSelector: nodeSelector,
+				Containers: []v1.Container{
+					{
+						Name:  "e2e-host-exec",
+						Image: imageutils.GetE2EImage(imageutils.Agnhost),
+						Args:  []string{"pause"}, // keep the container running so we can exec into it
+					},
+				},
+			},
+		}
+		f.PodClient().CreateSync(hostExecPod)
+
+		// use a 5 second timeout per connection
+		timeout := 5
+		// IPv6 doesn't NAT from localhost -> localhost and has no route_localnet kernel hack, so we need to specify the source IP
+		cmdPod1 := []string{"/bin/sh", "-c", fmt.Sprintf("curl -g --connect-timeout %v --interface %s http://%s/hostname", timeout, hostIP, net.JoinHostPort(localhost, strconv.Itoa(int(port))))}
+		cmdPod2 := []string{"/bin/sh", "-c", fmt.Sprintf("curl -g --connect-timeout %v http://%s/hostname", timeout, net.JoinHostPort(hostIP, strconv.Itoa(int(port))))}
+		cmdPod3 := []string{"/bin/sh", "-c", fmt.Sprintf("nc -vuz -w %v %s %d", timeout, hostIP, port)}
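+		// net.JoinHostPort brackets IPv6 literals, e.g. "[::1]:54321", which is
+		// the form curl expects (-g disables globbing so the brackets survive);
+		// nc -vuz does a zero-I/O UDP probe that only checks the port is reachable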
+		// try 5 times to connect to the exposed ports
+		success := false
+		for i := 0; i < 5; i++ {
+			// check pod1
+			ginkgo.By(fmt.Sprintf("checking connectivity from pod %s to serverIP: %s, port: %d", hostExecPod.Name, localhost, port))
+			hostname1, _, err := f.ExecCommandInContainerWithFullOutput(hostExecPod.Name, "e2e-host-exec", cmdPod1...)
+			if err != nil {
+				framework.Logf("Cannot connect from %s to pod(pod1) on serverIP: %s, port: %d", hostExecPod.Name, localhost, port)
+				continue
+			}
+			// check pod2
+			ginkgo.By(fmt.Sprintf("checking connectivity from pod %s to serverIP: %s, port: %d", hostExecPod.Name, hostIP, port))
+			hostname2, _, err := f.ExecCommandInContainerWithFullOutput(hostExecPod.Name, "e2e-host-exec", cmdPod2...)
+			if err != nil {
+				framework.Logf("Cannot connect from %s to pod(pod2) on serverIP: %s, port: %d", hostExecPod.Name, hostIP, port)
+				continue
+			}
+			// the hostnames returned have to be different because the same port is exposed by two different pods
+			if hostname1 == hostname2 {
+				framework.Logf("pods must have different hostname: pod1 has hostname %s, pod2 has hostname %s", hostname1, hostname2)
+				continue
+			}
+			// check pod3
+			ginkgo.By(fmt.Sprintf("checking connectivity from pod %s to serverIP: %s, port: %d UDP", hostExecPod.Name, hostIP, port))
+			_, _, err = f.ExecCommandInContainerWithFullOutput(hostExecPod.Name, "e2e-host-exec", cmdPod3...)
+			if err != nil {
+				framework.Logf("Cannot connect from %s to pod(pod3) on serverIP: %s, port: %d", hostExecPod.Name, hostIP, port)
+				continue
+			}
+			success = true
+			break
+		}
+		if !success {
+			framework.Failf("Failed to connect to exposed host ports")
+		}
 	})
 
 	/*
@@ -692,7 +760,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 	*/
 	framework.ConformanceIt("validates that there exists conflict between pods with same hostPort and protocol but one using 0.0.0.0 hostIP", func() {
 		nodeName := GetNodeThatCanRunPod(f)
-
+		hostIP := getNodeHostIP(f, nodeName)
 		// use nodeSelector to make sure the testing pods get assigned on the same node to explicitly verify there exists conflict or not
 		ginkgo.By("Trying to apply a random label on the found node.")
 		k := fmt.Sprintf("kubernetes.io/e2e-%s", string(uuid.NewUUID()))
@@ -709,8 +777,8 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 		ginkgo.By(fmt.Sprintf("Trying to create a pod(pod4) with hostport %v and hostIP 0.0.0.0(empty string here) and expect scheduled", port))
 		createHostPortPodOnNode(f, "pod4", ns, "", port, v1.ProtocolTCP, nodeSelector, true)
 
-		ginkgo.By(fmt.Sprintf("Trying to create another pod(pod5) with hostport %v but hostIP 127.0.0.1 on the node which pod4 resides and expect not scheduled", port))
-		createHostPortPodOnNode(f, "pod5", ns, "127.0.0.1", port, v1.ProtocolTCP, nodeSelector, false)
+		ginkgo.By(fmt.Sprintf("Trying to create another pod(pod5) with hostport %v but hostIP %s on the node which pod4 resides and expect not scheduled", port, hostIP))
+		createHostPortPodOnNode(f, "pod5", ns, hostIP, port, v1.ProtocolTCP, nodeSelector, false)
 	})
 
 	ginkgo.Context("PodTopologySpread Filtering", func() {
@@ -1011,37 +1079,51 @@ func CreateNodeSelectorPods(f *framework.Framework, id string, replicas int, nod
 }
 
 // create pod which using hostport on the specified node according to the nodeSelector
+// it starts an HTTP server on the exposed port
 func createHostPortPodOnNode(f *framework.Framework, podName, ns, hostIP string, port int32, protocol v1.Protocol, nodeSelector map[string]string, expectScheduled bool) {
-	hostIP = translateIPv4ToIPv6(hostIP)
-	createPausePod(f, pausePodConfig{
-		Name: podName,
-		Ports: []v1.ContainerPort{
-			{
-				HostPort:      port,
-				ContainerPort: 80,
-				Protocol:      protocol,
-				HostIP:        hostIP,
-			},
+	hostPortPod := &v1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: podName,
 		},
-		NodeSelector: nodeSelector,
-	})
+		Spec: v1.PodSpec{
+			Containers: []v1.Container{
+				{
+					Name:  "agnhost",
+					Image: imageutils.GetE2EImage(imageutils.Agnhost),
+					Args:  []string{"netexec", "--http-port=80", "--udp-port=80"},
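+					// netexec serves GET /hostname over HTTP (used by the readiness
+					// probe below and the connectivity checks above) and also
+					// listens on UDP port 80 inside the container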
+					Ports: []v1.ContainerPort{
+						{
+							HostPort:      port,
+							ContainerPort: 80,
+							Protocol:      protocol,
+							HostIP:        hostIP,
+						},
+					},
+					ReadinessProbe: &v1.Probe{
+						Handler: v1.Handler{
+							HTTPGet: &v1.HTTPGetAction{
+								Path: "/hostname",
+								Port: intstr.IntOrString{
+									IntVal: int32(80),
+								},
+								Scheme: v1.URISchemeHTTP,
+							},
+						},
+					},
+				},
+			},
+			NodeSelector: nodeSelector,
+		},
+	}
+	_, err := f.ClientSet.CoreV1().Pods(ns).Create(context.TODO(), hostPortPod, metav1.CreateOptions{})
+	framework.ExpectNoError(err)
 
-	err := e2epod.WaitForPodNotPending(f.ClientSet, ns, podName)
+	err = e2epod.WaitForPodNotPending(f.ClientSet, ns, podName)
 	if expectScheduled {
 		framework.ExpectNoError(err)
 	}
 }
 
-// translateIPv4ToIPv6 maps an IPv4 address into a valid IPv6 address
-// adding the well known prefix "0::ffff:" https://tools.ietf.org/html/rfc2765
-// if the ip is IPv4 and the cluster IPFamily is IPv6, otherwise returns the same ip
-func translateIPv4ToIPv6(ip string) string {
-	if framework.TestContext.IPFamily == "ipv6" && ip != "" && !k8utilnet.IsIPv6String(ip) {
-		ip = "0::ffff:" + ip
-	}
-	return ip
-}
-
 // GetPodsScheduled returns a number of currently scheduled and not scheduled Pods on worker nodes.
 func GetPodsScheduled(workerNodes sets.String, pods *v1.PodList) (scheduledPods, notScheduledPods []v1.Pod) {
 	for _, pod := range pods.Items {
@@ -1058,3 +1140,16 @@ func GetPodsScheduled(workerNodes sets.String, pods *v1.PodList) (scheduledPods,
 	}
 	return
 }
+
+// getNodeHostIP returns the first internal IP on the node matching the main Cluster IP family
+func getNodeHostIP(f *framework.Framework, nodeName string) string {
+	// Get the internal HostIP of the node
+	family := v1.IPv4Protocol
+	if framework.TestContext.ClusterIsIPv6() {
+		family = v1.IPv6Protocol
+	}
+	node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
+	framework.ExpectNoError(err)
+	ips := e2enode.GetAddressesByTypeAndFamily(node, v1.NodeInternalIP, family)
+	// fail fast instead of panicking if the node has no internal IP of the required family
+	framework.ExpectNotEqual(len(ips), 0)
+	return ips[0]
+}