diff --git a/test/e2e/network/BUILD b/test/e2e/network/BUILD
index 6cf2ac7e59f..54bbe373cc0 100644
--- a/test/e2e/network/BUILD
+++ b/test/e2e/network/BUILD
@@ -82,6 +82,7 @@ go_library(
         "//test/e2e/framework/skipper:go_default_library",
         "//test/e2e/framework/ssh:go_default_library",
         "//test/e2e/network/scale:go_default_library",
+        "//test/e2e/storage/utils:go_default_library",
         "//test/utils:go_default_library",
         "//test/utils/image:go_default_library",
         "//vendor/github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud:go_default_library",
diff --git a/test/e2e/network/network_policy.go b/test/e2e/network/network_policy.go
index bd9e0243b7d..29a9895ad53 100644
--- a/test/e2e/network/network_policy.go
+++ b/test/e2e/network/network_policy.go
@@ -19,6 +19,7 @@ package network
 import (
     "context"
     "encoding/json"
+
     v1 "k8s.io/api/core/v1"
     networkingv1 "k8s.io/api/networking/v1"
     apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -26,6 +27,7 @@ import (
     "k8s.io/apimachinery/pkg/types"
     "k8s.io/apimachinery/pkg/util/intstr"
     "k8s.io/kubernetes/test/e2e/framework"
+    e2enode "k8s.io/kubernetes/test/e2e/framework/node"
     e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
     e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
     imageutils "k8s.io/kubernetes/test/utils/image"
@@ -1739,9 +1741,48 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() {
             })
             cleanupServerPodAndService(f, podA, serviceA)
         })
+        ginkgo.It("should not allow access by TCP when a policy specifies only SCTP [Feature:NetworkPolicy] [Feature:SCTP]", func() {
+            ginkgo.By("getting the state of the sctp module on nodes")
+            nodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
+            framework.ExpectNoError(err)
+            sctpLoadedAtStart := CheckSCTPModuleLoadedOnNodes(f, nodes)
+            ginkgo.By("Creating a network policy for the server which allows traffic only via SCTP on port 80.")
+            protocolSCTP := v1.ProtocolSCTP
+            policy := &networkingv1.NetworkPolicy{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name: "allow-only-sctp-ingress-on-port-80",
+                },
+                Spec: networkingv1.NetworkPolicySpec{
+                    // Apply to server
+                    PodSelector: metav1.LabelSelector{
+                        MatchLabels: map[string]string{
+                            "pod-name": podServerLabelSelector,
+                        },
+                    },
+                    // Allow traffic only via SCTP on port 80.
+                    Ingress: []networkingv1.NetworkPolicyIngressRule{{
+                        Ports: []networkingv1.NetworkPolicyPort{{
+                            Port:     &intstr.IntOrString{IntVal: 80},
+                            Protocol: &protocolSCTP,
+                        }},
+                    }},
+                },
+            }
+
+            appliedPolicy, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).Create(context.TODO(), policy, metav1.CreateOptions{})
+            framework.ExpectNoError(err)
+            defer cleanupNetworkPolicy(f, appliedPolicy)
+
+            ginkgo.By("Testing pods cannot connect on port 80 anymore when not using SCTP as protocol.")
+            testCannotConnect(f, f.Namespace, "client-a", service, 80)
+
+            ginkgo.By("validating sctp module is still not loaded")
+            sctpLoadedAtEnd := CheckSCTPModuleLoadedOnNodes(f, nodes)
+            if !sctpLoadedAtStart && sctpLoadedAtEnd {
+                framework.Failf("The state of the sctp module has changed due to the test case")
+            }
+        })
     })
-
 })
 
 func testCanConnect(f *framework.Framework, ns *v1.Namespace, podName string, service *v1.Service, targetPort int) {
diff --git a/test/e2e/network/service.go b/test/e2e/network/service.go
index c7b09934ca1..61f3468d89b 100644
--- a/test/e2e/network/service.go
+++ b/test/e2e/network/service.go
@@ -56,6 +56,7 @@ import (
     e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
     e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
     e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
+    "k8s.io/kubernetes/test/e2e/storage/utils"
     testutils "k8s.io/kubernetes/test/utils"
     imageutils "k8s.io/kubernetes/test/utils/image"
     gcecloud "k8s.io/legacy-cloud-providers/gce"
@@ -3863,3 +3864,211 @@ func getApiserverRestartCount(c clientset.Interface) (int32, error) {
     }
     return -1, fmt.Errorf("Failed to find kube-apiserver container in pod")
 }
+
+var _ = SIGDescribe("SCTP [Feature:SCTP] [LinuxOnly]", func() {
+    f := framework.NewDefaultFramework("sctp")
+
+    var cs clientset.Interface
+
+    ginkgo.BeforeEach(func() {
+        cs = f.ClientSet
+    })
+
+    ginkgo.It("should allow creating a basic SCTP service with pod and endpoints", func() {
+        serviceName := "sctp-endpoint-test"
+        ns := f.Namespace.Name
+        jig := e2eservice.NewTestJig(cs, ns, serviceName)
+
+        ginkgo.By("getting the state of the sctp module on nodes")
+        nodes, err := e2enode.GetReadySchedulableNodes(cs)
+        framework.ExpectNoError(err)
+        sctpLoadedAtStart := CheckSCTPModuleLoadedOnNodes(f, nodes)
+
+        ginkgo.By("creating service " + serviceName + " in namespace " + ns)
+        _, err = jig.CreateSCTPServiceWithPort(nil, 5060)
+        framework.ExpectNoError(err)
+        defer func() {
+            err := cs.CoreV1().Services(ns).Delete(context.TODO(), serviceName, metav1.DeleteOptions{})
+            framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns)
+        }()
+
+        err = e2enetwork.WaitForService(f.ClientSet, ns, serviceName, true, 5*time.Second, e2eservice.TestTimeout)
+        framework.ExpectNoError(err, fmt.Sprintf("error while waiting for service:%s err: %v", serviceName, err))
+
+        ginkgo.By("validating endpoints do not exist yet")
+        err = validateEndpointsPorts(cs, ns, serviceName, portsByPodName{})
+        framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, ns)
+
+        ginkgo.By("creating a pod for the service")
+        names := map[string]bool{}
+
+        name1 := "pod1"
+
+        createPodOrFail(cs, ns, name1, jig.Labels, []v1.ContainerPort{{ContainerPort: 5060, Protocol: v1.ProtocolSCTP}})
+        names[name1] = true
+        defer func() {
+            for name := range names {
+                err := cs.CoreV1().Pods(ns).Delete(context.TODO(), name, metav1.DeleteOptions{})
+                framework.ExpectNoError(err, "failed to delete pod: %s in namespace: %s", name, ns)
+            }
+        }()
+
+        ginkgo.By("validating endpoints exist")
+        err = validateEndpointsPorts(cs, ns, serviceName, portsByPodName{name1: {5060}})
+        framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, ns)
+
+        ginkgo.By("deleting the pod")
+        e2epod.DeletePodOrFail(cs, ns, name1)
+        delete(names, name1)
+        ginkgo.By("validating endpoints do not exist anymore")
+        err = validateEndpointsPorts(cs, ns, serviceName, portsByPodName{})
+        framework.ExpectNoError(err, "failed to validate endpoints for service %s in namespace: %s", serviceName, ns)
+
+        ginkgo.By("validating sctp module is still not loaded")
+        sctpLoadedAtEnd := CheckSCTPModuleLoadedOnNodes(f, nodes)
+        if !sctpLoadedAtStart && sctpLoadedAtEnd {
+            framework.Failf("The state of the sctp module has changed due to the test case")
+        }
+    })
+
+    ginkgo.It("should create a Pod with SCTP HostPort", func() {
+        ginkgo.By("checking whether kubenet is used")
+        node, err := e2enode.GetRandomReadySchedulableNode(cs)
+        framework.ExpectNoError(err)
+        hostExec := utils.NewHostExec(f)
+        defer hostExec.Cleanup()
+        cmd := "ps -C kubelet -o cmd= | grep kubenet"
+        framework.Logf("Executing cmd %q on node %v", cmd, node.Name)
+        err = hostExec.IssueCommand(cmd, node)
+        if err != nil {
+            e2eskipper.Skipf("Interrogation of kubenet usage failed on node %s", node.Name)
+        }
+        framework.Logf("kubenet is in use")
+
+        ginkgo.By("getting the state of the sctp module on the selected node")
+        nodes := &v1.NodeList{}
+        nodes.Items = append(nodes.Items, *node)
+        sctpLoadedAtStart := CheckSCTPModuleLoadedOnNodes(f, nodes)
+
+        ginkgo.By("creating a pod with hostport on the selected node")
+        podName := "hostport"
+
+        podSpec := &v1.Pod{
+            ObjectMeta: metav1.ObjectMeta{
+                Name:      podName,
+                Namespace: f.Namespace.Name,
+                Labels:    map[string]string{"app": "hostport-pod"},
+            },
+            Spec: v1.PodSpec{
+                NodeName: node.Name,
+                Containers: []v1.Container{
+                    {
+                        Name:  "hostport",
+                        Image: imageutils.GetE2EImage(imageutils.Agnhost),
+                        Args:  []string{"pause"},
+                        Ports: []v1.ContainerPort{
+                            {
+                                Protocol:      v1.ProtocolSCTP,
+                                ContainerPort: 5060,
+                                HostPort:      5060,
+                            },
+                        },
+                        ImagePullPolicy: "IfNotPresent",
+                    },
+                },
+            },
+        }
+
+        ginkgo.By(fmt.Sprintf("Launching the pod on node %v", node.Name))
+        f.PodClient().CreateSync(podSpec)
+        defer func() {
+            err := cs.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, metav1.DeleteOptions{})
+            framework.ExpectNoError(err, "failed to delete pod: %s in namespace: %s", podName, f.Namespace.Name)
+        }()
+
+        ginkgo.By("dumping iptables rules on the node")
+        cmd = "sudo iptables-save"
+        framework.Logf("Executing cmd %q on node %v", cmd, node.Name)
+        result, err := hostExec.IssueCommandWithResult(cmd, node)
+        if err != nil {
+            framework.Failf("Interrogation of iptables rules failed on node %v", node.Name)
+        }
+
+        ginkgo.By("checking that iptables contains the necessary iptables rules")
+        found := false
+        for _, line := range strings.Split(result, "\n") {
+            if strings.Contains(line, "-p sctp") && strings.Contains(line, "--dport 5060") {
+                found = true
+                break
+            }
+        }
+        if !found {
+            framework.Failf("iptables rules are not set for a pod with sctp hostport")
+        }
+        ginkgo.By("validating sctp module is still not loaded")
+        sctpLoadedAtEnd := CheckSCTPModuleLoadedOnNodes(f, nodes)
+        if !sctpLoadedAtStart && sctpLoadedAtEnd {
+            framework.Failf("The state of the sctp module has changed due to the test case")
+        }
+    })
+    ginkgo.It("should create a ClusterIP Service with SCTP ports", func() {
+        ginkgo.By("checking that kube-proxy is in iptables mode")
+        if proxyMode, err := proxyMode(f); err != nil {
+            e2eskipper.Skipf("Couldn't detect KubeProxy mode - skip, %v", err)
+        } else if proxyMode != "iptables" {
+            e2eskipper.Skipf("The test doesn't work if kube-proxy is not in iptables mode")
+        }
+
+        serviceName := "sctp-clusterip"
+        ns := f.Namespace.Name
+        jig := e2eservice.NewTestJig(cs, ns, serviceName)
+
+        ginkgo.By("getting the state of the sctp module on nodes")
+        nodes, err := e2enode.GetReadySchedulableNodes(cs)
+        framework.ExpectNoError(err)
+        sctpLoadedAtStart := CheckSCTPModuleLoadedOnNodes(f, nodes)
+
+        ginkgo.By("creating service " + serviceName + " in namespace " + ns)
+        _, err = jig.CreateSCTPServiceWithPort(func(svc *v1.Service) {
+            svc.Spec.Type = v1.ServiceTypeClusterIP
+            svc.Spec.Ports = []v1.ServicePort{{Protocol: v1.ProtocolSCTP, Port: 5060}}
+        }, 5060)
+        framework.ExpectNoError(err)
+        defer func() {
+            err := cs.CoreV1().Services(ns).Delete(context.TODO(), serviceName, metav1.DeleteOptions{})
+            framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns)
+        }()
+
+        err = e2enetwork.WaitForService(f.ClientSet, ns, serviceName, true, 5*time.Second, e2eservice.TestTimeout)
+        framework.ExpectNoError(err, fmt.Sprintf("error while waiting for service:%s err: %v", serviceName, err))
+
+        ginkgo.By("dumping iptables rules on a node")
+        hostExec := utils.NewHostExec(f)
+        defer hostExec.Cleanup()
+        node, err := e2enode.GetRandomReadySchedulableNode(cs)
+        framework.ExpectNoError(err)
+        cmd := "sudo iptables-save"
+        framework.Logf("Executing cmd %q on node %v", cmd, node.Name)
+        result, err := hostExec.IssueCommandWithResult(cmd, node)
+        if err != nil {
+            framework.Failf("Interrogation of iptables rules failed on node %v", node.Name)
+        }
+
+        ginkgo.By("checking that iptables contains the necessary iptables rules")
+        kubeService := false
+        for _, line := range strings.Split(result, "\n") {
+            if strings.Contains(line, "-A KUBE-SERVICES") && strings.Contains(line, "-p sctp") {
+                kubeService = true
+                break
+            }
+        }
+        if !kubeService {
+            framework.Failf("iptables rules are not set for a clusterip service with sctp ports")
+        }
+        ginkgo.By("validating sctp module is still not loaded")
+        sctpLoadedAtEnd := CheckSCTPModuleLoadedOnNodes(f, nodes)
+        if !sctpLoadedAtStart && sctpLoadedAtEnd {
+            framework.Failf("The state of the sctp module has changed due to the test case")
+        }
+    })
+})
diff --git a/test/e2e/network/util.go b/test/e2e/network/util.go
index 11022359e1b..13acfd6d38b 100644
--- a/test/e2e/network/util.go
+++ b/test/e2e/network/util.go
@@ -19,13 +19,16 @@ package network
 import (
     "bytes"
     "fmt"
+    "regexp"
+    "strings"
     "time"
 
-    "k8s.io/api/core/v1"
+    v1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/wait"
     "k8s.io/kubernetes/test/e2e/framework"
     e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
+    "k8s.io/kubernetes/test/e2e/storage/utils"
     imageutils "k8s.io/kubernetes/test/utils/image"
 )
@@ -76,3 +79,30 @@ func newAgnhostPod(name string, args ...string) *v1.Pod {
         },
     }
 }
+
+// CheckSCTPModuleLoadedOnNodes checks whether any node in the list has the
+// sctp.ko kernel module loaded.
+// For security reasons, and also to allow clusters to use userspace SCTP implementations,
+// we require that just creating an SCTP Pod/Service/NetworkPolicy must not do anything
+// that would cause the sctp kernel module to be loaded.
+func CheckSCTPModuleLoadedOnNodes(f *framework.Framework, nodes *v1.NodeList) bool {
+    hostExec := utils.NewHostExec(f)
+    defer hostExec.Cleanup()
+    re := regexp.MustCompile(`^\s*sctp\s+`)
+    cmd := "lsmod | grep sctp"
+    for _, node := range nodes.Items {
+        framework.Logf("Executing cmd %q on node %v", cmd, node.Name)
+        result, err := hostExec.IssueCommandWithResult(cmd, &node)
+        if err != nil {
+            framework.Logf("sctp module is not loaded or error occurred while executing command %s on node: %v", cmd, err)
+        }
+        for _, line := range strings.Split(result, "\n") {
+            if found := re.Find([]byte(line)); found != nil {
+                framework.Logf("the sctp module is loaded on node: %v", node.Name)
+                return true
+            }
+        }
+        framework.Logf("the sctp module is not loaded on node: %v", node.Name)
+    }
+    return false
+}
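
Note for reviewers: every test added by this patch brackets its work with the helper above in the same way. Condensed into a standalone sketch (not part of the patch; createSCTPObjectUnderTest is a hypothetical placeholder for the SCTP Service, HostPort Pod, or NetworkPolicy each concrete test creates), the pattern is roughly:

package network

import (
    "github.com/onsi/ginkgo"

    "k8s.io/kubernetes/test/e2e/framework"
    e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

// createSCTPObjectUnderTest is a hypothetical placeholder for whatever SCTP
// object a concrete test creates (Service, HostPort Pod, or NetworkPolicy).
func createSCTPObjectUnderTest(f *framework.Framework) {}

var _ = SIGDescribe("SCTP module guard sketch [Feature:SCTP]", func() {
    f := framework.NewDefaultFramework("sctp-guard")

    ginkgo.It("must not cause the sctp kernel module to be loaded", func() {
        // Record whether sctp.ko is loaded before the test touches anything.
        nodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
        framework.ExpectNoError(err)
        loadedAtStart := CheckSCTPModuleLoadedOnNodes(f, nodes)

        createSCTPObjectUnderTest(f)

        // Fail only on a "not loaded" -> "loaded" transition, so clusters that
        // already ship with sctp.ko loaded still pass.
        loadedAtEnd := CheckSCTPModuleLoadedOnNodes(f, nodes)
        if !loadedAtStart && loadedAtEnd {
            framework.Failf("creating the SCTP object caused the sctp kernel module to be loaded")
        }
    })
})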