Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-27 13:37:30 +00:00)

Commit 97cf67b329

Merge pull request #97897 from knabben/sctp-netpol-test

Copying SCTP tests to Netpol e2e framework
@@ -24,6 +24,7 @@ go_library(
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//test/e2e/framework:go_default_library",
         "//test/e2e/framework/pod:go_default_library",
+        "//test/e2e/framework/skipper:go_default_library",
         "//test/utils/image:go_default_library",
         "//vendor/github.com/onsi/ginkgo:go_default_library",
         "//vendor/github.com/pkg/errors:go_default_library",
@@ -270,6 +270,7 @@ func (c *Container) Spec() v1.Container {
 var (
 	// agnHostImage is the image URI of AgnHost
 	agnHostImage = imageutils.GetE2EImage(imageutils.Agnhost)
+	env          = []v1.EnvVar{}
 	cmd          []string
 )
 
@@ -279,15 +280,21 @@ func (c *Container) Spec() v1.Container {
 	case v1.ProtocolUDP:
 		cmd = []string{"/agnhost", "serve-hostname", "--udp", "--http=false", "--port", fmt.Sprintf("%d", c.Port)}
 	case v1.ProtocolSCTP:
-		cmd = []string{"/agnhost", "netexec", "--sctp-port", fmt.Sprintf("%d", c.Port)}
+		env = append(env, v1.EnvVar{
+			Name:  fmt.Sprintf("SERVE_SCTP_PORT_%d", c.Port),
+			Value: "foo",
+		})
+		cmd = []string{"/agnhost", "porter"}
 	default:
 		framework.Failf("invalid protocol %v", c.Protocol)
 	}
+
 	return v1.Container{
 		Name:            c.Name(),
 		ImagePullPolicy: v1.PullIfNotPresent,
 		Image:           agnHostImage,
 		Command:         cmd,
+		Env:             env,
 		SecurityContext: &v1.SecurityContext{},
 		Ports: []v1.ContainerPort{
 			{
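For orientation, here is a minimal sketch of the container spec the modified Spec() now builds for an SCTP port. It assumes agnhost's porter subcommand, which serves the value of SERVE_SCTP_PORT_<port>-style environment variables on the corresponding SCTP port; the container name and image URI below are placeholders, since c.Name() and the imageutils lookup are not part of this hunk.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// sctpContainerSketch mirrors the SCTP branch above: instead of running
// "agnhost netexec --sctp-port <port>", the container now runs "agnhost porter"
// and advertises the SCTP port via a SERVE_SCTP_PORT_<port> environment
// variable whose value ("foo") is the payload porter serves.
func sctpContainerSketch(port int32) v1.Container {
	return v1.Container{
		Name:            fmt.Sprintf("cont-%d-sctp", port), // hypothetical; the real tests use c.Name()
		ImagePullPolicy: v1.PullIfNotPresent,
		Image:           "registry.k8s.io/e2e-test-images/agnhost:2.21", // placeholder; the tests resolve it via imageutils.GetE2EImage
		Command:         []string{"/agnhost", "porter"},
		Env: []v1.EnvVar{
			{Name: fmt.Sprintf("SERVE_SCTP_PORT_%d", port), Value: "foo"},
		},
		SecurityContext: &v1.SecurityContext{},
		Ports: []v1.ContainerPort{
			{ContainerPort: port, Protocol: v1.ProtocolSCTP},
		},
	}
}

func main() {
	fmt.Printf("%+v\n", sctpContainerSketch(80).Command)
}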
@@ -28,6 +28,7 @@ import (
 	networkingv1 "k8s.io/api/networking/v1"
 
 	"github.com/onsi/ginkgo"
+	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 	utilnet "k8s.io/utils/net"
 
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -35,7 +36,6 @@ import (
 )
 
 const (
-	addSCTPContainers = false
 	isVerbose = true
 
 	// useFixedNamespaces is useful when working on these tests: instead of creating new pods and
@@ -54,6 +54,15 @@ const (
 	ignoreLoopback = true
 )
 
+var (
+	protocolTCP  = v1.ProtocolTCP
+	protocolUDP  = v1.ProtocolUDP
+	protocolSCTP = v1.ProtocolSCTP
+
+	// addSCTPContainers is a flag to enable SCTP containers on bootstrap.
+	addSCTPContainers = false
+)
+
 /*
 You might be wondering, why are there multiple namespaces used for each test case?
 
@@ -117,26 +126,7 @@ var _ = SIGDescribeCopy("Netpol [LinuxOnly]", func() {
 
 	ginkgo.Context("NetworkPolicy between server and client", func() {
 		ginkgo.BeforeEach(func() {
-			if useFixedNamespaces {
-				_ = initializeResources(f)
-
-				_, _, _, model, k8s := getK8SModel(f)
-				framework.ExpectNoError(k8s.cleanNetworkPolicies(model.NamespaceNames), "unable to clean network policies")
-				err := wait.Poll(waitInterval, waitTimeout, func() (done bool, err error) {
-					for _, ns := range model.NamespaceNames {
-						netpols, err := k8s.clientSet.NetworkingV1().NetworkPolicies(ns).List(context.TODO(), metav1.ListOptions{})
-						framework.ExpectNoError(err, "get network policies from ns %s", ns)
-						if len(netpols.Items) > 0 {
-							return false, nil
-						}
-					}
-					return true, nil
-				})
-				framework.ExpectNoError(err, "unable to wait for network policy deletion")
-			} else {
-				framework.Logf("Using %v as the default dns domain for this cluster... ", framework.TestContext.ClusterDNSDomain)
-				framework.ExpectNoError(initializeResources(f), "unable to initialize resources")
-			}
+			initializeResourcesByFixedNS(f)
 		})
 
 		ginkgo.AfterEach(func() {
@@ -349,7 +339,7 @@ var _ = SIGDescribeCopy("Netpol [LinuxOnly]", func() {
 					"ns": nsY,
 				},
 			}
-			allowPort81Policy := GetAllowIngressByNamespaceAndPort("allow-client-a-via-ns-selector", map[string]string{"pod": "a"}, allowedLabels, &intstr.IntOrString{IntVal: 81})
+			allowPort81Policy := GetAllowIngressByNamespaceAndPort("allow-client-a-via-ns-selector", map[string]string{"pod": "a"}, allowedLabels, &intstr.IntOrString{IntVal: 81}, &protocolTCP)
 			CreatePolicy(k8s, allowPort81Policy, nsX)
 
 			reachability := NewReachability(model.AllPods(), true)
@@ -368,7 +358,7 @@ var _ = SIGDescribeCopy("Netpol [LinuxOnly]", func() {
 					"ns": nsY,
 				},
 			}
-			allowPort81Policy := GetAllowIngressByNamespaceAndPort("allow-client-a-via-ns-selector", map[string]string{"pod": "a"}, allowedLabels, &intstr.IntOrString{IntVal: 81})
+			allowPort81Policy := GetAllowIngressByNamespaceAndPort("allow-client-a-via-ns-selector", map[string]string{"pod": "a"}, allowedLabels, &intstr.IntOrString{IntVal: 81}, &protocolTCP)
 			CreatePolicy(k8s, allowPort81Policy, nsX)
 
 			reachabilityALLOW := NewReachability(model.AllPods(), true)
@@ -385,7 +375,7 @@ var _ = SIGDescribeCopy("Netpol [LinuxOnly]", func() {
 			ginkgo.By("Verifying traffic on port 80.")
 			ValidateOrFail(k8s, model, &TestCase{FromPort: 81, ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachabilityDENY})
 
-			allowPort80Policy := GetAllowIngressByNamespaceAndPort("allow-client-a-via-ns-selector-80", map[string]string{"pod": "a"}, allowedLabels, &intstr.IntOrString{IntVal: 80})
+			allowPort80Policy := GetAllowIngressByNamespaceAndPort("allow-client-a-via-ns-selector-80", map[string]string{"pod": "a"}, allowedLabels, &intstr.IntOrString{IntVal: 80}, &protocolTCP)
 			CreatePolicy(k8s, allowPort80Policy, nsX)
 
 			ginkgo.By("Verifying that we can add a policy to unblock port 80")
@@ -427,7 +417,7 @@ var _ = SIGDescribeCopy("Netpol [LinuxOnly]", func() {
 					"ns": nsY,
 				},
 			}
-			policy := GetAllowIngressByNamespaceAndPort("allow-client-a-via-ns-selector-80", map[string]string{"pod": "a"}, allowedLabels, &intstr.IntOrString{Type: intstr.String, StrVal: "serve-80-tcp"})
+			policy := GetAllowIngressByNamespaceAndPort("allow-client-a-via-ns-selector-80", map[string]string{"pod": "a"}, allowedLabels, &intstr.IntOrString{Type: intstr.String, StrVal: "serve-80-tcp"}, &protocolTCP)
 			CreatePolicy(k8s, policy, nsX)
 
 			reachability := NewReachability(model.AllPods(), true)
@@ -570,7 +560,7 @@ var _ = SIGDescribeCopy("Netpol [LinuxOnly]", func() {
 			allowedPodLabels := &metav1.LabelSelector{MatchLabels: map[string]string{"pod": "b"}}
 			policy := GetAllowIngressByPod("allow-client-a-via-pod-selector", map[string]string{"pod": "a"}, allowedPodLabels)
 			// add an egress rule on to it...
-			protocolUDP := v1.ProtocolUDP
+
 			policy.Spec.Egress = []networkingv1.NetworkPolicyEgressRule{
 				{
 					Ports: []networkingv1.NetworkPolicyPort{
@@ -926,6 +916,81 @@ var _ = SIGDescribeCopy("Netpol [LinuxOnly]", func() {
 		})
 	})
 
+var _ = SIGDescribeCopy("Netpol [Feature:SCTPConnectivity][LinuxOnly][Disruptive]", func() {
+	f := framework.NewDefaultFramework("sctp-network-policy")
+
+	ginkgo.BeforeEach(func() {
+		// Windows does not support network policies.
+		e2eskipper.SkipIfNodeOSDistroIs("windows")
+	})
+
+	ginkgo.Context("NetworkPolicy between server and client using SCTP", func() {
+		ginkgo.BeforeEach(func() {
+			addSCTPContainers = true
+			initializeResourcesByFixedNS(f)
+		})
+
+		ginkgo.AfterEach(func() {
+			if !useFixedNamespaces {
+				_, _, _, model, k8s := getK8SModel(f)
+				framework.ExpectNoError(k8s.deleteNamespaces(model.NamespaceNames), "unable to clean up SCTP netpol namespaces")
+			}
+		})
+
+		ginkgo.It("should support a 'default-deny-ingress' policy [Feature:NetworkPolicy]", func() {
+			nsX, _, _, model, k8s := getK8SModel(f)
+			policy := GetDenyIngress("deny-all")
+			CreatePolicy(k8s, policy, nsX)
+
+			reachability := NewReachability(model.AllPods(), true)
+			reachability.ExpectPeer(&Peer{}, &Peer{Namespace: nsX}, false)
+
+			ValidateOrFail(k8s, model, &TestCase{FromPort: 81, ToPort: 80, Protocol: v1.ProtocolSCTP, Reachability: reachability})
+		})
+
+		ginkgo.It("should enforce policy based on Ports [Feature:NetworkPolicy]", func() {
+			ginkgo.By("Creating a network allowPort81Policy which only allows allow listed namespaces (y) to connect on exactly one port (81)")
+			nsX, nsY, nsZ, model, k8s := getK8SModel(f)
+			allowedLabels := &metav1.LabelSelector{
+				MatchLabels: map[string]string{
+					"ns": nsY,
+				},
+			}
+
+			allowPort81Policy := GetAllowIngressByNamespaceAndPort("allow-ingress-on-port-81-ns-x", map[string]string{"pod": "a"}, allowedLabels, &intstr.IntOrString{IntVal: 81}, &protocolSCTP)
+			CreatePolicy(k8s, allowPort81Policy, nsX)
+
+			reachability := NewReachability(model.AllPods(), true)
+			reachability.ExpectPeer(&Peer{Namespace: nsX}, &Peer{Namespace: nsX, Pod: "a"}, false)
+			reachability.ExpectPeer(&Peer{Namespace: nsZ}, &Peer{Namespace: nsX, Pod: "a"}, false)
+
+			ValidateOrFail(k8s, model, &TestCase{FromPort: 81, ToPort: 81, Protocol: v1.ProtocolSCTP, Reachability: reachability})
+		})
+
+		ginkgo.It("should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]", func() {
+			nsX, nsY, _, model, k8s := getK8SModel(f)
+			allowedNamespaces := &metav1.LabelSelector{
+				MatchLabels: map[string]string{
+					"ns": nsY,
+				},
+			}
+			allowedPods := &metav1.LabelSelector{
+				MatchLabels: map[string]string{
+					"pod": "a",
+				},
+			}
+			policy := GetAllowIngressByNamespaceAndPod("allow-ns-y-pod-a-via-namespace-pod-selector", map[string]string{"pod": "a"}, allowedNamespaces, allowedPods)
+			CreatePolicy(k8s, policy, nsX)
+
+			reachability := NewReachability(model.AllPods(), true)
+			reachability.ExpectAllIngress(NewPodString(nsX, "a"), false)
+			reachability.Expect(NewPodString(nsY, "a"), NewPodString(nsX, "a"), true)
+
+			ValidateOrFail(k8s, model, &TestCase{FromPort: 81, ToPort: 80, Protocol: v1.ProtocolSCTP, Reachability: reachability})
+		})
+	})
+})
+
 // getNamespaces returns the canonical set of namespaces used by this test, taking a root ns as input. This allows this test to run in parallel.
 func getNamespaces(rootNs string) (string, string, string, []string) {
 	if useFixedNamespaces {
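For context on the first SCTP test above: a "default-deny-ingress" policy conventionally has the shape sketched below (this is the standard pattern, not necessarily the exact output of the GetDenyIngress helper, which is not part of this diff). An empty pod selector matches every pod in the namespace, and an empty ingress rule list blocks all ingress, which is what the reachability matrix then asserts over SCTP.

package main

import (
	"fmt"

	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// denyAllIngressSketch builds the conventional "default deny ingress" policy:
// an empty PodSelector selects every pod in the namespace, and an empty (but
// non-nil) Ingress list with PolicyTypeIngress means no ingress is allowed.
func denyAllIngressSketch(name string) *networkingv1.NetworkPolicy {
	return &networkingv1.NetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec: networkingv1.NetworkPolicySpec{
			PodSelector: metav1.LabelSelector{},
			PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress},
			Ingress:     []networkingv1.NetworkPolicyIngressRule{},
		},
	}
}

func main() {
	fmt.Println(denyAllIngressSketch("deny-all").Name)
}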
@@ -961,6 +1026,30 @@ func getK8SModel(f *framework.Framework) (string, string, string, *Model, *kubeM
 	return nsX, nsY, nsZ, model, k8s
 }
 
+// initializeResourcesByFixedNS uses the e2e framework to create all necessary namespace resources, cleaning up
+// network policies from the namespace if useFixedNamespace is set true, avoiding policies overlap of new tests.
+func initializeResourcesByFixedNS(f *framework.Framework) {
+	if useFixedNamespaces {
+		_ = initializeResources(f)
+		_, _, _, model, k8s := getK8SModel(f)
+		framework.ExpectNoError(k8s.cleanNetworkPolicies(model.NamespaceNames), "unable to clean network policies")
+		err := wait.Poll(waitInterval, waitTimeout, func() (done bool, err error) {
+			for _, ns := range model.NamespaceNames {
+				netpols, err := k8s.clientSet.NetworkingV1().NetworkPolicies(ns).List(context.TODO(), metav1.ListOptions{})
+				framework.ExpectNoError(err, "get network policies from ns %s", ns)
+				if len(netpols.Items) > 0 {
+					return false, nil
+				}
+			}
+			return true, nil
+		})
+		framework.ExpectNoError(err, "unable to wait for network policy deletion")
+	} else {
+		framework.Logf("Using %v as the default dns domain for this cluster... ", framework.TestContext.ClusterDNSDomain)
+		framework.ExpectNoError(initializeResources(f), "unable to initialize resources")
+	}
+}
+
 // initializeResources uses the e2e framework to create all necessary namespace resources, based on the network policy
 // model derived from the framework. It then waits for the resources described by the model to be up and running
 // (i.e. all pods are ready and running in their namespaces).
@@ -236,8 +236,8 @@ func GetAllowIngressByNamespace(name string, targetLabels map[string]string, pee
 	return policy
 }
 
-// GetAllowIngressByNamespaceAndPort allows ingress for namespace AND port
-func GetAllowIngressByNamespaceAndPort(name string, targetLabels map[string]string, peerNamespaceSelector *metav1.LabelSelector, port *intstr.IntOrString) *networkingv1.NetworkPolicy {
+// GetAllowIngressByNamespaceAndPort allows ingress for namespace AND port AND protocol
+func GetAllowIngressByNamespaceAndPort(name string, targetLabels map[string]string, peerNamespaceSelector *metav1.LabelSelector, port *intstr.IntOrString, protocol *v1.Protocol) *networkingv1.NetworkPolicy {
 	policy := &networkingv1.NetworkPolicy{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: name,
@@ -251,7 +251,10 @@ func GetAllowIngressByNamespaceAndPort(name string, targetLabels map[string]string, peerNamespaceSelector *metav1.LabelSelector, port *intstr.IntOrString) *networkingv1.NetworkPolicy {
 					NamespaceSelector: peerNamespaceSelector,
 				}},
 				Ports: []networkingv1.NetworkPolicyPort{
-					{Port: port},
+					{
+						Port:     port,
+						Protocol: protocol,
+					},
 				},
 			}},
 		},
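With the extra parameter, GetAllowIngressByNamespaceAndPort pins both the port and the protocol on the generated ingress rule, which is what lets the new SCTP tests pass &protocolSCTP while the existing TCP tests pass &protocolTCP. Below is a self-contained sketch of the resulting NetworkPolicyPort; note that leaving Protocol nil falls back to the NetworkPolicy API default of TCP.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	networkingv1 "k8s.io/api/networking/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// The helper now receives *v1.Protocol and copies it straight into the rule.
	protocolSCTP := v1.ProtocolSCTP
	port := intstr.FromInt(81)

	rulePort := networkingv1.NetworkPolicyPort{
		Port:     &port,
		Protocol: &protocolSCTP, // nil here would mean "TCP" per the NetworkPolicy API default
	}
	fmt.Printf("allow ingress on port %s/%s\n", rulePort.Port.String(), *rulePort.Protocol)
}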