Using ServiceIPs instead of DNS names in the NetworkPolicy Probes + adding Interface decoupling (#102354)

* Squashed commit of the following:

commit 7f774dcb54b511a3956aed0fac5c803f145e383a
Author: Jay Vyas (jayunit100) <jvyas@vmware.com>
Date:   Fri Jun 18 10:58:16 2021 +0000

    fix commit message

commit 0ac09650742f02004dbb227310057ea3760c4da9
Author: jay vyas <jvyas@vmware.com>
Date:   Thu Jun 17 07:50:33 2021 -0400

    Update test/e2e/network/netpol/kubemanager.go

    Co-authored-by: Antonio Ojea <antonio.ojea.garcia@gmail.com>

commit 6a8bf0a6a2690dac56fec2bdcdce929311c513ca
Author: jay vyas <jvyas@vmware.com>
Date:   Sun Jun 13 08:17:25 2021 -0400

    Implement Service polling for the network policy suite to remove reliance on CoreDNS when verifying network policies

    Update test/e2e/network/netpol/probe.go

    Co-authored-by: Antonio Ojea <antonio.ojea.garcia@gmail.com>

    Add defaultNS to use service probe

commit b9c17a48327aab35a855540c2294a51137aa4a48
Author: Matthew Fenwick <mfenwick100@gmail.com>
Date:   Thu May 27 07:30:59 2021 -0400

    address code review comments for networkpolicy decoupling from dns

commit e23ef6ff0d189cf2ed80dbafed9881d68402cb56
Author: jay vyas <jvyas@vmware.com>
Date:   Wed May 26 13:30:21 2021 -0400

    NetworkPolicy decoupling from DNS

gofmt

remove old function

* model refactor

* minor

* dropped getK8sModel func

* dropped modelMap, added global model in BeforeEach and subsequent changes

Co-authored-by: Rajas Kakodkar <rajaskakodkar16@gmail.com>
jay vyas authored on 2021-07-20 02:09:32 -04:00; committed by GitHub
parent e847b849c4
commit 59c0523bca
5 changed files with 183 additions and 147 deletions
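The essence of the change: rather than probing a DNS name of the form <service>.<namespace>.svc.<dnsDomain>, the suite now records each pod's Service ClusterIP during setup and probes that address directly, so the tests no longer depend on a working CoreDNS. A minimal sketch of the two addressing modes (hypothetical helper names, not the suite's exact code):

package main

import (
	"fmt"
	"net"
)

// qualifiedServiceAddress mirrors the old DNS-based probe target:
// <service>.<namespace>.svc.<dnsDomain>, which resolves only when cluster DNS works.
func qualifiedServiceAddress(service, namespace, dnsDomain string) string {
	return fmt.Sprintf("%s.%s.svc.%s", service, namespace, dnsDomain)
}

// serviceIPAddress is the new probe target: the ClusterIP captured at setup.
// No DNS lookup is needed, but the IP must be validated up front, since an
// empty address would make every probe fail in a confusing way.
func serviceIPAddress(clusterIP string) (string, error) {
	if net.ParseIP(clusterIP) == nil {
		return "", fmt.Errorf("invalid service IP %q", clusterIP)
	}
	return clusterIP, nil
}

func main() {
	fmt.Println(qualifiedServiceAddress("s-x-a", "x", "cluster.local")) // s-x-a.x.svc.cluster.local
	addr, err := serviceIPAddress("10.96.0.12")
	fmt.Println(addr, err) // 10.96.0.12 <nil>
}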

View File: test/e2e/network/netpol/kubemanager.go

@@ -22,7 +22,6 @@ import (
"net"
"strconv"
"strings"
"time"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
@@ -70,10 +69,14 @@ func (k *kubeManager) initializeCluster(model *Model) error {
}
createdPods = append(createdPods, kubePod)
_, err = k.createService(pod.Service())
svc, err := k.createService(pod.Service())
if err != nil {
return err
}
if net.ParseIP(svc.Spec.ClusterIP) == nil {
return fmt.Errorf("empty IP address found for service %s/%s", svc.Namespace, svc.Name)
}
pod.ServiceIP = svc.Spec.ClusterIP
}
}
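Worth noting: net.ParseIP returns nil for anything that is not a literal IP address, including the empty string (no ClusterIP allocated yet) and the "None" ClusterIP of a headless Service, so this single guard covers both failure modes. A quick self-contained illustration:

package main

import (
	"fmt"
	"net"
)

func main() {
	for _, ip := range []string{"", "None", "10.96.0.12", "fd00:10:96::a"} {
		fmt.Printf("%q -> valid=%v\n", ip, net.ParseIP(ip) != nil)
	}
	// ""              -> valid=false (Service has no ClusterIP yet)
	// "None"          -> valid=false (headless Service)
	// "10.96.0.12"    -> valid=true  (IPv4)
	// "fd00:10:96::a" -> valid=true  (IPv6)
}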
@@ -110,9 +113,14 @@ func (k *kubeManager) getPod(ns string, name string) (*v1.Pod, error) {
return kubePod, nil
}
// probeConnectivity execs into a pod and checks its connectivity to another pod..
// probeConnectivity execs into a pod and checks its connectivity to another pod.
// Implements the Prober interface.
func (k *kubeManager) probeConnectivity(nsFrom string, podFrom string, containerFrom string, addrTo string, protocol v1.Protocol, toPort int, timeoutSeconds int) (bool, string, error) {
port := strconv.Itoa(toPort)
if addrTo == "" {
return false, "no IP provided", fmt.Errorf("empty addrTo field")
}
framework.Logf("Starting probe from pod %v to %v", podFrom, addrTo)
var cmd []string
timeout := fmt.Sprintf("--timeout=%vs", timeoutSeconds)
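For context, the remainder of probeConnectivity (unchanged by this hunk, reconstructed here from the surrounding suite and therefore only indicative) builds an agnhost connect command for the requested protocol and execs it inside the source pod:

switch protocol {
case v1.ProtocolSCTP:
	cmd = []string{"/agnhost", "connect", net.JoinHostPort(addrTo, port), timeout, "--protocol=sctp"}
case v1.ProtocolTCP:
	cmd = []string{"/agnhost", "connect", net.JoinHostPort(addrTo, port), timeout, "--protocol=tcp"}
case v1.ProtocolUDP:
	cmd = []string{"/agnhost", "connect", net.JoinHostPort(addrTo, port), timeout, "--protocol=udp"}
default:
	framework.Failf("protocol %s not supported", protocol)
}

// the debug string doubles as a human-readable record of what was probed
commandDebugString := fmt.Sprintf("kubectl exec %s -c %s -n %s -- %s", podFrom, containerFrom, nsFrom, strings.Join(cmd, " "))
stdout, stderr, err := k.executeRemoteCommand(nsFrom, podFrom, containerFrom, cmd)
if err != nil {
	framework.Logf("%s/%s -> %s: error when running command: err - %v /// stdout - %s /// stderr - %s", nsFrom, podFrom, addrTo, err, stdout, stderr)
	return false, commandDebugString, nil
}
return true, commandDebugString, nil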
@@ -260,45 +268,3 @@ func (k *kubeManager) deleteNamespaces(namespaces []string) error {
}
return nil
}
// waitForHTTPServers waits for all webservers to be up, on all protocols sent in the input, and then validates them using the same probe logic as the rest of the suite.
func (k *kubeManager) waitForHTTPServers(model *Model) error {
const maxTries = 10
framework.Logf("waiting for HTTP servers (ports 80 and 81) to become ready")
testCases := map[string]*TestCase{}
for _, port := range model.Ports {
// Protocols is provided as input so that we can skip udp polling for windows
for _, protocol := range model.Protocols {
fromPort := 81
desc := fmt.Sprintf("%d->%d,%s", fromPort, port, protocol)
testCases[desc] = &TestCase{ToPort: int(port), Protocol: protocol}
}
}
notReady := map[string]bool{}
for caseName := range testCases {
notReady[caseName] = true
}
for i := 0; i < maxTries; i++ {
for caseName, testCase := range testCases {
if notReady[caseName] {
reachability := NewReachability(model.AllPods(), true)
testCase.Reachability = reachability
ProbePodToPodConnectivity(k, model, testCase)
_, wrong, _, _ := reachability.Summary(ignoreLoopback)
if wrong == 0 {
framework.Logf("server %s is ready", caseName)
delete(notReady, caseName)
} else {
framework.Logf("server %s is not ready", caseName)
}
}
}
if len(notReady) == 0 {
return nil
}
time.Sleep(waitInterval)
}
return fmt.Errorf("after %d tries, %d HTTP servers are not ready", maxTries, len(notReady))
}

View File: test/e2e/network/netpol/model.go

@@ -45,24 +45,6 @@ func NewWindowsModel(namespaces []string, podNames []string, ports []int32, dnsD
return NewModel(namespaces, podNames, ports, []v1.Protocol{v1.ProtocolTCP}, dnsDomain)
}
// GetProbeTimeoutSeconds returns a timeout for how long the probe should work before failing a check, and takes windows heuristics into account, where requests can take longer sometimes.
func (m *Model) GetProbeTimeoutSeconds() int {
timeoutSeconds := 1
if framework.NodeOSDistroIs("windows") {
timeoutSeconds = 3
}
return timeoutSeconds
}
// GetWorkers returns the number of workers suggested to run when testing, taking windows heuristics into account, where parallel probing is flakier.
func (m *Model) GetWorkers() int {
numberOfWorkers := 3
if framework.NodeOSDistroIs("windows") {
numberOfWorkers = 1 // See https://github.com/kubernetes/kubernetes/pull/97690
}
return numberOfWorkers
}
// NewModel instantiates a model based on:
// - namespaces
// - pods
@@ -106,6 +88,24 @@ func NewModel(namespaces []string, podNames []string, ports []int32, protocols [
return model
}
// GetProbeTimeoutSeconds returns a timeout for how long the probe should work before failing a check, and takes windows heuristics into account, where requests can take longer sometimes.
func (m *Model) GetProbeTimeoutSeconds() int {
timeoutSeconds := 1
if framework.NodeOSDistroIs("windows") {
timeoutSeconds = 3
}
return timeoutSeconds
}
// GetWorkers returns the number of workers suggested to run when testing, taking windows heuristics into account, where parallel probing is flakier.
func (m *Model) GetWorkers() int {
numberOfWorkers := 3
if framework.NodeOSDistroIs("windows") {
numberOfWorkers = 1 // See https://github.com/kubernetes/kubernetes/pull/97690
}
return numberOfWorkers
}
// NewReachability instantiates a default-true reachability from the model's pods
func (m *Model) NewReachability() *Reachability {
return NewReachability(m.AllPods(), true)
@@ -180,6 +180,7 @@ type Pod struct {
Namespace string
Name string
Containers []*Container
ServiceIP string
}
// PodString returns a corresponding pod string

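With the new field in place, a Pod in the model carries everything a probe needs once initializeCluster has run. Condensed to the relevant parts (a trimmed view, not the full definition):

type Pod struct {
	Namespace  string
	Name       string
	Containers []*Container
	// ServiceIP caches the ClusterIP of the pod's matching Service. It is
	// populated by kubeManager.initializeCluster and consumed by probeWorker,
	// which previously had to build a DNS name via QualifiedServiceAddress.
	ServiceIP string
}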
View File: test/e2e/network/netpol/network_policy.go

@@ -119,21 +119,22 @@ and what is happening in practice:
var _ = common.SIGDescribe("Netpol", func() {
f := framework.NewDefaultFramework("netpol")
var model *Model
ginkgo.Context("NetworkPolicy between server and client", func() {
ginkgo.BeforeEach(func() {
initializeResourcesByFixedNS(f)
model = initializeResourcesByFixedNS(f)
})
ginkgo.AfterEach(func() {
if !useFixedNamespaces {
_, _, _, model, k8s := getK8SModel(f)
k8s := newKubeManager(f)
framework.ExpectNoError(k8s.deleteNamespaces(model.NamespaceNames), "unable to clean up netpol namespaces")
}
})
ginkgo.It("should support a 'default-deny-ingress' policy [Feature:NetworkPolicy]", func() {
nsX, _, _, model, k8s := getK8SModel(f)
nsX, _, _, k8s := getK8sNamespaces(f)
policy := GenNetworkPolicyWithNameAndPodSelector("deny-ingress", metav1.LabelSelector{}, SetSpecIngressRules())
CreatePolicy(k8s, policy, nsX)
@@ -148,7 +149,7 @@ var _ = common.SIGDescribe("Netpol", func() {
egressRule.Ports = append(egressRule.Ports, networkingv1.NetworkPolicyPort{Protocol: &protocolUDP, Port: &intstr.IntOrString{Type: intstr.Int, IntVal: 53}})
policy := GenNetworkPolicyWithNameAndPodSelector("deny-all-tcp-allow-dns", metav1.LabelSelector{}, SetSpecIngressRules(), SetSpecEgressRules(egressRule))
nsX, _, _, model, k8s := getK8SModel(f)
nsX, _, _, k8s := getK8sNamespaces(f)
CreatePolicy(k8s, policy, nsX)
reachability := NewReachability(model.AllPods(), true)
@@ -168,7 +169,7 @@ var _ = common.SIGDescribe("Netpol", func() {
ingressRule.From = append(ingressRule.From, networkingv1.NetworkPolicyPeer{PodSelector: &allowedPods})
policy := GenNetworkPolicyWithNameAndPodMatchLabel("x-a-allows-x-b", map[string]string{"pod": "a"}, SetSpecIngressRules(ingressRule))
nsX, _, _, model, k8s := getK8SModel(f)
nsX, _, _, k8s := getK8sNamespaces(f)
CreatePolicy(k8s, policy, nsX)
reachability := NewReachability(model.AllPods(), true)
@@ -179,8 +180,7 @@ var _ = common.SIGDescribe("Netpol", func() {
})
ginkgo.It("should enforce policy to allow ingress traffic for a target [Feature:NetworkPolicy] ", func() {
nsX, _, _, model, k8s := getK8SModel(f)
nsX, _, _, k8s := getK8sNamespaces(f)
ginkgo.By("having a deny all ingress policy", func() {
// Deny all Ingress traffic policy to pods on namespace nsX
policy := GenNetworkPolicyWithNameAndPodSelector("deny-all", metav1.LabelSelector{}, SetSpecIngressRules())
@@ -202,8 +202,7 @@ var _ = common.SIGDescribe("Netpol", func() {
})
ginkgo.It("should enforce policy to allow ingress traffic from pods in all namespaces [Feature:NetworkPolicy]", func() {
nsX, _, _, model, k8s := getK8SModel(f)
nsX, _, _, k8s := getK8sNamespaces(f)
ingressRule := networkingv1.NetworkPolicyIngressRule{}
ingressRule.From = append(ingressRule.From, networkingv1.NetworkPolicyPeer{NamespaceSelector: &metav1.LabelSelector{MatchLabels: map[string]string{}}})
policy := GenNetworkPolicyWithNameAndPodMatchLabel("allow-from-another-ns", map[string]string{"pod": "a"}, SetSpecIngressRules(ingressRule))
@@ -214,7 +213,7 @@ var _ = common.SIGDescribe("Netpol", func() {
})
ginkgo.It("should enforce policy to allow traffic only from a different namespace, based on NamespaceSelector [Feature:NetworkPolicy]", func() {
nsX, nsY, nsZ, model, k8s := getK8SModel(f)
nsX, nsY, nsZ, k8s := getK8sNamespaces(f)
ingressRule := networkingv1.NetworkPolicyIngressRule{}
ingressRule.From = append(ingressRule.From, networkingv1.NetworkPolicyPeer{NamespaceSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"ns": nsY}}})
policy := GenNetworkPolicyWithNameAndPodMatchLabel("allow-client-a-via-ns-selector", map[string]string{"pod": "a"}, SetSpecIngressRules(ingressRule))
@@ -240,7 +239,7 @@ var _ = common.SIGDescribe("Netpol", func() {
ingressRule.From = append(ingressRule.From, networkingv1.NetworkPolicyPeer{PodSelector: &allowedPods})
policy := GenNetworkPolicyWithNameAndPodMatchLabel("x-a-allows-x-b", map[string]string{"pod": "a"}, SetSpecIngressRules(ingressRule))
nsX, _, _, model, k8s := getK8SModel(f)
nsX, _, _, k8s := getK8sNamespaces(f)
CreatePolicy(k8s, policy, nsX)
reachability := NewReachability(model.AllPods(), true)
@@ -251,7 +250,7 @@ var _ = common.SIGDescribe("Netpol", func() {
})
ginkgo.It("should enforce policy based on NamespaceSelector with MatchExpressions[Feature:NetworkPolicy]", func() {
nsX, nsY, nsZ, model, k8s := getK8SModel(f)
nsX, nsY, nsZ, k8s := getK8sNamespaces(f)
allowedNamespaces := &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{{
Key: "ns",
@@ -273,7 +272,7 @@ var _ = common.SIGDescribe("Netpol", func() {
})
ginkgo.It("should enforce policy based on PodSelector or NamespaceSelector [Feature:NetworkPolicy]", func() {
nsX, _, _, model, k8s := getK8SModel(f)
nsX, _, _, k8s := getK8sNamespaces(f)
allowedNamespaces := &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{{
Key: "ns",
@@ -299,7 +298,7 @@ var _ = common.SIGDescribe("Netpol", func() {
})
ginkgo.It("should enforce policy based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]", func() {
nsX, nsY, nsZ, model, k8s := getK8SModel(f)
nsX, nsY, nsZ, k8s := getK8sNamespaces(f)
allowedNamespaces := &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{{
Key: "ns",
@@ -326,7 +325,7 @@ var _ = common.SIGDescribe("Netpol", func() {
})
ginkgo.It("should enforce policy based on Multiple PodSelectors and NamespaceSelectors [Feature:NetworkPolicy]", func() {
nsX, nsY, nsZ, model, k8s := getK8SModel(f)
nsX, nsY, nsZ, k8s := getK8sNamespaces(f)
allowedNamespaces := &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{{
Key: "ns",
@@ -356,8 +355,7 @@ var _ = common.SIGDescribe("Netpol", func() {
})
ginkgo.It("should enforce policy based on any PodSelectors [Feature:NetworkPolicy]", func() {
nsX, _, _, model, k8s := getK8SModel(f)
nsX, _, _, k8s := getK8sNamespaces(f)
ingressRule := networkingv1.NetworkPolicyIngressRule{}
for _, label := range []map[string]string{{"pod": "b"}, {"pod": "c"}} {
ingressRule.From = append(ingressRule.From, networkingv1.NetworkPolicyPeer{PodSelector: &metav1.LabelSelector{MatchLabels: label}})
@@ -376,7 +374,7 @@ var _ = common.SIGDescribe("Netpol", func() {
})
ginkgo.It("should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]", func() {
nsX, nsY, _, model, k8s := getK8SModel(f)
nsX, nsY, _, k8s := getK8sNamespaces(f)
allowedNamespaces := &metav1.LabelSelector{
MatchLabels: map[string]string{
"ns": nsY,
@@ -401,7 +399,7 @@ var _ = common.SIGDescribe("Netpol", func() {
ginkgo.It("should enforce policy based on Ports [Feature:NetworkPolicy]", func() {
ginkgo.By("Creating a network allowPort81Policy which only allows allow listed namespaces (y) to connect on exactly one port (81)")
nsX, nsY, nsZ, model, k8s := getK8SModel(f)
nsX, nsY, nsZ, k8s := getK8sNamespaces(f)
allowedLabels := &metav1.LabelSelector{
MatchLabels: map[string]string{
"ns": nsY,
@@ -423,7 +421,7 @@ var _ = common.SIGDescribe("Netpol", func() {
ginkgo.It("should enforce multiple, stacked policies with overlapping podSelectors [Feature:NetworkPolicy]", func() {
ginkgo.By("Creating a network allowPort81Policy which only allows allow listed namespaces (y) to connect on exactly one port (81)")
nsX, nsY, nsZ, model, k8s := getK8SModel(f)
nsX, nsY, nsZ, k8s := getK8sNamespaces(f)
allowedLabels := &metav1.LabelSelector{
MatchLabels: map[string]string{
"ns": nsY,
@@ -463,7 +461,7 @@ var _ = common.SIGDescribe("Netpol", func() {
ginkgo.It("should support allow-all policy [Feature:NetworkPolicy]", func() {
ginkgo.By("Creating a network policy which allows all traffic.")
policy := GenNetworkPolicyWithNameAndPodMatchLabel("allow-all", map[string]string{}, SetSpecIngressRules(networkingv1.NetworkPolicyIngressRule{}))
nsX, _, _, model, k8s := getK8SModel(f)
nsX, _, _, k8s := getK8sNamespaces(f)
CreatePolicy(k8s, policy, nsX)
ginkgo.By("Testing pods can connect to both ports when an 'allow-all' policy is present.")
@@ -476,7 +474,7 @@ var _ = common.SIGDescribe("Netpol", func() {
IngressRules := networkingv1.NetworkPolicyIngressRule{}
IngressRules.Ports = append(IngressRules.Ports, networkingv1.NetworkPolicyPort{Port: &intstr.IntOrString{Type: intstr.String, StrVal: "serve-81-tcp"}})
policy := GenNetworkPolicyWithNameAndPodMatchLabel("allow-all", map[string]string{}, SetSpecIngressRules(IngressRules))
nsX, _, _, model, k8s := getK8SModel(f)
nsX, _, _, k8s := getK8sNamespaces(f)
CreatePolicy(k8s, policy, nsX)
ginkgo.By("Blocking all ports other then 81 in the entire namespace")
@@ -491,7 +489,7 @@ var _ = common.SIGDescribe("Netpol", func() {
})
ginkgo.It("should allow ingress access from namespace on one named port [Feature:NetworkPolicy]", func() {
nsX, nsY, nsZ, model, k8s := getK8SModel(f)
nsX, nsY, nsZ, k8s := getK8sNamespaces(f)
allowedLabels := &metav1.LabelSelector{
MatchLabels: map[string]string{
"ns": nsY,
@@ -524,7 +522,7 @@ var _ = common.SIGDescribe("Netpol", func() {
egressRule.Ports = append(egressRule.Ports, networkingv1.NetworkPolicyPort{Protocol: &protocolUDP, Port: &intstr.IntOrString{Type: intstr.Int, IntVal: 53}})
policy := GenNetworkPolicyWithNameAndPodMatchLabel("allow-egress", map[string]string{}, SetSpecEgressRules(egressRule))
nsX, _, _, model, k8s := getK8SModel(f)
nsX, _, _, k8s := getK8sNamespaces(f)
CreatePolicy(k8s, policy, nsX)
reachabilityPort80 := NewReachability(model.AllPods(), true)
@@ -540,7 +538,7 @@ var _ = common.SIGDescribe("Netpol", func() {
ginkgo.By("Using the simplest possible mutation: start with allow all, then switch to deny all")
// part 1) allow all
policy := GenNetworkPolicyWithNameAndPodMatchLabel("allow-all-mutate-to-deny-all", map[string]string{}, SetSpecIngressRules(networkingv1.NetworkPolicyIngressRule{}))
nsX, _, _, model, k8s := getK8SModel(f)
nsX, _, _, k8s := getK8sNamespaces(f)
CreatePolicy(k8s, policy, nsX)
reachability := NewReachability(model.AllPods(), true)
@@ -556,7 +554,7 @@ var _ = common.SIGDescribe("Netpol", func() {
})
ginkgo.It("should allow ingress access from updated namespace [Feature:NetworkPolicy]", func() {
nsX, nsY, _, model, k8s := getK8SModel(f)
nsX, nsY, _, k8s := getK8sNamespaces(f)
defer ResetNamespaceLabels(k8s, nsY)
allowedLabels := &metav1.LabelSelector{
@@ -588,7 +586,7 @@ var _ = common.SIGDescribe("Netpol", func() {
})
ginkgo.It("should allow ingress access from updated pod [Feature:NetworkPolicy]", func() {
nsX, _, _, model, k8s := getK8SModel(f)
nsX, _, _, k8s := getK8sNamespaces(f)
podXB, err := model.FindPod(nsX, "b")
framework.ExpectNoError(err, "find pod x/b")
defer ResetPodLabels(k8s, podXB)
@@ -617,7 +615,7 @@ var _ = common.SIGDescribe("Netpol", func() {
})
ginkgo.It("should deny ingress from pods on other namespaces [Feature:NetworkPolicy]", func() {
nsX, nsY, nsZ, model, k8s := getK8SModel(f)
nsX, nsY, nsZ, k8s := getK8sNamespaces(f)
IngressRules := networkingv1.NetworkPolicyIngressRule{}
IngressRules.From = append(IngressRules.From, networkingv1.NetworkPolicyPeer{PodSelector: &metav1.LabelSelector{MatchLabels: map[string]string{}}})
@@ -632,7 +630,7 @@ var _ = common.SIGDescribe("Netpol", func() {
})
ginkgo.It("should deny ingress access to updated pod [Feature:NetworkPolicy]", func() {
nsX, _, _, model, k8s := getK8SModel(f)
nsX, _, _, k8s := getK8sNamespaces(f)
podXA, err := model.FindPod(nsX, "a")
framework.ExpectNoError(err, "find pod x/a")
defer ResetPodLabels(k8s, podXA)
@@ -653,7 +651,7 @@ var _ = common.SIGDescribe("Netpol", func() {
})
ginkgo.It("should deny egress from pods based on PodSelector [Feature:NetworkPolicy] ", func() {
nsX, _, _, model, k8s := getK8SModel(f)
nsX, _, _, k8s := getK8sNamespaces(f)
policy := GenNetworkPolicyWithNameAndPodSelector("deny-egress-pod-a", metav1.LabelSelector{MatchLabels: map[string]string{"pod": "a"}}, SetSpecEgressRules())
CreatePolicy(k8s, policy, nsX)
@@ -664,7 +662,7 @@ var _ = common.SIGDescribe("Netpol", func() {
})
ginkgo.It("should deny egress from all pods in a namespace [Feature:NetworkPolicy] ", func() {
nsX, _, _, model, k8s := getK8SModel(f)
nsX, _, _, k8s := getK8sNamespaces(f)
policy := GenNetworkPolicyWithNameAndPodSelector("deny-egress-ns-x", metav1.LabelSelector{}, SetSpecEgressRules())
CreatePolicy(k8s, policy, nsX)
@@ -696,7 +694,7 @@ var _ = common.SIGDescribe("Netpol", func() {
},
}
policy.Spec.PolicyTypes = []networkingv1.PolicyType{networkingv1.PolicyTypeEgress, networkingv1.PolicyTypeIngress}
nsX, _, _, model, k8s := getK8SModel(f)
nsX, _, _, k8s := getK8sNamespaces(f)
CreatePolicy(k8s, policy, nsX)
reachabilityPort80 := NewReachability(model.AllPods(), true)
@@ -719,7 +717,7 @@ var _ = common.SIGDescribe("Netpol", func() {
// Ingress on y/a and y/b allow traffic from x/a
// Expectation: traffic from x/a to y/a allowed only, traffic from x/a to y/b denied by egress policy
nsX, nsY, _, model, k8s := getK8SModel(f)
nsX, nsY, _, k8s := getK8sNamespaces(f)
// Building egress policy for x/a to y/a only
allowedEgressNamespaces := &metav1.LabelSelector{
@@ -809,7 +807,7 @@ var _ = common.SIGDescribe("Netpol", func() {
})
ginkgo.It("should enforce egress policy allowing traffic to a server in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]", func() {
nsX, nsY, _, model, k8s := getK8SModel(f)
nsX, nsY, _, k8s := getK8sNamespaces(f)
allowedNamespaces := &metav1.LabelSelector{
MatchLabels: map[string]string{
"ns": nsY,
@@ -834,8 +832,7 @@ var _ = common.SIGDescribe("Netpol", func() {
})
ginkgo.It("should enforce ingress policy allowing any port traffic to a server on a specific protocol [Feature:NetworkPolicy] [Feature:UDP]", func() {
nsX, _, _, model, k8s := getK8SModel(f)
nsX, _, _, k8s := getK8sNamespaces(f)
ingressRule := networkingv1.NetworkPolicyIngressRule{}
ingressRule.Ports = append(ingressRule.Ports, networkingv1.NetworkPolicyPort{Protocol: &protocolTCP})
policy := GenNetworkPolicyWithNameAndPodMatchLabel("allow-ingress-by-proto", map[string]string{"pod": "a"}, SetSpecIngressRules(ingressRule))
@@ -850,7 +847,7 @@ var _ = common.SIGDescribe("Netpol", func() {
})
ginkgo.It("should enforce multiple ingress policies with ingress allow-all policy taking precedence [Feature:NetworkPolicy]", func() {
nsX, _, _, model, k8s := getK8SModel(f)
nsX, _, _, k8s := getK8sNamespaces(f)
IngressRules := networkingv1.NetworkPolicyIngressRule{}
IngressRules.Ports = append(IngressRules.Ports, networkingv1.NetworkPolicyPort{Port: &intstr.IntOrString{Type: intstr.Int, IntVal: 80}})
policyAllowOnlyPort80 := GenNetworkPolicyWithNameAndPodMatchLabel("allow-ingress-port-80", map[string]string{}, SetSpecIngressRules(IngressRules))
@@ -876,7 +873,7 @@ var _ = common.SIGDescribe("Netpol", func() {
egressRule.Ports = append(egressRule.Ports, networkingv1.NetworkPolicyPort{Port: &intstr.IntOrString{Type: intstr.Int, IntVal: 80}})
egressRule.Ports = append(egressRule.Ports, networkingv1.NetworkPolicyPort{Protocol: &protocolUDP, Port: &intstr.IntOrString{Type: intstr.Int, IntVal: 53}})
policyAllowPort80 := GenNetworkPolicyWithNameAndPodMatchLabel("allow-egress-port-80", map[string]string{}, SetSpecEgressRules(egressRule))
nsX, _, _, model, k8s := getK8SModel(f)
nsX, _, _, k8s := getK8sNamespaces(f)
CreatePolicy(k8s, policyAllowPort80, nsX)
ginkgo.By("Making sure ingress doesn't work other than port 80")
@@ -899,7 +896,7 @@ var _ = common.SIGDescribe("Netpol", func() {
// Deny all traffic into and out of "x".
policy := GenNetworkPolicyWithNameAndPodSelector("deny-all", metav1.LabelSelector{}, SetSpecIngressRules(), SetSpecEgressRules())
nsX, _, _, model, k8s := getK8SModel(f)
nsX, _, _, k8s := getK8sNamespaces(f)
CreatePolicy(k8s, policy, nsX)
reachability := NewReachability(model.AllPods(), true)
@@ -921,7 +918,7 @@ var _ = common.SIGDescribe("Netpol", func() {
ginkgo.It("should allow egress access to server in CIDR block [Feature:NetworkPolicy]", func() {
// Getting podServer's status to get podServer's IP, to create the CIDR
nsX, nsY, _, model, k8s := getK8SModel(f)
nsX, nsY, _, k8s := getK8sNamespaces(f)
podList, err := f.ClientSet.CoreV1().Pods(nsY).List(context.TODO(), metav1.ListOptions{LabelSelector: "pod=b"})
framework.ExpectNoError(err, "Failing to list pods in namespace y")
pod := podList.Items[0]
@@ -947,7 +944,7 @@ var _ = common.SIGDescribe("Netpol", func() {
ginkgo.It("should enforce except clause while egress access to server in CIDR block [Feature:NetworkPolicy]", func() {
// Getting podServer's status to get podServer's IP, to create the CIDR with except clause
nsX, _, _, model, k8s := getK8SModel(f)
nsX, _, _, k8s := getK8sNamespaces(f)
podList, err := f.ClientSet.CoreV1().Pods(nsX).List(context.TODO(), metav1.ListOptions{LabelSelector: "pod=a"})
framework.ExpectNoError(err, "Failing to find pod x/a")
podA := podList.Items[0]
@@ -980,7 +977,7 @@ var _ = common.SIGDescribe("Netpol", func() {
ginkgo.It("should ensure an IP overlapping both IPBlock.CIDR and IPBlock.Except is allowed [Feature:NetworkPolicy]", func() {
// Getting podServer's status to get podServer's IP, to create the CIDR with except clause
nsX, _, _, model, k8s := getK8SModel(f)
nsX, _, _, k8s := getK8sNamespaces(f)
podList, err := f.ClientSet.CoreV1().Pods(nsX).List(context.TODO(), metav1.ListOptions{LabelSelector: "pod=a"})
framework.ExpectNoError(err, "Failing to find pod x/a")
podA := podList.Items[0]
@@ -1043,7 +1040,7 @@ var _ = common.SIGDescribe("Netpol", func() {
allowEgressPolicy := GenNetworkPolicyWithNameAndPodSelector("allow-egress-for-target",
metav1.LabelSelector{MatchLabels: targetLabels}, SetSpecEgressRules(networkingv1.NetworkPolicyEgressRule{}))
nsX, _, _, model, k8s := getK8SModel(f)
nsX, _, _, k8s := getK8sNamespaces(f)
CreatePolicy(k8s, allowEgressPolicy, nsX)
allowEgressReachability := NewReachability(model.AllPods(), true)
@@ -1063,7 +1060,8 @@ var _ = common.SIGDescribe("Netpol", func() {
// security hole if you fail this test, because you are allowing TCP
// traffic that is supposed to be blocked.
ginkgo.It("should not mistakenly treat 'protocol: SCTP' as 'protocol: TCP', even if the plugin doesn't support SCTP [Feature:NetworkPolicy]", func() {
nsX, _, _, model, k8s := getK8SModel(f)
nsX, _, _, k8s := getK8sNamespaces(f)
ginkgo.By("Creating a default-deny ingress policy.")
// Empty podSelector blocks the entire namespace
policy := GenNetworkPolicyWithNameAndPodSelector("deny-ingress", metav1.LabelSelector{}, SetSpecIngressRules())
@@ -1089,7 +1087,7 @@ var _ = common.SIGDescribe("Netpol", func() {
ingressRule := networkingv1.NetworkPolicyIngressRule{}
ingressRule.Ports = append(ingressRule.Ports, networkingv1.NetworkPolicyPort{Port: &intstr.IntOrString{IntVal: 80}, Protocol: &protocolSCTP})
policy := GenNetworkPolicyWithNameAndPodMatchLabel("allow-only-sctp-ingress-on-port-80", map[string]string{"pod": "a"}, SetSpecIngressRules(ingressRule))
nsX, _, _, model, k8s := getK8SModel(f)
nsX, _, _, k8s := getK8sNamespaces(f)
CreatePolicy(k8s, policy, nsX)
ginkgo.By("Trying to connect to TCP port 81, which should be blocked by implicit isolation.")
@@ -1102,7 +1100,7 @@ var _ = common.SIGDescribe("Netpol", func() {
ingressRule := networkingv1.NetworkPolicyIngressRule{}
ingressRule.Ports = append(ingressRule.Ports, networkingv1.NetworkPolicyPort{Port: &intstr.IntOrString{IntVal: 81}, Protocol: &protocolUDP})
policy := GenNetworkPolicyWithNameAndPodMatchLabel("allow-only-udp-ingress-on-port-81", map[string]string{"pod": "a"}, SetSpecIngressRules(ingressRule))
nsX, _, _, model, k8s := getK8SModel(f)
nsX, _, _, k8s := getK8sNamespaces(f)
CreatePolicy(k8s, policy, nsX)
ginkgo.By("Creating a network policy for the server which allows traffic only via UDP on port 81.")
@@ -1113,8 +1111,9 @@ var _ = common.SIGDescribe("Netpol", func() {
ValidateOrFail(k8s, model, &TestCase{ToPort: 81, Protocol: v1.ProtocolTCP, Reachability: reachability})
})
// Note that this default ns functionality is maintained by the APIMachinery group, but we test it here anyway because it's an important feature.
ginkgo.It("should enforce policy to allow traffic based on NamespaceSelector with MatchLabels using default ns label [Feature:NetworkPolicy]", func() {
nsX, nsY, nsZ, model, k8s := getK8SModel(f)
nsX, nsY, nsZ, k8s := getK8sNamespaces(f)
allowedLabels := &metav1.LabelSelector{
MatchLabels: map[string]string{
v1.LabelMetadataName: nsY,
@@ -1132,8 +1131,9 @@ var _ = common.SIGDescribe("Netpol", func() {
ValidateOrFail(k8s, model, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability})
})
// Note that this default ns functionality is maintained by the APIMachinery group, but we test it here anyway because it's an important feature.
ginkgo.It("should enforce policy based on NamespaceSelector with MatchExpressions using default ns label [Feature:NetworkPolicy]", func() {
nsX, nsY, _, model, k8s := getK8SModel(f)
nsX, nsY, _, k8s := getK8sNamespaces(f)
allowedNamespaces := &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{{
Key: v1.LabelMetadataName,
@@ -1155,7 +1155,7 @@ var _ = common.SIGDescribe("Netpol [LinuxOnly]", func() {
var _ = common.SIGDescribe("Netpol [LinuxOnly]", func() {
f := framework.NewDefaultFramework("udp-network-policy")
var model *Model
ginkgo.BeforeEach(func() {
// Windows does not support UDP testing via agnhost.
e2eskipper.SkipIfNodeOSDistroIs("windows")
@@ -1163,18 +1163,18 @@ var _ = common.SIGDescribe("Netpol [LinuxOnly]", func() {
ginkgo.Context("NetworkPolicy between server and client using UDP", func() {
ginkgo.BeforeEach(func() {
initializeResourcesByFixedNS(f)
model = initializeResourcesByFixedNS(f)
})
ginkgo.AfterEach(func() {
if !useFixedNamespaces {
_, _, _, model, k8s := getK8SModel(f)
k8s := newKubeManager(f)
framework.ExpectNoError(k8s.deleteNamespaces(model.NamespaceNames), "unable to clean up UDP netpol namespaces")
}
})
ginkgo.It("should support a 'default-deny-ingress' policy [Feature:NetworkPolicy]", func() {
nsX, _, _, model, k8s := getK8SModel(f)
nsX, _, _, k8s := getK8sNamespaces(f)
policy := GenNetworkPolicyWithNameAndPodSelector("deny-all", metav1.LabelSelector{}, SetSpecIngressRules())
CreatePolicy(k8s, policy, nsX)
@@ -1186,7 +1186,7 @@ var _ = common.SIGDescribe("Netpol [LinuxOnly]", func() {
ginkgo.It("should enforce policy based on Ports [Feature:NetworkPolicy]", func() {
ginkgo.By("Creating a network policy allowPort81Policy which only allows allow listed namespaces (y) to connect on exactly one port (81)")
nsX, nsY, nsZ, model, k8s := getK8SModel(f)
nsX, nsY, nsZ, k8s := getK8sNamespaces(f)
allowedLabels := &metav1.LabelSelector{
MatchLabels: map[string]string{
"ns": nsY,
@@ -1207,7 +1207,7 @@ var _ = common.SIGDescribe("Netpol [LinuxOnly]", func() {
})
ginkgo.It("should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]", func() {
nsX, nsY, _, model, k8s := getK8SModel(f)
nsX, nsY, _, k8s := getK8sNamespaces(f)
allowedNamespaces := &metav1.LabelSelector{
MatchLabels: map[string]string{
"ns": nsY,
@@ -1234,7 +1234,7 @@ var _ = common.SIGDescribe("Netpol [LinuxOnly]", func() {
var _ = common.SIGDescribe("Netpol [Feature:SCTPConnectivity][LinuxOnly][Disruptive]", func() {
f := framework.NewDefaultFramework("sctp-network-policy")
var model *Model
ginkgo.BeforeEach(func() {
// Windows does not support network policies.
e2eskipper.SkipIfNodeOSDistroIs("windows")
@@ -1243,18 +1243,18 @@ var _ = common.SIGDescribe("Netpol [Feature:SCTPConnectivity][LinuxOnly][Disrupt
ginkgo.Context("NetworkPolicy between server and client using SCTP", func() {
ginkgo.BeforeEach(func() {
addSCTPContainers = true
initializeResourcesByFixedNS(f)
model = initializeResourcesByFixedNS(f)
})
ginkgo.AfterEach(func() {
if !useFixedNamespaces {
_, _, _, model, k8s := getK8SModel(f)
k8s := newKubeManager(f)
framework.ExpectNoError(k8s.deleteNamespaces(model.NamespaceNames), "unable to clean up SCTP netpol namespaces")
}
})
ginkgo.It("should support a 'default-deny-ingress' policy [Feature:NetworkPolicy]", func() {
nsX, _, _, model, k8s := getK8SModel(f)
nsX, _, _, k8s := getK8sNamespaces(f)
policy := GenNetworkPolicyWithNameAndPodSelector("deny-all", metav1.LabelSelector{}, SetSpecIngressRules())
CreatePolicy(k8s, policy, nsX)
@@ -1266,7 +1266,7 @@ var _ = common.SIGDescribe("Netpol [Feature:SCTPConnectivity][LinuxOnly][Disrupt
ginkgo.It("should enforce policy based on Ports [Feature:NetworkPolicy]", func() {
ginkgo.By("Creating a network allowPort81Policy which only allows allow listed namespaces (y) to connect on exactly one port (81)")
nsX, nsY, nsZ, model, k8s := getK8SModel(f)
nsX, nsY, nsZ, k8s := getK8sNamespaces(f)
allowedLabels := &metav1.LabelSelector{
MatchLabels: map[string]string{
"ns": nsY,
@@ -1286,7 +1286,7 @@ var _ = common.SIGDescribe("Netpol [Feature:SCTPConnectivity][LinuxOnly][Disrupt
})
ginkgo.It("should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]", func() {
nsX, nsY, _, model, k8s := getK8SModel(f)
nsX, nsY, _, k8s := getK8sNamespaces(f)
allowedNamespaces := &metav1.LabelSelector{
MatchLabels: map[string]string{
"ns": nsY,
@@ -1338,24 +1338,18 @@ func defaultModel(namespaces []string, dnsDomain string) *Model {
return NewModel(namespaces, []string{"a", "b", "c"}, []int32{80, 81}, protocols, dnsDomain)
}
// getK8sModel generates a network policy model using the framework's root namespace and cluster DNS domain.
// This function is deterministic and has no side effects, so may be safely called multiple times.
func getK8SModel(f *framework.Framework) (string, string, string, *Model, *kubeManager) {
k8s := newKubeManager(f)
rootNs := f.Namespace.GetName()
nsX, nsY, nsZ, namespaces := getNamespaces(rootNs)
model := defaultModel(namespaces, framework.TestContext.ClusterDNSDomain)
return nsX, nsY, nsZ, model, k8s
// getK8sNamespaces returns the canonical set of namespaces using the framework's root namespace
func getK8sNamespaces(f *framework.Framework) (string, string, string, *kubeManager) {
nsX, nsY, nsZ, _ := getNamespaces(f.Namespace.GetName())
return nsX, nsY, nsZ, newKubeManager(f)
}
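Put together, a typical spec now combines the package-level model assigned in BeforeEach with this lighter helper. Schematically (an illustrative spec, not one copied from the file):

ginkgo.It("should support some example policy [Feature:NetworkPolicy]", func() {
	// namespaces and the kubeManager no longer arrive bundled with a Model
	nsX, _, _, k8s := getK8sNamespaces(f)
	policy := GenNetworkPolicyWithNameAndPodSelector("deny-ingress", metav1.LabelSelector{}, SetSpecIngressRules())
	CreatePolicy(k8s, policy, nsX)

	// the Model is the suite-level variable populated in BeforeEach
	reachability := NewReachability(model.AllPods(), true)
	reachability.ExpectAllIngress(NewPodString(nsX, "a"), false)
	ValidateOrFail(k8s, model, &TestCase{ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability})
})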
// initializeResourcesByFixedNS uses the e2e framework to create all necessary namespace resources. If useFixedNamespaces is
// set to true, it also cleans existing network policies out of those namespaces, so that policies left over from earlier tests do not overlap with new ones.
func initializeResourcesByFixedNS(f *framework.Framework) {
func initializeResourcesByFixedNS(f *framework.Framework) *Model {
if useFixedNamespaces {
_ = initializeResources(f)
_, _, _, model, k8s := getK8SModel(f)
model, _ := initializeResources(f)
k8s := newKubeManager(f)
framework.ExpectNoError(k8s.cleanNetworkPolicies(model.NamespaceNames), "unable to clean network policies")
err := wait.Poll(waitInterval, waitTimeout, func() (done bool, err error) {
for _, ns := range model.NamespaceNames {
@@ -1368,26 +1362,37 @@ func initializeResourcesByFixedNS(f *framework.Framework) {
return true, nil
})
framework.ExpectNoError(err, "unable to wait for network policy deletion")
return model
} else {
framework.Logf("Using %v as the default dns domain for this cluster... ", framework.TestContext.ClusterDNSDomain)
framework.ExpectNoError(initializeResources(f), "unable to initialize resources")
model, err := initializeResources(f)
framework.ExpectNoError(err, "unable to initialize resources")
return model
}
}
// initializeResources uses the e2e framework to create all necessary namespace resources, based on the network policy
// model derived from the framework. It then waits for the resources described by the model to be up and running
// (i.e. all pods are ready and running in their namespaces).
func initializeResources(f *framework.Framework) error {
_, _, _, model, k8s := getK8SModel(f)
func initializeResources(f *framework.Framework) (*Model, error) {
k8s := newKubeManager(f)
rootNs := f.Namespace.GetName()
_, _, _, namespaces := getNamespaces(rootNs)
model := defaultModel(namespaces, framework.TestContext.ClusterDNSDomain)
framework.Logf("initializing cluster: ensuring namespaces, deployments, and pods exist and are ready")
err := k8s.initializeCluster(model)
if err != nil {
return err
return nil, err
}
framework.Logf("finished initializing cluster state")
return k8s.waitForHTTPServers(model)
err = waitForHTTPServers(k8s, model)
if err != nil {
return nil, err
}
return model, nil
}

View File: test/e2e/network/netpol/probe.go

@@ -17,11 +17,19 @@ limitations under the License.
package netpol
import (
"fmt"
"net"
"github.com/onsi/ginkgo"
v1 "k8s.io/api/core/v1"
"k8s.io/kubernetes/test/e2e/framework"
)
// Prober decouples the probing logic from kubemanager.go
type Prober interface {
probeConnectivity(nsFrom string, podFrom string, containerFrom string, addrTo string, protocol v1.Protocol, toPort int, timeoutSeconds int) (bool, string, error)
}
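Because ProbePodToPodConnectivity now depends only on this interface rather than on the concrete kubeManager, the probing and reachability machinery can be exercised without a live cluster. For example, a stub prober (purely illustrative) that reports every connection as successful:

// fakeProber satisfies Prober without touching a real cluster.
type fakeProber struct{}

func (fakeProber) probeConnectivity(nsFrom string, podFrom string, containerFrom string, addrTo string, protocol v1.Protocol, toPort int, timeoutSeconds int) (bool, string, error) {
	// pretend every probe connects; a richer test double could consult a
	// hand-built allow/deny table to simulate a CNI's policy behavior
	return true, fmt.Sprintf("fake probe %s/%s -> %s:%d/%s", nsFrom, podFrom, addrTo, toPort, protocol), nil
}

// usage: ProbePodToPodConnectivity(fakeProber{}, model, testCase)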
// ProbeJob packages the data for the input of a pod->pod connectivity probe
type ProbeJob struct {
PodFrom *Pod
@@ -40,13 +48,13 @@ type ProbeJobResults struct {
}
// ProbePodToPodConnectivity runs a series of probes in kube, and records the results in `testCase.Reachability`
func ProbePodToPodConnectivity(k8s *kubeManager, model *Model, testCase *TestCase) {
func ProbePodToPodConnectivity(prober Prober, model *Model, testCase *TestCase) {
allPods := model.AllPods()
size := len(allPods) * len(allPods)
jobs := make(chan *ProbeJob, size)
results := make(chan *ProbeJobResults, size)
for i := 0; i < model.GetWorkers(); i++ {
go probeWorker(k8s, jobs, results, model.GetProbeTimeoutSeconds())
go probeWorker(prober, jobs, results, model.GetProbeTimeoutSeconds())
}
for _, podFrom := range allPods {
for _, podTo := range allPods {
@@ -83,11 +91,24 @@ func ProbePodToPodConnectivity(k8s *kubeManager, model *Model, testCase *TestCas
// probeWorker continues polling a pod connectivity status, until the incoming "jobs" channel is closed, and writes results back out to the "results" channel.
// It only writes pass/fail status to a channel and has no failure side effects; this is by design, since we do not want to fail inside a goroutine.
func probeWorker(k8s *kubeManager, jobs <-chan *ProbeJob, results chan<- *ProbeJobResults, timeoutSeconds int) {
func probeWorker(prober Prober, jobs <-chan *ProbeJob, results chan<- *ProbeJobResults, timeoutSeconds int) {
defer ginkgo.GinkgoRecover()
for job := range jobs {
podFrom := job.PodFrom
connected, command, err := k8s.probeConnectivity(podFrom.Namespace, podFrom.Name, podFrom.Containers[0].Name(), job.PodTo.QualifiedServiceAddress(job.ToPodDNSDomain), job.Protocol, job.ToPort, timeoutSeconds)
if net.ParseIP(job.PodTo.ServiceIP) == nil {
results <- &ProbeJobResults{
Job: job,
IsConnected: false,
Err: fmt.Errorf("empty service ip"),
}
}
// Note that we could probe a DNS name instead of the ServiceIP, as shown below.
// We stopped doing this in order to support netpol testing on clusters without
// DNS enabled, but it may be re-enabled later.
// dnsName := job.PodTo.QualifiedServiceAddress(job.ToPodDNSDomain)
// TODO make this work on dual-stack clusters...
connected, command, err := prober.probeConnectivity(podFrom.Namespace, podFrom.Name, podFrom.Containers[0].Name(), job.PodTo.ServiceIP, job.Protocol, job.ToPort, timeoutSeconds)
result := &ProbeJobResults{
Job: job,
IsConnected: connected,

View File: test/e2e/network/netpol/test_helper.go

@@ -18,14 +18,15 @@ package netpol
import (
"context"
"time"
"fmt"
"github.com/onsi/ginkgo"
"github.com/pkg/errors"
networkingv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
"sigs.k8s.io/yaml"
"time"
)
const (
@@ -64,6 +65,48 @@ func UpdatePolicy(k8s *kubeManager, policy *networkingv1.NetworkPolicy, namespac
framework.ExpectNoError(err, "Unable to update netpol %s/%s", namespace, policy.Name)
}
// waitForHTTPServers waits for all webservers to be up, on all protocols sent in the input, and then validates them using the same probe logic as the rest of the suite.
func waitForHTTPServers(k *kubeManager, model *Model) error {
const maxTries = 10
framework.Logf("waiting for HTTP servers (ports 80 and 81) to become ready")
testCases := map[string]*TestCase{}
for _, port := range model.Ports {
// Protocols is provided as input so that we can skip udp polling for windows
for _, protocol := range model.Protocols {
fromPort := 81
desc := fmt.Sprintf("%d->%d,%s", fromPort, port, protocol)
testCases[desc] = &TestCase{ToPort: int(port), Protocol: protocol}
}
}
notReady := map[string]bool{}
for caseName := range testCases {
notReady[caseName] = true
}
for i := 0; i < maxTries; i++ {
for caseName, testCase := range testCases {
if notReady[caseName] {
reachability := NewReachability(model.AllPods(), true)
testCase.Reachability = reachability
ProbePodToPodConnectivity(k, model, testCase)
_, wrong, _, _ := reachability.Summary(ignoreLoopback)
if wrong == 0 {
framework.Logf("server %s is ready", caseName)
delete(notReady, caseName)
} else {
framework.Logf("server %s is not ready", caseName)
}
}
}
if len(notReady) == 0 {
return nil
}
time.Sleep(waitInterval)
}
return errors.Errorf("after %d tries, %d HTTP servers are not ready", maxTries, len(notReady))
}
// ValidateOrFail validates connectivity
func ValidateOrFail(k8s *kubeManager, model *Model, testCase *TestCase) {
ginkgo.By("Validating reachability matrix...")