Merge pull request #49593 from gunjan5/np-e2e
Automatic merge from submit-queue (batch tested with PRs 53567, 53197, 52944, 49593). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Refactor network policy e2e tests, and add additional logging when tests fail

**What this PR does / why we need it**:
- Restructure the tests and clean up duplicated code
- Add additional logging, so it is easier to debug the tests when they fail
- Add retries to the client-side pods, because kubelet sometimes starts a pod before the network is ready, which made the tests flaky
- Add a readiness probe for the server pods to further reduce the flakiness

Current failure message when a test fails:

```
checking client-can-connect could communicate with server.
Expected error:
    <*errors.errorString | 0xc420868820>: {
        s: "pod \"client-can-connect\" failed with status: {Phase:Failed Conditions:[{Type:Initialized Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2017-07-25 15:54:48 -0700 PDT Reason: Message:} {Type:Ready Status:False LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2017-07-25 15:54:57 -0700 PDT Reason:ContainersNotReady Message:containers with unready status: [client-can-connect-container]} {Type:PodScheduled Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2017-07-25 15:54:48 -0700 PDT Reason: Message:}] Message: Reason: HostIP:10.138.0.2 PodIP:10.60.1.180 StartTime:2017-07-25 15:54:48 -0700 PDT InitContainerStatuses:[] ContainerStatuses:[{Name:client-can-connect-container State:{Waiting:nil Running:nil Terminated:&ContainerStateTerminated{ExitCode:1,Signal:0,Reason:Error,Message:,StartedAt:2017-07-25 15:54:49 -0700 PDT,FinishedAt:2017-07-25 15:54:57 -0700 PDT,ContainerID:docker://c91c33139672c20a5231bcba9437d74e12d35a6d19275b41bb8e571148b4ab57,}} LastTerminationState:{Waiting:nil Running:nil Terminated:nil} Ready:false RestartCount:0 Image:gcr.io/google_containers/redis:e2e ImageID:docker://sha256:e5e67996c442f903cda78dd983ea6e94bb4e542950fd2eba666b44cbd303df42 ContainerID:docker://c91c33139672c20a5231bcba9437d74e12d35a6d19275b41bb8e571148b4ab57}] QOSClass:BestEffort}",
    }
pod "client-can-connect" failed with status: {Phase:Failed Conditions:[{Type:Initialized Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2017-07-25 15:54:48 -0700 PDT Reason: Message:} {Type:Ready Status:False LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2017-07-25 15:54:57 -0700 PDT Reason:ContainersNotReady Message:containers with unready status: [client-can-connect-container]} {Type:PodScheduled Status:True LastProbeTime:0001-01-01 00:00:00 +0000 UTC LastTransitionTime:2017-07-25 15:54:48 -0700 PDT Reason: Message:}] Message: Reason: HostIP:10.138.0.2 PodIP:10.60.1.180 StartTime:2017-07-25 15:54:48 -0700 PDT InitContainerStatuses:[] ContainerStatuses:[{Name:client-can-connect-container State:{Waiting:nil Running:nil Terminated:&ContainerStateTerminated{ExitCode:1,Signal:0,Reason:Error,Message:,StartedAt:2017-07-25 15:54:49 -0700 PDT,FinishedAt:2017-07-25 15:54:57 -0700 PDT,ContainerID:docker://c91c33139672c20a5231bcba9437d74e12d35a6d19275b41bb8e571148b4ab57,}} LastTerminationState:{Waiting:nil Running:nil Terminated:nil} Ready:false RestartCount:0 Image:gcr.io/google_containers/redis:e2e ImageID:docker://sha256:e5e67996c442f903cda78dd983ea6e94bb4e542950fd2eba666b44cbd303df42 ContainerID:docker://c91c33139672c20a5231bcba9437d74e12d35a6d19275b41bb8e571148b4ab57}] QOSClass:BestEffort}
not to have occurred
```

Failure message with this PR:

```
should support a 'default-deny' policy [Feature:NetworkPolicy] [It]
/Users/gunjan/go/src/k8s.io/kubernetes/_output/local/go/src/k8s.io/kubernetes/test/e2e/network/network_policy.go:81

Jul 25 14:35:55.978: Pod client-cannot-connect should be able to connect to service svc-server, but was not able to connect.
Pod logs:
Connecting to svc-server.e2e-tests-network-policy-tbzm8:80 (10.63.254.124:80)
wget: download timed out
Connecting to svc-server.e2e-tests-network-policy-tbzm8:80 (10.63.254.124:80)
wget: download timed out
Connecting to svc-server.e2e-tests-network-policy-tbzm8:80 (10.63.254.124:80)
wget: download timed out
Connecting to svc-server.e2e-tests-network-policy-tbzm8:80 (10.63.254.124:80)
wget: download timed out
Connecting to svc-server.e2e-tests-network-policy-tbzm8:80 (10.63.254.124:80)
wget: download timed out

Current NetworkPolicies:
	[{{ } {deny-all e2e-tests-network-policy-tbzm8 /apis/networking.k8s.io/v1/namespaces/e2e-tests-network-policy-tbzm8/networkpolicies/deny-all 20fa92ab-7181-11e7-855b-42010a8a0003 87694 1 2017-07-25 14:35:07 -0700 PDT <nil> <nil> map[] map[] [] nil [] } {{map[] []} []}}]

Pods:
	[Pod: server, Status: &PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2017-07-25 14:34:52 -0700 PDT } {Ready True 0001-01-01 00:00:00 +0000 UTC 2017-07-25 14:35:00 -0700 PDT } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2017-07-25 14:34:52 -0700 PDT }],Message:,Reason:,HostIP:10.138.0.2,PodIP:10.60.1.145,StartTime:2017-07-25 14:34:52 -0700 PDT,ContainerStatuses:[{server-container-80 {nil ContainerStateRunning{StartedAt:2017-07-25 14:34:53 -0700 PDT,} nil} {nil nil nil} true 0 gcr.io/google_containers/porter:4524579c0eb935c056c8e75563b4e1eda31587e0 docker://sha256:711ccbc1c74f3417f2a0f403c194017b3bde09eec3c3b42dc9c838db3894c223 docker://3fa73da57380ef28520f8bbf53b3540f523ab76d45a1b42cc9a1e4573eb45639}],QOSClass:BestEffort,InitContainerStatuses:[],}
	 Pod: server, Status: &PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2017-07-25 14:34:52 -0700 PDT } {Ready True 0001-01-01 00:00:00 +0000 UTC 2017-07-25 14:35:00 -0700 PDT } {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2017-07-25 14:34:52 -0700 PDT }],Message:,Reason:,HostIP:10.138.0.2,PodIP:10.60.1.145,StartTime:2017-07-25 14:34:52 -0700 PDT,ContainerStatuses:[{server-container-80 {nil ContainerStateRunning{StartedAt:2017-07-25 14:34:53 -0700 PDT,} nil} {nil nil nil} true 0 gcr.io/google_containers/porter:4524579c0eb935c056c8e75563b4e1eda31587e0 docker://sha256:711ccbc1c74f3417f2a0f403c194017b3bde09eec3c3b42dc9c838db3894c223 docker://3fa73da57380ef28520f8bbf53b3540f523ab76d45a1b42cc9a1e4573eb45639}],QOSClass:BestEffort,InitContainerStatuses:[],} ]
```

**Release note**:

```release-note
NONE
```
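The client-side retry described above is a bounded loop with a per-attempt timeout; the actual shell one-liner appears in the diff below. A minimal Go sketch of the same idea, assuming a plain HTTP endpoint (the helper name `tryConnect`, the example URL, and the treat-any-response-as-success simplification are illustrative, not the test's actual code):

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

// tryConnect attempts an HTTP GET up to 5 times with an 8-second
// per-attempt timeout, mirroring the busybox wget loop used by the
// client pods: return on the first response, otherwise sleep one
// second and retry, and report failure after 5 attempts.
func tryConnect(url string) error {
	client := &http.Client{Timeout: 8 * time.Second}
	for i := 0; i < 5; i++ {
		resp, err := client.Get(url)
		if err == nil {
			resp.Body.Close()
			return nil // first success wins
		}
		time.Sleep(1 * time.Second)
	}
	return fmt.Errorf("could not reach %s after 5 attempts", url)
}

func main() {
	if err := tryConnect("http://svc-server.default:80"); err != nil {
		fmt.Println(err)
	}
}
```

Retrying on the client side means a pod that starts before its network plumbing is ready gets several chances to connect instead of failing the whole test on the first attempt.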
This commit is contained in: commit c7f970c560
```diff
@@ -22,12 +22,12 @@ import (
 	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/kubernetes/pkg/apis/networking"
 	"k8s.io/kubernetes/test/e2e/framework"
-	imageutils "k8s.io/kubernetes/test/utils/image"

 	"fmt"

 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
+	imageutils "k8s.io/kubernetes/test/utils/image"
 )

 /*
```
```diff
@@ -36,290 +36,247 @@ are correctly enforced by a networking plugin. It accomplishes this by launching
 a simple netcat server, and two clients with different
 attributes. Each test case creates a network policy which should only allow
 connections from one of the clients. The test then asserts that the clients
-failed or succesfully connected as expected.
+failed or successfully connected as expected.
 */

 var _ = SIGDescribe("NetworkPolicy", func() {
+    var service *v1.Service
+    var podServer *v1.Pod
     f := framework.NewDefaultFramework("network-policy")

-    It("should support a 'default-deny' policy [Feature:NetworkPolicy]", func() {
-        ns := f.Namespace
+    Context("NetworkPolicy between server and client", func() {
+        BeforeEach(func() {
+            By("Creating a simple server that serves on port 80 and 81.")
+            podServer, service = createServerPodAndService(f, f.Namespace, "server", []int{80, 81})

-        By("Create a simple server.")
-        podServer, service := createServerPodAndService(f, ns, "server", []int{80})
-        defer cleanupServerPodAndService(f, podServer, service)
             framework.Logf("Waiting for Server to come up.")
             err := framework.WaitForPodRunningInNamespace(f.ClientSet, podServer)
             Expect(err).NotTo(HaveOccurred())
+            By("Waiting for pod ready", func() {
+                err := f.WaitForPodReady(podServer.Name)
+                Expect(err).NotTo(HaveOccurred())
+            })

-        // Create a pod with name 'client-can-connect', which should be able to communicate with server.
-        By("Creating client which will be able to contact the server since no policies are present.")
-        testCanConnect(f, ns, "client-can-connect", service, 80)
-
-        By("Creating a network policy denying all traffic.")
-        policy := &networking.NetworkPolicy{
-            ObjectMeta: metav1.ObjectMeta{
-                Name: "deny-all",
-            },
-            Spec: networking.NetworkPolicySpec{
-                PodSelector: metav1.LabelSelector{},
-                Ingress:     []networking.NetworkPolicyIngressRule{},
-            },
-        }
-
-        policy, err = f.InternalClientset.Networking().NetworkPolicies(ns.Name).Create(policy)
-        Expect(err).NotTo(HaveOccurred())
-        defer cleanupNetworkPolicy(f, policy)
-
-        // Create a pod with name 'client-cannot-connect', which will attempt to comunicate with the server,
-        // but should not be able to now that isolation is on.
-        testCannotConnect(f, ns, "client-cannot-connect", service, 80)
-    })
-
-    It("should enforce policy based on PodSelector [Feature:NetworkPolicy]", func() {
-        ns := f.Namespace
-
-        By("Creating a simple server.")
-        serverPod, service := createServerPodAndService(f, ns, "server", []int{80})
-        defer cleanupServerPodAndService(f, serverPod, service)
-        framework.Logf("Waiting for Server to come up.")
-        err := framework.WaitForPodRunningInNamespace(f.ClientSet, serverPod)
-        Expect(err).NotTo(HaveOccurred())
-
-        By("Creating a network policy for the server which allows traffic from the pod 'client-a'.")
-
-        policy := &networking.NetworkPolicy{
-            ObjectMeta: metav1.ObjectMeta{
-                Name: "allow-client-a-via-pod-selector",
-            },
-            Spec: networking.NetworkPolicySpec{
-                // Apply this policy to the Server
-                PodSelector: metav1.LabelSelector{
-                    MatchLabels: map[string]string{
-                        "pod-name": serverPod.Name,
-                    },
-                },
-                // Allow traffic only from client-a
-                Ingress: []networking.NetworkPolicyIngressRule{{
-                    From: []networking.NetworkPolicyPeer{{
-                        PodSelector: &metav1.LabelSelector{
-                            MatchLabels: map[string]string{
-                                "pod-name": "client-a",
-                            },
-                        },
-                    }},
-                }},
-            },
-        }
-
-        policy, err = f.InternalClientset.Networking().NetworkPolicies(ns.Name).Create(policy)
-        Expect(err).NotTo(HaveOccurred())
-        defer cleanupNetworkPolicy(f, policy)
-
-        By("Creating client-a which should be able to contact the server.")
-        testCanConnect(f, ns, "client-a", service, 80)
-        testCannotConnect(f, ns, "client-b", service, 80)
-    })
-
It("should enforce policy based on Ports [Feature:NetworkPolicy]", func() {
|
||||
ns := f.Namespace
|
||||
|
||||
// Create Server with Service
|
||||
By("Creating a simple server.")
|
||||
serverPod, service := createServerPodAndService(f, ns, "server", []int{80, 81})
|
||||
defer cleanupServerPodAndService(f, serverPod, service)
|
||||
framework.Logf("Waiting for Server to come up.")
|
||||
err := framework.WaitForPodRunningInNamespace(f.ClientSet, serverPod)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Testing pods can connect to both ports when no policy is present.")
|
||||
testCanConnect(f, ns, "basecase-reachable-80", service, 80)
|
||||
testCanConnect(f, ns, "basecase-reachable-81", service, 81)
|
||||
|
||||
By("Creating a network policy for the Service which allows traffic only to one port.")
|
||||
policy := &networking.NetworkPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "allow-ingress-on-port-81",
|
||||
},
|
||||
Spec: networking.NetworkPolicySpec{
|
||||
// Apply to server
|
||||
PodSelector: metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"pod-name": serverPod.Name,
|
||||
},
|
||||
},
|
||||
// Allow traffic only to one port.
|
||||
Ingress: []networking.NetworkPolicyIngressRule{{
|
||||
Ports: []networking.NetworkPolicyPort{{
|
||||
Port: &intstr.IntOrString{IntVal: 81},
|
||||
}},
|
||||
}},
|
||||
},
|
||||
}
|
||||
policy, err = f.InternalClientset.Networking().NetworkPolicies(ns.Name).Create(policy)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
defer cleanupNetworkPolicy(f, policy)
|
||||
|
||||
By("Testing pods can connect only to the port allowed by the policy.")
|
||||
testCannotConnect(f, ns, "client-a", service, 80)
|
||||
testCanConnect(f, ns, "client-b", service, 81)
|
||||
})
|
||||
|
||||
It("should enforce multiple, stacked policies with overlapping podSelectors [Feature:NetworkPolicy]", func() {
|
||||
ns := f.Namespace
|
||||
|
||||
// Create Server with Service
|
||||
By("Creating a simple server.")
|
||||
serverPod, service := createServerPodAndService(f, ns, "server", []int{80, 81})
|
||||
defer cleanupServerPodAndService(f, serverPod, service)
|
||||
framework.Logf("Waiting for Server to come up.")
|
||||
err := framework.WaitForPodRunningInNamespace(f.ClientSet, serverPod)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Testing pods can connect to both ports when no policy is present.")
|
||||
testCanConnect(f, ns, "test-a", service, 80)
|
||||
testCanConnect(f, ns, "test-b", service, 81)
|
||||
|
||||
By("Creating a network policy for the Service which allows traffic only to one port.")
|
||||
policy := &networking.NetworkPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "allow-ingress-on-port-80",
|
||||
},
|
||||
Spec: networking.NetworkPolicySpec{
|
||||
// Apply to server
|
||||
PodSelector: metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"pod-name": serverPod.Name,
|
||||
},
|
||||
},
|
||||
// Allow traffic only to one port.
|
||||
Ingress: []networking.NetworkPolicyIngressRule{{
|
||||
Ports: []networking.NetworkPolicyPort{{
|
||||
Port: &intstr.IntOrString{IntVal: 80},
|
||||
}},
|
||||
}},
|
||||
},
|
||||
}
|
||||
policy, err = f.InternalClientset.Networking().NetworkPolicies(ns.Name).Create(policy)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
defer cleanupNetworkPolicy(f, policy)
|
||||
|
||||
By("Creating a network policy for the Service which allows traffic only to another port.")
|
||||
policy2 := &networking.NetworkPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "allow-ingress-on-port-81",
|
||||
},
|
||||
Spec: networking.NetworkPolicySpec{
|
||||
// Apply to server
|
||||
PodSelector: metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"pod-name": serverPod.Name,
|
||||
},
|
||||
},
|
||||
// Allow traffic only to one port.
|
||||
Ingress: []networking.NetworkPolicyIngressRule{{
|
||||
Ports: []networking.NetworkPolicyPort{{
|
||||
Port: &intstr.IntOrString{IntVal: 81},
|
||||
}},
|
||||
}},
|
||||
},
|
||||
}
|
||||
policy2, err = f.InternalClientset.Networking().NetworkPolicies(ns.Name).Create(policy2)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
defer cleanupNetworkPolicy(f, policy2)
|
||||
|
||||
By("Testing pods can connect to both ports when both policies are present.")
|
||||
testCanConnect(f, ns, "client-a", service, 80)
|
||||
testCanConnect(f, ns, "client-b", service, 81)
|
||||
})
|
||||
|
||||
It("should support allow-all policy [Feature:NetworkPolicy]", func() {
|
||||
ns := f.Namespace
|
||||
|
||||
// Create Server with Service
|
||||
By("Creating a simple server.")
|
||||
serverPod, service := createServerPodAndService(f, ns, "server", []int{80, 81})
|
||||
defer cleanupServerPodAndService(f, serverPod, service)
|
||||
framework.Logf("Waiting for Server to come up.")
|
||||
err := framework.WaitForPodRunningInNamespace(f.ClientSet, serverPod)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
By("Testing pods can connect to both ports when no policy is present.")
|
||||
testCanConnect(f, ns, "test-a", service, 80)
|
||||
testCanConnect(f, ns, "test-b", service, 81)
|
||||
|
||||
By("Creating a network policy which allows all traffic.")
|
||||
policy := &networking.NetworkPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "allow-all",
|
||||
},
|
||||
Spec: networking.NetworkPolicySpec{
|
||||
// Allow all traffic
|
||||
PodSelector: metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{},
|
||||
},
|
||||
Ingress: []networking.NetworkPolicyIngressRule{{}},
|
||||
},
|
||||
}
|
||||
policy, err = f.InternalClientset.Networking().NetworkPolicies(ns.Name).Create(policy)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
defer cleanupNetworkPolicy(f, policy)
|
||||
|
||||
By("Testing pods can connect to both ports when an 'allow-all' policy is present.")
|
||||
testCanConnect(f, ns, "client-a", service, 80)
|
||||
testCanConnect(f, ns, "client-b", service, 81)
|
||||
})
|
||||
|
||||
It("should enforce policy based on NamespaceSelector [Feature:NetworkPolicy]", func() {
|
||||
nsA := f.Namespace
|
||||
nsBName := f.BaseName + "-b"
|
||||
// The CreateNamespace helper uses the input name as a Name Generator, so the namespace itself
|
||||
// will have a different name than what we are setting as the value of ns-name.
|
||||
// This is fine as long as we don't try to match the label as nsB.Name in our policy.
|
||||
nsB, err := f.CreateNamespace(nsBName, map[string]string{
|
||||
"ns-name": nsBName,
|
||||
+            // Create pods, which should be able to communicate with the server on port 80 and 81.
+            By("Testing pods can connect to both ports when no policy is present.")
+            testCanConnect(f, f.Namespace, "client-can-connect-80", service, 80)
+            testCanConnect(f, f.Namespace, "client-can-connect-81", service, 81)
         })
-        Expect(err).NotTo(HaveOccurred())

-        // Create Server with Service in NS-B
-        By("Creating a webserver tied to a service.")
-        serverPod, service := createServerPodAndService(f, nsA, "server", []int{80})
-        defer cleanupServerPodAndService(f, serverPod, service)
-        framework.Logf("Waiting for server to come up.")
-        err = framework.WaitForPodRunningInNamespace(f.ClientSet, serverPod)
-        Expect(err).NotTo(HaveOccurred())
+        AfterEach(func() {
+            cleanupServerPodAndService(f, podServer, service)
+        })

-        // Create Policy for that service that allows traffic only via namespace B
-        By("Creating a network policy for the server which allows traffic from namespace-b.")
-        policy := &networking.NetworkPolicy{
-            ObjectMeta: metav1.ObjectMeta{
-                Name: "allow-ns-b-via-namespace-selector",
-            },
-            Spec: networking.NetworkPolicySpec{
-                // Apply to server
-                PodSelector: metav1.LabelSelector{
-                    MatchLabels: map[string]string{
-                        "pod-name": serverPod.Name,
-                    },
+        It("should support a 'default-deny' policy [Feature:NetworkPolicy]", func() {
+            policy := &networking.NetworkPolicy{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name: "deny-all",
                 },
-                // Allow traffic only from NS-B
-                Ingress: []networking.NetworkPolicyIngressRule{{
-                    From: []networking.NetworkPolicyPeer{{
-                        NamespaceSelector: &metav1.LabelSelector{
-                            MatchLabels: map[string]string{
-                                "ns-name": nsBName,
-                            },
-                        },
-                    }},
-                }},
-            },
-        }
-        policy, err = f.InternalClientset.Networking().NetworkPolicies(nsA.Name).Create(policy)
-        Expect(err).NotTo(HaveOccurred())
-        defer cleanupNetworkPolicy(f, policy)
+                Spec: networking.NetworkPolicySpec{
+                    PodSelector: metav1.LabelSelector{},
+                    Ingress:     []networking.NetworkPolicyIngressRule{},
+                },
+            }

-        testCannotConnect(f, nsA, "client-a", service, 80)
-        testCanConnect(f, nsB, "client-b", service, 80)
+            policy, err := f.InternalClientset.Networking().NetworkPolicies(f.Namespace.Name).Create(policy)
+            Expect(err).NotTo(HaveOccurred())
+            defer cleanupNetworkPolicy(f, policy)
+
+            // Create a pod with name 'client-cannot-connect', which will attempt to communicate with the server,
+            // but should not be able to now that isolation is on.
+            testCannotConnect(f, f.Namespace, "client-cannot-connect", service, 80)
         })
It("should enforce policy based on PodSelector [Feature:NetworkPolicy]", func() {
|
||||
By("Creating a network policy for the server which allows traffic from the pod 'client-a'.")
|
||||
policy := &networking.NetworkPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "allow-client-a-via-pod-selector",
|
||||
},
|
||||
Spec: networking.NetworkPolicySpec{
|
||||
// Apply this policy to the Server
|
||||
PodSelector: metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"pod-name": podServer.Name,
|
||||
},
|
||||
},
|
||||
// Allow traffic only from client-a
|
||||
Ingress: []networking.NetworkPolicyIngressRule{{
|
||||
From: []networking.NetworkPolicyPeer{{
|
||||
PodSelector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"pod-name": "client-a",
|
||||
},
|
||||
},
|
||||
}},
|
||||
}},
|
||||
},
|
||||
}
|
||||
|
||||
policy, err := f.InternalClientset.Networking().NetworkPolicies(f.Namespace.Name).Create(policy)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
defer cleanupNetworkPolicy(f, policy)
|
||||
|
||||
By("Creating client-a which should be able to contact the server.", func() {
|
||||
testCanConnect(f, f.Namespace, "client-a", service, 80)
|
||||
})
|
||||
By("Creating client-b which should not be able to contact the server.", func() {
|
||||
testCannotConnect(f, f.Namespace, "client-b", service, 80)
|
||||
})
|
||||
})
|
||||
|
||||
It("should enforce policy based on NamespaceSelector [Feature:NetworkPolicy]", func() {
|
||||
nsA := f.Namespace
|
||||
nsBName := f.BaseName + "-b"
|
||||
// The CreateNamespace helper uses the input name as a Name Generator, so the namespace itself
|
||||
// will have a different name than what we are setting as the value of ns-name.
|
||||
// This is fine as long as we don't try to match the label as nsB.Name in our policy.
|
||||
nsB, err := f.CreateNamespace(nsBName, map[string]string{
|
||||
"ns-name": nsBName,
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Create Server with Service in NS-B
|
||||
framework.Logf("Waiting for server to come up.")
|
||||
err = framework.WaitForPodRunningInNamespace(f.ClientSet, podServer)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Create Policy for that service that allows traffic only via namespace B
|
||||
By("Creating a network policy for the server which allows traffic from namespace-b.")
|
||||
policy := &networking.NetworkPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "allow-ns-b-via-namespace-selector",
|
||||
},
|
||||
Spec: networking.NetworkPolicySpec{
|
||||
// Apply to server
|
||||
PodSelector: metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"pod-name": podServer.Name,
|
||||
},
|
||||
},
|
||||
// Allow traffic only from NS-B
|
||||
Ingress: []networking.NetworkPolicyIngressRule{{
|
||||
From: []networking.NetworkPolicyPeer{{
|
||||
NamespaceSelector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"ns-name": nsBName,
|
||||
},
|
||||
},
|
||||
}},
|
||||
}},
|
||||
},
|
||||
}
|
||||
policy, err = f.InternalClientset.Networking().NetworkPolicies(nsA.Name).Create(policy)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
defer cleanupNetworkPolicy(f, policy)
|
||||
|
||||
testCannotConnect(f, nsA, "client-a", service, 80)
|
||||
testCanConnect(f, nsB, "client-b", service, 80)
|
||||
})
|
||||
|
||||
It("should enforce policy based on Ports [Feature:NetworkPolicy]", func() {
|
||||
By("Creating a network policy for the Service which allows traffic only to one port.")
|
||||
policy := &networking.NetworkPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "allow-ingress-on-port-81",
|
||||
},
|
||||
Spec: networking.NetworkPolicySpec{
|
||||
// Apply to server
|
||||
PodSelector: metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"pod-name": podServer.Name,
|
||||
},
|
||||
},
|
||||
// Allow traffic only to one port.
|
||||
Ingress: []networking.NetworkPolicyIngressRule{{
|
||||
Ports: []networking.NetworkPolicyPort{{
|
||||
Port: &intstr.IntOrString{IntVal: 81},
|
||||
}},
|
||||
}},
|
||||
},
|
||||
}
|
||||
policy, err := f.InternalClientset.Networking().NetworkPolicies(f.Namespace.Name).Create(policy)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
defer cleanupNetworkPolicy(f, policy)
|
||||
|
||||
By("Testing pods can connect only to the port allowed by the policy.")
|
||||
testCannotConnect(f, f.Namespace, "client-a", service, 80)
|
||||
testCanConnect(f, f.Namespace, "client-b", service, 81)
|
||||
})
|
||||
|
||||
It("should enforce multiple, stacked policies with overlapping podSelectors [Feature:NetworkPolicy]", func() {
|
||||
By("Creating a network policy for the Service which allows traffic only to one port.")
|
||||
policy := &networking.NetworkPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "allow-ingress-on-port-80",
|
||||
},
|
||||
Spec: networking.NetworkPolicySpec{
|
||||
// Apply to server
|
||||
PodSelector: metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"pod-name": podServer.Name,
|
||||
},
|
||||
},
|
||||
// Allow traffic only to one port.
|
||||
Ingress: []networking.NetworkPolicyIngressRule{{
|
||||
Ports: []networking.NetworkPolicyPort{{
|
||||
Port: &intstr.IntOrString{IntVal: 80},
|
||||
}},
|
||||
}},
|
||||
},
|
||||
}
|
||||
policy, err := f.InternalClientset.Networking().NetworkPolicies(f.Namespace.Name).Create(policy)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
defer cleanupNetworkPolicy(f, policy)
|
||||
|
||||
By("Creating a network policy for the Service which allows traffic only to another port.")
|
||||
policy2 := &networking.NetworkPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "allow-ingress-on-port-81",
|
||||
},
|
||||
Spec: networking.NetworkPolicySpec{
|
||||
// Apply to server
|
||||
PodSelector: metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"pod-name": podServer.Name,
|
||||
},
|
||||
},
|
||||
// Allow traffic only to one port.
|
||||
Ingress: []networking.NetworkPolicyIngressRule{{
|
||||
Ports: []networking.NetworkPolicyPort{{
|
||||
Port: &intstr.IntOrString{IntVal: 81},
|
||||
}},
|
||||
}},
|
||||
},
|
||||
}
|
||||
policy2, err = f.InternalClientset.Networking().NetworkPolicies(f.Namespace.Name).Create(policy2)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
defer cleanupNetworkPolicy(f, policy2)
|
||||
|
||||
By("Testing pods can connect to both ports when both policies are present.")
|
||||
testCanConnect(f, f.Namespace, "client-a", service, 80)
|
||||
testCanConnect(f, f.Namespace, "client-b", service, 81)
|
||||
})
|
||||
|
||||
It("should support allow-all policy [Feature:NetworkPolicy]", func() {
|
||||
By("Creating a network policy which allows all traffic.")
|
||||
policy := &networking.NetworkPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "allow-all",
|
||||
},
|
||||
Spec: networking.NetworkPolicySpec{
|
||||
// Allow all traffic
|
||||
PodSelector: metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{},
|
||||
},
|
||||
Ingress: []networking.NetworkPolicyIngressRule{{}},
|
||||
},
|
||||
}
|
||||
policy, err := f.InternalClientset.Networking().NetworkPolicies(f.Namespace.Name).Create(policy)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
defer cleanupNetworkPolicy(f, policy)
|
||||
|
||||
By("Testing pods can connect to both ports when an 'allow-all' policy is present.")
|
||||
testCanConnect(f, f.Namespace, "client-a", service, 80)
|
||||
testCanConnect(f, f.Namespace, "client-b", service, 81)
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
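The stacked-policies case above passes because NetworkPolicies are additive: two policies selecting the same pods allow the union of their ingress rules. A sketch of the equivalent single policy, using the same internal API types as the test file (the policy name `allow-ingress-on-ports-80-81` and the helper wrapper are hypothetical, not part of the PR):

```go
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/kubernetes/pkg/apis/networking"
)

// allowBothPorts is equivalent to stacking allow-ingress-on-port-80 and
// allow-ingress-on-port-81: listing both ports in one rule permits the
// same union of traffic that the two stacked policies permit together.
func allowBothPorts(serverPodName string) *networking.NetworkPolicy {
	return &networking.NetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "allow-ingress-on-ports-80-81"},
		Spec: networking.NetworkPolicySpec{
			PodSelector: metav1.LabelSelector{
				MatchLabels: map[string]string{"pod-name": serverPodName},
			},
			Ingress: []networking.NetworkPolicyIngressRule{{
				Ports: []networking.NetworkPolicyPort{
					{Port: &intstr.IntOrString{IntVal: 80}},
					{Port: &intstr.IntOrString{IntVal: 81}},
				},
			}},
		},
	}
}
```

The test deliberately creates two separate policies instead, because the point is to verify that the plugin combines overlapping podSelectors correctly.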
```diff
@@ -339,7 +296,35 @@ func testCanConnect(f *framework.Framework, ns *v1.Namespace, podName string, se

     framework.Logf("Waiting for %s to complete.", podClient.Name)
     err = framework.WaitForPodSuccessInNamespace(f.ClientSet, podClient.Name, ns.Name)
-    Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("checking %s could communicate with server.", podClient.Name))
+    if err != nil {
+        // Collect pod logs when we see a failure.
+        logs, logErr := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, fmt.Sprintf("%s-container", podName))
+        if logErr != nil {
+            framework.Failf("Error getting container logs: %s", logErr)
+        }
+
+        // Collect current NetworkPolicies applied in the test namespace.
+        policies, err := f.InternalClientset.Networking().NetworkPolicies(f.Namespace.Name).List(metav1.ListOptions{})
+        if err != nil {
+            framework.Logf("error getting current NetworkPolicies for %s namespace: %s", f.Namespace.Name, err)
+        }
+
+        // Collect the list of pods running in the test namespace.
+        podsInNS, err := framework.GetPodsInNamespace(f.ClientSet, f.Namespace.Name, map[string]string{})
+        if err != nil {
+            framework.Logf("error getting pods for %s namespace: %s", f.Namespace.Name, err)
+        }
+
+        pods := []string{}
+        for _, p := range podsInNS {
+            pods = append(pods, fmt.Sprintf("Pod: %s, Status: %s\n", p.Name, p.Status.String()))
+        }
+
+        framework.Failf("Pod %s should be able to connect to service %s, but was not able to connect.\nPod logs:\n%s\n\n Current NetworkPolicies:\n\t%v\n\n Pods:\n\t%v\n\n", podName, service.Name, logs, policies.Items, pods)
+
+        // Dump debug information for the test namespace.
+        framework.DumpDebugInfo(f.ClientSet, f.Namespace.Name)
+    }
 }

 func testCannotConnect(f *framework.Framework, ns *v1.Namespace, podName string, service *v1.Service, targetPort int) {
```
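The collection block above reappears almost verbatim in `testCannotConnect` in the next hunk. One possible follow-up refactor is to factor it into a shared helper; the sketch below only rearranges calls that already appear in this diff, and the helper name `dumpConnectivityDebugInfo` is hypothetical, not part of the PR:

```go
package network

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/test/e2e/framework"
)

// dumpConnectivityDebugInfo gathers client pod logs, the NetworkPolicies in
// the test namespace, and the pod statuses, then fails the test with one
// combined message. msg carries the connect/cannot-connect specific wording.
func dumpConnectivityDebugInfo(f *framework.Framework, podName, msg string) {
	logs, logErr := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, fmt.Sprintf("%s-container", podName))
	if logErr != nil {
		framework.Logf("Error getting container logs: %s", logErr)
	}
	policies, err := f.InternalClientset.Networking().NetworkPolicies(f.Namespace.Name).List(metav1.ListOptions{})
	if err != nil {
		framework.Logf("error getting current NetworkPolicies for %s namespace: %s", f.Namespace.Name, err)
	}
	podsInNS, err := framework.GetPodsInNamespace(f.ClientSet, f.Namespace.Name, map[string]string{})
	if err != nil {
		framework.Logf("error getting pods for %s namespace: %s", f.Namespace.Name, err)
	}
	pods := []string{}
	for _, p := range podsInNS {
		pods = append(pods, fmt.Sprintf("Pod: %s, Status: %s\n", p.Name, p.Status.String()))
	}
	framework.Failf("%s\nPod logs:\n%s\n\nCurrent NetworkPolicies:\n\t%v\n\nPods:\n\t%v\n\n", msg, logs, policies.Items, pods)
}
```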
```diff
@@ -354,7 +339,38 @@ func testCannotConnect(f *framework.Framework, ns *v1.Namespace, podName string,

     framework.Logf("Waiting for %s to complete.", podClient.Name)
     err := framework.WaitForPodSuccessInNamespace(f.ClientSet, podClient.Name, ns.Name)
     Expect(err).To(HaveOccurred(), fmt.Sprintf("checking %s could not communicate with server.", podName))
+
+    // We expect an error here since it's a cannot connect test.
+    // Dump debug information if the error was nil.
+    if err == nil {
+        // Collect pod logs when we see a failure.
+        logs, logErr := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, fmt.Sprintf("%s-container", podName))
+        if logErr != nil {
+            framework.Failf("Error getting container logs: %s", logErr)
+        }
+
+        // Collect current NetworkPolicies applied in the test namespace.
+        policies, err := f.InternalClientset.Networking().NetworkPolicies(f.Namespace.Name).List(metav1.ListOptions{})
+        if err != nil {
+            framework.Logf("error getting current NetworkPolicies for %s namespace: %s", f.Namespace.Name, err)
+        }
+
+        // Collect the list of pods running in the test namespace.
+        podsInNS, err := framework.GetPodsInNamespace(f.ClientSet, f.Namespace.Name, map[string]string{})
+        if err != nil {
+            framework.Logf("error getting pods for %s namespace: %s", f.Namespace.Name, err)
+        }
+
+        pods := []string{}
+        for _, p := range podsInNS {
+            pods = append(pods, fmt.Sprintf("Pod: %s, Status: %s\n", p.Name, p.Status.String()))
+        }
+
+        framework.Failf("Pod %s should not be able to connect to service %s, but was able to connect.\nPod logs:\n%s\n\n Current NetworkPolicies:\n\t%v\n\n Pods:\n\t %v\n\n", podName, service.Name, logs, policies.Items, pods)
+
+        // Dump debug information for the test namespace.
+        framework.DumpDebugInfo(f.ClientSet, f.Namespace.Name)
+    }
 }

 // Create a server pod with a listening container for each port in ports[].
```
```diff
@@ -369,13 +385,25 @@ func createServerPodAndService(f *framework.Framework, namespace *v1.Namespace,
         // Build the containers for the server pod.
         containers = append(containers, v1.Container{
             Name:  fmt.Sprintf("%s-container-%d", podName, port),
-            Image: imageutils.GetE2EImage(imageutils.Redis),
-            Args: []string{
-                "/bin/sh",
-                "-c",
-                fmt.Sprintf("/bin/nc -kl %d", port),
+            Image: imageutils.GetE2EImage(imageutils.Porter),
+            Env: []v1.EnvVar{
+                {
+                    Name:  fmt.Sprintf("SERVE_PORT_%d", port),
+                    Value: "foo",
+                },
             },
             Ports: []v1.ContainerPort{{ContainerPort: int32(port)}},
+            ReadinessProbe: &v1.Probe{
+                Handler: v1.Handler{
+                    HTTPGet: &v1.HTTPGetAction{
+                        Path: "/",
+                        Port: intstr.IntOrString{
+                            IntVal: int32(port),
+                        },
+                        Scheme: v1.URISchemeHTTP,
+                    },
+                },
+            },
         })

         // Build the Service Ports for the service.
```
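The porter test image serves the value of each `SERVE_PORT_<port>` environment variable over HTTP on that port, which is what makes the HTTP readiness probe on `/` meaningful: the pod only reports Ready once something is actually listening. A rough, illustrative approximation of that behavior (a sketch of the idea, not porter's actual source):

```go
package main

import (
	"fmt"
	"net/http"
	"os"
	"strings"
)

// For each SERVE_PORT_<n> environment variable, serve its value over
// HTTP on port n; e.g. SERVE_PORT_80=foo answers "foo" on :80.
func main() {
	for _, kv := range os.Environ() {
		parts := strings.SplitN(kv, "=", 2)
		if len(parts) != 2 || !strings.HasPrefix(parts[0], "SERVE_PORT_") {
			continue
		}
		port := strings.TrimPrefix(parts[0], "SERVE_PORT_")
		body := parts[1]
		go func(port, body string) {
			mux := http.NewServeMux()
			mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
				fmt.Fprint(w, body)
			})
			http.ListenAndServe(":"+port, mux)
		}(port, body)
	}
	select {} // block forever while the per-port servers run
}
```

Switching the server from a netcat one-shot listener to an HTTP server is also what allows the clients to use wget instead of nc.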
```diff
@@ -433,7 +461,7 @@ func cleanupServerPodAndService(f *framework.Framework, pod *v1.Pod, service *v1
 }

 // Create a client pod which will attempt a netcat to the provided service, on the specified port.
-// This client will attempt a oneshot connection, then die, without restarting the pod.
+// This client will attempt a one-shot connection, then die, without restarting the pod.
 // Test can then be asserted based on whether the pod quit with an error or not.
 func createNetworkClientPod(f *framework.Framework, namespace *v1.Namespace, podName string, targetService *v1.Service, targetPort int) *v1.Pod {
     pod, err := f.ClientSet.Core().Pods(namespace.Name).Create(&v1.Pod{
```
```diff
@@ -448,11 +476,12 @@ func createNetworkClientPod(f *framework.Framework, namespace *v1.Namespace, pod
         Containers: []v1.Container{
             {
                 Name:  fmt.Sprintf("%s-container", podName),
-                Image: imageutils.GetE2EImage(imageutils.Redis),
+                Image: imageutils.GetBusyBoxImage(),
                 Args: []string{
                     "/bin/sh",
                     "-c",
-                    fmt.Sprintf("/usr/bin/printf dummy-data | /bin/nc -w 8 %s.%s %d", targetService.Name, targetService.Namespace, targetPort),
+                    fmt.Sprintf("for i in $(seq 1 5); do wget -T 8 %s.%s:%d -O - && exit 0 || sleep 1; done; exit 1",
+                        targetService.Name, targetService.Namespace, targetPort),
                 },
             },
         },
```
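This wget loop is the client-side retry from the PR description: up to five attempts with an 8-second timeout each, exiting 0 on the first successful fetch and 1 if every attempt fails, so the pod's terminal phase encodes the connectivity result. A small illustrative snippet showing what the format string above expands to, using the service name and namespace from the failure log quoted in the PR description (the concrete values are just an example):

```go
package main

import "fmt"

func main() {
	// What the client pod's Args evaluate to for one concrete service:
	cmd := fmt.Sprintf(
		"for i in $(seq 1 5); do wget -T 8 %s.%s:%d -O - && exit 0 || sleep 1; done; exit 1",
		"svc-server", "e2e-tests-network-policy-tbzm8", 80)
	fmt.Println(cmd)
	// The pod exits 0 on the first successful fetch and 1 after five failed
	// attempts; testCanConnect and testCannotConnect then assert on that
	// terminal phase via framework.WaitForPodSuccessInNamespace.
}
```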
```diff
@@ -460,6 +489,7 @@ func createNetworkClientPod(f *framework.Framework, namespace *v1.Namespace, pod
     })

     Expect(err).NotTo(HaveOccurred())
+
     return pod
 }
```