Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-19 18:02:01 +00:00)

Merge pull request #91592 from jayunit100/netpol-impl2

new NetworkPolicy Validation suite

commit fec1a366c3
@@ -86,6 +86,7 @@ go_library(
        "//test/e2e/framework/service:go_default_library",
        "//test/e2e/framework/skipper:go_default_library",
        "//test/e2e/framework/ssh:go_default_library",
        "//test/e2e/network/netpol:go_default_library",
        "//test/e2e/network/scale:go_default_library",
        "//test/e2e/storage/utils:go_default_library",
        "//test/utils:go_default_library",
@@ -110,6 +111,7 @@ filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//test/e2e/network/netpol:all-srcs",
        "//test/e2e/network/scale:all-srcs",
    ],
    tags = ["automanaged"],
test/e2e/network/netpol/BUILD (new file, 46 lines)
@@ -0,0 +1,46 @@
package(default_visibility = ["//visibility:public"])

load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = [
        "k8s_util.go",
        "model.go",
        "network_policy.go",
        "policies.go",
        "probe.go",
        "reachability.go",
        "test_helper.go",
        "truthtable.go",
    ],
    importpath = "k8s.io/kubernetes/test/e2e/network/netpol",
    deps = [
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/api/networking/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//test/e2e/framework:go_default_library",
        "//test/e2e/framework/pod:go_default_library",
        "//test/utils/image:go_default_library",
        "//vendor/github.com/onsi/ginkgo:go_default_library",
        "//vendor/github.com/pkg/errors:go_default_library",
        "//vendor/k8s.io/utils/net:go_default_library",
        "//vendor/sigs.k8s.io/yaml:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
test/e2e/network/netpol/k8s_util.go (new file, 351 lines)
@@ -0,0 +1,351 @@
/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package netpol

import (
    "context"
    "fmt"
    "strings"
    "sync"
    "time"

    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"

    "github.com/pkg/errors"
    v1 "k8s.io/api/core/v1"
    networkingv1 "k8s.io/api/networking/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    clientset "k8s.io/client-go/kubernetes"
    "k8s.io/kubernetes/test/e2e/framework"
)

// Scenario provides a convenience interface to kube functionality that we leverage for polling NetworkPolicy connections.
type Scenario struct {
    mutex     sync.Mutex
    podCache  map[string][]v1.Pod
    framework *framework.Framework
    ClientSet clientset.Interface
}

// NewScenario is a utility function that wraps creation of the stuff we're using for creating a cluster that expresses the global policy scenario we're testing.
func NewScenario(framework *framework.Framework) *Scenario {
    return &Scenario{
        podCache:  map[string][]v1.Pod{},
        framework: framework,
        ClientSet: framework.ClientSet,
    }
}

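// Illustration only (not part of this change): a minimal sketch of how a Scenario is
// meant to be wired up in a test, assuming an existing *framework.Framework named f and
// a Model already built via NewModel (defined in model.go below).
//
//     k8s := NewScenario(f)
//     if err := k8s.InitializeCluster(model); err != nil {
//         framework.Failf("unable to initialize cluster: %v", err)
//     }
//     if err := k8s.waitForHTTPServers(model); err != nil {
//         framework.Failf("probe servers never became ready: %v", err)
//     }
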
// InitializeCluster checks the state of the cluster, creating or updating namespaces and deployments as needed
func (k *Scenario) InitializeCluster(model *Model) error {
    var createdPods []*v1.Pod
    for _, ns := range model.Namespaces {
        _, err := k.CreateNamespace(ns.Spec())
        if err != nil {
            return err
        }

        for _, pod := range ns.Pods {
            framework.Logf("creating/updating pod %s/%s", ns.Name, pod.Name)

            kubePod, err := k.CreatePod(pod.KubePod())
            if err != nil {
                return err
            }
            createdPods = append(createdPods, kubePod)

            _, err = k.CreateService(pod.Service())
            if err != nil {
                return err
            }
        }
    }

    for _, podString := range model.AllPodStrings() {
        k8sPod, err := k.GetPodFromCache(podString.Namespace(), podString.PodName())
        if err != nil {
            return err
        }
        if k8sPod == nil {
            return errors.Errorf("unable to find pod in ns %s with key/val pod=%s", podString.Namespace(), podString.PodName())
        }
        err = e2epod.WaitForPodNameRunningInNamespace(k.ClientSet, k8sPod.Name, k8sPod.Namespace)
        if err != nil {
            return errors.Wrapf(err, "unable to wait for pod %s/%s", podString.Namespace(), podString.PodName())
        }
    }

    for _, createdPod := range createdPods {
        err := e2epod.WaitForPodRunningInNamespace(k.ClientSet, createdPod)
        if err != nil {
            return errors.Wrapf(err, "unable to wait for pod %s/%s", createdPod.Namespace, createdPod.Name)
        }
    }

    return nil
}

// GetPodFromCache returns a pod with the matching namespace and name
func (k *Scenario) GetPodFromCache(ns string, name string) (*v1.Pod, error) {
    pods, err := k.getPodsUncached(ns, "pod", name)
    if err != nil {
        return nil, err
    }
    if len(pods) == 0 {
        return nil, nil
    }
    return &pods[0], nil
}

func (k *Scenario) getPodsUncached(ns string, key string, val string) ([]v1.Pod, error) {
    v1PodList, err := k.ClientSet.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{
        LabelSelector: fmt.Sprintf("%v=%v", key, val),
    })
    if err != nil {
        return nil, errors.Wrapf(err, "unable to list Pods in ns %s with key/val %s=%s", ns, key, val)
    }
    return v1PodList.Items, nil
}

// GetPodsFromCacheByKeyVal returns an array of all Pods in the given namespace having a k/v label pair.
func (k *Scenario) GetPodsFromCacheByKeyVal(ns string, key string, val string) ([]v1.Pod, error) {
    k.mutex.Lock()
    p, ok := k.podCache[fmt.Sprintf("%v_%v_%v", ns, key, val)]
    k.mutex.Unlock()
    if ok {
        return p, nil
    }

    v1PodList, err := k.getPodsUncached(ns, key, val)
    if err != nil {
        return nil, err
    }

    k.mutex.Lock()
    k.podCache[fmt.Sprintf("%v_%v_%v", ns, key, val)] = v1PodList
    k.mutex.Unlock()

    return v1PodList, nil
}

// GetPod gets a pod by namespace and name
func (k *Scenario) GetPod(ns string, name string) (*v1.Pod, error) {
    kubePod, err := k.ClientSet.CoreV1().Pods(ns).Get(context.TODO(), name, metav1.GetOptions{})
    if err != nil {
        return nil, errors.Wrapf(err, "unable to get pod %s/%s", ns, name)
    }
    return kubePod, nil
}

// Probe execs into a pod and checks its connectivity to another pod.
func (k *Scenario) Probe(nsFrom string, podFrom string, containerFrom string, addrTo string, protocol v1.Protocol, toPort int) (bool, string, error) {
    fromPods, err := k.GetPodsFromCacheByKeyVal(nsFrom, "pod", podFrom)
    if err != nil {
        return false, "", err
    }
    if len(fromPods) == 0 {
        return false, "", errors.Errorf("pod %s/%s not found", nsFrom, podFrom)
    }
    fromPod := fromPods[0]

    var cmd []string
    switch protocol {
    case v1.ProtocolSCTP:
        cmd = []string{"/agnhost", "connect", fmt.Sprintf("%s:%d", addrTo, toPort), "--timeout=1s", "--protocol=sctp"}
    case v1.ProtocolTCP:
        cmd = []string{"/agnhost", "connect", fmt.Sprintf("%s:%d", addrTo, toPort), "--timeout=1s", "--protocol=tcp"}
    case v1.ProtocolUDP:
        cmd = []string{"nc", "-v", "-z", "-w", "1", "-u", addrTo, fmt.Sprintf("%d", toPort)}
    default:
        framework.Failf("protocol %s not supported", protocol)
    }

    commandDebugString := fmt.Sprintf("kubectl exec %s -c %s -n %s -- %s", fromPod.Name, containerFrom, fromPod.Namespace, strings.Join(cmd, " "))
    stdout, stderr, err := k.ExecuteRemoteCommand(fromPod, containerFrom, cmd)
    if err != nil {
        framework.Logf("%s/%s -> %s: error when running command: err - %v /// stdout - %s /// stderr - %s", nsFrom, podFrom, addrTo, err, stdout, stderr)
        return false, commandDebugString, nil
    }
    return true, commandDebugString, nil
}

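// Illustration only (not part of this change): a single probe from pod x/a to pod x/b's
// service over TCP 80, assuming the default cluster.local DNS domain. Probe only returns
// a non-nil error for lookup/plumbing failures; a blocked connection comes back as
// connected == false together with a kubectl command line that reproduces the attempt.
//
//     connected, debugCmd, err := k8s.Probe("x", "a", "cont-80-tcp", "s-x-b.x.svc.cluster.local", v1.ProtocolTCP, 80)
//     if err != nil {
//         framework.Logf("probe could not run: %v", err)
//     } else if !connected {
//         framework.Logf("connection blocked; reproduce with: %s", debugCmd)
//     }
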
// ExecuteRemoteCommand executes a remote shell command on the given pod
func (k *Scenario) ExecuteRemoteCommand(pod v1.Pod, containerName string, command []string) (string, string, error) {
    return k.framework.ExecWithOptions(framework.ExecOptions{
        Command:            command,
        Namespace:          pod.Namespace,
        PodName:            pod.Name,
        ContainerName:      containerName,
        Stdin:              nil,
        CaptureStdout:      true,
        CaptureStderr:      true,
        PreserveWhitespace: false,
    })
}

// CreateNamespace is a convenience function for namespace setup
func (k *Scenario) CreateNamespace(ns *v1.Namespace) (*v1.Namespace, error) {
    createdNamespace, err := k.ClientSet.CoreV1().Namespaces().Create(context.TODO(), ns, metav1.CreateOptions{})
    if err != nil {
        return nil, errors.Wrapf(err, "unable to update namespace %s", ns.Name)
    }
    return createdNamespace, nil
}

// CreateService is a convenience function for service setup
func (k *Scenario) CreateService(service *v1.Service) (*v1.Service, error) {
    ns := service.Namespace
    name := service.Name

    createdService, err := k.ClientSet.CoreV1().Services(ns).Create(context.TODO(), service, metav1.CreateOptions{})
    if err != nil {
        return nil, errors.Wrapf(err, "unable to create service %s/%s", ns, name)
    }
    return createdService, nil
}

// CreatePod is a convenience function for pod setup
func (k *Scenario) CreatePod(pod *v1.Pod) (*v1.Pod, error) {
    ns := pod.Namespace
    framework.Logf("creating pod %s/%s", ns, pod.Name)

    createdPod, err := k.ClientSet.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
    if err != nil {
        return nil, errors.Wrapf(err, "unable to update pod %s/%s", ns, pod.Name)
    }
    return createdPod, nil
}

// CleanNetworkPolicies is a convenience function for deleting network policies before startup of any new test.
func (k *Scenario) CleanNetworkPolicies(namespaces []string) error {
    for _, ns := range namespaces {
        framework.Logf("deleting policies in %s ..........", ns)
        l, err := k.ClientSet.NetworkingV1().NetworkPolicies(ns).List(context.TODO(), metav1.ListOptions{})
        if err != nil {
            return errors.Wrapf(err, "unable to list network policies in ns %s", ns)
        }
        for _, np := range l.Items {
            framework.Logf("deleting network policy %s/%s", ns, np.Name)
            err = k.ClientSet.NetworkingV1().NetworkPolicies(ns).Delete(context.TODO(), np.Name, metav1.DeleteOptions{})
            if err != nil {
                return errors.Wrapf(err, "unable to delete network policy %s/%s", ns, np.Name)
            }
        }
    }
    return nil
}

// ClearCache clears the kube pod cache
func (k *Scenario) ClearCache() {
    framework.Logf("Clearing pod cache")
    k.mutex.Lock()
    k.podCache = map[string][]v1.Pod{}
    k.mutex.Unlock()
    framework.Logf("Pod cache successfully cleared")
}

// CreateNetworkPolicy is a convenience function for creating netpols
func (k *Scenario) CreateNetworkPolicy(ns string, netpol *networkingv1.NetworkPolicy) (*networkingv1.NetworkPolicy, error) {
    framework.Logf("creating network policy %s/%s", ns, netpol.Name)
    netpol.ObjectMeta.Namespace = ns
    np, err := k.ClientSet.NetworkingV1().NetworkPolicies(ns).Create(context.TODO(), netpol, metav1.CreateOptions{})
    if err != nil {
        return nil, errors.Wrapf(err, "unable to create network policy %s/%s", ns, netpol.Name)
    }
    return np, nil
}

// UpdateNetworkPolicy is a convenience function for updating netpols
func (k *Scenario) UpdateNetworkPolicy(ns string, netpol *networkingv1.NetworkPolicy) (*networkingv1.NetworkPolicy, error) {
    framework.Logf("updating network policy %s/%s", ns, netpol.Name)
    netpol.ObjectMeta.Namespace = ns
    np, err := k.ClientSet.NetworkingV1().NetworkPolicies(ns).Update(context.TODO(), netpol, metav1.UpdateOptions{})
    if err != nil {
        return np, errors.Wrapf(err, "unable to update network policy %s/%s", ns, netpol.Name)
    }
    return np, nil
}

func (k *Scenario) getNamespace(ns string) (*v1.Namespace, error) {
    selectedNameSpace, err := k.ClientSet.CoreV1().Namespaces().Get(context.TODO(), ns, metav1.GetOptions{})
    if err != nil {
        return nil, errors.Wrapf(err, "unable to get namespace %s", ns)
    }
    return selectedNameSpace, nil
}

func (k *Scenario) setNamespaceLabels(ns string, labels map[string]string) error {
    selectedNameSpace, err := k.getNamespace(ns)
    if err != nil {
        return err
    }
    selectedNameSpace.ObjectMeta.Labels = labels
    _, err = k.ClientSet.CoreV1().Namespaces().Update(context.TODO(), selectedNameSpace, metav1.UpdateOptions{})
    return errors.Wrapf(err, "unable to update namespace %s", ns)
}

func (k *Scenario) deleteNamespaces(namespaces []string) error {
    for _, ns := range namespaces {
        err := k.ClientSet.CoreV1().Namespaces().Delete(context.TODO(), ns, metav1.DeleteOptions{})
        if err != nil {
            return errors.Wrapf(err, "unable to delete namespace %s", ns)
        }
    }
    return nil
}

// waitForHTTPServers waits for all webservers to be up, on all protocols, and then validates them using the same probe logic as the rest of the suite.
func (k *Scenario) waitForHTTPServers(model *Model) error {
    const maxTries = 10
    framework.Logf("waiting for HTTP servers (ports 80 and 81) to become ready")

    testCases := map[string]*TestCase{}
    for _, port := range model.Ports {
        for _, protocol := range model.Protocols {
            fromPort := 81
            desc := fmt.Sprintf("%d->%d,%s", fromPort, port, protocol)
            testCases[desc] = &TestCase{FromPort: fromPort, ToPort: int(port), Protocol: protocol}
        }
    }
    notReady := map[string]bool{}
    for caseName := range testCases {
        notReady[caseName] = true
    }

    for i := 0; i < maxTries; i++ {
        for caseName, testCase := range testCases {
            if notReady[caseName] {
                reachability := NewReachability(model.AllPods(), true)
                testCase.Reachability = reachability
                ProbePodToPodConnectivity(k, model, testCase)
                _, wrong, _, _ := reachability.Summary(ignoreLoopback)
                if wrong == 0 {
                    framework.Logf("server %s is ready", caseName)
                    delete(notReady, caseName)
                } else {
                    framework.Logf("server %s is not ready", caseName)
                }
            }
        }
        if len(notReady) == 0 {
            return nil
        }
        time.Sleep(waitInterval)
    }
    return errors.Errorf("after %d tries, %d HTTP servers are not ready", maxTries, len(notReady))
}
test/e2e/network/netpol/model.go (new file, 300 lines)
@@ -0,0 +1,300 @@
/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package netpol

import (
    "fmt"
    "strings"

    "github.com/pkg/errors"
    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/kubernetes/test/e2e/framework"
    imageutils "k8s.io/kubernetes/test/utils/image"
)

// Model defines the namespaces, deployments, services, pods, containers and associated
// data for network policy test cases and provides the source of truth
type Model struct {
    Namespaces    []*Namespace
    allPodStrings *[]PodString
    allPods       *[]*Pod
    // the raw data
    NamespaceNames []string
    PodNames       []string
    Ports          []int32
    Protocols      []v1.Protocol
    DNSDomain      string
}

// NewModel instantiates a model based on:
// - namespaces
// - pods
// - ports to listen on
// - protocols to listen on
// The total number of pods is the number of namespaces x the number of pods per namespace.
// The number of containers per pod is the number of ports x the number of protocols.
// The *total* number of containers is namespaces x pods x ports x protocols.
func NewModel(namespaces []string, podNames []string, ports []int32, protocols []v1.Protocol, dnsDomain string) *Model {
    model := &Model{
        NamespaceNames: namespaces,
        PodNames:       podNames,
        Ports:          ports,
        Protocols:      protocols,
        DNSDomain:      dnsDomain,
    }
    framework.Logf("DnsDomain %v", model.DNSDomain)

    // build the entire "model" for the overall test, which means, building
    // namespaces, pods, containers for each protocol.
    for _, ns := range namespaces {
        var pods []*Pod
        for _, podName := range podNames {
            var containers []*Container
            for _, port := range ports {
                for _, protocol := range protocols {
                    containers = append(containers, &Container{
                        Port:     port,
                        Protocol: protocol,
                    })
                }
            }
            pods = append(pods, &Pod{
                Namespace:  ns,
                Name:       podName,
                Containers: containers,
            })
        }
        model.Namespaces = append(model.Namespaces, &Namespace{Name: ns, Pods: pods})
    }
    return model
}

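// Illustration only (not part of this change): the sizing arithmetic described above,
// worked out for the 3-namespace / 3-pod / 2-port shape the suite uses, with a
// hypothetical two-protocol model and an assumed cluster.local DNS domain:
//
//     m := NewModel([]string{"x", "y", "z"}, []string{"a", "b", "c"},
//         []int32{80, 81}, []v1.Protocol{v1.ProtocolTCP, v1.ProtocolUDP}, "cluster.local")
//     // 3 namespaces x 3 pod names     = 9 pods (and 9 matching services)
//     // 2 ports x 2 protocols          = 4 containers per pod
//     // 9 pods x 4 containers per pod  = 36 containers in total
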
// NewReachability instantiates a default-true reachability from the model's pods
func (m *Model) NewReachability() *Reachability {
    return NewReachability(m.AllPods(), true)
}

// AllPodStrings returns a slice of all pod strings
func (m *Model) AllPodStrings() []PodString {
    if m.allPodStrings == nil {
        var pods []PodString
        for _, ns := range m.Namespaces {
            for _, pod := range ns.Pods {
                pods = append(pods, pod.PodString())
            }
        }
        m.allPodStrings = &pods
    }
    return *m.allPodStrings
}

// AllPods returns a slice of all pods
func (m *Model) AllPods() []*Pod {
    if m.allPods == nil {
        var pods []*Pod
        for _, ns := range m.Namespaces {
            for _, pod := range ns.Pods {
                pods = append(pods, pod)
            }
        }
        m.allPods = &pods
    }
    return *m.allPods
}

// FindPod returns the pod of matching namespace and name, or an error
func (m *Model) FindPod(ns string, name string) (*Pod, error) {
    for _, namespace := range m.Namespaces {
        for _, pod := range namespace.Pods {
            if namespace.Name == ns && pod.Name == name {
                return pod, nil
            }
        }
    }
    return nil, errors.Errorf("unable to find pod %s/%s", ns, name)
}

// Namespace is the abstract representation of what matters to network policy
// tests for a namespace; i.e. it ignores kube implementation details
type Namespace struct {
    Name string
    Pods []*Pod
}

// Spec builds a kubernetes namespace spec
func (ns *Namespace) Spec() *v1.Namespace {
    return &v1.Namespace{
        ObjectMeta: metav1.ObjectMeta{
            Name:   ns.Name,
            Labels: ns.LabelSelector(),
        },
    }
}

// LabelSelector returns the default labels that should be placed on a namespace
// in order for it to be uniquely selectable by label selectors
func (ns *Namespace) LabelSelector() map[string]string {
    return map[string]string{"ns": ns.Name}
}

// Pod is the abstract representation of what matters to network policy tests for
// a pod; i.e. it ignores kube implementation details
type Pod struct {
    Namespace  string
    Name       string
    Containers []*Container
}

// FindContainer returns the container matching port and protocol; otherwise, an error
func (p *Pod) FindContainer(port int32, protocol v1.Protocol) (*Container, error) {
    for _, cont := range p.Containers {
        if cont.Port == port && cont.Protocol == protocol {
            return cont, nil
        }
    }
    return nil, errors.Errorf("unable to find container in pod %s/%s, port %d, protocol %s", p.Namespace, p.Name, port, protocol)
}

// PodString returns a corresponding pod string
func (p *Pod) PodString() PodString {
    return NewPodString(p.Namespace, p.Name)
}

// ContainerSpecs builds kubernetes container specs for the pod
func (p *Pod) ContainerSpecs() []v1.Container {
    var containers []v1.Container
    for _, cont := range p.Containers {
        containers = append(containers, cont.Spec())
    }
    return containers
}

func (p *Pod) labelSelectorKey() string {
    return "pod"
}

func (p *Pod) labelSelectorValue() string {
    return p.Name
}

// LabelSelector returns the default labels that should be placed on a pod/deployment
// in order for it to be uniquely selectable by label selectors
func (p *Pod) LabelSelector() map[string]string {
    return map[string]string{
        p.labelSelectorKey(): p.labelSelectorValue(),
    }
}

// KubePod returns the kube pod
func (p *Pod) KubePod() *v1.Pod {
    zero := int64(0)
    return &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            Name:      p.Name,
            Labels:    p.LabelSelector(),
            Namespace: p.Namespace,
        },
        Spec: v1.PodSpec{
            TerminationGracePeriodSeconds: &zero,
            Containers:                    p.ContainerSpecs(),
        },
    }
}

// QualifiedServiceAddress returns the address that can be used to hit a service from
// any namespace in the cluster
func (p *Pod) QualifiedServiceAddress(dnsDomain string) string {
    return fmt.Sprintf("%s.%s.svc.%s", p.ServiceName(), p.Namespace, dnsDomain)
}

// ServiceName returns the unqualified service name
func (p *Pod) ServiceName() string {
    return fmt.Sprintf("s-%s-%s", p.Namespace, p.Name)
}

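// For example (illustration only, assuming the default cluster.local DNS domain): pod a in
// namespace x carries the label pod=a, its service is named s-x-a, and its cluster-wide
// address is s-x-a.x.svc.cluster.local.
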
// Service returns a kube service spec
func (p *Pod) Service() *v1.Service {
    service := &v1.Service{
        ObjectMeta: metav1.ObjectMeta{
            Name:      p.ServiceName(),
            Namespace: p.Namespace,
        },
        Spec: v1.ServiceSpec{
            Selector: p.LabelSelector(),
        },
    }
    for _, container := range p.Containers {
        service.Spec.Ports = append(service.Spec.Ports, v1.ServicePort{
            Name:     fmt.Sprintf("service-port-%s-%d", strings.ToLower(string(container.Protocol)), container.Port),
            Protocol: container.Protocol,
            Port:     container.Port,
        })
    }
    return service
}

// Container is the abstract representation of what matters to network policy tests for
// a container; i.e. it ignores kube implementation details
type Container struct {
    Port     int32
    Protocol v1.Protocol
}

// Name returns the container name
func (c *Container) Name() string {
    return fmt.Sprintf("cont-%d-%s", c.Port, strings.ToLower(string(c.Protocol)))
}

// PortName returns the container port name
func (c *Container) PortName() string {
    return fmt.Sprintf("serve-%d-%s", c.Port, strings.ToLower(string(c.Protocol)))
}

// Spec returns the kube container spec
func (c *Container) Spec() v1.Container {
    var (
        // agnHostImage is the image URI of AgnHost
        agnHostImage = imageutils.GetE2EImage(imageutils.Agnhost)
        cmd          []string
    )

    switch c.Protocol {
    case v1.ProtocolTCP:
        cmd = []string{"/agnhost", "serve-hostname", "--tcp", "--http=false", "--port", fmt.Sprintf("%d", c.Port)}
    case v1.ProtocolUDP:
        cmd = []string{"/agnhost", "serve-hostname", "--udp", "--http=false", "--port", fmt.Sprintf("%d", c.Port)}
    case v1.ProtocolSCTP:
        cmd = []string{"/agnhost", "netexec", "--sctp-port", fmt.Sprintf("%d", c.Port)}
    default:
        framework.Failf("invalid protocol %v", c.Protocol)
    }
    return v1.Container{
        Name:            c.Name(),
        ImagePullPolicy: v1.PullIfNotPresent,
        Image:           agnHostImage,
        Command:         cmd,
        SecurityContext: &v1.SecurityContext{},
        Ports: []v1.ContainerPort{
            {
                ContainerPort: c.Port,
                Name:          c.PortName(),
                Protocol:      c.Protocol,
            },
        },
    }
}
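// For example (illustration only): a TCP container listening on port 81 is named cont-81-tcp
// and exposes the container port name serve-81-tcp, which is exactly the named port that the
// "allow ingress access on one named port" test in network_policy.go selects.
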
test/e2e/network/netpol/network_policy.go (new file, 909 lines)
@@ -0,0 +1,909 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package netpol

import (
    "context"
    "encoding/json"
    "fmt"
    "time"

    "k8s.io/apimachinery/pkg/util/intstr"
    "k8s.io/apimachinery/pkg/util/wait"

    v1 "k8s.io/api/core/v1"
    networkingv1 "k8s.io/api/networking/v1"

    "github.com/onsi/ginkgo"
    utilnet "k8s.io/utils/net"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/kubernetes/test/e2e/framework"
)

const (
    addSCTPContainers = false
    isVerbose         = true

    // useFixedNamespaces is useful when working on these tests: instead of creating new pods and
    // new namespaces for each test run, it creates a fixed set of namespaces and pods, and then
    // reuses them for each test case.
    // The result: tests run much faster. However, this should only be used as a convenience for
    // working on the tests during development. It should not be enabled in production.
    useFixedNamespaces = false

    // See https://github.com/kubernetes/kubernetes/issues/95879
    // The semantics of the effect of network policies on loopback calls may be undefined: should
    // they always be ALLOWED; how do Services affect this?
    // Calico, Cilium, Antrea seem to do different things.
    // Since different CNIs have different results, that causes tests including loopback to fail
    // on some CNIs. So let's just ignore loopback calls for the purposes of deciding test pass/fail.
    ignoreLoopback = true
)

/*
You might be wondering, why are there multiple namespaces used for each test case?

These tests are based on "truth tables" that compare the expected and actual connectivity of each pair of pods.
Since network policies live in namespaces, and peers can be selected by namespace,
knowing the connectivity of pods in other namespaces is key information to show whether a network policy is working as intended or not.

We use 3 namespaces each with 3 pods, and probe all combinations ( 9 pods x 9 pods = 81 data points ) -- including cross-namespace calls.

Here's an example of a test run, showing the expected and actual connectivity, along with the differences. Note how the
visual representation as a truth table greatly aids in understanding what a network policy is intended to do in theory
and what is happening in practice:

Oct 19 10:34:16.907: INFO: expected:

    -    x/a  x/b  x/c  y/a  y/b  y/c  z/a  z/b  z/c
    x/a  X    .    .    .    .    .    .    .    .
    x/b  X    .    .    .    .    .    .    .    .
    x/c  X    .    .    .    .    .    .    .    .
    y/a  .    .    .    .    .    .    .    .    .
    y/b  .    .    .    .    .    .    .    .    .
    y/c  .    .    .    .    .    .    .    .    .
    z/a  X    .    .    .    .    .    .    .    .
    z/b  X    .    .    .    .    .    .    .    .
    z/c  X    .    .    .    .    .    .    .    .

Oct 19 10:34:16.907: INFO: observed:

    -    x/a  x/b  x/c  y/a  y/b  y/c  z/a  z/b  z/c
    x/a  X    .    .    .    .    .    .    .    .
    x/b  X    .    .    .    .    .    .    .    .
    x/c  X    .    .    .    .    .    .    .    .
    y/a  .    .    .    .    .    .    .    .    .
    y/b  .    .    .    .    .    .    .    .    .
    y/c  .    .    .    .    .    .    .    .    .
    z/a  X    .    .    .    .    .    .    .    .
    z/b  X    .    .    .    .    .    .    .    .
    z/c  X    .    .    .    .    .    .    .    .

Oct 19 10:34:16.907: INFO: comparison:

    -    x/a  x/b  x/c  y/a  y/b  y/c  z/a  z/b  z/c
    x/a  .    .    .    .    .    .    .    .    .
    x/b  .    .    .    .    .    .    .    .    .
    x/c  .    .    .    .    .    .    .    .    .
    y/a  .    .    .    .    .    .    .    .    .
    y/b  .    .    .    .    .    .    .    .    .
    y/c  .    .    .    .    .    .    .    .    .
    z/a  .    .    .    .    .    .    .    .    .
    z/b  .    .    .    .    .    .    .    .    .
    z/c  .    .    .    .    .    .    .    .    .
*/

// SIGDescribeCopy is a copy of SIGDescribe from test/e2e/network/framework.go, so that we can avoid a cyclic dependency while we incubate these new tests.
func SIGDescribeCopy(text string, body func()) bool {
    return ginkgo.Describe("[sig-network] "+text, body)
}

var _ = SIGDescribeCopy("Netpol [LinuxOnly]", func() {
    f := framework.NewDefaultFramework("netpol")

    ginkgo.Context("NetworkPolicy between server and client", func() {
        ginkgo.BeforeEach(func() {
            if useFixedNamespaces {
                _ = initializeResources(f)

                _, _, _, model, k8s := getK8SModel(f)
                framework.ExpectNoError(k8s.CleanNetworkPolicies(model.NamespaceNames), "unable to clean network policies")
                err := wait.Poll(waitInterval, waitTimeout, func() (done bool, err error) {
                    for _, ns := range model.NamespaceNames {
                        netpols, err := k8s.ClientSet.NetworkingV1().NetworkPolicies(ns).List(context.TODO(), metav1.ListOptions{})
                        framework.ExpectNoError(err, "get network policies from ns %s", ns)
                        if len(netpols.Items) > 0 {
                            return false, nil
                        }
                    }
                    return true, nil
                })
                framework.ExpectNoError(err, "unable to wait for network policy deletion")
            } else {
                framework.Logf("Using %v as the default dns domain for this cluster... ", framework.TestContext.ClusterDNSDomain)
                framework.ExpectNoError(initializeResources(f), "unable to initialize resources")
            }
        })

        ginkgo.AfterEach(func() {
            if !useFixedNamespaces {
                _, _, _, model, k8s := getK8SModel(f)
                framework.ExpectNoError(k8s.deleteNamespaces(model.NamespaceNames), "unable to clean up netpol namespaces")
            }
        })

        ginkgo.It("should support a 'default-deny-ingress' policy [Feature:NetworkPolicy]", func() {
            nsX, _, _, model, k8s := getK8SModel(f)
            policy := GetDenyIngress("deny-ingress")
            CreatePolicy(k8s, policy, nsX)

            reachability := NewReachability(model.AllPods(), true)
            reachability.ExpectPeer(&Peer{}, &Peer{Namespace: nsX}, false)

            ValidateOrFail(k8s, model, &TestCase{FromPort: 81, ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability})
        })

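        // Illustration only (not part of this change): GetDenyIngress lives in policies.go, which
        // is outside this excerpt; a default-deny-ingress policy of the kind this test exercises
        // is conventionally shaped like the following when built with the typed API:
        //
        //     denyIngress := &networkingv1.NetworkPolicy{
        //         ObjectMeta: metav1.ObjectMeta{Name: "deny-ingress"},
        //         Spec: networkingv1.NetworkPolicySpec{
        //             PodSelector: metav1.LabelSelector{}, // select every pod in the namespace
        //             PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress},
        //             // no Ingress rules: nothing is allowed in
        //         },
        //     }
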
ginkgo.It("should support a 'default-deny-all' policy [Feature:NetworkPolicy]", func() {
|
||||
np := &networkingv1.NetworkPolicy{}
|
||||
policy := `
|
||||
{
|
||||
"kind": "NetworkPolicy",
|
||||
"apiVersion": "networking.k8s.io/v1",
|
||||
"metadata": {
|
||||
"name": "deny-all-tcp-allow-dns"
|
||||
},
|
||||
"spec": {
|
||||
"podSelector": {
|
||||
"matchLabels": {}
|
||||
},
|
||||
"ingress": [],
|
||||
"egress": [{
|
||||
"ports": [
|
||||
{
|
||||
"protocol": "UDP",
|
||||
"port": 53
|
||||
}
|
||||
]
|
||||
}],
|
||||
"policyTypes": [
|
||||
"Ingress",
|
||||
"Egress"
|
||||
]
|
||||
}
|
||||
}
|
||||
`
|
||||
err := json.Unmarshal([]byte(policy), np)
|
||||
framework.ExpectNoError(err, "unmarshal network policy")
|
||||
|
||||
nsX, _, _, model, k8s := getK8SModel(f)
|
||||
CreatePolicy(k8s, np, nsX)
|
||||
|
||||
reachability := NewReachability(model.AllPods(), true)
|
||||
reachability.ExpectPeer(&Peer{}, &Peer{Namespace: nsX}, false)
|
||||
reachability.ExpectPeer(&Peer{Namespace: nsX}, &Peer{}, false)
|
||||
|
||||
ValidateOrFail(k8s, model, &TestCase{FromPort: 81, ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability})
|
||||
})
|
||||
|
||||
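        // Illustration only (not part of this change): the embedded JSON above corresponds to the
        // following typed construction (deny all ingress and egress, but allow egress to UDP 53 for DNS):
        //
        //     protocolUDP := v1.ProtocolUDP
        //     port53 := intstr.FromInt(53)
        //     denyAllAllowDNS := &networkingv1.NetworkPolicy{
        //         ObjectMeta: metav1.ObjectMeta{Name: "deny-all-tcp-allow-dns"},
        //         Spec: networkingv1.NetworkPolicySpec{
        //             PodSelector: metav1.LabelSelector{},
        //             Ingress:     []networkingv1.NetworkPolicyIngressRule{},
        //             Egress: []networkingv1.NetworkPolicyEgressRule{{
        //                 Ports: []networkingv1.NetworkPolicyPort{{Protocol: &protocolUDP, Port: &port53}},
        //             }},
        //             PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress, networkingv1.PolicyTypeEgress},
        //         },
        //     }
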
ginkgo.It("should enforce policy to allow traffic from pods within server namespace based on PodSelector [Feature:NetworkPolicy]", func() {
|
||||
allowedPods := metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"pod": "b",
|
||||
},
|
||||
}
|
||||
policy := GetAllowIngressByPod("x-a-allows-x-b", map[string]string{"pod": "a"}, &allowedPods)
|
||||
nsX, _, _, model, k8s := getK8SModel(f)
|
||||
CreatePolicy(k8s, policy, nsX)
|
||||
|
||||
reachability := NewReachability(model.AllPods(), true)
|
||||
reachability.ExpectAllIngress(NewPodString(nsX, "a"), false)
|
||||
reachability.Expect(NewPodString(nsX, "b"), NewPodString(nsX, "a"), true)
|
||||
|
||||
ValidateOrFail(k8s, model, &TestCase{FromPort: 81, ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability})
|
||||
})
|
||||
|
||||
ginkgo.It("should enforce policy to allow traffic only from a different namespace, based on NamespaceSelector [Feature:NetworkPolicy]", func() {
|
||||
nsX, nsY, nsZ, model, k8s := getK8SModel(f)
|
||||
allowedLabels := &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"ns": nsY,
|
||||
},
|
||||
}
|
||||
policy := GetAllowIngressByNamespace("allow-client-a-via-ns-selector", map[string]string{"pod": "a"}, allowedLabels)
|
||||
CreatePolicy(k8s, policy, nsX)
|
||||
|
||||
reachability := NewReachability(model.AllPods(), true)
|
||||
// disallow all traffic from the x or z namespaces
|
||||
reachability.ExpectPeer(&Peer{Namespace: nsX}, &Peer{Namespace: nsX, Pod: "a"}, false)
|
||||
reachability.ExpectPeer(&Peer{Namespace: nsZ}, &Peer{Namespace: nsX, Pod: "a"}, false)
|
||||
|
||||
ValidateOrFail(k8s, model, &TestCase{FromPort: 81, ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability})
|
||||
})
|
||||
|
||||
ginkgo.It("should enforce policy based on PodSelector with MatchExpressions[Feature:NetworkPolicy]", func() {
|
||||
allowedPods := metav1.LabelSelector{
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{{
|
||||
Key: "pod",
|
||||
Operator: metav1.LabelSelectorOpIn,
|
||||
Values: []string{"b"},
|
||||
}},
|
||||
}
|
||||
policy := GetAllowIngressByPod("x-a-allows-x-b", map[string]string{"pod": "a"}, &allowedPods)
|
||||
nsX, _, _, model, k8s := getK8SModel(f)
|
||||
CreatePolicy(k8s, policy, nsX)
|
||||
|
||||
reachability := NewReachability(model.AllPods(), true)
|
||||
reachability.ExpectAllIngress(NewPodString(nsX, "a"), false)
|
||||
reachability.Expect(NewPodString(nsX, "b"), NewPodString(nsX, "a"), true)
|
||||
|
||||
ValidateOrFail(k8s, model, &TestCase{FromPort: 81, ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability})
|
||||
})
|
||||
|
||||
ginkgo.It("should enforce policy based on NamespaceSelector with MatchExpressions[Feature:NetworkPolicy]", func() {
|
||||
nsX, nsY, nsZ, model, k8s := getK8SModel(f)
|
||||
allowedNamespaces := &metav1.LabelSelector{
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{{
|
||||
Key: "ns",
|
||||
Operator: metav1.LabelSelectorOpIn,
|
||||
Values: []string{nsY},
|
||||
}},
|
||||
}
|
||||
policy := GetAllowIngressByNamespace("allow-ns-y-match-selector", map[string]string{"pod": "a"}, allowedNamespaces)
|
||||
CreatePolicy(k8s, policy, nsX)
|
||||
|
||||
reachability := NewReachability(model.AllPods(), true)
|
||||
// disallow all traffic from the x or z namespaces
|
||||
reachability.ExpectPeer(&Peer{Namespace: nsX}, &Peer{Namespace: nsX, Pod: "a"}, false)
|
||||
reachability.ExpectPeer(&Peer{Namespace: nsZ}, &Peer{Namespace: nsX, Pod: "a"}, false)
|
||||
|
||||
ValidateOrFail(k8s, model, &TestCase{FromPort: 81, ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability})
|
||||
})
|
||||
|
||||
ginkgo.It("should enforce policy based on PodSelector or NamespaceSelector [Feature:NetworkPolicy]", func() {
|
||||
nsX, _, _, model, k8s := getK8SModel(f)
|
||||
allowedNamespaces := &metav1.LabelSelector{
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{{
|
||||
Key: "ns",
|
||||
Operator: metav1.LabelSelectorOpNotIn,
|
||||
Values: []string{nsX},
|
||||
}},
|
||||
}
|
||||
podBAllowlisting := &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"pod": "b",
|
||||
},
|
||||
}
|
||||
policy := GetAllowIngressByNamespaceOrPod("allow-ns-y-match-selector", map[string]string{"pod": "a"}, allowedNamespaces, podBAllowlisting)
|
||||
CreatePolicy(k8s, policy, nsX)
|
||||
|
||||
reachability := NewReachability(model.AllPods(), true)
|
||||
reachability.Expect(NewPodString(nsX, "a"), NewPodString(nsX, "a"), false)
|
||||
reachability.Expect(NewPodString(nsX, "c"), NewPodString(nsX, "a"), false)
|
||||
|
||||
ValidateOrFail(k8s, model, &TestCase{FromPort: 81, ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability})
|
||||
})
|
||||
|
||||
ginkgo.It("should enforce policy based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]", func() {
|
||||
nsX, nsY, nsZ, model, k8s := getK8SModel(f)
|
||||
allowedNamespaces := &metav1.LabelSelector{
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{{
|
||||
Key: "ns",
|
||||
Operator: metav1.LabelSelectorOpNotIn,
|
||||
Values: []string{nsX},
|
||||
}},
|
||||
}
|
||||
allowedPod := &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"pod": "b",
|
||||
},
|
||||
}
|
||||
policy := GetAllowIngressByNamespaceAndPod("allow-ns-y-podselector-and-nsselector", map[string]string{"pod": "a"}, allowedNamespaces, allowedPod)
|
||||
CreatePolicy(k8s, policy, nsX)
|
||||
|
||||
reachability := NewReachability(model.AllPods(), true)
|
||||
reachability.ExpectAllIngress(NewPodString(nsX, "a"), false)
|
||||
reachability.Expect(NewPodString(nsY, "b"), NewPodString(nsX, "a"), true)
|
||||
reachability.Expect(NewPodString(nsZ, "b"), NewPodString(nsX, "a"), true)
|
||||
|
||||
ValidateOrFail(k8s, model, &TestCase{FromPort: 81, ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability})
|
||||
})
|
||||
|
||||
ginkgo.It("should enforce policy based on Multiple PodSelectors and NamespaceSelectors [Feature:NetworkPolicy]", func() {
|
||||
nsX, nsY, nsZ, model, k8s := getK8SModel(f)
|
||||
allowedNamespaces := &metav1.LabelSelector{
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{{
|
||||
Key: "ns",
|
||||
Operator: metav1.LabelSelectorOpNotIn,
|
||||
Values: []string{nsX},
|
||||
}},
|
||||
}
|
||||
allowedPod := &metav1.LabelSelector{
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{{
|
||||
Key: "pod",
|
||||
Operator: metav1.LabelSelectorOpIn,
|
||||
Values: []string{"b", "c"},
|
||||
}},
|
||||
}
|
||||
policy := GetAllowIngressByNamespaceAndPod("allow-ns-y-z-pod-b-c", map[string]string{"pod": "a"}, allowedNamespaces, allowedPod)
|
||||
CreatePolicy(k8s, policy, nsX)
|
||||
|
||||
reachability := NewReachability(model.AllPods(), true)
|
||||
reachability.ExpectPeer(&Peer{Namespace: nsX}, &Peer{Namespace: nsX, Pod: "a"}, false)
|
||||
reachability.Expect(NewPodString(nsY, "a"), NewPodString(nsX, "a"), false)
|
||||
reachability.Expect(NewPodString(nsZ, "a"), NewPodString(nsX, "a"), false)
|
||||
|
||||
ValidateOrFail(k8s, model, &TestCase{FromPort: 81, ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability})
|
||||
})
|
||||
|
||||
ginkgo.It("should enforce policy to allow traffic only from a pod in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]", func() {
|
||||
nsX, nsY, _, model, k8s := getK8SModel(f)
|
||||
allowedNamespaces := &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"ns": nsY,
|
||||
},
|
||||
}
|
||||
allowedPods := &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"pod": "a",
|
||||
},
|
||||
}
|
||||
policy := GetAllowIngressByNamespaceAndPod("allow-ns-y-pod-a-via-namespace-pod-selector", map[string]string{"pod": "a"}, allowedNamespaces, allowedPods)
|
||||
CreatePolicy(k8s, policy, nsX)
|
||||
|
||||
reachability := NewReachability(model.AllPods(), true)
|
||||
reachability.ExpectAllIngress(NewPodString(nsX, "a"), false)
|
||||
reachability.Expect(NewPodString(nsY, "a"), NewPodString(nsX, "a"), true)
|
||||
|
||||
ValidateOrFail(k8s, model, &TestCase{FromPort: 81, ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability})
|
||||
})
|
||||
|
||||
ginkgo.It("should enforce policy based on Ports [Feature:NetworkPolicy]", func() {
|
||||
ginkgo.By("Creating a network allowPort81Policy which only allows allow listed namespaces (y) to connect on exactly one port (81)")
|
||||
nsX, nsY, nsZ, model, k8s := getK8SModel(f)
|
||||
allowedLabels := &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"ns": nsY,
|
||||
},
|
||||
}
|
||||
allowPort81Policy := GetAllowIngressByNamespaceAndPort("allow-client-a-via-ns-selector", map[string]string{"pod": "a"}, allowedLabels, &intstr.IntOrString{IntVal: 81})
|
||||
CreatePolicy(k8s, allowPort81Policy, nsX)
|
||||
|
||||
reachability := NewReachability(model.AllPods(), true)
|
||||
reachability.ExpectPeer(&Peer{Namespace: nsX}, &Peer{Namespace: nsX, Pod: "a"}, false)
|
||||
reachability.ExpectPeer(&Peer{Namespace: nsY}, &Peer{Namespace: nsX, Pod: "a"}, true)
|
||||
reachability.ExpectPeer(&Peer{Namespace: nsZ}, &Peer{Namespace: nsX, Pod: "a"}, false)
|
||||
|
||||
ValidateOrFail(k8s, model, &TestCase{FromPort: 81, ToPort: 81, Protocol: v1.ProtocolTCP, Reachability: reachability})
|
||||
})
|
||||
|
||||
ginkgo.It("should enforce multiple, stacked policies with overlapping podSelectors [Feature:NetworkPolicy]", func() {
|
||||
ginkgo.By("Creating a network allowPort81Policy which only allows allow listed namespaces (y) to connect on exactly one port (81)")
|
||||
nsX, nsY, nsZ, model, k8s := getK8SModel(f)
|
||||
allowedLabels := &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"ns": nsY,
|
||||
},
|
||||
}
|
||||
allowPort81Policy := GetAllowIngressByNamespaceAndPort("allow-client-a-via-ns-selector", map[string]string{"pod": "a"}, allowedLabels, &intstr.IntOrString{IntVal: 81})
|
||||
CreatePolicy(k8s, allowPort81Policy, nsX)
|
||||
|
||||
reachabilityALLOW := NewReachability(model.AllPods(), true)
|
||||
reachabilityALLOW.ExpectPeer(&Peer{Namespace: nsX}, &Peer{Namespace: nsX, Pod: "a"}, false)
|
||||
reachabilityALLOW.ExpectPeer(&Peer{Namespace: nsY}, &Peer{Namespace: nsX, Pod: "a"}, true)
|
||||
reachabilityALLOW.ExpectPeer(&Peer{Namespace: nsZ}, &Peer{Namespace: nsX, Pod: "a"}, false)
|
||||
|
||||
ginkgo.By("Verifying traffic on port 81.")
|
||||
ValidateOrFail(k8s, model, &TestCase{FromPort: 81, ToPort: 81, Protocol: v1.ProtocolTCP, Reachability: reachabilityALLOW})
|
||||
|
||||
reachabilityDENY := NewReachability(model.AllPods(), true)
|
||||
reachabilityDENY.ExpectAllIngress(NewPodString(nsX, "a"), false)
|
||||
|
||||
ginkgo.By("Verifying traffic on port 80.")
|
||||
ValidateOrFail(k8s, model, &TestCase{FromPort: 81, ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachabilityDENY})
|
||||
|
||||
allowPort80Policy := GetAllowIngressByNamespaceAndPort("allow-client-a-via-ns-selector-80", map[string]string{"pod": "a"}, allowedLabels, &intstr.IntOrString{IntVal: 80})
|
||||
CreatePolicy(k8s, allowPort80Policy, nsX)
|
||||
|
||||
ginkgo.By("Verifying that we can add a policy to unblock port 80")
|
||||
ValidateOrFail(k8s, model, &TestCase{FromPort: 81, ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachabilityALLOW})
|
||||
})
|
||||
|
||||
ginkgo.It("should support allow-all policy [Feature:NetworkPolicy]", func() {
|
||||
ginkgo.By("Creating a network policy which allows all traffic.")
|
||||
policy := GetAllowIngress("allow-all")
|
||||
nsX, _, _, model, k8s := getK8SModel(f)
|
||||
CreatePolicy(k8s, policy, nsX)
|
||||
|
||||
ginkgo.By("Testing pods can connect to both ports when an 'allow-all' policy is present.")
|
||||
reachability := NewReachability(model.AllPods(), true)
|
||||
ValidateOrFail(k8s, model, &TestCase{FromPort: 81, ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability})
|
||||
ValidateOrFail(k8s, model, &TestCase{FromPort: 81, ToPort: 81, Protocol: v1.ProtocolTCP, Reachability: reachability})
|
||||
})
|
||||
|
||||
ginkgo.It("should allow ingress access on one named port [Feature:NetworkPolicy]", func() {
|
||||
policy := GetAllowIngressByPort("allow-all", &intstr.IntOrString{Type: intstr.String, StrVal: "serve-81-tcp"})
|
||||
nsX, _, _, model, k8s := getK8SModel(f)
|
||||
CreatePolicy(k8s, policy, nsX)
|
||||
|
||||
ginkgo.By("Blocking all ports other then 81 in the entire namespace")
|
||||
|
||||
reachabilityPort81 := NewReachability(model.AllPods(), true)
|
||||
ValidateOrFail(k8s, model, &TestCase{FromPort: 81, ToPort: 81, Protocol: v1.ProtocolTCP, Reachability: reachabilityPort81})
|
||||
|
||||
// disallow all traffic to the x namespace
|
||||
reachabilityPort80 := NewReachability(model.AllPods(), true)
|
||||
reachabilityPort80.ExpectPeer(&Peer{}, &Peer{Namespace: nsX}, false)
|
||||
ValidateOrFail(k8s, model, &TestCase{FromPort: 81, ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachabilityPort80})
|
||||
})
|
||||
|
||||
ginkgo.It("should allow ingress access from namespace on one named port [Feature:NetworkPolicy]", func() {
|
||||
nsX, nsY, nsZ, model, k8s := getK8SModel(f)
|
||||
allowedLabels := &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"ns": nsY,
|
||||
},
|
||||
}
|
||||
policy := GetAllowIngressByNamespaceAndPort("allow-client-a-via-ns-selector-80", map[string]string{"pod": "a"}, allowedLabels, &intstr.IntOrString{Type: intstr.String, StrVal: "serve-80-tcp"})
|
||||
CreatePolicy(k8s, policy, nsX)
|
||||
|
||||
reachability := NewReachability(model.AllPods(), true)
|
||||
// disallow all traffic from the x or z namespaces
|
||||
reachability.ExpectPeer(&Peer{Namespace: nsX}, &Peer{Namespace: nsX, Pod: "a"}, false)
|
||||
reachability.ExpectPeer(&Peer{Namespace: nsZ}, &Peer{Namespace: nsX, Pod: "a"}, false)
|
||||
|
||||
ginkgo.By("Verify that port 80 is allowed for namespace y")
|
||||
ValidateOrFail(k8s, model, &TestCase{FromPort: 81, ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability})
|
||||
|
||||
ginkgo.By("Verify that port 81 is blocked for all namespaces including y")
|
||||
reachabilityFAIL := NewReachability(model.AllPods(), true)
|
||||
reachabilityFAIL.ExpectAllIngress(NewPodString(nsX, "a"), false)
|
||||
ValidateOrFail(k8s, model, &TestCase{FromPort: 81, ToPort: 81, Protocol: v1.ProtocolTCP, Reachability: reachabilityFAIL})
|
||||
})
|
||||
|
||||
ginkgo.It("should allow egress access on one named port [Feature:NetworkPolicy]", func() {
|
||||
ginkgo.By("validating egress from port 81 to port 80")
|
||||
policy := GetAllowEgressByPort("allow-egress", &intstr.IntOrString{Type: intstr.String, StrVal: "serve-80-tcp"})
|
||||
nsX, _, _, model, k8s := getK8SModel(f)
|
||||
CreatePolicy(k8s, policy, nsX)
|
||||
|
||||
reachabilityPort80 := NewReachability(model.AllPods(), true)
|
||||
ValidateOrFail(k8s, model, &TestCase{FromPort: 81, ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachabilityPort80})
|
||||
|
||||
// meanwhile no traffic over 81 should work, since our egress policy is on 80
|
||||
reachabilityPort81 := NewReachability(model.AllPods(), true)
|
||||
reachabilityPort81.ExpectPeer(&Peer{Namespace: nsX}, &Peer{}, false)
|
||||
ValidateOrFail(k8s, model, &TestCase{FromPort: 81, ToPort: 81, Protocol: v1.ProtocolTCP, Reachability: reachabilityPort81})
|
||||
})
|
||||
|
||||
ginkgo.It("should enforce updated policy [Feature:NetworkPolicy]", func() {
|
||||
ginkgo.By("Using the simplest possible mutation: start with allow all, then switch to deny all")
|
||||
// part 1) allow all
|
||||
policy := GetAllowIngress("allow-all-mutate-to-deny-all")
|
||||
nsX, _, _, model, k8s := getK8SModel(f)
|
||||
CreatePolicy(k8s, policy, nsX)
|
||||
|
||||
reachability := NewReachability(model.AllPods(), true)
|
||||
ValidateOrFail(k8s, model, &TestCase{FromPort: 81, ToPort: 81, Protocol: v1.ProtocolTCP, Reachability: reachability})
|
||||
|
||||
// part 2) update the policy to deny all
|
||||
policy.Spec.Ingress = []networkingv1.NetworkPolicyIngressRule{}
|
||||
UpdatePolicy(k8s, policy, nsX)
|
||||
|
||||
reachabilityDeny := NewReachability(model.AllPods(), true)
|
||||
reachabilityDeny.ExpectPeer(&Peer{}, &Peer{Namespace: nsX}, false)
|
||||
ValidateOrFail(k8s, model, &TestCase{FromPort: 81, ToPort: 81, Protocol: v1.ProtocolTCP, Reachability: reachabilityDeny})
|
||||
})
|
||||
|
||||
ginkgo.It("should allow ingress access from updated namespace [Feature:NetworkPolicy]", func() {
|
||||
nsX, nsY, _, model, k8s := getK8SModel(f)
|
||||
defer ResetNamespaceLabels(k8s, nsY)
|
||||
|
||||
allowedLabels := &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"ns2": "updated",
|
||||
},
|
||||
}
|
||||
policy := GetAllowIngressByNamespace("allow-client-a-via-ns-selector", map[string]string{"pod": "a"}, allowedLabels)
|
||||
CreatePolicy(k8s, policy, nsX)
|
||||
|
||||
reachability := NewReachability(model.AllPods(), true)
|
||||
reachability.ExpectAllIngress(NewPodString(nsX, "a"), false)
|
||||
ValidateOrFail(k8s, model, &TestCase{FromPort: 81, ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability})
|
||||
|
||||
// add a new label, we'll remove it after this test is completed
|
||||
updatedLabels := map[string]string{
|
||||
"ns": nsY,
|
||||
"ns2": "updated",
|
||||
}
|
||||
UpdateNamespaceLabels(k8s, nsY, updatedLabels)
|
||||
|
||||
// anything from namespace 'y' should be able to get to x/a
|
||||
reachabilityWithLabel := NewReachability(model.AllPods(), true)
|
||||
reachabilityWithLabel.ExpectAllIngress(NewPodString(nsX, "a"), false)
|
||||
reachabilityWithLabel.ExpectPeer(&Peer{Namespace: nsY}, &Peer{}, true)
|
||||
ValidateOrFail(k8s, model, &TestCase{FromPort: 81, ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachabilityWithLabel})
|
||||
})
|
||||
|
||||
ginkgo.It("should allow ingress access from updated pod [Feature:NetworkPolicy]", func() {
|
||||
nsX, _, _, model, k8s := getK8SModel(f)
|
||||
podXB, err := model.FindPod(nsX, "b")
|
||||
framework.ExpectNoError(err, "find pod x/b")
|
||||
defer ResetPodLabels(k8s, podXB)
|
||||
|
||||
// add a new label, we'll remove it after this test is done
|
||||
matchLabels := map[string]string{"pod": "b", "pod2": "updated"}
|
||||
allowedLabels := &metav1.LabelSelector{MatchLabels: matchLabels}
|
||||
policy := GetAllowIngressByPod("allow-client-a-via-pod-selector", map[string]string{"pod": "a"}, allowedLabels)
|
||||
CreatePolicy(k8s, policy, nsX)
|
||||
|
||||
reachability := NewReachability(model.AllPods(), true)
|
||||
reachability.ExpectAllIngress(NewPodString(nsX, "a"), false)
|
||||
ValidateOrFail(k8s, model, &TestCase{FromPort: 81, ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability})
|
||||
|
||||
// now update label in x namespace and pod b
|
||||
AddPodLabels(k8s, podXB, matchLabels)
|
||||
|
||||
ginkgo.By("x/b is able to reach x/a when label is updated")
|
||||
|
||||
reachabilityWithLabel := NewReachability(model.AllPods(), true)
|
||||
reachabilityWithLabel.ExpectAllIngress(NewPodString(nsX, "a"), false)
|
||||
reachabilityWithLabel.Expect(NewPodString(nsX, "b"), NewPodString(nsX, "a"), true)
|
||||
ValidateOrFail(k8s, model, &TestCase{FromPort: 81, ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachabilityWithLabel})
|
||||
})
|
||||
|
||||
ginkgo.It("should deny ingress access to updated pod [Feature:NetworkPolicy]", func() {
|
||||
nsX, _, _, model, k8s := getK8SModel(f)
|
||||
podXA, err := model.FindPod(nsX, "a")
|
||||
framework.ExpectNoError(err, "find pod x/a")
|
||||
defer ResetPodLabels(k8s, podXA)
|
||||
|
||||
policy := GetDenyIngressForTarget(metav1.LabelSelector{MatchLabels: map[string]string{"target": "isolated"}})
|
||||
CreatePolicy(k8s, policy, nsX)
|
||||
|
||||
ginkgo.By("Verify that everything can reach x/a")
|
||||
reachability := NewReachability(model.AllPods(), true)
|
||||
ValidateOrFail(k8s, model, &TestCase{FromPort: 81, ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability})
|
||||
|
||||
AddPodLabels(k8s, podXA, map[string]string{"target": "isolated"})
|
||||
|
||||
reachabilityIsolated := NewReachability(model.AllPods(), true)
|
||||
reachabilityIsolated.ExpectAllIngress(NewPodString(nsX, "a"), false)
|
||||
ValidateOrFail(k8s, model, &TestCase{FromPort: 81, ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachabilityIsolated})
|
||||
})
|
||||
|
||||
ginkgo.It("should work with Ingress, Egress specified together [Feature:NetworkPolicy]", func() {
|
||||
allowedPodLabels := &metav1.LabelSelector{MatchLabels: map[string]string{"pod": "b"}}
|
||||
policy := GetAllowIngressByPod("allow-client-a-via-pod-selector", map[string]string{"pod": "a"}, allowedPodLabels)
|
||||
// add an egress rule on to it...
|
||||
protocolUDP := v1.ProtocolUDP
|
||||
policy.Spec.Egress = []networkingv1.NetworkPolicyEgressRule{
|
||||
{
|
||||
Ports: []networkingv1.NetworkPolicyPort{
|
||||
{
|
||||
// dont use named ports
|
||||
Port: &intstr.IntOrString{Type: intstr.Int, IntVal: 80},
|
||||
},
|
||||
{
|
||||
Protocol: &protocolUDP,
|
||||
Port: &intstr.IntOrString{Type: intstr.Int, IntVal: 53},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
policy.Spec.PolicyTypes = []networkingv1.PolicyType{networkingv1.PolicyTypeEgress, networkingv1.PolicyTypeIngress}
|
||||
nsX, _, _, model, k8s := getK8SModel(f)
|
||||
CreatePolicy(k8s, policy, nsX)
|
||||
|
||||
reachabilityPort80 := NewReachability(model.AllPods(), true)
|
||||
reachabilityPort80.ExpectAllIngress(NewPodString(nsX, "a"), false)
|
||||
reachabilityPort80.Expect(NewPodString(nsX, "b"), NewPodString(nsX, "a"), true)
|
||||
ValidateOrFail(k8s, model, &TestCase{FromPort: 81, ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachabilityPort80})
|
||||
|
||||
ginkgo.By("validating that port 81 doesn't work")
|
||||
// meanwhile no egress traffic on 81 should work, since our egress policy is on 80
|
||||
reachabilityPort81 := NewReachability(model.AllPods(), true)
|
||||
reachabilityPort81.ExpectAllIngress(NewPodString(nsX, "a"), false)
|
||||
reachabilityPort81.ExpectAllEgress(NewPodString(nsX, "a"), false)
|
||||
reachabilityPort81.Expect(NewPodString(nsX, "b"), NewPodString(nsX, "a"), true)
|
||||
ValidateOrFail(k8s, model, &TestCase{FromPort: 81, ToPort: 81, Protocol: v1.ProtocolTCP, Reachability: reachabilityPort81})
|
||||
})
|
||||
|
||||
ginkgo.It("should enforce egress policy allowing traffic to a server in a different namespace based on PodSelector and NamespaceSelector [Feature:NetworkPolicy]", func() {
|
||||
nsX, nsY, _, model, k8s := getK8SModel(f)
|
||||
allowedNamespaces := &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"ns": nsY,
|
||||
},
|
||||
}
|
||||
allowedPods := &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"pod": "a",
|
||||
},
|
||||
}
|
||||
policy := GetAllowEgressByNamespaceAndPod("allow-to-ns-y-pod-a", map[string]string{"pod": "a"}, allowedNamespaces, allowedPods)
|
||||
CreatePolicy(k8s, policy, nsX)
|
||||
|
||||
reachability := NewReachability(model.AllPods(), true)
|
||||
reachability.ExpectAllEgress(NewPodString(nsX, "a"), false)
|
||||
reachability.Expect(NewPodString(nsX, "a"), NewPodString(nsY, "a"), true)
|
||||
ValidateOrFail(k8s, model, &TestCase{FromPort: 81, ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability})
|
||||
})
|
||||
|
||||
ginkgo.It("should enforce multiple ingress policies with ingress allow-all policy taking precedence [Feature:NetworkPolicy]", func() {
|
||||
nsX, _, _, model, k8s := getK8SModel(f)
|
||||
policyAllowOnlyPort80 := GetAllowIngressByPort("allow-ingress-port-80", &intstr.IntOrString{Type: intstr.Int, IntVal: 80})
|
||||
CreatePolicy(k8s, policyAllowOnlyPort80, nsX)
|
||||
|
||||
ginkgo.By("The policy targets port 80 -- so let's make sure traffic on port 81 is blocked")
|
||||
|
||||
reachability := NewReachability(model.AllPods(), true)
|
||||
reachability.ExpectPeer(&Peer{}, &Peer{Namespace: nsX}, false)
|
||||
ValidateOrFail(k8s, model, &TestCase{FromPort: 81, ToPort: 81, Protocol: v1.ProtocolTCP, Reachability: reachability})
|
||||
|
||||
ginkgo.By("Allowing all ports")
|
||||
|
||||
policyAllowAll := GetAllowIngress("allow-ingress")
|
||||
CreatePolicy(k8s, policyAllowAll, nsX)
|
||||
|
||||
reachabilityAll := NewReachability(model.AllPods(), true)
|
||||
ValidateOrFail(k8s, model, &TestCase{FromPort: 81, ToPort: 81, Protocol: v1.ProtocolTCP, Reachability: reachabilityAll})
|
||||
})
|
||||
|
||||
ginkgo.It("should enforce multiple egress policies with egress allow-all policy taking precedence [Feature:NetworkPolicy]", func() {
|
||||
policyAllowPort80 := GetAllowEgressByPort("allow-egress-port-80", &intstr.IntOrString{Type: intstr.Int, IntVal: 80})
|
||||
nsX, _, _, model, k8s := getK8SModel(f)
|
||||
CreatePolicy(k8s, policyAllowPort80, nsX)
|
||||
|
||||
ginkgo.By("Making sure ingress doesn't work other than port 80")
|
||||
|
||||
reachability := NewReachability(model.AllPods(), true)
|
||||
reachability.ExpectPeer(&Peer{Namespace: nsX}, &Peer{}, false)
|
||||
ValidateOrFail(k8s, model, &TestCase{FromPort: 81, ToPort: 81, Protocol: v1.ProtocolTCP, Reachability: reachability})
|
||||
|
||||
ginkgo.By("Allowing all ports")
|
||||
|
||||
policyAllowAll := GetAllowEgress()
|
||||
CreatePolicy(k8s, policyAllowAll, nsX)
|
||||
|
||||
reachabilityAll := NewReachability(model.AllPods(), true)
|
||||
ValidateOrFail(k8s, model, &TestCase{FromPort: 81, ToPort: 81, Protocol: v1.ProtocolTCP, Reachability: reachabilityAll})
|
||||
})
|
||||
|
||||
ginkgo.It("should stop enforcing policies after they are deleted [Feature:NetworkPolicy]", func() {
|
||||
ginkgo.By("Creating a network policy for the server which denies all traffic.")
|
||||
|
||||
// Deny all traffic into and out of "x".
|
||||
policy := GetDenyAll("deny-all")
|
||||
nsX, _, _, model, k8s := getK8SModel(f)
|
||||
CreatePolicy(k8s, policy, nsX)
|
||||
reachability := NewReachability(model.AllPods(), true)
|
||||
|
||||
// Expect all traffic into and out of "x" to be false.
|
||||
reachability.ExpectPeer(&Peer{Namespace: nsX}, &Peer{}, false)
|
||||
reachability.ExpectPeer(&Peer{}, &Peer{Namespace: nsX}, false)
|
||||
ValidateOrFail(k8s, model, &TestCase{FromPort: 81, ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability})
|
||||
|
||||
err := k8s.CleanNetworkPolicies(model.NamespaceNames)
|
||||
time.Sleep(3 * time.Second) // TODO we can remove this eventually, it's just a hack to keep CI stable.
|
||||
framework.ExpectNoError(err, "unable to clean network policies")
|
||||
|
||||
// Now the policy is deleted, we expect all connectivity to work again.
|
||||
reachabilityAll := NewReachability(model.AllPods(), true)
|
||||
ValidateOrFail(k8s, model, &TestCase{FromPort: 81, ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachabilityAll})
|
||||
})
|
||||
|
||||
// TODO: figure out how the next 3 tests should work with dual-stack: do we need a different abstraction than just "podIP"?
|
||||
|
||||
ginkgo.It("should allow egress access to server in CIDR block [Feature:NetworkPolicy]", func() {
|
||||
// Getting podServer's status to get podServer's IP, to create the CIDR
|
||||
nsX, nsY, _, model, k8s := getK8SModel(f)
|
||||
podList, err := f.ClientSet.CoreV1().Pods(nsY).List(context.TODO(), metav1.ListOptions{LabelSelector: "pod=b"})
|
||||
framework.ExpectNoError(err, "Failing to list pods in namespace y")
|
||||
pod := podList.Items[0]
|
||||
|
||||
hostMask := 32
|
||||
if utilnet.IsIPv6String(pod.Status.PodIP) {
|
||||
hostMask = 128
|
||||
}
|
||||
podServerCIDR := fmt.Sprintf("%s/%d", pod.Status.PodIP, hostMask)
|
||||
policyAllowCIDR := GetAllowEgressByCIDR("a", podServerCIDR)
|
||||
CreatePolicy(k8s, policyAllowCIDR, nsX)
|
||||
|
||||
reachability := NewReachability(model.AllPods(), true)
|
||||
reachability.ExpectAllEgress(NewPodString(nsX, "a"), false)
|
||||
reachability.Expect(NewPodString(nsX, "a"), NewPodString(nsY, "b"), true)
|
||||
ValidateOrFail(k8s, model, &TestCase{FromPort: 81, ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability})
|
||||
})
|
||||
|
||||
ginkgo.It("should enforce except clause while egress access to server in CIDR block [Feature:NetworkPolicy]", func() {
|
||||
// Getting podServer's status to get podServer's IP, to create the CIDR with except clause
|
||||
nsX, _, _, model, k8s := getK8SModel(f)
|
||||
podList, err := f.ClientSet.CoreV1().Pods(nsX).List(context.TODO(), metav1.ListOptions{LabelSelector: "pod=a"})
|
||||
framework.ExpectNoError(err, "Failing to find pod x/a")
|
||||
podA := podList.Items[0]
|
||||
|
||||
podServerAllowCIDR := fmt.Sprintf("%s/4", podA.Status.PodIP)
|
||||
|
||||
podList, err = f.ClientSet.CoreV1().Pods(nsX).List(context.TODO(), metav1.ListOptions{LabelSelector: "pod=b"})
|
||||
framework.ExpectNoError(err, "Failing to find pod x/b")
|
||||
podB := podList.Items[0]
|
||||
|
||||
hostMask := 32
|
||||
if utilnet.IsIPv6String(podB.Status.PodIP) {
|
||||
hostMask = 128
|
||||
}
|
||||
podServerExceptList := []string{fmt.Sprintf("%s/%d", podB.Status.PodIP, hostMask)}
|
||||
|
||||
policyAllowCIDR := GetAllowEgressByCIDRExcept("a", podServerAllowCIDR, podServerExceptList)
|
||||
|
||||
CreatePolicy(k8s, policyAllowCIDR, nsX)
|
||||
|
||||
reachability := NewReachability(model.AllPods(), true)
|
||||
reachability.Expect(NewPodString(nsX, "a"), NewPodString(nsX, "b"), false)
|
||||
|
||||
ValidateOrFail(k8s, model, &TestCase{FromPort: 81, ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability})
|
||||
})
|
||||
|
||||
ginkgo.It("should ensure an IP overlapping both IPBlock.CIDR and IPBlock.Except is allowed [Feature:NetworkPolicy]", func() {
|
||||
// Getting podServer's status to get podServer's IP, to create the CIDR with except clause
|
||||
nsX, _, _, model, k8s := getK8SModel(f)
|
||||
podList, err := f.ClientSet.CoreV1().Pods(nsX).List(context.TODO(), metav1.ListOptions{LabelSelector: "pod=a"})
|
||||
framework.ExpectNoError(err, "Failing to find pod x/a")
|
||||
podA := podList.Items[0]
|
||||
|
||||
podList, err = f.ClientSet.CoreV1().Pods(nsX).List(context.TODO(), metav1.ListOptions{LabelSelector: "pod=b"})
|
||||
framework.ExpectNoError(err, "Failing to find pod x/b")
|
||||
podB := podList.Items[0]
|
||||
|
||||
// Exclude podServer's IP with an Except clause
|
||||
hostMask := 32
|
||||
if utilnet.IsIPv6String(podB.Status.PodIP) {
|
||||
hostMask = 128
|
||||
}
|
||||
|
||||
podServerAllowCIDR := fmt.Sprintf("%s/4", podA.Status.PodIP)
|
||||
podServerExceptList := []string{fmt.Sprintf("%s/%d", podB.Status.PodIP, hostMask)}
|
||||
policyAllowCIDR := GetAllowEgressByCIDRExcept("a", podServerAllowCIDR, podServerExceptList)
|
||||
CreatePolicy(k8s, policyAllowCIDR, nsX)
|
||||
|
||||
reachability := NewReachability(model.AllPods(), true)
|
||||
reachability.Expect(NewPodString(nsX, "a"), NewPodString(nsX, "b"), false)
|
||||
|
||||
ValidateOrFail(k8s, model, &TestCase{FromPort: 81, ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability})
|
||||
|
||||
podBIP := fmt.Sprintf("%s/%d", podB.Status.PodIP, hostMask)
|
||||
// Create a NetworkPolicy which allows access to the server pod by putting its IP in the allow CIDR.
|
||||
allowPolicy := GetAllowEgressByCIDR("a", podBIP)
|
||||
// The CIDR policy above already exists under the same name, so update it in place rather than creating a new one.
|
||||
UpdatePolicy(k8s, allowPolicy, nsX)
|
||||
|
||||
reachabilityAllow := NewReachability(model.AllPods(), true)
|
||||
reachabilityAllow.ExpectAllEgress(NewPodString(nsX, "a"), false)
|
||||
reachabilityAllow.Expect(NewPodString(nsX, "a"), NewPodString(nsX, "b"), true)
|
||||
|
||||
ValidateOrFail(k8s, model, &TestCase{FromPort: 81, ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachabilityAllow})
|
||||
})
|
||||
|
||||
ginkgo.It("should enforce policies to check ingress and egress policies can be controlled independently based on PodSelector [Feature:NetworkPolicy]", func() {
|
||||
/*
|
||||
Test steps:
|
||||
1. Verify every pod in every namespace can talk to each other
|
||||
- including a -> b and b -> a
|
||||
2. Create a policy to allow egress a -> b (target = a)
|
||||
3. Create a policy to *deny* ingress b -> a (target = a)
|
||||
4. Verify a -> b allowed; b -> a blocked
|
||||
*/
|
||||
targetLabels := map[string]string{"pod": "a"}
|
||||
|
||||
ginkgo.By("Creating a network policy for pod-a which allows Egress traffic to pod-b.")
|
||||
|
||||
allowEgressPolicy := GetAllowEgressForTarget(metav1.LabelSelector{MatchLabels: targetLabels})
|
||||
nsX, _, _, model, k8s := getK8SModel(f)
|
||||
CreatePolicy(k8s, allowEgressPolicy, nsX)
|
||||
|
||||
allowEgressReachability := NewReachability(model.AllPods(), true)
|
||||
ValidateOrFail(k8s, model, &TestCase{FromPort: 81, ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: allowEgressReachability})
|
||||
|
||||
ginkgo.By("Creating a network policy for pod-a that denies traffic from pod-b.")
|
||||
|
||||
denyAllIngressPolicy := GetDenyIngressForTarget(metav1.LabelSelector{MatchLabels: targetLabels})
|
||||
CreatePolicy(k8s, denyAllIngressPolicy, nsX)
|
||||
|
||||
denyIngressToXReachability := NewReachability(model.AllPods(), true)
|
||||
denyIngressToXReachability.ExpectAllIngress(NewPodString(nsX, "a"), false)
|
||||
ValidateOrFail(k8s, model, &TestCase{FromPort: 81, ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: denyIngressToXReachability})
|
||||
})
|
||||
|
||||
ginkgo.It("should not allow access by TCP when a policy specifies only SCTP [Feature:NetworkPolicy] [Feature:SCTP]", func() {
|
||||
policy := GetAllowIngressOnProtocolByPort("allow-only-sctp-ingress-on-port-81", v1.ProtocolSCTP, map[string]string{"pod": "a"}, &intstr.IntOrString{IntVal: 81})
|
||||
nsX, _, _, model, k8s := getK8SModel(f)
|
||||
CreatePolicy(k8s, policy, nsX)
|
||||
|
||||
ginkgo.By("Creating a network policy for the server which allows traffic only via SCTP on port 81.")
|
||||
|
||||
// Probing with TCP, so all traffic should be dropped.
|
||||
reachability := NewReachability(model.AllPods(), true)
|
||||
reachability.ExpectAllIngress(NewPodString(nsX, "a"), false)
|
||||
ValidateOrFail(k8s, model, &TestCase{FromPort: 81, ToPort: 81, Protocol: v1.ProtocolTCP, Reachability: reachability})
|
||||
})
|
||||
|
||||
ginkgo.It("should not allow access by TCP when a policy specifies only UDP [Feature:NetworkPolicy] [Feature:UDP]", func() {
|
||||
policy := GetAllowIngressOnProtocolByPort("allow-only-udp-ingress-on-port-81", v1.ProtocolUDP, map[string]string{"pod": "a"}, &intstr.IntOrString{IntVal: 81})
|
||||
nsX, _, _, model, k8s := getK8SModel(f)
|
||||
CreatePolicy(k8s, policy, nsX)
|
||||
|
||||
ginkgo.By("Creating a network policy for the server which allows traffic only via UDP on port 81.")
|
||||
|
||||
// Probing with TCP, so all traffic should be dropped.
|
||||
reachability := NewReachability(model.AllPods(), true)
|
||||
reachability.ExpectAllIngress(NewPodString(nsX, "a"), false)
|
||||
ValidateOrFail(k8s, model, &TestCase{FromPort: 81, ToPort: 81, Protocol: v1.ProtocolTCP, Reachability: reachability})
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
// getNamespaces returns the canonical set of namespaces used by this test, taking a root ns as input. This allows this test to run in parallel.
|
||||
func getNamespaces(rootNs string) (string, string, string, []string) {
|
||||
if useFixedNamespaces {
|
||||
rootNs = ""
|
||||
} else {
|
||||
rootNs = rootNs + "-"
|
||||
}
|
||||
nsX := fmt.Sprintf("%sx", rootNs)
|
||||
nsY := fmt.Sprintf("%sy", rootNs)
|
||||
nsZ := fmt.Sprintf("%sz", rootNs)
|
||||
return nsX, nsY, nsZ, []string{nsX, nsY, nsZ}
|
||||
}
|
||||
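// For illustration only (the root namespace value is hypothetical), with
// useFixedNamespaces == false and a framework-generated root namespace such
// as "netpol-1234", the derived names would be:
//
//	nsX, nsY, nsZ, all := getNamespaces("netpol-1234")
//	// nsX == "netpol-1234-x", nsY == "netpol-1234-y", nsZ == "netpol-1234-z"
//	// all == []string{nsX, nsY, nsZ}
//
// With useFixedNamespaces == true the root is ignored and the namespaces are
// simply "x", "y", and "z".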
|
||||
// defaultModel creates a new "model" pod system under namespaces (x,y,z), each of which has pods a, b, and c. The resulting
|
||||
// truth-table matrix is identical for all tests, comprising 81 total connections between 9 pods (x/a, x/b, x/c, ..., z/c).
|
||||
func defaultModel(namespaces []string, dnsDomain string) *Model {
|
||||
protocols := []v1.Protocol{v1.ProtocolTCP, v1.ProtocolUDP}
|
||||
if addSCTPContainers {
|
||||
protocols = append(protocols, v1.ProtocolSCTP)
|
||||
}
|
||||
return NewModel(namespaces, []string{"a", "b", "c"}, []int32{80, 81}, protocols, dnsDomain)
|
||||
}
|
||||
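// To make the scale concrete: three namespaces with pods "a", "b", and "c" each
// yield 9 pods, and probing every ordered (from, to) pair gives 9 x 9 = 81
// connections per TestCase. A rough sketch (the DNS domain here is a placeholder):
//
//	model := defaultModel([]string{"x", "y", "z"}, "cluster.local")
//	pods := model.AllPods()      // 9 pods
//	_ = len(pods) * len(pods)    // 81 probes per reachability matrix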
|
||||
// getK8SModel derives the namespace names from the framework's root namespace and returns the default probing model used
|
||||
// in the scaffold of this test, together with the Scenario that drives it.
|
||||
func getK8SModel(f *framework.Framework) (string, string, string, *Model, *Scenario) {
|
||||
k8s := NewScenario(f)
|
||||
rootNs := f.Namespace.GetName()
|
||||
nsX, nsY, nsZ, namespaces := getNamespaces(rootNs)
|
||||
|
||||
model := defaultModel(namespaces, framework.TestContext.ClusterDNSDomain)
|
||||
|
||||
return nsX, nsY, nsZ, model, k8s
|
||||
}
|
||||
|
||||
// initializeResources generates a model and then waits for the model to be up-and-running (i.e. all pods are ready and running in their namespaces).
|
||||
func initializeResources(f *framework.Framework) error {
|
||||
_, _, _, model, k8s := getK8SModel(f)
|
||||
|
||||
framework.Logf("initializing cluster: ensuring namespaces, deployments, and pods exist and are ready")
|
||||
|
||||
err := k8s.InitializeCluster(model)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
framework.Logf("finished initializing cluster state")
|
||||
|
||||
return k8s.waitForHTTPServers(model)
|
||||
}
|
429
test/e2e/network/netpol/policies.go
Normal file
@ -0,0 +1,429 @@
|
||||
/*
|
||||
Copyright 2020 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package netpol
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
networkingv1 "k8s.io/api/networking/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
// GetDenyIngress returns a default deny-ingress policy named 'name'.
|
||||
func GetDenyIngress(name string) *networkingv1.NetworkPolicy {
|
||||
return &networkingv1.NetworkPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
Spec: networkingv1.NetworkPolicySpec{
|
||||
PodSelector: metav1.LabelSelector{},
|
||||
Ingress: []networkingv1.NetworkPolicyIngressRule{},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// GetRandomIngressPolicies returns "num" random policies that allow a unique:n label, i.e.
|
||||
// unique:1, unique:2, and so on. Used for creating a 'background' set of policies.
|
||||
func GetRandomIngressPolicies(num int) []*networkingv1.NetworkPolicy {
|
||||
policies := []*networkingv1.NetworkPolicy{}
|
||||
|
||||
for i := 0; i < num; i++ {
|
||||
policy := &networkingv1.NetworkPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("allow-all-%v", i),
|
||||
},
|
||||
Spec: networkingv1.NetworkPolicySpec{
|
||||
PodSelector: metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"unique": fmt.Sprintf("%v", i),
|
||||
},
|
||||
},
|
||||
Ingress: []networkingv1.NetworkPolicyIngressRule{{}},
|
||||
},
|
||||
}
|
||||
policies = append(policies, policy)
|
||||
}
|
||||
return policies
|
||||
}
|
||||
|
||||
// GetAllowIngress allows all ingress
|
||||
func GetAllowIngress(name string) *networkingv1.NetworkPolicy {
|
||||
policy := &networkingv1.NetworkPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
Spec: networkingv1.NetworkPolicySpec{
|
||||
PodSelector: metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{},
|
||||
},
|
||||
Ingress: []networkingv1.NetworkPolicyIngressRule{
|
||||
{},
|
||||
},
|
||||
},
|
||||
}
|
||||
return policy
|
||||
}
|
||||
|
||||
// GetAllowIngressByPort allows ingress by port
|
||||
func GetAllowIngressByPort(name string, port *intstr.IntOrString) *networkingv1.NetworkPolicy {
|
||||
policy := &networkingv1.NetworkPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
Spec: networkingv1.NetworkPolicySpec{
|
||||
PodSelector: metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{},
|
||||
},
|
||||
Ingress: []networkingv1.NetworkPolicyIngressRule{
|
||||
{
|
||||
Ports: []networkingv1.NetworkPolicyPort{
|
||||
{Port: port},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
return policy
|
||||
}
|
||||
|
||||
// GetAllowEgressByPort allows egress by port
|
||||
func GetAllowEgressByPort(name string, port *intstr.IntOrString) *networkingv1.NetworkPolicy {
|
||||
protocolUDP := v1.ProtocolUDP
|
||||
policy := &networkingv1.NetworkPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
Spec: networkingv1.NetworkPolicySpec{
|
||||
PodSelector: metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{},
|
||||
},
|
||||
Egress: []networkingv1.NetworkPolicyEgressRule{
|
||||
{
|
||||
Ports: []networkingv1.NetworkPolicyPort{
|
||||
{Port: port},
|
||||
{
|
||||
Protocol: &protocolUDP,
|
||||
Port: &intstr.IntOrString{Type: intstr.Int, IntVal: 53},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeEgress},
|
||||
},
|
||||
}
|
||||
return policy
|
||||
}
|
||||
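// Note on the UDP/53 entry above: the probes in this suite address peers by DNS
// name (see QualifiedServiceAddress in probe.go), so egress policies keep port
// 53/UDP open; otherwise name resolution itself would be blocked and the egress
// tests would fail for the wrong reason.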
|
||||
// GetDenyAll denies ingress traffic, AS WELL as egress traffic.
|
||||
// - BOTH policy types must be specified
|
||||
// - The Egress rule must (like the ingress default rule) be an array with 0 values.
|
||||
func GetDenyAll(name string) *networkingv1.NetworkPolicy {
|
||||
policy := GetDenyIngress(name)
|
||||
policy.Spec.PolicyTypes = []networkingv1.PolicyType{networkingv1.PolicyTypeEgress, networkingv1.PolicyTypeIngress}
|
||||
policy.Spec.Egress = []networkingv1.NetworkPolicyEgressRule{}
|
||||
policy.Spec.Ingress = []networkingv1.NetworkPolicyIngressRule{}
|
||||
return policy
|
||||
}
|
||||
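// For reference, the object built by GetDenyAll("deny-all") corresponds roughly
// to the following YAML (TypeMeta is left unset on the Go object, so apiVersion
// and kind are shown only for readability):
//
//	apiVersion: networking.k8s.io/v1
//	kind: NetworkPolicy
//	metadata:
//	  name: deny-all
//	spec:
//	  podSelector: {}
//	  policyTypes: ["Ingress", "Egress"]
//	  # no ingress or egress rules => nothing is allowed in either direction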
|
||||
// GetAllowIngressByPod allows ingress by pod labels
|
||||
func GetAllowIngressByPod(name string, targetLabels map[string]string, peerPodSelector *metav1.LabelSelector) *networkingv1.NetworkPolicy {
|
||||
policy := &networkingv1.NetworkPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
Spec: networkingv1.NetworkPolicySpec{
|
||||
PodSelector: metav1.LabelSelector{
|
||||
MatchLabels: targetLabels,
|
||||
},
|
||||
Ingress: []networkingv1.NetworkPolicyIngressRule{{
|
||||
From: []networkingv1.NetworkPolicyPeer{{
|
||||
PodSelector: peerPodSelector,
|
||||
}},
|
||||
}},
|
||||
},
|
||||
}
|
||||
return policy
|
||||
}
|
||||
|
||||
// GetDenyIngressForTarget denies all ingress for target
|
||||
func GetDenyIngressForTarget(targetSelector metav1.LabelSelector) *networkingv1.NetworkPolicy {
|
||||
return &networkingv1.NetworkPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "deny-ingress-via-label-selector",
|
||||
},
|
||||
Spec: networkingv1.NetworkPolicySpec{
|
||||
PodSelector: targetSelector,
|
||||
PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress},
|
||||
Ingress: []networkingv1.NetworkPolicyIngressRule{},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// GetAllowIngressByNamespace allows ingress for namespace
|
||||
func GetAllowIngressByNamespace(name string, targetLabels map[string]string, peerNamespaceSelector *metav1.LabelSelector) *networkingv1.NetworkPolicy {
|
||||
policy := &networkingv1.NetworkPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
Spec: networkingv1.NetworkPolicySpec{
|
||||
PodSelector: metav1.LabelSelector{
|
||||
MatchLabels: targetLabels,
|
||||
},
|
||||
Ingress: []networkingv1.NetworkPolicyIngressRule{{
|
||||
From: []networkingv1.NetworkPolicyPeer{{
|
||||
NamespaceSelector: peerNamespaceSelector,
|
||||
}},
|
||||
}},
|
||||
},
|
||||
}
|
||||
return policy
|
||||
}
|
||||
|
||||
// GetAllowIngressByNamespaceAndPort allows ingress for namespace AND port
|
||||
func GetAllowIngressByNamespaceAndPort(name string, targetLabels map[string]string, peerNamespaceSelector *metav1.LabelSelector, port *intstr.IntOrString) *networkingv1.NetworkPolicy {
|
||||
policy := &networkingv1.NetworkPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
Spec: networkingv1.NetworkPolicySpec{
|
||||
PodSelector: metav1.LabelSelector{
|
||||
MatchLabels: targetLabels,
|
||||
},
|
||||
Ingress: []networkingv1.NetworkPolicyIngressRule{{
|
||||
From: []networkingv1.NetworkPolicyPeer{{
|
||||
NamespaceSelector: peerNamespaceSelector,
|
||||
}},
|
||||
Ports: []networkingv1.NetworkPolicyPort{
|
||||
{Port: port},
|
||||
},
|
||||
}},
|
||||
},
|
||||
}
|
||||
return policy
|
||||
}
|
||||
|
||||
// GetAllowIngressByNamespaceOrPod allows ingress for pods with matching namespace OR pod labels
|
||||
func GetAllowIngressByNamespaceOrPod(name string, targetLabels map[string]string, peerNamespaceSelector *metav1.LabelSelector, peerPodSelector *metav1.LabelSelector) *networkingv1.NetworkPolicy {
|
||||
policy := &networkingv1.NetworkPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
Spec: networkingv1.NetworkPolicySpec{
|
||||
PodSelector: metav1.LabelSelector{
|
||||
MatchLabels: targetLabels,
|
||||
},
|
||||
Ingress: []networkingv1.NetworkPolicyIngressRule{{
|
||||
From: []networkingv1.NetworkPolicyPeer{
|
||||
{
|
||||
NamespaceSelector: peerNamespaceSelector,
|
||||
},
|
||||
{
|
||||
PodSelector: peerPodSelector,
|
||||
},
|
||||
},
|
||||
}},
|
||||
},
|
||||
}
|
||||
return policy
|
||||
}
|
||||
|
||||
// GetAllowIngressByNamespaceAndPod allows ingress for pods with matching namespace AND pod labels
|
||||
func GetAllowIngressByNamespaceAndPod(name string, targetLabels map[string]string, peerNamespaceSelector *metav1.LabelSelector, peerPodSelector *metav1.LabelSelector) *networkingv1.NetworkPolicy {
|
||||
policy := &networkingv1.NetworkPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
Spec: networkingv1.NetworkPolicySpec{
|
||||
PodSelector: metav1.LabelSelector{
|
||||
MatchLabels: targetLabels,
|
||||
},
|
||||
Ingress: []networkingv1.NetworkPolicyIngressRule{{
|
||||
From: []networkingv1.NetworkPolicyPeer{{
|
||||
NamespaceSelector: peerNamespaceSelector,
|
||||
PodSelector: peerPodSelector,
|
||||
}},
|
||||
}},
|
||||
},
|
||||
}
|
||||
return policy
|
||||
}
|
||||
|
||||
// GetAllowEgressByNamespaceAndPod allows egress for pods with matching namespace AND pod labels
|
||||
func GetAllowEgressByNamespaceAndPod(name string, targetLabels map[string]string, peerNamespaceSelector *metav1.LabelSelector, peerPodSelector *metav1.LabelSelector) *networkingv1.NetworkPolicy {
|
||||
protocolUDP := v1.ProtocolUDP
|
||||
policy := &networkingv1.NetworkPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
Spec: networkingv1.NetworkPolicySpec{
|
||||
PodSelector: metav1.LabelSelector{
|
||||
MatchLabels: targetLabels,
|
||||
},
|
||||
PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeEgress},
|
||||
Egress: []networkingv1.NetworkPolicyEgressRule{
|
||||
{
|
||||
To: []networkingv1.NetworkPolicyPeer{
|
||||
{
|
||||
NamespaceSelector: peerNamespaceSelector,
|
||||
PodSelector: peerPodSelector,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Ports: []networkingv1.NetworkPolicyPort{
|
||||
{
|
||||
Protocol: &protocolUDP,
|
||||
Port: &intstr.IntOrString{Type: intstr.Int, IntVal: 53},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
return policy
|
||||
}
|
||||
|
||||
// GetAllowEgress allows all egress
|
||||
func GetAllowEgress() *networkingv1.NetworkPolicy {
|
||||
return &networkingv1.NetworkPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "allow-egress",
|
||||
},
|
||||
Spec: networkingv1.NetworkPolicySpec{
|
||||
PodSelector: metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{},
|
||||
},
|
||||
PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeEgress},
|
||||
Egress: []networkingv1.NetworkPolicyEgressRule{{}},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// GetAllowEgressForTarget allows all egress for a target
|
||||
func GetAllowEgressForTarget(targetSelector metav1.LabelSelector) *networkingv1.NetworkPolicy {
|
||||
return &networkingv1.NetworkPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "allow-egress-for-target",
|
||||
},
|
||||
Spec: networkingv1.NetworkPolicySpec{
|
||||
PodSelector: targetSelector,
|
||||
PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeEgress},
|
||||
Egress: []networkingv1.NetworkPolicyEgressRule{{}},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// GetAllowEgressByCIDR creates an egress netpol with an ipblock
|
||||
func GetAllowEgressByCIDR(podname string, podserverCIDR string) *networkingv1.NetworkPolicy {
|
||||
protocolUDP := v1.ProtocolUDP
|
||||
return &networkingv1.NetworkPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "allow-client-a-via-cidr-egress-rule",
|
||||
},
|
||||
Spec: networkingv1.NetworkPolicySpec{
|
||||
PodSelector: metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"pod": podname,
|
||||
},
|
||||
},
|
||||
PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeEgress},
|
||||
// Allow traffic to only one CIDR block.
|
||||
Egress: []networkingv1.NetworkPolicyEgressRule{
|
||||
{
|
||||
To: []networkingv1.NetworkPolicyPeer{
|
||||
{
|
||||
IPBlock: &networkingv1.IPBlock{
|
||||
CIDR: podserverCIDR,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Ports: []networkingv1.NetworkPolicyPort{
|
||||
{
|
||||
Protocol: &protocolUDP,
|
||||
Port: &intstr.IntOrString{Type: intstr.Int, IntVal: 53},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// GetAllowEgressByCIDRExcept creates an egress netpol with an ipblock and except
|
||||
func GetAllowEgressByCIDRExcept(podname string, podserverCIDR string, except []string) *networkingv1.NetworkPolicy {
|
||||
protocolUDP := v1.ProtocolUDP
|
||||
return &networkingv1.NetworkPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "allow-client-a-via-cidr-egress-rule",
|
||||
},
|
||||
Spec: networkingv1.NetworkPolicySpec{
|
||||
PodSelector: metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"pod": podname,
|
||||
},
|
||||
},
|
||||
PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeEgress},
|
||||
// Allow traffic to only one CIDR block.
|
||||
Egress: []networkingv1.NetworkPolicyEgressRule{
|
||||
{
|
||||
To: []networkingv1.NetworkPolicyPeer{
|
||||
{
|
||||
IPBlock: &networkingv1.IPBlock{
|
||||
CIDR: podserverCIDR,
|
||||
Except: except,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Ports: []networkingv1.NetworkPolicyPort{
|
||||
{
|
||||
Protocol: &protocolUDP,
|
||||
Port: &intstr.IntOrString{Type: intstr.Int, IntVal: 53},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// GetAllowIngressOnProtocolByPort is a base network policy template which distinguishes between the types of v1.Protocol available in v1 core
|
||||
func GetAllowIngressOnProtocolByPort(name string, protocol v1.Protocol, targetLabels map[string]string, portNum *intstr.IntOrString) *networkingv1.NetworkPolicy {
|
||||
policy := &networkingv1.NetworkPolicy{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
Spec: networkingv1.NetworkPolicySpec{
|
||||
PodSelector: metav1.LabelSelector{
|
||||
MatchLabels: targetLabels,
|
||||
},
|
||||
Ingress: []networkingv1.NetworkPolicyIngressRule{{
|
||||
Ports: []networkingv1.NetworkPolicyPort{{
|
||||
Port: portNum,
|
||||
Protocol: &protocol,
|
||||
}},
|
||||
}},
|
||||
},
|
||||
}
|
||||
return policy
|
||||
}
|
117
test/e2e/network/netpol/probe.go
Normal file
@ -0,0 +1,117 @@
|
||||
/*
|
||||
Copyright 2020 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package netpol
|
||||
|
||||
import (
|
||||
"github.com/onsi/ginkgo"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
)
|
||||
|
||||
// ProbeJob packages the data for the input of a pod->pod connectivity probe
|
||||
type ProbeJob struct {
|
||||
PodFrom *Pod
|
||||
PodTo *Pod
|
||||
FromPort int
|
||||
ToPort int
|
||||
ToPodDNSDomain string
|
||||
Protocol v1.Protocol
|
||||
}
|
||||
|
||||
// ProbeJobResults packages the data for the results of a pod->pod connectivity probe
|
||||
type ProbeJobResults struct {
|
||||
Job *ProbeJob
|
||||
IsConnected bool
|
||||
Err error
|
||||
Command string
|
||||
}
|
||||
|
||||
// ProbePodToPodConnectivity runs a series of probes in kube, and records the results in `testCase.Reachability`
|
||||
func ProbePodToPodConnectivity(k8s *Scenario, model *Model, testCase *TestCase) {
|
||||
k8s.ClearCache()
|
||||
numberOfWorkers := 30
|
||||
allPods := model.AllPods()
|
||||
size := len(allPods) * len(allPods)
|
||||
jobs := make(chan *ProbeJob, size)
|
||||
results := make(chan *ProbeJobResults, size)
|
||||
for i := 0; i < numberOfWorkers; i++ {
|
||||
go probeWorker(k8s, jobs, results)
|
||||
}
|
||||
for _, podFrom := range allPods {
|
||||
for _, podTo := range allPods {
|
||||
jobs <- &ProbeJob{
|
||||
PodFrom: podFrom,
|
||||
PodTo: podTo,
|
||||
FromPort: testCase.FromPort,
|
||||
ToPort: testCase.ToPort,
|
||||
ToPodDNSDomain: model.DNSDomain,
|
||||
Protocol: testCase.Protocol,
|
||||
}
|
||||
}
|
||||
}
|
||||
close(jobs)
|
||||
|
||||
for i := 0; i < size; i++ {
|
||||
result := <-results
|
||||
job := result.Job
|
||||
if result.Err != nil {
|
||||
framework.Logf("unable to perform probe %s -> %s: %v", job.PodFrom.PodString(), job.PodTo.PodString(), result.Err)
|
||||
}
|
||||
testCase.Reachability.Observe(job.PodFrom.PodString(), job.PodTo.PodString(), result.IsConnected)
|
||||
expected := testCase.Reachability.Expected.Get(job.PodFrom.PodString().String(), job.PodTo.PodString().String())
|
||||
if result.IsConnected != expected {
|
||||
framework.Logf("Validation of %s -> %s FAILED !!!", job.PodFrom.PodString(), job.PodTo.PodString())
|
||||
framework.Logf("error %v ", result.Err)
|
||||
if expected {
|
||||
framework.Logf("Expected allowed pod connection was instead BLOCKED --- run '%v'", result.Command)
|
||||
} else {
|
||||
framework.Logf("Expected blocked pod connection was instead ALLOWED --- run '%v'", result.Command)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
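// Design note: the jobs and results channels are buffered to the full matrix size
// (pods x pods), so enqueueing every job before any worker finishes never blocks,
// and the 30 probeWorker goroutines simply drain the queue in parallel.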
|
||||
// probeWorker polls pod connectivity until the incoming "jobs" channel is closed, and writes results back out to the "results" channel.
|
||||
// It only writes pass/fail status to a channel and has no failure side effects; this is by design, since we do not want to fail inside a goroutine.
|
||||
func probeWorker(k8s *Scenario, jobs <-chan *ProbeJob, results chan<- *ProbeJobResults) {
|
||||
defer ginkgo.GinkgoRecover()
|
||||
for job := range jobs {
|
||||
podFrom := job.PodFrom
|
||||
containerFrom, err := podFrom.FindContainer(int32(job.FromPort), job.Protocol)
|
||||
// 1) sanity check that the pod container is found before we run the real test.
|
||||
if err != nil {
|
||||
result := &ProbeJobResults{
|
||||
Job: job,
|
||||
IsConnected: false,
|
||||
Err: err,
|
||||
Command: "(skipped, pod unavailable)",
|
||||
}
|
||||
results <- result
|
||||
} else {
|
||||
// 2) real test runs here...
|
||||
connected, command, err := k8s.Probe(podFrom.Namespace, podFrom.Name, containerFrom.Name(), job.PodTo.QualifiedServiceAddress(job.ToPodDNSDomain), job.Protocol, job.ToPort)
|
||||
result := &ProbeJobResults{
|
||||
Job: job,
|
||||
IsConnected: connected,
|
||||
Err: err,
|
||||
Command: command,
|
||||
}
|
||||
results <- result
|
||||
}
|
||||
}
|
||||
|
||||
}
|
188
test/e2e/network/netpol/reachability.go
Normal file
@ -0,0 +1,188 @@
|
||||
/*
|
||||
Copyright 2020 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package netpol
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// TestCase describes the data for a netpol test
|
||||
type TestCase struct {
|
||||
FromPort int
|
||||
ToPort int
|
||||
Protocol v1.Protocol
|
||||
Reachability *Reachability
|
||||
}
|
||||
|
||||
// PodString represents a namespace 'x' + pod 'a' as "x/a".
|
||||
type PodString string
|
||||
|
||||
// NewPodString instantiates a PodString from the given namespace and name.
|
||||
func NewPodString(namespace string, podName string) PodString {
|
||||
return PodString(fmt.Sprintf("%s/%s", namespace, podName))
|
||||
}
|
||||
|
||||
// String converts back to a string
|
||||
func (pod PodString) String() string {
|
||||
return string(pod)
|
||||
}
|
||||
|
||||
func (pod PodString) split() (string, string) {
|
||||
pieces := strings.Split(string(pod), "/")
|
||||
if len(pieces) != 2 {
|
||||
framework.Failf("expected ns/pod, found %+v", pieces)
|
||||
}
|
||||
return pieces[0], pieces[1]
|
||||
}
|
||||
|
||||
// Namespace extracts the namespace
|
||||
func (pod PodString) Namespace() string {
|
||||
ns, _ := pod.split()
|
||||
return ns
|
||||
}
|
||||
|
||||
// PodName extracts the pod name
|
||||
func (pod PodString) PodName() string {
|
||||
_, podName := pod.split()
|
||||
return podName
|
||||
}
|
||||
|
||||
// Peer is used for matching pods by either or both of the pod's namespace and name.
|
||||
type Peer struct {
|
||||
Namespace string
|
||||
Pod string
|
||||
}
|
||||
|
||||
// Matches checks whether the Peer matches the PodString:
|
||||
// - an empty namespace means the namespace will always match
|
||||
// - otherwise, the namespace must match the PodString's namespace
|
||||
// - same goes for Pod: empty matches everything, otherwise must match exactly
|
||||
func (p *Peer) Matches(pod PodString) bool {
|
||||
return (p.Namespace == "" || p.Namespace == pod.Namespace()) && (p.Pod == "" || p.Pod == pod.PodName())
|
||||
}
|
||||
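// A small illustration of the matching rules (values are hypothetical):
//
//	(&Peer{Namespace: "x"}).Matches(NewPodString("x", "a"))           // true: namespace matches, pod unconstrained
//	(&Peer{Namespace: "x", Pod: "a"}).Matches(NewPodString("x", "b")) // false: pod name differs
//	(&Peer{}).Matches(NewPodString("z", "c"))                          // true: an empty Peer matches every pod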
|
||||
// Reachability packages the data for a cluster-wide connectivity probe
|
||||
type Reachability struct {
|
||||
Expected *TruthTable
|
||||
Observed *TruthTable
|
||||
Pods []*Pod
|
||||
}
|
||||
|
||||
// NewReachability instantiates a reachability
|
||||
func NewReachability(pods []*Pod, defaultExpectation bool) *Reachability {
|
||||
var podNames []string
|
||||
for _, pod := range pods {
|
||||
podNames = append(podNames, pod.PodString().String())
|
||||
}
|
||||
r := &Reachability{
|
||||
Expected: NewTruthTableFromItems(podNames, &defaultExpectation),
|
||||
Observed: NewTruthTableFromItems(podNames, nil),
|
||||
Pods: pods,
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
// AllowLoopback expects all communication from a pod to itself to be allowed.
|
||||
// In general, call it after setting up any other rules since loopback logic follows no policy.
|
||||
func (r *Reachability) AllowLoopback() {
|
||||
for _, pod := range r.Pods {
|
||||
podName := pod.PodString().String()
|
||||
r.Expected.Set(podName, podName, true)
|
||||
}
|
||||
}
|
||||
|
||||
// Expect sets the expected value for a single observation
|
||||
func (r *Reachability) Expect(from PodString, to PodString, isConnected bool) {
|
||||
r.Expected.Set(string(from), string(to), isConnected)
|
||||
}
|
||||
|
||||
// ExpectAllIngress defines that any traffic going into the pod will be allowed/denied (true/false)
|
||||
func (r *Reachability) ExpectAllIngress(pod PodString, connected bool) {
|
||||
r.Expected.SetAllTo(string(pod), connected)
|
||||
if !connected {
|
||||
framework.Logf("Denying all traffic *to* %s", pod)
|
||||
}
|
||||
}
|
||||
|
||||
// ExpectAllEgress defines that any traffic going out of the pod will be allowed/denied (true/false)
|
||||
func (r *Reachability) ExpectAllEgress(pod PodString, connected bool) {
|
||||
r.Expected.SetAllFrom(string(pod), connected)
|
||||
if !connected {
|
||||
framework.Logf("Denying all traffic *from* %s", pod)
|
||||
}
|
||||
}
|
||||
|
||||
// ExpectPeer sets expected values using Peer matchers
|
||||
func (r *Reachability) ExpectPeer(from *Peer, to *Peer, connected bool) {
|
||||
for _, fromPod := range r.Pods {
|
||||
if from.Matches(fromPod.PodString()) {
|
||||
for _, toPod := range r.Pods {
|
||||
if to.Matches(toPod.PodString()) {
|
||||
r.Expected.Set(string(fromPod.PodString()), string(toPod.PodString()), connected)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
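// For example, to expect that nothing outside namespace nsX can reach anything
// inside it (as the deny-all specs above do):
//
//	reachability.ExpectPeer(&Peer{}, &Peer{Namespace: nsX}, false)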
|
||||
// Observe records a single connectivity observation
|
||||
func (r *Reachability) Observe(fromPod PodString, toPod PodString, isConnected bool) {
|
||||
r.Observed.Set(string(fromPod), string(toPod), isConnected)
|
||||
}
|
||||
|
||||
// Summary produces a useful summary of expected and observed data
|
||||
func (r *Reachability) Summary(ignoreLoopback bool) (trueObs int, falseObs int, ignoredObs int, comparison *TruthTable) {
|
||||
comparison = r.Expected.Compare(r.Observed)
|
||||
if !comparison.IsComplete() {
|
||||
framework.Failf("observations not complete!")
|
||||
}
|
||||
falseObs, trueObs, ignoredObs = 0, 0, 0
|
||||
for from, dict := range comparison.Values {
|
||||
for to, val := range dict {
|
||||
if ignoreLoopback && from == to {
|
||||
// Never fail on loopback, because it's not yet defined.
|
||||
ignoredObs++
|
||||
} else if val {
|
||||
trueObs++
|
||||
} else {
|
||||
falseObs++
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// PrintSummary prints the summary
|
||||
func (r *Reachability) PrintSummary(printExpected bool, printObserved bool, printComparison bool) {
|
||||
right, wrong, ignored, comparison := r.Summary(ignoreLoopback)
|
||||
if ignored > 0 {
|
||||
framework.Logf("warning: the results of %d pod->pod cases have been ignored", ignored)
|
||||
}
|
||||
framework.Logf("reachability: correct:%v, incorrect:%v, result=%t\n\n", right, wrong, wrong == 0)
|
||||
if printExpected {
|
||||
framework.Logf("expected:\n\n%s\n\n\n", r.Expected.PrettyPrint(""))
|
||||
}
|
||||
if printObserved {
|
||||
framework.Logf("observed:\n\n%s\n\n\n", r.Observed.PrettyPrint(""))
|
||||
}
|
||||
if printComparison {
|
||||
framework.Logf("comparison:\n\n%s\n\n\n", comparison.PrettyPrint(""))
|
||||
}
|
||||
}
|
164
test/e2e/network/netpol/test_helper.go
Normal file
@ -0,0 +1,164 @@
|
||||
/*
|
||||
Copyright 2020 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package netpol
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo"
|
||||
networkingv1 "k8s.io/api/networking/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"sigs.k8s.io/yaml"
|
||||
)
|
||||
|
||||
const (
|
||||
waitInterval = 1 * time.Second
|
||||
waitTimeout = 30 * time.Second
|
||||
)
|
||||
|
||||
// prettyPrint renders a NetworkPolicy as YAML.
|
||||
func prettyPrint(policy *networkingv1.NetworkPolicy) string {
|
||||
raw, err := yaml.Marshal(policy)
|
||||
framework.ExpectNoError(err, "marshal network policy to yaml")
|
||||
return string(raw)
|
||||
}
|
||||
|
||||
// CreatePolicy creates a policy in the given namespace
|
||||
func CreatePolicy(k8s *Scenario, policy *networkingv1.NetworkPolicy, namespace string) {
|
||||
if isVerbose {
|
||||
framework.Logf("****************************************************************")
|
||||
framework.Logf("Network Policy creating %s/%s \n%s", namespace, policy.Name, prettyPrint(policy))
|
||||
framework.Logf("****************************************************************")
|
||||
}
|
||||
|
||||
_, err := k8s.CreateNetworkPolicy(namespace, policy)
|
||||
framework.ExpectNoError(err, "Unable to create netpol %s/%s", namespace, policy.Name)
|
||||
}
|
||||
|
||||
// UpdatePolicy updates a networkpolicy
|
||||
func UpdatePolicy(k8s *Scenario, policy *networkingv1.NetworkPolicy, namespace string) {
|
||||
if isVerbose {
|
||||
framework.Logf("****************************************************************")
|
||||
framework.Logf("Network Policy updating %s/%s \n%s", namespace, policy.Name, prettyPrint(policy))
|
||||
framework.Logf("****************************************************************")
|
||||
}
|
||||
|
||||
_, err := k8s.UpdateNetworkPolicy(namespace, policy)
|
||||
framework.ExpectNoError(err, "Unable to update netpol %s/%s", namespace, policy.Name)
|
||||
}
|
||||
|
||||
// ValidateOrFail probes the cluster against the test case's expected reachability matrix, and fails the test if the observed results do not match.
|
||||
func ValidateOrFail(k8s *Scenario, model *Model, testCase *TestCase) {
|
||||
ginkgo.By("Validating reachability matrix...")
|
||||
|
||||
// 1st try
|
||||
ginkgo.By("Validating reachability matrix... (FIRST TRY)")
|
||||
ProbePodToPodConnectivity(k8s, model, testCase)
|
||||
// 2nd try, in case first one failed
|
||||
if _, wrong, _, _ := testCase.Reachability.Summary(ignoreLoopback); wrong != 0 {
|
||||
framework.Logf("failed first probe %d wrong results ... retrying (SECOND TRY)", wrong)
|
||||
ProbePodToPodConnectivity(k8s, model, testCase)
|
||||
}
|
||||
|
||||
// At this point we know whether we passed or failed; print the final matrix and pass/fail the test.
|
||||
if _, wrong, _, _ := testCase.Reachability.Summary(ignoreLoopback); wrong != 0 {
|
||||
testCase.Reachability.PrintSummary(true, true, true)
|
||||
framework.Failf("Had %d wrong results in reachability matrix", wrong)
|
||||
}
|
||||
if isVerbose {
|
||||
testCase.Reachability.PrintSummary(true, true, true)
|
||||
}
|
||||
framework.Logf("VALIDATION SUCCESSFUL")
|
||||
}
|
||||
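// A typical caller (mirroring the specs in network_policy.go) looks roughly like:
//
//	reachability := NewReachability(model.AllPods(), true)
//	reachability.ExpectAllIngress(NewPodString(nsX, "a"), false)
//	ValidateOrFail(k8s, model, &TestCase{FromPort: 81, ToPort: 80, Protocol: v1.ProtocolTCP, Reachability: reachability})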
|
||||
// UpdateNamespaceLabels sets the labels for a namespace
|
||||
func UpdateNamespaceLabels(k8s *Scenario, ns string, newNsLabel map[string]string) {
|
||||
err := k8s.setNamespaceLabels(ns, newNsLabel)
|
||||
framework.ExpectNoError(err, "Update namespace %s labels", ns)
|
||||
err = wait.PollImmediate(waitInterval, waitTimeout, func() (done bool, err error) {
|
||||
namespace, err := k8s.getNamespace(ns)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
for key, expected := range newNsLabel {
|
||||
if actual, ok := namespace.Labels[key]; !ok || (expected != actual) {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
framework.ExpectNoError(err, "Unable to wait for ns %s to update labels", ns)
|
||||
}
|
||||
|
||||
// AddPodLabels adds new labels to a running pod and waits for them to take effect.
|
||||
func AddPodLabels(k8s *Scenario, pod *Pod, newPodLabels map[string]string) {
|
||||
kubePod, err := k8s.ClientSet.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err, "Unable to get pod %s/%s", pod.Namespace, pod.Name)
|
||||
if kubePod.Labels == nil {
|
||||
kubePod.Labels = map[string]string{}
|
||||
}
|
||||
for key, val := range newPodLabels {
|
||||
kubePod.Labels[key] = val
|
||||
}
|
||||
_, err = k8s.ClientSet.CoreV1().Pods(pod.Namespace).Update(context.TODO(), kubePod, metav1.UpdateOptions{})
|
||||
framework.ExpectNoError(err, "Unable to add pod %s/%s labels", pod.Namespace, pod.Name)
|
||||
|
||||
err = wait.PollImmediate(waitInterval, waitTimeout, func() (done bool, err error) {
|
||||
waitForPod, err := k8s.GetPod(pod.Namespace, pod.Name)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
for key, expected := range newPodLabels {
|
||||
if actual, ok := waitForPod.Labels[key]; !ok || (expected != actual) {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
framework.ExpectNoError(err, "Unable to wait for pod %s/%s to update labels", pod.Namespace, pod.Name)
|
||||
}
|
||||
|
||||
// ResetNamespaceLabels resets the labels for a namespace
|
||||
func ResetNamespaceLabels(k8s *Scenario, ns string) {
|
||||
UpdateNamespaceLabels(k8s, ns, (&Namespace{Name: ns}).LabelSelector())
|
||||
}
|
||||
|
||||
// ResetPodLabels resets a pod's labels to its default label selector and waits for the change to take effect.
|
||||
func ResetPodLabels(k8s *Scenario, pod *Pod) {
|
||||
kubePod, err := k8s.ClientSet.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err, "Unable to get pod %s/%s", pod.Namespace, pod.Name)
|
||||
kubePod.Labels = pod.LabelSelector()
|
||||
_, err = k8s.ClientSet.CoreV1().Pods(pod.Namespace).Update(context.TODO(), kubePod, metav1.UpdateOptions{})
|
||||
framework.ExpectNoError(err, "Unable to add pod %s/%s labels", pod.Namespace, pod.Name)
|
||||
|
||||
err = wait.PollImmediate(waitInterval, waitTimeout, func() (done bool, err error) {
|
||||
waitForPod, err := k8s.GetPod(pod.Namespace, pod.Name)
|
||||
if err != nil {
|
||||
return false, nil
|
||||
}
|
||||
for key, expected := range pod.LabelSelector() {
|
||||
if actual, ok := waitForPod.Labels[key]; !ok || (expected != actual) {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
framework.ExpectNoError(err, "Unable to wait for pod %s/%s to update labels", pod.Namespace, pod.Name)
|
||||
}
|
171
test/e2e/network/netpol/truthtable.go
Normal file
@ -0,0 +1,171 @@
|
||||
/*
|
||||
Copyright 2020 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package netpol
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
)
|
||||
|
||||
// TruthTable takes in n items and maintains an n x n table of booleans for each ordered pair
|
||||
type TruthTable struct {
|
||||
Froms []string
|
||||
Tos []string
|
||||
toSet map[string]bool
|
||||
Values map[string]map[string]bool
|
||||
}
|
||||
|
||||
// NewTruthTableFromItems creates a new truth table with items
|
||||
func NewTruthTableFromItems(items []string, defaultValue *bool) *TruthTable {
|
||||
return NewTruthTable(items, items, defaultValue)
|
||||
}
|
||||
|
||||
// NewTruthTable creates a new truth table with froms and tos
|
||||
func NewTruthTable(froms []string, tos []string, defaultValue *bool) *TruthTable {
|
||||
values := map[string]map[string]bool{}
|
||||
for _, from := range froms {
|
||||
values[from] = map[string]bool{}
|
||||
for _, to := range tos {
|
||||
if defaultValue != nil {
|
||||
values[from][to] = *defaultValue
|
||||
}
|
||||
}
|
||||
}
|
||||
toSet := map[string]bool{}
|
||||
for _, to := range tos {
|
||||
toSet[to] = true
|
||||
}
|
||||
return &TruthTable{
|
||||
Froms: froms,
|
||||
Tos: tos,
|
||||
toSet: toSet,
|
||||
Values: values,
|
||||
}
|
||||
}
|
||||
|
||||
// IsComplete returns true if there's a value set for every single pair of items, otherwise it returns false.
|
||||
func (tt *TruthTable) IsComplete() bool {
|
||||
for _, from := range tt.Froms {
|
||||
for _, to := range tt.Tos {
|
||||
if _, ok := tt.Values[from][to]; !ok {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Set sets the value for from->to
|
||||
func (tt *TruthTable) Set(from string, to string, value bool) {
|
||||
dict, ok := tt.Values[from]
|
||||
if !ok {
|
||||
framework.Failf("from-key %s not found", from)
|
||||
}
|
||||
if _, ok := tt.toSet[to]; !ok {
|
||||
framework.Failf("to-key %s not allowed", to)
|
||||
}
|
||||
dict[to] = value
|
||||
}
|
||||
|
||||
// SetAllFrom sets all values where from = 'from'
|
||||
func (tt *TruthTable) SetAllFrom(from string, value bool) {
|
||||
dict, ok := tt.Values[from]
|
||||
if !ok {
|
||||
framework.Failf("from-key %s not found", from)
|
||||
}
|
||||
for _, to := range tt.Tos {
|
||||
dict[to] = value
|
||||
}
|
||||
}
|
||||
|
||||
// SetAllTo sets all values where to = 'to'
|
||||
func (tt *TruthTable) SetAllTo(to string, value bool) {
|
||||
if _, ok := tt.toSet[to]; !ok {
|
||||
framework.Failf("to-key %s not found", to)
|
||||
}
|
||||
for _, from := range tt.Froms {
|
||||
tt.Values[from][to] = value
|
||||
}
|
||||
}
|
||||
|
||||
// Get gets the specified value
|
||||
func (tt *TruthTable) Get(from string, to string) bool {
|
||||
dict, ok := tt.Values[from]
|
||||
if !ok {
|
||||
framework.Failf("from-key %s not found", from)
|
||||
}
|
||||
val, ok := dict[to]
|
||||
if !ok {
|
||||
framework.Failf("to-key %s not found in map (%+v)", to, dict)
|
||||
}
|
||||
return val
|
||||
}
|
||||
|
||||
// Compare is used to check two truth tables for equality, returning its
|
||||
// result in the form of a third truth table. Both tables are expected to
|
||||
// have identical items.
|
||||
func (tt *TruthTable) Compare(other *TruthTable) *TruthTable {
|
||||
if len(tt.Froms) != len(other.Froms) || len(tt.Tos) != len(other.Tos) {
|
||||
framework.Failf("cannot compare tables of different dimensions")
|
||||
}
|
||||
for i, fr := range tt.Froms {
|
||||
if other.Froms[i] != fr {
|
||||
framework.Failf("cannot compare: from keys at index %d do not match (%s vs %s)", i, other.Froms[i], fr)
|
||||
}
|
||||
}
|
||||
for i, to := range tt.Tos {
|
||||
if other.Tos[i] != to {
|
||||
framework.Failf("cannot compare: to keys at index %d do not match (%s vs %s)", i, other.Tos[i], to)
|
||||
}
|
||||
}
|
||||
|
||||
values := map[string]map[string]bool{}
|
||||
for from, dict := range tt.Values {
|
||||
values[from] = map[string]bool{}
|
||||
for to, val := range dict {
|
||||
values[from][to] = val == other.Values[from][to]
|
||||
}
|
||||
}
|
||||
return &TruthTable{
|
||||
Froms: tt.Froms,
|
||||
Tos: tt.Tos,
|
||||
toSet: tt.toSet,
|
||||
Values: values,
|
||||
}
|
||||
}
|
||||
|
||||
// PrettyPrint produces a nice visual representation.
|
||||
func (tt *TruthTable) PrettyPrint(indent string) string {
|
||||
header := indent + strings.Join(append([]string{"-\t"}, tt.Tos...), "\t")
|
||||
lines := []string{header}
|
||||
for _, from := range tt.Froms {
|
||||
line := []string{from}
|
||||
for _, to := range tt.Tos {
|
||||
mark := "X"
|
||||
val, ok := tt.Values[from][to]
|
||||
if !ok {
|
||||
mark = "?"
|
||||
} else if val {
|
||||
mark = "."
|
||||
}
|
||||
line = append(line, mark+"\t")
|
||||
}
|
||||
lines = append(lines, indent+strings.Join(line, "\t"))
|
||||
}
|
||||
return strings.Join(lines, "\n")
|
||||
}
|
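// For a three-pod example the output looks roughly like the following, where "."
// marks an allowed connection, "X" a blocked one, and "?" a pair that was never
// set or observed (columns are tab-separated):
//
//	-	x/a	x/b	x/c
//	x/a	.	.	X
//	x/b	.	.	.
//	x/c	?	.	.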
@ -24,6 +24,7 @@ import (
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
networkingv1 "k8s.io/api/networking/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
@ -36,10 +37,11 @@ import (
|
||||
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
|
||||
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
|
||||
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
|
||||
|
||||
// synthetic import of the netpol suite, until these tests are replaced entirely, so that it's included properly
|
||||
_ "k8s.io/kubernetes/test/e2e/network/netpol"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
utilnet "k8s.io/utils/net"
|
||||
|
||||
"github.com/onsi/ginkgo"
|
||||
)
|
||||
|
||||
/*
|
||||
|