Vendor changes
Vendoring (NEW) in github.com/Microsoft/hcsshim
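For context: on Windows nodes the winkernel proxier uses the newly vendored hcsshim package to look up HNS networks, which the test change below fakes via getFakeHnsNetwork. The following is a minimal, illustrative sketch of that kind of lookup, assuming hcsshim's GetHNSNetworkByName and HNSNetwork type; it is not part of this commit.

// +build windows

// Illustrative sketch only, not code from this commit: resolve an HNS network
// by name, roughly the lookup the Windows kernel-mode proxier performs through
// the vendored github.com/Microsoft/hcsshim package.
package main

import (
	"fmt"

	"github.com/Microsoft/hcsshim"
)

func lookupHNSNetwork(name string) (string, error) {
	network, err := hcsshim.GetHNSNetworkByName(name)
	if err != nil {
		return "", fmt.Errorf("could not find HNS network %q: %v", name, err)
	}
	return network.Id, nil
}

func main() {
	// "fakeNetwork" mirrors the network name used by the test's fake below.
	id, err := lookupHNSNetwork("fakeNetwork")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("HNS network id:", id)
}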
@@ -1,3 +1,5 @@
// +build windows

/*
Copyright 2015 The Kubernetes Authors.

@@ -17,9 +19,7 @@ limitations under the License.
package winkernel

import (
	"bytes"
	"reflect"
	"strconv"
	"testing"
	"time"

@@ -36,150 +36,14 @@ import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/proxy"
	"k8s.io/kubernetes/pkg/util/async"
	utiliptables "k8s.io/kubernetes/pkg/util/iptables"
	iptablestest "k8s.io/kubernetes/pkg/util/iptables/testing"
	"k8s.io/utils/exec"
	fakeexec "k8s.io/utils/exec/testing"
)

func checkAllLines(t *testing.T, table utiliptables.Table, save []byte, expectedLines map[utiliptables.Chain]string) {
	chainLines := utiliptables.GetChainLines(table, save)
	for chain, line := range chainLines {
		if expected, exists := expectedLines[chain]; exists {
			if expected != line {
				t.Errorf("getChainLines expected chain line not present. For chain: %s Expected: %s Got: %s", chain, expected, line)
			}
		} else {
			t.Errorf("getChainLines expected chain not present: %s", chain)
		}
	}
}

func TestReadLinesFromByteBuffer(t *testing.T) {
	testFn := func(byteArray []byte, expected []string) {
		index := 0
		readIndex := 0
		for ; readIndex < len(byteArray); index++ {
			line, n := utiliptables.ReadLine(readIndex, byteArray)
			readIndex = n
			if expected[index] != line {
				t.Errorf("expected:%q, actual:%q", expected[index], line)
			}
		} // for
		if readIndex < len(byteArray) {
			t.Errorf("Byte buffer was only partially read. Buffer length is:%d, readIndex is:%d", len(byteArray), readIndex)
		}
		if index < len(expected) {
			t.Errorf("All expected strings were not compared. expected arr length:%d, matched count:%d", len(expected), index-1)
		}
	}

	byteArray1 := []byte("\n Line 1 \n\n\n L ine4 \nLine 5 \n \n")
	expected1 := []string{"", "Line 1", "", "", "L ine4", "Line 5", ""}
	testFn(byteArray1, expected1)

	byteArray1 = []byte("")
	expected1 = []string{}
	testFn(byteArray1, expected1)

	byteArray1 = []byte("\n\n")
	expected1 = []string{"", ""}
	testFn(byteArray1, expected1)
}

func TestGetChainLines(t *testing.T) {
	iptables_save := `# Generated by iptables-save v1.4.7 on Wed Oct 29 14:56:01 2014
*nat
:PREROUTING ACCEPT [2136997:197881818]
:POSTROUTING ACCEPT [4284525:258542680]
:OUTPUT ACCEPT [5901660:357267963]
-A PREROUTING -m addrtype --dst-type LOCAL -j DOCKER
COMMIT
# Completed on Wed Oct 29 14:56:01 2014`
	expected := map[utiliptables.Chain]string{
		utiliptables.ChainPrerouting: ":PREROUTING ACCEPT [2136997:197881818]",
		utiliptables.ChainPostrouting: ":POSTROUTING ACCEPT [4284525:258542680]",
		utiliptables.ChainOutput: ":OUTPUT ACCEPT [5901660:357267963]",
	}
	checkAllLines(t, utiliptables.TableNAT, []byte(iptables_save), expected)
}

func TestGetChainLinesMultipleTables(t *testing.T) {
	iptables_save := `# Generated by iptables-save v1.4.21 on Fri Aug 7 14:47:37 2015
*nat
:PREROUTING ACCEPT [2:138]
:INPUT ACCEPT [0:0]
:OUTPUT ACCEPT [0:0]
:POSTROUTING ACCEPT [0:0]
:DOCKER - [0:0]
:KUBE-NODEPORT-CONTAINER - [0:0]
:KUBE-NODEPORT-HOST - [0:0]
:KUBE-PORTALS-CONTAINER - [0:0]
:KUBE-PORTALS-HOST - [0:0]
:KUBE-SVC-1111111111111111 - [0:0]
:KUBE-SVC-2222222222222222 - [0:0]
:KUBE-SVC-3333333333333333 - [0:0]
:KUBE-SVC-4444444444444444 - [0:0]
:KUBE-SVC-5555555555555555 - [0:0]
:KUBE-SVC-6666666666666666 - [0:0]
-A PREROUTING -m comment --comment "handle ClusterIPs; NOTE: this must be before the NodePort rules" -j KUBE-PORTALS-CONTAINER
-A PREROUTING -m addrtype --dst-type LOCAL -j DOCKER
-A PREROUTING -m addrtype --dst-type LOCAL -m comment --comment "handle service NodePorts; NOTE: this must be the last rule in the chain" -j KUBE-NODEPORT-CONTAINER
-A OUTPUT -m comment --comment "handle ClusterIPs; NOTE: this must be before the NodePort rules" -j KUBE-PORTALS-HOST
-A OUTPUT ! -d 127.0.0.0/8 -m addrtype --dst-type LOCAL -j DOCKER
-A OUTPUT -m addrtype --dst-type LOCAL -m comment --comment "handle service NodePorts; NOTE: this must be the last rule in the chain" -j KUBE-NODEPORT-HOST
-A POSTROUTING -s 10.246.1.0/24 ! -o cbr0 -j MASQUERADE
-A POSTROUTING -s 10.0.2.15/32 -d 10.0.2.15/32 -m comment --comment "handle pod connecting to self" -j MASQUERADE
-A KUBE-PORTALS-CONTAINER -d 10.247.0.1/32 -p tcp -m comment --comment "portal for default/kubernetes:" -m state --state NEW -m tcp --dport 443 -j KUBE-SVC-5555555555555555
-A KUBE-PORTALS-CONTAINER -d 10.247.0.10/32 -p udp -m comment --comment "portal for kube-system/kube-dns:dns" -m state --state NEW -m udp --dport 53 -j KUBE-SVC-6666666666666666
-A KUBE-PORTALS-CONTAINER -d 10.247.0.10/32 -p tcp -m comment --comment "portal for kube-system/kube-dns:dns-tcp" -m state --state NEW -m tcp --dport 53 -j KUBE-SVC-2222222222222222
-A KUBE-PORTALS-HOST -d 10.247.0.1/32 -p tcp -m comment --comment "portal for default/kubernetes:" -m state --state NEW -m tcp --dport 443 -j KUBE-SVC-5555555555555555
-A KUBE-PORTALS-HOST -d 10.247.0.10/32 -p udp -m comment --comment "portal for kube-system/kube-dns:dns" -m state --state NEW -m udp --dport 53 -j KUBE-SVC-6666666666666666
-A KUBE-PORTALS-HOST -d 10.247.0.10/32 -p tcp -m comment --comment "portal for kube-system/kube-dns:dns-tcp" -m state --state NEW -m tcp --dport 53 -j KUBE-SVC-2222222222222222
-A KUBE-SVC-1111111111111111 -p udp -m comment --comment "kube-system/kube-dns:dns" -m recent --set --name KUBE-SVC-1111111111111111 --mask 255.255.255.255 --rsource -j DNAT --to-destination 10.246.1.2:53
-A KUBE-SVC-2222222222222222 -m comment --comment "kube-system/kube-dns:dns-tcp" -j KUBE-SVC-3333333333333333
-A KUBE-SVC-3333333333333333 -p tcp -m comment --comment "kube-system/kube-dns:dns-tcp" -m recent --set --name KUBE-SVC-3333333333333333 --mask 255.255.255.255 --rsource -j DNAT --to-destination 10.246.1.2:53
-A KUBE-SVC-4444444444444444 -p tcp -m comment --comment "default/kubernetes:" -m recent --set --name KUBE-SVC-4444444444444444 --mask 255.255.255.255 --rsource -j DNAT --to-destination 10.245.1.2:443
-A KUBE-SVC-5555555555555555 -m comment --comment "default/kubernetes:" -j KUBE-SVC-4444444444444444
-A KUBE-SVC-6666666666666666 -m comment --comment "kube-system/kube-dns:dns" -j KUBE-SVC-1111111111111111
COMMIT
# Completed on Fri Aug 7 14:47:37 2015
# Generated by iptables-save v1.4.21 on Fri Aug 7 14:47:37 2015
*filter
:INPUT ACCEPT [17514:83115836]
:FORWARD ACCEPT [0:0]
:OUTPUT ACCEPT [8909:688225]
:DOCKER - [0:0]
-A FORWARD -o cbr0 -j DOCKER
-A FORWARD -o cbr0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A FORWARD -i cbr0 ! -o cbr0 -j ACCEPT
-A FORWARD -i cbr0 -o cbr0 -j ACCEPT
COMMIT
`
	expected := map[utiliptables.Chain]string{
		utiliptables.ChainPrerouting: ":PREROUTING ACCEPT [2:138]",
		utiliptables.Chain("INPUT"): ":INPUT ACCEPT [0:0]",
		utiliptables.Chain("OUTPUT"): ":OUTPUT ACCEPT [0:0]",
		utiliptables.ChainPostrouting: ":POSTROUTING ACCEPT [0:0]",
		utiliptables.Chain("DOCKER"): ":DOCKER - [0:0]",
		utiliptables.Chain("KUBE-NODEPORT-CONTAINER"): ":KUBE-NODEPORT-CONTAINER - [0:0]",
		utiliptables.Chain("KUBE-NODEPORT-HOST"): ":KUBE-NODEPORT-HOST - [0:0]",
		utiliptables.Chain("KUBE-PORTALS-CONTAINER"): ":KUBE-PORTALS-CONTAINER - [0:0]",
		utiliptables.Chain("KUBE-PORTALS-HOST"): ":KUBE-PORTALS-HOST - [0:0]",
		utiliptables.Chain("KUBE-SVC-1111111111111111"): ":KUBE-SVC-1111111111111111 - [0:0]",
		utiliptables.Chain("KUBE-SVC-2222222222222222"): ":KUBE-SVC-2222222222222222 - [0:0]",
		utiliptables.Chain("KUBE-SVC-3333333333333333"): ":KUBE-SVC-3333333333333333 - [0:0]",
		utiliptables.Chain("KUBE-SVC-4444444444444444"): ":KUBE-SVC-4444444444444444 - [0:0]",
		utiliptables.Chain("KUBE-SVC-5555555555555555"): ":KUBE-SVC-5555555555555555 - [0:0]",
		utiliptables.Chain("KUBE-SVC-6666666666666666"): ":KUBE-SVC-6666666666666666 - [0:0]",
	}
	checkAllLines(t, utiliptables.TableNAT, []byte(iptables_save), expected)
}

func newFakeServiceInfo(service proxy.ServicePortName, ip net.IP, port int, protocol api.Protocol, onlyNodeLocalEndpoints bool) *serviceInfo {
	return &serviceInfo{
		sessionAffinityType: api.ServiceAffinityNone, // default
		stickyMaxAgeMinutes: 180, // TODO: paramaterize this in the API.
		stickyMaxAgeMinutes: 180,
		clusterIP: ip,
		port: port,
		protocol: protocol,
@@ -381,187 +245,34 @@ func (fake *fakeHealthChecker) SyncEndpoints(newEndpoints map[types.NamespacedNa
	return nil
}

func getFakeHnsNetwork() *hnsNetworkInfo {
	return &hnsNetworkInfo{
		id: "00000000-0000-0000-0000-000000000001",
		name: "fakeNetwork",
	}, nil
}

const testHostname = "test-hostname"

func NewFakeProxier(ipt utiliptables.Interface) *Proxier {
func NewFakeProxier() *Proxier {
	fakeHnsNetwork := getFakeHnsNetwork()
	// TODO: Call NewProxier after refactoring out the goroutine
	// invocation into a Run() method.
	p := &Proxier{
		exec: &fakeexec.FakeExec{},
		serviceMap: make(proxyServiceMap),
		serviceChanges: newServiceChangeMap(),
		endpointsMap: make(proxyEndpointsMap),
		endpointsChanges: newEndpointsChangeMap(testHostname),
		iptables: ipt,
		clusterCIDR: "10.0.0.0/24",
		hostname: testHostname,
		portsMap: make(map[localPort]closeable),
		portMapper: &fakePortOpener{[]*localPort{}},
		healthChecker: newFakeHealthChecker(),
		precomputedProbabilities: make([]string, 0, 1001),
		iptablesData: bytes.NewBuffer(nil),
		filterChains: bytes.NewBuffer(nil),
		filterRules: bytes.NewBuffer(nil),
		natChains: bytes.NewBuffer(nil),
		natRules: bytes.NewBuffer(nil),
		serviceMap: make(proxyServiceMap),
		serviceChanges: newServiceChangeMap(),
		endpointsMap: make(proxyEndpointsMap),
		endpointsChanges: newEndpointsChangeMap(testHostname),
		clusterCIDR: "10.0.0.0/24",
		hostname: testHostname,
		portsMap: make(map[localPort]closeable),
		healthChecker: newFakeHealthChecker(),
		network: fakeHnsNetwork,
	}
	p.syncRunner = async.NewBoundedFrequencyRunner("test-sync-runner", p.syncProxyRules, 0, time.Minute, 1)
	return p
}

func hasJump(rules []iptablestest.Rule, destChain, destIP string, destPort int) bool {
	destPortStr := strconv.Itoa(destPort)
	match := false
	for _, r := range rules {
		if r[iptablestest.Jump] == destChain {
			match = true
			if destIP != "" {
				if strings.Contains(r[iptablestest.Destination], destIP) && (strings.Contains(r[iptablestest.DPort], destPortStr) || r[iptablestest.DPort] == "") {
					return true
				}
				match = false
			}
			if destPort != 0 {
				if strings.Contains(r[iptablestest.DPort], destPortStr) && (strings.Contains(r[iptablestest.Destination], destIP) || r[iptablestest.Destination] == "") {
					return true
				}
				match = false
			}
		}
	}
	return match
}

func TestHasJump(t *testing.T) {
	testCases := map[string]struct {
		rules []iptablestest.Rule
		destChain string
		destIP string
		destPort int
		expected bool
	}{
		"case 1": {
			// Match the 1st rule(both dest IP and dest Port)
			rules: []iptablestest.Rule{
				{"-d ": "10.20.30.41/32", "--dport ": "80", "-p ": "tcp", "-j ": "REJECT"},
				{"--dport ": "3001", "-p ": "tcp", "-j ": "KUBE-MARK-MASQ"},
			},
			destChain: "REJECT",
			destIP: "10.20.30.41",
			destPort: 80,
			expected: true,
		},
		"case 2": {
			// Match the 2nd rule(dest Port)
			rules: []iptablestest.Rule{
				{"-d ": "10.20.30.41/32", "-p ": "tcp", "-j ": "REJECT"},
				{"--dport ": "3001", "-p ": "tcp", "-j ": "REJECT"},
			},
			destChain: "REJECT",
			destIP: "",
			destPort: 3001,
			expected: true,
		},
		"case 3": {
			// Match both dest IP and dest Port
			rules: []iptablestest.Rule{
				{"-d ": "1.2.3.4/32", "--dport ": "80", "-p ": "tcp", "-j ": "KUBE-XLB-GF53O3C2HZEXL2XN"},
			},
			destChain: "KUBE-XLB-GF53O3C2HZEXL2XN",
			destIP: "1.2.3.4",
			destPort: 80,
			expected: true,
		},
		"case 4": {
			// Match dest IP but doesn't match dest Port
			rules: []iptablestest.Rule{
				{"-d ": "1.2.3.4/32", "--dport ": "80", "-p ": "tcp", "-j ": "KUBE-XLB-GF53O3C2HZEXL2XN"},
			},
			destChain: "KUBE-XLB-GF53O3C2HZEXL2XN",
			destIP: "1.2.3.4",
			destPort: 8080,
			expected: false,
		},
		"case 5": {
			// Match dest Port but doesn't match dest IP
			rules: []iptablestest.Rule{
				{"-d ": "1.2.3.4/32", "--dport ": "80", "-p ": "tcp", "-j ": "KUBE-XLB-GF53O3C2HZEXL2XN"},
			},
			destChain: "KUBE-XLB-GF53O3C2HZEXL2XN",
			destIP: "10.20.30.40",
			destPort: 80,
			expected: false,
		},
		"case 6": {
			// Match the 2nd rule(dest IP)
			rules: []iptablestest.Rule{
				{"-d ": "10.20.30.41/32", "-p ": "tcp", "-j ": "REJECT"},
				{"-d ": "1.2.3.4/32", "-p ": "tcp", "-j ": "REJECT"},
				{"--dport ": "3001", "-p ": "tcp", "-j ": "REJECT"},
			},
			destChain: "REJECT",
			destIP: "1.2.3.4",
			destPort: 8080,
			expected: true,
		},
		"case 7": {
			// Match the 2nd rule(dest Port)
			rules: []iptablestest.Rule{
				{"-d ": "10.20.30.41/32", "-p ": "tcp", "-j ": "REJECT"},
				{"--dport ": "3001", "-p ": "tcp", "-j ": "REJECT"},
			},
			destChain: "REJECT",
			destIP: "1.2.3.4",
			destPort: 3001,
			expected: true,
		},
		"case 8": {
			// Match the 1st rule(dest IP)
			rules: []iptablestest.Rule{
				{"-d ": "10.20.30.41/32", "-p ": "tcp", "-j ": "REJECT"},
				{"--dport ": "3001", "-p ": "tcp", "-j ": "REJECT"},
			},
			destChain: "REJECT",
			destIP: "10.20.30.41",
			destPort: 8080,
			expected: true,
		},
		"case 9": {
			rules: []iptablestest.Rule{
				{"-j ": "KUBE-SEP-LWSOSDSHMKPJHHJV"},
			},
			destChain: "KUBE-SEP-LWSOSDSHMKPJHHJV",
			destIP: "",
			destPort: 0,
			expected: true,
		},
		"case 10": {
			rules: []iptablestest.Rule{
				{"-j ": "KUBE-SEP-FOO"},
			},
			destChain: "KUBE-SEP-BAR",
			destIP: "",
			destPort: 0,
			expected: false,
		},
	}

	for k, tc := range testCases {
		if got := hasJump(tc.rules, tc.destChain, tc.destIP, tc.destPort); got != tc.expected {
			t.Errorf("%v: expected %v, got %v", k, tc.expected, got)
		}
	}
}

func hasDNAT(rules []iptablestest.Rule, endpoint string) bool {
	for _, r := range rules {
		if r[iptablestest.ToDest] == endpoint {
			return true
		}
	}
	return false
}

func errorf(msg string, rules []iptablestest.Rule, t *testing.T) {
	for _, r := range rules {
		t.Logf("%q", r)
@@ -569,100 +280,8 @@ func errorf(msg string, rules []iptablestest.Rule, t *testing.T) {
	t.Errorf("%v", msg)
}

func TestClusterIPReject(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	svcIP := "10.20.30.41"
	svcPort := 80
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port: "p80",
	}

	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Namespace, func(svc *api.Service) {
			svc.Spec.ClusterIP = svcIP
			svc.Spec.Ports = []api.ServicePort{{
				Name: svcPortName.Port,
				Port: int32(svcPort),
				Protocol: api.ProtocolTCP,
			}}
		}),
	)
	makeEndpointsMap(fp)
	fp.syncProxyRules()

	svcChain := string(servicePortChainName(svcPortName.String(), strings.ToLower(string(api.ProtocolTCP))))
	svcRules := ipt.GetRules(svcChain)
	if len(svcRules) != 0 {
		errorf(fmt.Sprintf("Unexpected rule for chain %v service %v without endpoints", svcChain, svcPortName), svcRules, t)
	}
	kubeSvcRules := ipt.GetRules(string(kubeServicesChain))
	if !hasJump(kubeSvcRules, iptablestest.Reject, svcIP, svcPort) {
		errorf(fmt.Sprintf("Failed to find a %v rule for service %v with no endpoints", iptablestest.Reject, svcPortName), kubeSvcRules, t)
	}
}

func TestClusterIPEndpointsJump(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	svcIP := "10.20.30.41"
	svcPort := 80
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port: "p80",
	}

	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *api.Service) {
			svc.Spec.ClusterIP = svcIP
			svc.Spec.Ports = []api.ServicePort{{
				Name: svcPortName.Port,
				Port: int32(svcPort),
				Protocol: api.ProtocolTCP,
			}}
		}),
	)

	epIP := "10.180.0.1"
	makeEndpointsMap(fp,
		makeTestEndpoints(svcPortName.Namespace, svcPortName.Name, func(ept *api.Endpoints) {
			ept.Subsets = []api.EndpointSubset{{
				Addresses: []api.EndpointAddress{{
					IP: epIP,
				}},
				Ports: []api.EndpointPort{{
					Name: svcPortName.Port,
					Port: int32(svcPort),
				}},
			}}
		}),
	)

	fp.syncProxyRules()

	epStr := fmt.Sprintf("%s:%d", epIP, svcPort)
	svcChain := string(servicePortChainName(svcPortName.String(), strings.ToLower(string(api.ProtocolTCP))))
	epChain := string(servicePortEndpointChainName(svcPortName.String(), strings.ToLower(string(api.ProtocolTCP)), epStr))

	kubeSvcRules := ipt.GetRules(string(kubeServicesChain))
	if !hasJump(kubeSvcRules, svcChain, svcIP, svcPort) {
		errorf(fmt.Sprintf("Failed to find jump from KUBE-SERVICES to %v chain", svcChain), kubeSvcRules, t)
	}

	svcRules := ipt.GetRules(svcChain)
	if !hasJump(svcRules, epChain, "", 0) {
		errorf(fmt.Sprintf("Failed to jump to ep chain %v", epChain), svcRules, t)
	}
	epRules := ipt.GetRules(epChain)
	if !hasDNAT(epRules, epStr) {
		errorf(fmt.Sprintf("Endpoint chain %v lacks DNAT to %v", epChain, epStr), epRules, t)
	}
}

func TestLoadBalancer(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	fp := NewFakeProxier()
svcIP := "10.20.30.41"
|
||||
svcPort := 80
|
||||
svcNodePort := 3001
|
||||
@@ -710,20 +329,13 @@ func TestLoadBalancer(t *testing.T) {
|
||||
svcChain := string(servicePortChainName(svcPortName.String(), proto))
|
||||
//lbChain := string(serviceLBChainName(svcPortName.String(), proto))
|
||||
|
||||
kubeSvcRules := ipt.GetRules(string(kubeServicesChain))
|
||||
if !hasJump(kubeSvcRules, fwChain, svcLBIP, svcPort) {
|
||||
errorf(fmt.Sprintf("Failed to find jump to firewall chain %v", fwChain), kubeSvcRules, t)
|
||||
}
|
||||
// TODO
|
||||
|
||||
fwRules := ipt.GetRules(fwChain)
|
||||
if !hasJump(fwRules, svcChain, "", 0) || !hasJump(fwRules, string(KubeMarkMasqChain), "", 0) {
|
||||
errorf(fmt.Sprintf("Failed to find jump from firewall chain %v to svc chain %v", fwChain, svcChain), fwRules, t)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNodePort(t *testing.T) {
|
||||
ipt := iptablestest.NewFake()
|
||||
fp := NewFakeProxier(ipt)
|
||||
|
||||
fp := NewFakeProxier()
|
||||
svcIP := "10.20.30.41"
|
||||
svcPort := 80
|
||||
svcNodePort := 3001
|
||||
@@ -765,15 +377,12 @@ func TestNodePort(t *testing.T) {
|
||||
proto := strings.ToLower(string(api.ProtocolTCP))
|
||||
svcChain := string(servicePortChainName(svcPortName.String(), proto))
|
||||
|
||||
kubeNodePortRules := ipt.GetRules(string(kubeNodePortsChain))
|
||||
if !hasJump(kubeNodePortRules, svcChain, "", svcNodePort) {
|
||||
errorf(fmt.Sprintf("Failed to find jump to svc chain %v", svcChain), kubeNodePortRules, t)
|
||||
}
|
||||
// TODO
|
||||
}
|
||||
|
||||
func TestExternalIPsReject(t *testing.T) {
|
||||
ipt := iptablestest.NewFake()
|
||||
fp := NewFakeProxier(ipt)
|
||||
|
||||
fp := NewFakeProxier()
|
||||
svcIP := "10.20.30.41"
|
||||
svcPort := 80
|
||||
svcExternalIPs := "50.60.70.81"
|
||||
@@ -799,15 +408,11 @@ func TestExternalIPsReject(t *testing.T) {
|
||||
|
||||
fp.syncProxyRules()
|
||||
|
||||
kubeSvcRules := ipt.GetRules(string(kubeServicesChain))
|
||||
if !hasJump(kubeSvcRules, iptablestest.Reject, svcExternalIPs, svcPort) {
|
||||
errorf(fmt.Sprintf("Failed to a %v rule for externalIP %v with no endpoints", iptablestest.Reject, svcPortName), kubeSvcRules, t)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNodePortReject(t *testing.T) {
|
||||
ipt := iptablestest.NewFake()
|
||||
fp := NewFakeProxier(ipt)
|
||||
|
||||
fp := NewFakeProxier()
|
||||
svcIP := "10.20.30.41"
|
||||
svcPort := 80
|
||||
svcNodePort := 3001
|
||||
@@ -832,10 +437,7 @@ func TestNodePortReject(t *testing.T) {
|
||||
|
||||
fp.syncProxyRules()
|
||||
|
||||
kubeSvcRules := ipt.GetRules(string(kubeServicesChain))
|
||||
if !hasJump(kubeSvcRules, iptablestest.Reject, svcIP, svcNodePort) {
|
||||
errorf(fmt.Sprintf("Failed to find a %v rule for service %v with no endpoints", iptablestest.Reject, svcPortName), kubeSvcRules, t)
|
||||
}
|
||||
// TODO
|
||||
}
|
||||
|
||||
func strPtr(s string) *string {
|
||||
@@ -843,8 +445,8 @@ func strPtr(s string) *string {
|
||||
}
|
||||
|
||||
func TestOnlyLocalLoadBalancing(t *testing.T) {
|
||||
ipt := iptablestest.NewFake()
|
||||
fp := NewFakeProxier(ipt)
|
||||
|
||||
fp := NewFakeProxier()
|
||||
svcIP := "10.20.30.41"
|
||||
	svcPort := 80
	svcNodePort := 3001
@@ -902,43 +504,24 @@ func TestOnlyLocalLoadBalancing(t *testing.T) {
	nonLocalEpChain := string(servicePortEndpointChainName(svcPortName.String(), strings.ToLower(string(api.ProtocolTCP)), epStrLocal))
	localEpChain := string(servicePortEndpointChainName(svcPortName.String(), strings.ToLower(string(api.ProtocolTCP)), epStrNonLocal))

	kubeSvcRules := ipt.GetRules(string(kubeServicesChain))
	if !hasJump(kubeSvcRules, fwChain, svcLBIP, svcPort) {
		errorf(fmt.Sprintf("Failed to find jump to firewall chain %v", fwChain), kubeSvcRules, t)
	}

	fwRules := ipt.GetRules(fwChain)
	if !hasJump(fwRules, lbChain, "", 0) {
		errorf(fmt.Sprintf("Failed to find jump from firewall chain %v to svc chain %v", fwChain, lbChain), fwRules, t)
	}
	if hasJump(fwRules, string(KubeMarkMasqChain), "", 0) {
		errorf(fmt.Sprintf("Found jump from fw chain %v to MASQUERADE", fwChain), fwRules, t)
	}

	lbRules := ipt.GetRules(lbChain)
	if hasJump(lbRules, nonLocalEpChain, "", 0) {
		errorf(fmt.Sprintf("Found jump from lb chain %v to non-local ep %v", lbChain, epStrLocal), lbRules, t)
	}
	if !hasJump(lbRules, localEpChain, "", 0) {
		errorf(fmt.Sprintf("Didn't find jump from lb chain %v to local ep %v", lbChain, epStrNonLocal), lbRules, t)
	}
	// TODO
}

func TestOnlyLocalNodePortsNoClusterCIDR(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)

	fp := NewFakeProxier()
	// set cluster CIDR to empty before test
	fp.clusterCIDR = ""
	onlyLocalNodePorts(t, fp, ipt)
	onlyLocalNodePorts(t, fp)
}

func TestOnlyLocalNodePorts(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	onlyLocalNodePorts(t, fp, ipt)

	fp := NewFakeProxier()
	onlyLocalNodePorts(t, fp)
}

func onlyLocalNodePorts(t *testing.T, fp *Proxier, ipt *iptablestest.FakeIPTables) {
func onlyLocalNodePorts(t *testing.T, fp *Proxier) {
	shouldLBTOSVCRuleExist := len(fp.clusterCIDR) > 0
	svcIP := "10.20.30.41"
	svcPort := 80
@@ -986,32 +569,7 @@ func onlyLocalNodePorts(t *testing.T, fp *Proxier, ipt *iptablestest.FakeIPTable

	fp.syncProxyRules()

	proto := strings.ToLower(string(api.ProtocolTCP))
	lbChain := string(serviceLBChainName(svcPortName.String(), proto))

	nonLocalEpChain := string(servicePortEndpointChainName(svcPortName.String(), proto, epStrLocal))
	localEpChain := string(servicePortEndpointChainName(svcPortName.String(), proto, epStrNonLocal))

	kubeNodePortRules := ipt.GetRules(string(kubeNodePortsChain))
	if !hasJump(kubeNodePortRules, lbChain, "", svcNodePort) {
		errorf(fmt.Sprintf("Failed to find jump to lb chain %v", lbChain), kubeNodePortRules, t)
	}

	svcChain := string(servicePortChainName(svcPortName.String(), proto))
	lbRules := ipt.GetRules(lbChain)
	if hasJump(lbRules, nonLocalEpChain, "", 0) {
		errorf(fmt.Sprintf("Found jump from lb chain %v to non-local ep %v", lbChain, epStrLocal), lbRules, t)
	}
	if hasJump(lbRules, svcChain, "", 0) != shouldLBTOSVCRuleExist {
		prefix := "Did not find "
		if !shouldLBTOSVCRuleExist {
			prefix = "Found "
		}
		errorf(fmt.Sprintf("%s jump from lb chain %v to svc %v", prefix, lbChain, svcChain), lbRules, t)
	}
	if !hasJump(lbRules, localEpChain, "", 0) {
		errorf(fmt.Sprintf("Didn't find jump from lb chain %v to local ep %v", lbChain, epStrLocal), lbRules, t)
	}
	// TODO
}

func makeTestService(namespace, name string, svcFunc func(*api.Service)) *api.Service {
@@ -1040,8 +598,8 @@ func addTestPort(array []api.ServicePort, name string, protocol api.Protocol, po
}

func TestBuildServiceMapAddRemove(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)

	fp := NewFakeProxier()

	services := []*api.Service{
		makeTestService("somewhere-else", "cluster-ip", func(svc *api.Service) {
@@ -1146,8 +704,8 @@ func TestBuildServiceMapAddRemove(t *testing.T) {
}

func TestBuildServiceMapServiceHeadless(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)

	fp := NewFakeProxier()

	makeServiceMap(fp,
		makeTestService("somewhere-else", "headless", func(svc *api.Service) {
@@ -1178,8 +736,8 @@ func TestBuildServiceMapServiceHeadless(t *testing.T) {
}

func TestBuildServiceMapServiceTypeExternalName(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)

	fp := NewFakeProxier()

	makeServiceMap(fp,
		makeTestService("somewhere-else", "external-name", func(svc *api.Service) {
@@ -1204,8 +762,7 @@ func TestBuildServiceMapServiceTypeExternalName(t *testing.T) {
}

func TestBuildServiceMapServiceUpdate(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	fp := NewFakeProxier()

	servicev1 := makeTestService("somewhere", "some-service", func(svc *api.Service) {
		svc.Spec.Type = api.ServiceTypeClusterIP
@@ -2415,8 +1972,8 @@ func Test_updateEndpointsMap(t *testing.T) {
	}

	for tci, tc := range testCases {
		ipt := iptablestest.NewFake()
		fp := NewFakeProxier(ipt)

		fp := NewFakeProxier()
		fp.hostname = nodeName

		// First check that after adding all previous versions of endpoints,