From 3fdc343a983eea74a6ea6464cf0c7e87bc5b3e6b Mon Sep 17 00:00:00 2001
From: Mandar U Jog
Date: Tue, 15 Nov 2016 11:29:36 -0800
Subject: [PATCH] Handle Empty clusterCIDR

Empty clusterCIDR causes invalid rules generation.
Fixes issue #36652
---
 pkg/proxy/iptables/proxier.go      | 22 +++++++++++++++-------
 pkg/proxy/iptables/proxier_test.go | 21 +++++++++++++++++++++
 2 files changed, 36 insertions(+), 7 deletions(-)

diff --git a/pkg/proxy/iptables/proxier.go b/pkg/proxy/iptables/proxier.go
index 85c7dd45a18..deabe872cd0 100644
--- a/pkg/proxy/iptables/proxier.go
+++ b/pkg/proxy/iptables/proxier.go
@@ -250,6 +250,10 @@ func NewProxier(ipt utiliptables.Interface, sysctl utilsysctl.Interface, exec ut
 		nodeIP = net.ParseIP("127.0.0.1")
 	}
 
+	if len(clusterCIDR) == 0 {
+		glog.Warningf("clusterCIDR not specified, unable to distinguish between internal and external traffic")
+	}
+
 	go healthcheck.Run()
 
 	var throttle flowcontrol.RateLimiter
@@ -1221,13 +1225,17 @@ func (proxier *Proxier) syncProxyRules() {
 		}
 		// First rule in the chain redirects all pod -> external vip traffic to the
 		// Service's ClusterIP instead. This happens whether or not we have local
-		// endpoints.
-		args = []string{
-			"-A", string(svcXlbChain),
-			"-m", "comment", "--comment",
-			fmt.Sprintf(`"Redirect pods trying to reach external loadbalancer VIP to clusterIP"`),
+		// endpoints; only if clusterCIDR is specified
+		if len(proxier.clusterCIDR) > 0 {
+			args = []string{
+				"-A", string(svcXlbChain),
+				"-m", "comment", "--comment",
+				fmt.Sprintf(`"Redirect pods trying to reach external loadbalancer VIP to clusterIP"`),
+				"-s", proxier.clusterCIDR,
+				"-j", string(svcChain),
+			}
+			writeLine(natRules, args...)
 		}
-		writeLine(natRules, append(args, "-s", proxier.clusterCIDR, "-j", string(svcChain))...)
 
 		numLocalEndpoints := len(localEndpointChains)
 		if numLocalEndpoints == 0 {
@@ -1300,7 +1308,7 @@ func (proxier *Proxier) syncProxyRules() {
 	glog.V(3).Infof("Restoring iptables rules: %s", lines)
 	err = proxier.iptables.RestoreAll(lines, utiliptables.NoFlushTables, utiliptables.RestoreCounters)
 	if err != nil {
-		glog.Errorf("Failed to execute iptables-restore: %v", err)
+		glog.Errorf("Failed to execute iptables-restore: %v\nRules:\n%s", err, lines)
 		// Revert new local ports.
 		revertPorts(replacementPortsMap, proxier.portsMap)
 		return
diff --git a/pkg/proxy/iptables/proxier_test.go b/pkg/proxy/iptables/proxier_test.go
index ad0a028f10e..ac02b9ce643 100644
--- a/pkg/proxy/iptables/proxier_test.go
+++ b/pkg/proxy/iptables/proxier_test.go
@@ -696,9 +696,22 @@ func TestOnlyLocalLoadBalancing(t *testing.T) {
 	}
 }
 
+func TestOnlyLocalNodePortsNoClusterCIDR(t *testing.T) {
+	ipt := iptablestest.NewFake()
+	fp := NewFakeProxier(ipt)
+	// set cluster CIDR to empty before test
+	fp.clusterCIDR = ""
+	onlyLocalNodePorts(t, fp, ipt)
+}
+
 func TestOnlyLocalNodePorts(t *testing.T) {
 	ipt := iptablestest.NewFake()
 	fp := NewFakeProxier(ipt)
+	onlyLocalNodePorts(t, fp, ipt)
+}
+
+func onlyLocalNodePorts(t *testing.T, fp *Proxier, ipt *iptablestest.FakeIPTables) {
+	shouldLBTOSVCRuleExist := len(fp.clusterCIDR) > 0
 
 	svcName := "svc1"
 	svcIP := net.IPv4(10, 20, 30, 41)
@@ -724,10 +737,18 @@ func TestOnlyLocalNodePorts(t *testing.T) {
 		errorf(fmt.Sprintf("Failed to find jump to lb chain %v", lbChain), kubeNodePortRules, t)
 	}
 
+	svcChain := string(servicePortChainName(svc, strings.ToLower(string(api.ProtocolTCP))))
 	lbRules := ipt.GetRules(lbChain)
 	if hasJump(lbRules, nonLocalEpChain, "", "") {
 		errorf(fmt.Sprintf("Found jump from lb chain %v to non-local ep %v", lbChain, nonLocalEp), lbRules, t)
 	}
+	if hasJump(lbRules, svcChain, "", "") != shouldLBTOSVCRuleExist {
+		prefix := "Did not find "
+		if !shouldLBTOSVCRuleExist {
+			prefix = "Found "
+		}
+		errorf(fmt.Sprintf("%s jump from lb chain %v to svc %v", prefix, lbChain, svcChain), lbRules, t)
+	}
 	if !hasJump(lbRules, localEpChain, "", "") {
 		errorf(fmt.Sprintf("Didn't find jump from lb chain %v to local ep %v", lbChain, nonLocalEp), lbRules, t)
 	}
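
A minimal standalone sketch of the failure mode the new clusterCIDR guard avoids. It only mirrors the shape of the removed writeLine/append call; the helper name buildXlbRedirectRule, the chain names, and the 10.244.0.0/16 CIDR below are made-up placeholders for illustration, not values taken from the proxier.

package main

import (
	"fmt"
	"strings"
)

// buildXlbRedirectRule mimics how the old code assembled the XLB redirect
// rule: it always appended "-s", clusterCIDR, so an empty clusterCIDR left a
// bare "-s" with no CIDR argument behind it.
func buildXlbRedirectRule(clusterCIDR, svcXlbChain, svcChain string) string {
	args := []string{
		"-A", svcXlbChain,
		"-m", "comment", "--comment",
		`"Redirect pods trying to reach external loadbalancer VIP to clusterIP"`,
	}
	args = append(args, "-s", clusterCIDR, "-j", svcChain)
	return strings.Join(args, " ")
}

func main() {
	// With a CIDR the rule is well formed.
	fmt.Println(buildXlbRedirectRule("10.244.0.0/16", "KUBE-XLB-EXAMPLE", "KUBE-SVC-EXAMPLE"))
	// With an empty CIDR the output contains "-s  -j ...": a source match with
	// no argument, which iptables-restore rejects, failing the whole restore.
	fmt.Println(buildXlbRedirectRule("", "KUBE-XLB-EXAMPLE", "KUBE-SVC-EXAMPLE"))
}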