Don't create no-op iptables rules for services with no endpoints

Dan Winship 2017-12-18 14:06:50 -05:00
parent 6c91c420b6
commit 07ead7d8e2


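The diff below gates all of the per-service nat-table output on a new hasEndpoints check and, when a service has no endpoints, emits a single REJECT rule into the filter table instead of building an empty per-service chain. The following is a minimal, self-contained sketch of that pattern; the service type and the rulesFor helper are hypothetical stand-ins for illustration only, not the real proxier code, which works directly on proxier.serviceMap and proxier.endpointsMap as shown in the diff.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// Hypothetical, simplified types -- not the real kube-proxy structures.
type service struct {
	name      string // e.g. "default/web:http"
	clusterIP string // already in CIDR form, e.g. "10.0.0.10/32"
	protocol  string // "TCP" or "UDP"
	port      int
}

// rulesFor sketches the decision this commit introduces: nat-table rules are
// generated only when the service has endpoints; otherwise a single REJECT
// rule goes into the filter table, so clients get an immediate ICMP
// port-unreachable error rather than a silent drop or timeout.
func rulesFor(svc service, endpointCount int) []string {
	if endpointCount > 0 {
		// Placeholder for the per-service nat chain and its jump rules.
		return []string{"... nat rules jumping to the per-service KUBE-SVC chain ..."}
	}
	proto := strings.ToLower(svc.protocol)
	return []string{
		"-A KUBE-SERVICES" +
			fmt.Sprintf(" -m comment --comment %q", svc.name+" has no endpoints") +
			" -m " + proto + " -p " + proto +
			" -d " + svc.clusterIP +
			" --dport " + strconv.Itoa(svc.port) +
			" -j REJECT",
	}
}

func main() {
	// A service with zero endpoints yields only the filter-table REJECT rule.
	for _, rule := range rulesFor(service{"default/web:http", "10.0.0.10/32", "TCP", 80}, 0) {
		fmt.Println(rule)
	}
}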
@@ -824,7 +824,6 @@ func (proxier *Proxier) syncProxyRules() {
args := make([]string, 64)
// Build rules for each service.
var svcNameString string
for svcName, svc := range proxier.serviceMap {
svcInfo, ok := svc.(*serviceInfo)
if !ok {
@@ -833,16 +832,19 @@
}
isIPv6 := utilproxy.IsIPv6(svcInfo.clusterIP)
protocol := strings.ToLower(string(svcInfo.protocol))
svcNameString = svcInfo.serviceNameString
svcNameString := svcInfo.serviceNameString
hasEndpoints := len(proxier.endpointsMap[svcName]) > 0
// Create the per-service chain, retaining counters if possible.
svcChain := svcInfo.servicePortChainName
if hasEndpoints {
// Create the per-service chain, retaining counters if possible.
if chain, ok := existingNATChains[svcChain]; ok {
writeLine(proxier.natChains, chain)
} else {
writeLine(proxier.natChains, utiliptables.MakeChainLine(svcChain))
}
activeNATChains[svcChain] = true
}
svcXlbChain := svcInfo.serviceLBChainName
if svcInfo.onlyNodeLocalEndpoints {
@@ -854,12 +856,10 @@
writeLine(proxier.natChains, utiliptables.MakeChainLine(svcXlbChain))
}
activeNATChains[svcXlbChain] = true
} else if activeNATChains[svcXlbChain] {
// Cleanup the previously created XLB chain for this service
delete(activeNATChains, svcXlbChain)
}
// Capture the clusterIP.
if hasEndpoints {
args = append(args[:0],
"-A", string(kubeServicesChain),
"-m", "comment", "--comment", fmt.Sprintf(`"%s cluster IP"`, svcNameString),
@@ -878,6 +878,16 @@
writeLine(proxier.natRules, append(args, "! -s", proxier.clusterCIDR, "-j", string(KubeMarkMasqChain))...)
}
writeLine(proxier.natRules, append(args, "-j", string(svcChain))...)
} else {
writeLine(proxier.filterRules,
"-A", string(kubeServicesChain),
"-m", "comment", "--comment", fmt.Sprintf(`"%s has no endpoints"`, svcNameString),
"-m", protocol, "-p", protocol,
"-d", utilproxy.ToCIDR(svcInfo.clusterIP),
"--dport", strconv.Itoa(svcInfo.port),
"-j", "REJECT",
)
}
// Capture externalIPs.
for _, externalIP := range svcInfo.externalIPs {
@@ -913,7 +923,9 @@
}
replacementPortsMap[lp] = socket
}
} // We're holding the port, so it's OK to install iptables rules.
}
if hasEndpoints {
args = append(args[:0],
"-A", string(kubeServicesChain),
"-m", "comment", "--comment", fmt.Sprintf(`"%s external IP"`, svcNameString),
@@ -936,10 +948,7 @@
// Allow traffic bound for external IPs that happen to be recognized as local IPs to stay local.
// This covers cases like GCE load-balancers which get added to the local routing table.
writeLine(proxier.natRules, append(dstLocalOnlyArgs, "-j", string(svcChain))...)
// If the service has no endpoints then reject packets coming via externalIP
// Install ICMP Reject rule in filter table for destination=externalIP and dport=svcport
if len(proxier.endpointsMap[svcName]) == 0 {
} else {
writeLine(proxier.filterRules,
"-A", string(kubeServicesChain),
"-m", "comment", "--comment", fmt.Sprintf(`"%s has no endpoints"`, svcNameString),
@@ -952,6 +961,7 @@
}
// Capture load-balancer ingress.
if hasEndpoints {
fwChain := svcInfo.serviceFirewallChainName
for _, ingress := range svcInfo.loadBalancerStatus.Ingress {
if ingress.IP != "" {
@@ -1017,6 +1027,8 @@
writeLine(proxier.natRules, append(args, "-j", string(KubeMarkDropChain))...)
}
}
}
// FIXME: do we need REJECT rules for load-balancer ingress if !hasEndpoints?
// Capture nodeports. If we had more than 2 rules it might be
// worthwhile to make a new per-service chain for nodeport rules, but
@@ -1050,8 +1062,9 @@
}
}
replacementPortsMap[lp] = socket
} // We're holding the port, so it's OK to install iptables rules.
}
if hasEndpoints {
args = append(args[:0],
"-A", string(kubeNodePortsChain),
"-m", "comment", "--comment", svcNameString,
@@ -1075,12 +1088,7 @@
writeLine(proxier.natRules, append(args, "-s", loopback, "-j", string(KubeMarkMasqChain))...)
writeLine(proxier.natRules, append(args, "-j", string(svcXlbChain))...)
}
// If the service has no endpoints then reject packets. The filter
// table doesn't currently have the same per-service structure that
// the nat table does, so we just stick this into the kube-services
// chain.
if len(proxier.endpointsMap[svcName]) == 0 {
} else {
writeLine(proxier.filterRules,
"-A", string(kubeServicesChain),
"-m", "comment", "--comment", fmt.Sprintf(`"%s has no endpoints"`, svcNameString),
@@ -1092,21 +1100,10 @@
}
}
// If the service has no endpoints then reject packets.
if len(proxier.endpointsMap[svcName]) == 0 {
writeLine(proxier.filterRules,
"-A", string(kubeServicesChain),
"-m", "comment", "--comment", fmt.Sprintf(`"%s has no endpoints"`, svcNameString),
"-m", protocol, "-p", protocol,
"-d", utilproxy.ToCIDR(svcInfo.clusterIP),
"--dport", strconv.Itoa(svcInfo.port),
"-j", "REJECT",
)
if !hasEndpoints {
continue
}
// From here on, we assume there are active endpoints.
// Generate the per-endpoint chains. We do this in multiple passes so we
// can group rules together.
// These two slices parallel each other - keep in sync