Merge pull request #44056 from thockin/proxy-sync-reason

Automatic merge from submit-queue

add a reason code to syncProxyRules

part of async prep
Authored by Kubernetes Submit Queue on 2017-04-04 19:56:56 -07:00; committed by GitHub
commit 2db4affb9d
2 changed files with 15 additions and 9 deletions
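
Before the diff, a minimal, self-contained sketch of the pattern this change introduces: callers tag each sync invocation with a typed "reason" string so the sync routine can log why it ran. The syncReason constants mirror the ones added below; demoProxier and syncRules are illustrative names only, not the real kube-proxy code.

	package main

	import (
		"log"
		"time"
	)

	type syncReason string

	const (
		syncReasonServices  syncReason = "ServicesUpdate"
		syncReasonEndpoints syncReason = "EndpointsUpdate"
		syncReasonForce     syncReason = "Force"
	)

	// demoProxier is a stand-in for the real Proxier; it exists only to show
	// how the reason tag flows into the timing log message.
	type demoProxier struct{}

	func (p *demoProxier) syncRules(reason syncReason) {
		start := time.Now()
		defer func() {
			log.Printf("syncRules(%s) took %v", reason, time.Since(start))
		}()
		// ... rule programming would happen here in the real proxier ...
		time.Sleep(10 * time.Millisecond)
	}

	func main() {
		p := &demoProxier{}
		p.syncRules(syncReasonForce)     // explicit Sync() / test-driven call
		p.syncRules(syncReasonServices)  // services changed
		p.syncRules(syncReasonEndpoints) // endpoints changed
	}

The diff below applies exactly this idea: each existing call site of syncProxyRules now passes the reason that triggered the sync.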

@@ -436,7 +436,7 @@ func CleanupLeftovers(ipt utiliptables.Interface) (encounteredError bool) {
 func (proxier *Proxier) Sync() {
 	proxier.mu.Lock()
 	defer proxier.mu.Unlock()
-	proxier.syncProxyRules()
+	proxier.syncProxyRules(syncReasonForce)
 }
 
 // SyncLoop runs periodic work. This is expected to run as a goroutine or as the main loop of the app. It does not return.
@@ -554,7 +554,7 @@ func (proxier *Proxier) OnServiceUpdate(allServices []*api.Service) {
 	if len(newServiceMap) != len(proxier.serviceMap) || !reflect.DeepEqual(newServiceMap, proxier.serviceMap) {
 		proxier.serviceMap = newServiceMap
-		proxier.syncProxyRules()
+		proxier.syncProxyRules(syncReasonServices)
 	} else {
 		glog.V(4).Infof("Skipping proxy iptables rule sync on service update because nothing changed")
 	}
@@ -575,7 +575,7 @@ func (proxier *Proxier) OnEndpointsUpdate(allEndpoints []*api.Endpoints) {
 	newMap, staleConnections := buildNewEndpointsMap(proxier.allEndpoints, proxier.endpointsMap, proxier.hostname, proxier.healthChecker)
 	if len(newMap) != len(proxier.endpointsMap) || !reflect.DeepEqual(newMap, proxier.endpointsMap) {
 		proxier.endpointsMap = newMap
-		proxier.syncProxyRules()
+		proxier.syncProxyRules(syncReasonEndpoints)
 	} else {
 		glog.V(4).Infof("Skipping proxy iptables rule sync on endpoint update because nothing changed")
 	}
@@ -757,16 +757,22 @@ func (proxier *Proxier) deleteEndpointConnections(connectionMap map[endpointServ
 	}
 }
 
+type syncReason string
+
+const syncReasonServices syncReason = "ServicesUpdate"
+const syncReasonEndpoints syncReason = "EndpointsUpdate"
+const syncReasonForce syncReason = "Force"
+
 // This is where all of the iptables-save/restore calls happen.
 // The only other iptables rules are those that are setup in iptablesInit()
 // assumes proxier.mu is held
-func (proxier *Proxier) syncProxyRules() {
+func (proxier *Proxier) syncProxyRules(reason syncReason) {
 	if proxier.throttle != nil {
 		proxier.throttle.Accept()
 	}
 	start := time.Now()
 	defer func() {
-		glog.V(4).Infof("syncProxyRules took %v", time.Since(start))
+		glog.V(4).Infof("syncProxyRules(%s) took %v", reason, time.Since(start))
 	}()
 	// don't sync rules till we've received services and endpoints
 	if proxier.allEndpoints == nil || proxier.allServices == nil {

@@ -546,7 +546,7 @@ func TestClusterIPReject(t *testing.T) {
 	svc := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "ns1", Name: svcName}, Port: "p80"}
 	fp.serviceMap[svc] = newFakeServiceInfo(svc, svcIP, 80, api.ProtocolTCP, false)
-	fp.syncProxyRules()
+	fp.syncProxyRules(syncReasonForce)
 
 	svcChain := string(servicePortChainName(svc, strings.ToLower(string(api.ProtocolTCP))))
 	svcRules := ipt.GetRules(svcChain)
@@ -638,7 +638,7 @@ func TestLoadBalancer(t *testing.T) {
 		}),
 	}
-	fp.syncProxyRules()
+	fp.syncProxyRules(syncReasonForce)
 
 	proto := strings.ToLower(string(api.ProtocolTCP))
 	fwChain := string(serviceFirewallChainName(svc, proto))
@@ -683,7 +683,7 @@ func TestNodePort(t *testing.T) {
 		}),
 	}
-	fp.syncProxyRules()
+	fp.syncProxyRules(syncReasonForce)
 
 	proto := strings.ToLower(string(api.ProtocolTCP))
 	svcChain := string(servicePortChainName(svc, strings.ToLower(proto)))
@@ -705,7 +705,7 @@ func TestNodePortReject(t *testing.T) {
 	svcInfo.nodePort = 3001
 	fp.serviceMap[svc] = svcInfo
-	fp.syncProxyRules()
+	fp.syncProxyRules(syncReasonForce)
 
 	kubeSvcRules := ipt.GetRules(string(kubeServicesChain))
 	if !hasJump(kubeSvcRules, iptablestest.Reject, svcIP.String(), 3001) {