Merge pull request #53708 from m1093782566/cleanup-winkernel-proxy

Automatic merge from submit-queue (batch tested with PRs 52471, 53708). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

clean up winkernel proxy

**What this PR does / why we need it**:

Clean up the winkernel proxy: update the copyright year, remove a stale comment referencing iptablesInit(), drop the redundant `if len(proxier.endpointsMap[svcName]) > 0` guard around the endpoint loop, and fix a comment typo ("speficied" -> "specified").

**Which issue this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close that issue when PR gets merged)*: fixes #

**Special notes for your reviewer**:

**Release note**:

```release-note
NONE
```
Merged by Kubernetes Submit Queue on 2017-10-20 22:46:12 -07:00 (committed by GitHub), commit 48da128d83.


@@ -1,7 +1,7 @@
 // +build windows

 /*
-Copyright 2015 The Kubernetes Authors.
+Copyright 2017 The Kubernetes Authors.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -925,8 +925,7 @@ func serviceToServiceMap(service *api.Service) proxyServiceMap {
 	return serviceMap
 }

-// This is where all of the hns -save/restore calls happen.
-// The only other hns rules are those that are setup in iptablesInit()
+// This is where all of the hns save/restore calls happen.
 // assumes proxier.mu is held
 func (proxier *Proxier) syncProxyRules() {
 	proxier.mu.Lock()
@@ -970,7 +969,7 @@ func (proxier *Proxier) syncProxyRules() {
 		var hnsEndpoints []hcsshim.HNSEndpoint
 		glog.V(4).Infof("====Applying Policy for %s====", svcName)
 		// Create Remote endpoints for every endpoint, corresponding to the service
-		if len(proxier.endpointsMap[svcName]) > 0 {
 		for _, ep := range proxier.endpointsMap[svcName] {
 			var newHnsEndpoint *hcsshim.HNSEndpoint
 			hnsNetworkName := proxier.network.name
@@ -1017,7 +1016,6 @@ func (proxier *Proxier) syncProxyRules() {
 				ep.refCount++
 				Log(ep, "Endpoint resource found", 3)
 			}
-		}

 		glog.V(3).Infof("Associated endpoints [%s] for service [%s]", spew.Sdump(hnsEndpoints), svcName)
@@ -1050,7 +1048,7 @@ func (proxier *Proxier) syncProxyRules() {
 			svcInfo.hnsID = hnsLoadBalancer.ID
 			glog.V(3).Infof("Hns LoadBalancer resource created for cluster ip resources %v, Id [%s]", svcInfo.clusterIP, hnsLoadBalancer.ID)

-			// If nodePort is speficied, user should be able to use nodeIP:nodePort to reach the backend endpoints
+			// If nodePort is specified, user should be able to use nodeIP:nodePort to reach the backend endpoints
			if svcInfo.nodePort > 0 {
 				hnsLoadBalancer, err := getHnsLoadBalancer(
 					hnsEndpoints,
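
The dropped `if len(proxier.endpointsMap[svcName]) > 0` guard is safe to remove because a Go `range` over a nil or empty slice (or map) simply performs zero iterations. Below is a minimal standalone sketch of that behavior; it uses a simplified `map[string][]string` named `endpointsMap` in place of the proxier's real endpoint types, so the names and types here are illustrative assumptions, not the actual winkernel proxy code.

```go
package main

import "fmt"

func main() {
	// endpointsMap stands in for proxier.endpointsMap; the real map is keyed
	// by service name and holds endpoint info structs, but strings suffice here.
	endpointsMap := map[string][]string{
		"svc-a": {"10.0.0.1:80", "10.0.0.2:80"},
	}

	// Redundant form (what the PR removes): the length guard adds nothing,
	// because indexing a missing key yields a nil slice with len == 0.
	if len(endpointsMap["svc-b"]) > 0 {
		for _, ep := range endpointsMap["svc-b"] {
			fmt.Println("endpoint:", ep)
		}
	}

	// Cleaned-up form (what the PR keeps): ranging over a nil or empty slice
	// executes zero iterations, so no guard is needed.
	for _, ep := range endpointsMap["svc-b"] {
		fmt.Println("endpoint:", ep) // never reached for "svc-b"
	}
	for _, ep := range endpointsMap["svc-a"] {
		fmt.Println("endpoint:", ep) // prints both endpoints of svc-a
	}
}
```

Either form has identical observable behavior; the cleanup just removes the unnecessary check and its extra nesting.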