Handle review comments for Fix goroutine leak in federation service controller

shashidharatd 2016-09-23 14:27:51 +05:30
parent d8ff4870cb
commit 690a06b9b8
3 changed files with 40 additions and 39 deletions


@@ -32,43 +32,44 @@ import (
 // It enforces that the syncHandler is never invoked concurrently with the same key.
 func (sc *ServiceController) clusterEndpointWorker() {
 	// process all pending events in endpointWorkerDoneChan
-	eventPending := true
-	for eventPending {
+ForLoop:
+	for {
 		select {
 		case clusterName := <-sc.endpointWorkerDoneChan:
 			sc.endpointWorkerMap[clusterName] = false
 		default:
 			// non-blocking, comes here if all existing events are processed
-			eventPending = false
-			break
+			break ForLoop
 		}
 	}
 	for clusterName, cache := range sc.clusterCache.clientMap {
-		workerExist, keyFound := sc.endpointWorkerMap[clusterName]
-		if keyFound && workerExist {
+		workerExist, found := sc.endpointWorkerMap[clusterName]
+		if found && workerExist {
 			continue
 		}
-		sc.endpointWorkerMap[clusterName] = true
 		// create a worker only if the previous worker has finished and gone out of scope
 		go func(cache *clusterCache, clusterName string) {
 			fedClient := sc.federationClient
 			for {
-				key, quit := cache.endpointQueue.Get()
-				// update endpoint cache
-				if quit {
-					// send signal that current worker has finished tasks and is going out of scope
-					sc.endpointWorkerDoneChan <- clusterName
-					return
-				}
-				defer cache.endpointQueue.Done(key)
-				err := sc.clusterCache.syncEndpoint(key.(string), clusterName, cache, sc.serviceCache, fedClient, sc)
-				if err != nil {
-					glog.V(2).Infof("Failed to sync endpoint: %+v", err)
-				}
+				func() {
+					key, quit := cache.endpointQueue.Get()
+					// update endpoint cache
+					if quit {
+						// send signal that current worker has finished tasks and is going out of scope
+						sc.endpointWorkerDoneChan <- clusterName
+						return
+					}
+					defer cache.endpointQueue.Done(key)
+					err := sc.clusterCache.syncEndpoint(key.(string), clusterName, cache, sc.serviceCache, fedClient, sc)
+					if err != nil {
+						glog.V(2).Infof("Failed to sync endpoint: %+v", err)
+					}
+				}()
 			}
 		}(cache, clusterName)
+		sc.endpointWorkerMap[clusterName] = true
 	}
 }
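Both workers in this commit take the same shape, so it is worth spelling out what the immediately invoked func() buys: inside the old for {}, the defer cache.endpointQueue.Done(key) would only run when the whole goroutine returned, so Done calls (and their keys) accumulated for as long as the worker lived. Wrapping each iteration in a closure makes the defer fire once per item. The sketch below illustrates the pattern with stdlib-only stand-ins; itemQueue, worker, and the item names are hypothetical, not the controller's real types, and unlike the hunk above it also propagates the quit signal out of the closure so the worker loop actually terminates.

package main

import "fmt"

// itemQueue is a hypothetical stand-in for the controller's endpoint/service
// queue: Get blocks until an item arrives or the queue is shut down, and Done
// must be called once an item has been processed.
type itemQueue struct {
	items chan string
}

func newItemQueue() *itemQueue { return &itemQueue{items: make(chan string, 16)} }

func (q *itemQueue) Add(item string)  { q.items <- item }
func (q *itemQueue) ShutDown()        { close(q.items) }
func (q *itemQueue) Done(item string) { fmt.Println("done:", item) }

// Get returns quit=true once the queue has been shut down and drained.
func (q *itemQueue) Get() (string, bool) {
	item, ok := <-q.items
	return item, !ok
}

// worker drains the queue. Each item is handled inside an immediately invoked
// closure so that `defer q.Done(key)` runs at the end of every iteration; a
// bare defer inside `for {}` would only fire when the goroutine itself
// returns, which is exactly the accumulation the commit avoids.
func worker(q *itemQueue, doneChan chan<- string, clusterName string) {
	for {
		quit := func() bool {
			key, quit := q.Get()
			if quit {
				// signal that this worker has finished and is going out of scope
				doneChan <- clusterName
				return true
			}
			defer q.Done(key)
			fmt.Println("syncing:", key)
			return false
		}()
		if quit {
			return
		}
	}
}

func main() {
	q := newItemQueue()
	doneChan := make(chan string, 1)
	go worker(q, doneChan, "cluster-a")

	q.Add("default/my-service")
	q.Add("default/other-service")
	q.ShutDown()

	fmt.Println("worker finished for:", <-doneChan)
}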


@@ -36,43 +36,43 @@ import (
 // It enforces that the syncHandler is never invoked concurrently with the same key.
 func (sc *ServiceController) clusterServiceWorker() {
 	// process all pending events in serviceWorkerDoneChan
-	eventPending := true
-	for eventPending {
+ForLoop:
+	for {
 		select {
 		case clusterName := <-sc.serviceWorkerDoneChan:
 			sc.serviceWorkerMap[clusterName] = false
 		default:
 			// non-blocking, comes here if all existing events are processed
-			eventPending = false
-			break
+			break ForLoop
 		}
 	}
 	for clusterName, cache := range sc.clusterCache.clientMap {
-		workerExist, keyFound := sc.serviceWorkerMap[clusterName]
-		if keyFound && workerExist {
+		workerExist, found := sc.serviceWorkerMap[clusterName]
+		if found && workerExist {
 			continue
 		}
-		sc.serviceWorkerMap[clusterName] = true
 		// create a worker only if the previous worker has finished and gone out of scope
 		go func(cache *clusterCache, clusterName string) {
 			fedClient := sc.federationClient
 			for {
-				key, quit := cache.serviceQueue.Get()
-				if quit {
-					// send signal that current worker has finished tasks and is going out of scope
-					sc.serviceWorkerDoneChan <- clusterName
-					return
-				}
-				defer cache.serviceQueue.Done(key)
-				err := sc.clusterCache.syncService(key.(string), clusterName, cache, sc.serviceCache, fedClient, sc)
-				if err != nil {
-					glog.Errorf("Failed to sync service: %+v", err)
-				}
+				func() {
+					key, quit := cache.serviceQueue.Get()
+					if quit {
+						// send signal that current worker has finished tasks and is going out of scope
+						sc.serviceWorkerDoneChan <- clusterName
+						return
+					}
+					defer cache.serviceQueue.Done(key)
+					err := sc.clusterCache.syncService(key.(string), clusterName, cache, sc.serviceCache, fedClient, sc)
+					if err != nil {
+						glog.Errorf("Failed to sync service: %+v", err)
+					}
+				}()
 			}
 		}(cache, clusterName)
+		sc.serviceWorkerMap[clusterName] = true
 	}
 }
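The other change common to both files replaces the eventPending flag with a labeled break. In Go, a plain break inside select only leaves the select, which is why the old code needed the flag at all; break ForLoop exits the outer for directly once the non-blocking drain of the done channel falls through to default. A small self-contained sketch of that drain step, with hypothetical names (drainDoneSignals, doneChan, workerRunning):

package main

import "fmt"

// drainDoneSignals marks every worker that has reported completion as no
// longer running. The select with a default case makes each receive
// non-blocking, and the labeled break leaves the outer loop as soon as the
// channel is empty (a plain break would only leave the select).
func drainDoneSignals(doneChan <-chan string, workerRunning map[string]bool) {
ForLoop:
	for {
		select {
		case clusterName := <-doneChan:
			workerRunning[clusterName] = false
		default:
			// no completion signals pending
			break ForLoop
		}
	}
}

func main() {
	doneChan := make(chan string, 4)
	workerRunning := map[string]bool{"cluster-a": true, "cluster-b": true}

	// two workers have finished and signalled on the buffered channel
	doneChan <- "cluster-a"
	doneChan <- "cluster-b"

	drainDoneSignals(doneChan, workerRunning)
	fmt.Println(workerRunning) // map[cluster-a:false cluster-b:false]
}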


@@ -68,7 +68,7 @@ const (
 	KubeAPIQPS      = 20.0
 	KubeAPIBurst    = 30
-	maxNoOfClusters = 256
+	maxNoOfClusters = 100
 )
 
 type cachedService struct {
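The final hunk lowers maxNoOfClusters from 256 to 100. The excerpt does not show how the constant is used; assuming it sizes the buffered endpointWorkerDoneChan/serviceWorkerDoneChan (one slot per possible per-cluster worker), a finishing worker can always send its completion signal without blocking, as in this hedged sketch:

package main

import "fmt"

// Assumed usage, mirroring the done-channel pattern above: with at most one
// worker per cluster and at most maxNoOfClusters clusters, a buffer of this
// size lets every finishing worker send its signal without blocking.
const maxNoOfClusters = 100

func main() {
	serviceWorkerDoneChan := make(chan string, maxNoOfClusters)

	// even if every possible worker finishes before anything drains the
	// channel, all of these sends complete immediately
	for i := 0; i < maxNoOfClusters; i++ {
		serviceWorkerDoneChan <- fmt.Sprintf("cluster-%d", i)
	}
	fmt.Println("buffered completion signals:", len(serviceWorkerDoneChan))
}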