Add an exponential backoff for reading log sizes

This commit is contained in:
gmarek 2015-12-29 10:10:17 +01:00
parent b00b41bc7f
commit 2388787c38

View File

@@ -85,6 +85,7 @@ type LogsSizeData struct {
// WorkItem describes a single log-size probe target queued on the
// gatherer's work channel: which host to SSH into and which log files
// to measure there.
type WorkItem struct {
// IP address of the master or node to SSH into.
ip string
// Paths of the log files whose sizes should be read on that host.
paths []string
// Multiplier applied to the polling period before this item is probed
// again; doubled (capped at 128) after a failed SSH attempt and reset
// to 1 on success, giving exponential backoff for unreachable hosts.
backoffMultiplier int
}
func prepareData(masterAddress string, nodeAddresses []string) LogsSizeData {
@@ -173,11 +174,13 @@ func (v *LogsSizeVerifier) Run() {
v.workChannel <- WorkItem{
ip: v.masterAddress,
paths: masterLogsToCheck,
backoffMultiplier: 1,
}
for _, node := range v.nodeAddresses {
v.workChannel <- WorkItem{
ip: node,
paths: nodeLogsToCheck,
backoffMultiplier: 1,
}
}
for _, worker := range v.workers {
@@ -212,9 +215,13 @@ func (g *LogSizeGatherer) Work() bool {
)
if err != nil {
Logf("Error while trying to SSH to %v, skipping probe. Error: %v", workItem.ip, err)
if workItem.backoffMultiplier < 128 {
workItem.backoffMultiplier *= 2
}
g.workChannel <- workItem
return true
}
workItem.backoffMultiplier = 1
results := strings.Split(sshResult.Stdout, " ")
now := time.Now()
@@ -228,8 +235,12 @@ func (g *LogSizeGatherer) Work() bool {
g.data.AddNewData(workItem.ip, path, now, size)
}
go func() {
time.Sleep(pollingPeriod)
select {
case <-time.After(time.Duration(workItem.backoffMultiplier) * pollingPeriod):
g.workChannel <- workItem
case <-g.stopChannel:
return
}
}()
return true
}