mirror of https://github.com/k8sgpt-ai/k8sgpt.git (synced 2025-09-07 10:11:08 +00:00)
feat: async calls (#311)
* feat: async calls
  Signed-off-by: Alex Jones <alexsimonjones@gmail.com>
* feat: added concurrency settings
  Signed-off-by: Alex Jones <alexsimonjones@gmail.com>
* feat: added in ability to set max concurrency
  Signed-off-by: Alex Jones <alexsimonjones@gmail.com>
---------
Signed-off-by: Alex Jones <alexsimonjones@gmail.com>
Co-authored-by: Matthis <99146727+matthisholleville@users.noreply.github.com>
@@ -20,6 +20,7 @@ import (
 	"os"
 	"reflect"
 	"strings"
+	"sync"
 
 	"github.com/fatih/color"
 	"github.com/k8sgpt-ai/k8sgpt/pkg/ai"
@@ -32,14 +33,15 @@ import (
 )
 
 type Analysis struct {
-	Context   context.Context
-	Filters   []string
-	Client    *kubernetes.Client
-	AIClient  ai.IAI
-	Results   []common.Result
-	Namespace string
-	NoCache   bool
-	Explain   bool
+	Context        context.Context
+	Filters        []string
+	Client         *kubernetes.Client
+	AIClient       ai.IAI
+	Results        []common.Result
+	Namespace      string
+	NoCache        bool
+	Explain        bool
+	MaxConcurrency int
 }
 
 type AnalysisStatus string
@@ -55,7 +57,7 @@ type JsonOutput struct {
 	Results []common.Result `json:"results"`
 }
 
-func NewAnalysis(backend string, language string, filters []string, namespace string, noCache bool, explain bool) (*Analysis, error) {
+func NewAnalysis(backend string, language string, filters []string, namespace string, noCache bool, explain bool, maxConcurrency int) (*Analysis, error) {
 	var configAI ai.AIConfiguration
 	err := viper.UnmarshalKey("ai", &configAI)
 	if err != nil {
@@ -99,13 +101,14 @@ func NewAnalysis(backend string, language string, filters []string, namespace st
 	}
 
 	return &Analysis{
-		Context:   ctx,
-		Filters:   filters,
-		Client:    client,
-		AIClient:  aiClient,
-		Namespace: namespace,
-		NoCache:   noCache,
-		Explain:   explain,
+		Context:        ctx,
+		Filters:        filters,
+		Client:         client,
+		AIClient:       aiClient,
+		Namespace:      namespace,
+		NoCache:        noCache,
+		Explain:        explain,
+		MaxConcurrency: maxConcurrency,
 	}, nil
 }
 
@@ -122,45 +125,86 @@ func (a *Analysis) RunAnalysis() []error {
 	}
 
 	var errorList []error
 
+	semaphore := make(chan struct{}, a.MaxConcurrency)
 	// if there are no filters selected and no active_filters then run all of them
 	if len(a.Filters) == 0 && len(activeFilters) == 0 {
+		var wg sync.WaitGroup
+		var mutex sync.Mutex
 		for _, analyzer := range analyzerMap {
-			results, err := analyzer.Analyze(analyzerConfig)
-			if err != nil {
-				errorList = append(errorList, errors.New(fmt.Sprintf("[%s] %s", reflect.TypeOf(analyzer).Name(), err)))
-			}
-			a.Results = append(a.Results, results...)
-		}
-		return errorList
-	}
-
-	// if the filters flag is specified
-	if len(a.Filters) != 0 {
-		for _, filter := range a.Filters {
-			if analyzer, ok := analyzerMap[filter]; ok {
+			wg.Add(1)
+			semaphore <- struct{}{}
+			go func(analyzer common.IAnalyzer, wg *sync.WaitGroup, semaphore chan struct{}) {
+				defer wg.Done()
 				results, err := analyzer.Analyze(analyzerConfig)
 				if err != nil {
-					errorList = append(errorList, errors.New(fmt.Sprintf("[%s] %s", filter, err)))
+					mutex.Lock()
+					errorList = append(errorList, fmt.Errorf(fmt.Sprintf("[%s] %s", reflect.TypeOf(analyzer).Name(), err)))
+					mutex.Unlock()
 				}
+				mutex.Lock()
 				a.Results = append(a.Results, results...)
+				mutex.Unlock()
+				<-semaphore
+			}(analyzer, &wg, semaphore)
+
 		}
+		wg.Wait()
+		return errorList
+	}
+	semaphore = make(chan struct{}, a.MaxConcurrency)
+	// if the filters flag is specified
+	if len(a.Filters) != 0 {
+		var wg sync.WaitGroup
+		var mutex sync.Mutex
+		for _, filter := range a.Filters {
+			if analyzer, ok := analyzerMap[filter]; ok {
+				semaphore <- struct{}{}
+				wg.Add(1)
+				go func(analyzer common.IAnalyzer) {
+					defer wg.Done()
+					results, err := analyzer.Analyze(analyzerConfig)
+					if err != nil {
+						mutex.Lock()
+						errorList = append(errorList, fmt.Errorf(fmt.Sprintf("[%s] %s", filter, err)))
+						mutex.Unlock()
+					}
+					mutex.Lock()
+					a.Results = append(a.Results, results...)
+					mutex.Unlock()
+					<-semaphore
+				}(analyzer)
 			} else {
-				errorList = append(errorList, errors.New(fmt.Sprintf("\"%s\" filter does not exist. Please run k8sgpt filters list.", filter)))
+				errorList = append(errorList, fmt.Errorf(fmt.Sprintf("\"%s\" filter does not exist. Please run k8sgpt filters list.", filter)))
			}
 		}
+		wg.Wait()
 		return errorList
 	}
 
+	var wg sync.WaitGroup
+	var mutex sync.Mutex
+	semaphore = make(chan struct{}, a.MaxConcurrency)
 	// use active_filters
 	for _, filter := range activeFilters {
 		if analyzer, ok := analyzerMap[filter]; ok {
-			results, err := analyzer.Analyze(analyzerConfig)
-			if err != nil {
-				errorList = append(errorList, errors.New(fmt.Sprintf("[%s] %s", filter, err)))
-			}
-			a.Results = append(a.Results, results...)
+			semaphore <- struct{}{}
+			wg.Add(1)
+			go func(analyzer common.IAnalyzer) {
+				defer wg.Done()
+				results, err := analyzer.Analyze(analyzerConfig)
+				if err != nil {
+					mutex.Lock()
+					errorList = append(errorList, fmt.Errorf("[%s] %s", filter, err))
+					mutex.Unlock()
+				}
+				mutex.Lock()
+				a.Results = append(a.Results, results...)
+				mutex.Unlock()
+				<-semaphore
+			}(analyzer)
 		}
 	}
+	wg.Wait()
 	return errorList
 }
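For readers skimming the diff, the concurrency change boils down to one pattern repeated in each branch of RunAnalysis: a buffered channel used as a counting semaphore to cap in-flight goroutines at MaxConcurrency, a sync.WaitGroup to wait for them all, and a sync.Mutex to serialize appends to the shared results and error slices. Below is a minimal, self-contained sketch of that pattern outside the k8sgpt codebase; the runAll function, task strings, and the fake work inside the goroutine are illustrative assumptions, not part of this commit.

// bounded_concurrency_sketch.go: illustrative only, not k8sgpt code.
package main

import (
	"fmt"
	"sync"
	"time"
)

func runAll(tasks []string, maxConcurrency int) ([]string, []error) {
	var (
		wg        sync.WaitGroup
		mutex     sync.Mutex
		results   []string
		errorList []error
	)
	// Buffered channel as a counting semaphore: at most maxConcurrency
	// sends can be outstanding at once.
	semaphore := make(chan struct{}, maxConcurrency)

	for _, t := range tasks {
		semaphore <- struct{}{} // blocks once maxConcurrency goroutines are already running
		wg.Add(1)
		go func(t string) {
			defer wg.Done()
			defer func() { <-semaphore }() // release the slot when this goroutine finishes

			// Stand-in for analyzer.Analyze(analyzerConfig): pretend each task takes a while.
			time.Sleep(10 * time.Millisecond)
			out := fmt.Sprintf("analyzed %s", t)
			var err error

			// Appends to the shared slices must be serialized across goroutines.
			mutex.Lock()
			if err != nil {
				errorList = append(errorList, fmt.Errorf("[%s] %s", t, err))
			}
			results = append(results, out)
			mutex.Unlock()
		}(t)
	}
	wg.Wait()
	return results, errorList
}

func main() {
	results, errs := runAll([]string{"Pod", "Service", "Ingress"}, 2)
	fmt.Println(results, errs)
}

One difference from the commit itself: the sketch releases the semaphore slot with a deferred receive, which keeps the cap correct even on early return, whereas the code above releases it with a plain <-semaphore at the end of each goroutine body.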