Mirror of https://github.com/k8sgpt-ai/k8sgpt.git (synced 2025-04-27 19:15:24 +00:00)
feat: get official field doc (#457)
* fix(deps): update module github.com/aws/aws-sdk-go to v1.44.267 (#451) Signed-off-by: Renovate Bot <bot@renovateapp.com> Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Signed-off-by: David Sabatie <david.sabatie@notrenet.com>
* feat: get official field doc Signed-off-by: David Sabatie <david.sabatie@notrenet.com>
* feat: use schema from server Signed-off-by: David Sabatie <david.sabatie@notrenet.com>
* feat: add configuration api route (#459)
* feat: add configuration api route Signed-off-by: Matthis Holleville <matthish29@gmail.com>
* feat: rename cache methods Signed-off-by: Matthis Holleville <matthish29@gmail.com>
--------- Signed-off-by: Matthis Holleville <matthish29@gmail.com> Signed-off-by: David Sabatie <david.sabatie@notrenet.com>
* fix(deps): update module github.com/aws/aws-sdk-go to v1.44.269 (#458) Signed-off-by: Renovate Bot <bot@renovateapp.com> Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Signed-off-by: David Sabatie <david.sabatie@notrenet.com>
* fix: updated list.go to handle k8sgpt cache list crashing issue (#455)
* Update list.go Signed-off-by: Krishna Dutt Panchagnula <krishnadutt123@gmail.com>
* fix: updated list.go to handle k8sgpt cache list crashing issue Signed-off-by: Krishna Dutt Panchagnula <krishnadutt123@gmail.com>
--------- Signed-off-by: Krishna Dutt Panchagnula <krishnadutt123@gmail.com> Co-authored-by: Alex Jones <alexsimonjones@gmail.com> Signed-off-by: David Sabatie <david.sabatie@notrenet.com>
* chore(main): release 0.3.5 (#452) Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> Signed-off-by: David Sabatie <david.sabatie@notrenet.com>
* chore(deps): update google-github-actions/release-please-action digest to 51ee8ae (#464) Signed-off-by: Renovate Bot <bot@renovateapp.com> Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Signed-off-by: David Sabatie <david.sabatie@notrenet.com>
* fix: name of sa reference in deployment (#468) Signed-off-by: Johannes Kleinlercher <johannes@kleinlercher.at> Signed-off-by: David Sabatie <david.sabatie@notrenet.com>
* fix(deps): update module github.com/aws/aws-sdk-go to v1.44.270 (#465) Signed-off-by: Renovate Bot <bot@renovateapp.com> Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Signed-off-by: David Sabatie <david.sabatie@notrenet.com>
* fix: typo (#463) Signed-off-by: Rakshit Gondwal <rakshitgondwal3@gmail.com> Co-authored-by: Thomas Schuetz <38893055+thschue@users.noreply.github.com> Signed-off-by: David Sabatie <david.sabatie@notrenet.com>
* fix(deps): update module github.com/aws/aws-sdk-go to v1.44.271 (#469) Signed-off-by: Renovate Bot <bot@renovateapp.com> Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Signed-off-by: David Sabatie <david.sabatie@notrenet.com>
* fix(deps): update module github.com/aws/aws-sdk-go to v1.44.269 (#458) Signed-off-by: Renovate Bot <bot@renovateapp.com> Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
* fix(deps): update module github.com/aws/aws-sdk-go to v1.44.270 (#465) Signed-off-by: Renovate Bot <bot@renovateapp.com> Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Signed-off-by: David Sabatie <david.sabatie@notrenet.com>
* fix(deps): update module github.com/aws/aws-sdk-go to v1.44.271 (#469) Signed-off-by: Renovate Bot <bot@renovateapp.com> Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Signed-off-by: David Sabatie <david.sabatie@notrenet.com>
* feat: Add with-doc flag to enable/disable kubernetes doc Signed-off-by: David Sabatie <david.sabatie@notrenet.com>
* use fmt.Sprintf in apireference.go Signed-off-by: David Sabatie <david.sabatie@notrenet.com>
* add --with-doc to readme Signed-off-by: David Sabatie <david.sabatie@notrenet.com>
---------
Signed-off-by: Renovate Bot <bot@renovateapp.com>
Signed-off-by: David Sabatie <david.sabatie@notrenet.com>
Signed-off-by: Matthis Holleville <matthish29@gmail.com>
Signed-off-by: Krishna Dutt Panchagnula <krishnadutt123@gmail.com>
Signed-off-by: Johannes Kleinlercher <johannes@kleinlercher.at>
Signed-off-by: Rakshit Gondwal <rakshitgondwal3@gmail.com>
Signed-off-by: golgoth31 <golgoth31@users.noreply.github.com>
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: Matthis <99146727+matthisholleville@users.noreply.github.com>
Co-authored-by: Krishna Dutt Panchagnula <krishnadutt123@gmail.com>
Co-authored-by: Alex Jones <alexsimonjones@gmail.com>
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
Co-authored-by: Johannes Kleinlercher <johannes@kleinlercher.at>
Co-authored-by: Rakshit Gondwal <98955085+rakshitgondwal@users.noreply.github.com>
Co-authored-by: Thomas Schuetz <38893055+thschue@users.noreply.github.com>
This commit is contained in:
parent 6052a5b4d7
commit f9621af7e4

20 README.md
@@ -128,6 +128,7 @@ _This mode of operation is ideal for continuous monitoring of your cluster and c
* Run `k8sgpt filters` to manage the active filters used by the analyzer. By default, all filters are executed during analysis.
* Run `k8sgpt analyze` to run a scan.
* And use `k8sgpt analyze --explain` to get a more detailed explanation of the issues.
* You can also run `k8sgpt analyze --with-doc` (with or without the explain flag) to get the official documentation from Kubernetes.

## Analyzers
@@ -163,6 +164,7 @@ _Run a scan with the default analyzers_
k8sgpt generate
k8sgpt auth add
k8sgpt analyze --explain
k8sgpt analyze --explain --with-doc
```

_Filter on resource_
@@ -279,7 +281,7 @@ curl -X GET "http://localhost:8080/analyze?namespace=k8sgpt&explain=false"
<details>
<summary> LocalAI provider </summary>

To run local models, it is possible to use OpenAI compatible APIs, for instance [LocalAI](https://github.com/go-skynet/LocalAI) which uses [llama.cpp](https://github.com/ggerganov/llama.cpp) and [ggml](https://github.com/ggerganov/ggml) to run inference on consumer-grade hardware. Models supported by LocalAI for instance are Vicuna, Alpaca, LLaMA, Cerebras, GPT4ALL, GPT4ALL-J and koala.

To run local inference, you need to download the models first, for instance you can find `ggml` compatible models in [huggingface.com](https://huggingface.co/models?search=ggml) (for example vicuna, alpaca and koala).
@@ -309,16 +311,16 @@ k8sgpt analyze --explain --backend localai

<em>Prerequisites:</em> an Azure OpenAI deployment is needed, please visit MS official [documentation](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal#create-a-resource) to create your own.

To authenticate with k8sgpt, you will need the Azure OpenAI endpoint of your tenant `"https://your Azure OpenAI Endpoint"`, the api key to access your deployment, the deployment name of your model and the model name itself.

To run k8sgpt, run `k8sgpt auth` with the `azureopenai` backend:
```
k8sgpt auth add --backend azureopenai --baseurl https://<your Azure OpenAI endpoint> --engine <deployment_name> --model <model_name>
```
Lastly, enter your Azure API key after the prompt.

Now you are ready to analyze with the azure openai backend:
```
k8sgpt analyze --explain --backend azureopenai
```
@@ -395,31 +397,31 @@ The Kubernetes system is trying to scale a StatefulSet named fake-deployment usi

Config file locations:
| OS | Path |
|---------|--------------------------------------------------|
| ------- | ------------------------------------------------ |
| MacOS | ~/Library/Application Support/k8sgpt/k8sgpt.yaml |
| Linux | ~/.config/k8sgpt/k8sgpt.yaml |
| Windows | %LOCALAPPDATA%/k8sgpt/k8sgpt.yaml |
</details>

<details>
There may be scenarios where caching remotely is preferred.
In these scenarios K8sGPT supports AWS S3 Integration.

<summary> Remote caching </summary>

_As a prerequisite `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` are required as environmental variables._

_Adding a remote cache_
Note: this will create the bucket if it does not exist
```
k8sgpt cache add --region <aws region> --bucket <name>
```

_Listing cache items_
```
k8sgpt cache list
```

_Removing the remote cache_
Note: this will not delete the bucket
```
@@ -32,6 +32,7 @@ var (
namespace string
anonymize bool
maxConcurrency int
withDoc bool
)

// AnalyzeCmd represents the problems command
@@ -45,7 +46,7 @@ var AnalyzeCmd = &cobra.Command{

// AnalysisResult configuration
config, err := analysis.NewAnalysis(backend,
language, filters, namespace, nocache, explain, maxConcurrency)
language, filters, namespace, nocache, explain, maxConcurrency, withDoc)
if err != nil {
color.Red("Error: %v", err)
os.Exit(1)
@@ -91,4 +92,6 @@ func init() {
AnalyzeCmd.Flags().StringVarP(&language, "language", "l", "english", "Languages to use for AI (e.g. 'English', 'Spanish', 'French', 'German', 'Italian', 'Portuguese', 'Dutch', 'Russian', 'Chinese', 'Japanese', 'Korean')")
// add max concurrency
AnalyzeCmd.Flags().IntVarP(&maxConcurrency, "max-concurrency", "m", 10, "Maximum number of concurrent requests to the Kubernetes API server")
// kubernetes doc flag
AnalyzeCmd.Flags().BoolVarP(&withDoc, "with-doc", "d", false, "Give me the official documentation of the involved field")
}
2 go.mod
@@ -75,7 +75,7 @@ require (
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/google/btree v1.1.2 // indirect
github.com/google/gnostic v0.6.9 // indirect
github.com/google/gnostic v0.6.9
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/go-containerregistry v0.14.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect
@@ -23,6 +23,7 @@ import (
"sync"

"github.com/fatih/color"
openapi_v2 "github.com/google/gnostic/openapiv2"
"github.com/k8sgpt-ai/k8sgpt/pkg/ai"
"github.com/k8sgpt-ai/k8sgpt/pkg/analyzer"
"github.com/k8sgpt-ai/k8sgpt/pkg/cache"
@@ -45,6 +46,7 @@ type Analysis struct {
Explain bool
MaxConcurrency int
AnalysisAIProvider string // The name of the AI Provider used for this analysis
WithDoc bool
}

type AnalysisStatus string
@@ -63,7 +65,7 @@ type JsonOutput struct {
Results []common.Result `json:"results"`
}

func NewAnalysis(backend string, language string, filters []string, namespace string, noCache bool, explain bool, maxConcurrency int) (*Analysis, error) {
func NewAnalysis(backend string, language string, filters []string, namespace string, noCache bool, explain bool, maxConcurrency int, withDoc bool) (*Analysis, error) {
var configAI ai.AIConfiguration
err := viper.UnmarshalKey("ai", &configAI)
if err != nil {
@@ -128,6 +130,7 @@ func NewAnalysis(backend string, language string, filters []string, namespace st
Explain: explain,
MaxConcurrency: maxConcurrency,
AnalysisAIProvider: backend,
WithDoc: withDoc,
}, nil
}

@@ -136,11 +139,23 @@ func (a *Analysis) RunAnalysis() {

coreAnalyzerMap, analyzerMap := analyzer.GetAnalyzerMap()

// we get the openapi schema from the server only if required by the flag "with-doc"
openapiSchema := &openapi_v2.Document{}
if a.WithDoc {
var openApiErr error

openapiSchema, openApiErr = a.Client.Client.Discovery().OpenAPISchema()
if openApiErr != nil {
a.Errors = append(a.Errors, fmt.Sprintf("[KubernetesDoc] %s", openApiErr))
}
}

analyzerConfig := common.Analyzer{
Client: a.Client,
Context: a.Context,
Namespace: a.Namespace,
AIClient: a.AIClient,
Client: a.Client,
Context: a.Context,
Namespace: a.Namespace,
AIClient: a.AIClient,
OpenapiSchema: openapiSchema,
}

semaphore := make(chan struct{}, a.MaxConcurrency)
@@ -78,6 +78,9 @@ func (a *Analysis) textOutput() ([]byte, error) {
color.YellowString(result.Name), color.CyanString(result.ParentObject)))
for _, err := range result.Error {
output.WriteString(fmt.Sprintf("- %s %s\n", color.RedString("Error:"), color.RedString(err.Text)))
if err.KubernetesDoc != "" {
output.WriteString(fmt.Sprintf(" %s %s\n", color.RedString("Kubernetes Doc:"), color.RedString(err.KubernetesDoc)))
}
}
output.WriteString(color.GreenString(result.Details + "\n"))
}
@@ -18,9 +18,11 @@ import (
"time"

"github.com/k8sgpt-ai/k8sgpt/pkg/common"
"github.com/k8sgpt-ai/k8sgpt/pkg/kubernetes"
"github.com/k8sgpt-ai/k8sgpt/pkg/util"
cron "github.com/robfig/cron/v3"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
)

type CronJobAnalyzer struct{}
@@ -28,6 +30,14 @@ type CronJobAnalyzer struct{}
func (analyzer CronJobAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {

kind := "CronJob"
apiDoc := kubernetes.K8sApiReference{
Kind: kind,
ApiVersion: schema.GroupVersion{
Group: "batch",
Version: "v1",
},
OpenapiSchema: a.OpenapiSchema,
}

AnalyzerErrorsMetric.DeletePartialMatch(map[string]string{
"analyzer_name": kind,
@@ -43,8 +53,11 @@ func (analyzer CronJobAnalyzer) Analyze(a common.Analyzer) ([]common.Result, err
for _, cronJob := range cronJobList.Items {
var failures []common.Failure
if cronJob.Spec.Suspend != nil && *cronJob.Spec.Suspend {
doc := apiDoc.GetApiDocV2("spec.suspend")

failures = append(failures, common.Failure{
Text: fmt.Sprintf("CronJob %s is suspended", cronJob.Name),
KubernetesDoc: doc,
Sensitive: []common.Sensitive{
{
Unmasked: cronJob.Namespace,
@@ -59,8 +72,11 @@ func (analyzer CronJobAnalyzer) Analyze(a common.Analyzer) ([]common.Result, err
} else {
// check the schedule format
if _, err := CheckCronScheduleIsValid(cronJob.Spec.Schedule); err != nil {
doc := apiDoc.GetApiDocV2("spec.schedule")

failures = append(failures, common.Failure{
Text: fmt.Sprintf("CronJob %s has an invalid schedule: %s", cronJob.Name, err.Error()),
KubernetesDoc: doc,
Sensitive: []common.Sensitive{
{
Unmasked: cronJob.Namespace,
@@ -78,9 +94,11 @@ func (analyzer CronJobAnalyzer) Analyze(a common.Analyzer) ([]common.Result, err
if cronJob.Spec.StartingDeadlineSeconds != nil {
deadline := time.Duration(*cronJob.Spec.StartingDeadlineSeconds) * time.Second
if deadline < 0 {
doc := apiDoc.GetApiDocV2("spec.startingDeadlineSeconds")

failures = append(failures, common.Failure{
Text: fmt.Sprintf("CronJob %s has a negative starting deadline", cronJob.Name),
KubernetesDoc: doc,
Sensitive: []common.Sensitive{
{
Unmasked: cronJob.Namespace,
@@ -18,8 +18,10 @@ import (
"fmt"

v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"

"github.com/k8sgpt-ai/k8sgpt/pkg/common"
"github.com/k8sgpt-ai/k8sgpt/pkg/kubernetes"
"github.com/k8sgpt-ai/k8sgpt/pkg/util"
)

@@ -31,6 +33,14 @@ type DeploymentAnalyzer struct {
func (d DeploymentAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {

kind := "Deployment"
apiDoc := kubernetes.K8sApiReference{
Kind: kind,
ApiVersion: schema.GroupVersion{
Group: "apps",
Version: "v1",
},
OpenapiSchema: a.OpenapiSchema,
}

AnalyzerErrorsMetric.DeletePartialMatch(map[string]string{
"analyzer_name": kind,
@@ -45,8 +55,11 @@ func (d DeploymentAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error)
for _, deployment := range deployments.Items {
var failures []common.Failure
if *deployment.Spec.Replicas != deployment.Status.Replicas {
doc := apiDoc.GetApiDocV2("spec.replicas")

failures = append(failures, common.Failure{
Text: fmt.Sprintf("Deployment %s/%s has %d replicas but %d are available", deployment.Namespace, deployment.Name, *deployment.Spec.Replicas, deployment.Status.Replicas),
KubernetesDoc: doc,
Sensitive: []common.Sensitive{
{
Unmasked: deployment.Namespace,
@@ -17,10 +17,12 @@ import (
"fmt"

"github.com/k8sgpt-ai/k8sgpt/pkg/common"
"github.com/k8sgpt-ai/k8sgpt/pkg/kubernetes"
"github.com/k8sgpt-ai/k8sgpt/pkg/util"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
)

type HpaAnalyzer struct{}
@@ -28,6 +30,14 @@ type HpaAnalyzer struct{}
func (HpaAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {

kind := "HorizontalPodAutoscaler"
apiDoc := kubernetes.K8sApiReference{
Kind: kind,
ApiVersion: schema.GroupVersion{
Group: "autoscaling",
Version: "v1",
},
OpenapiSchema: a.OpenapiSchema,
}

AnalyzerErrorsMetric.DeletePartialMatch(map[string]string{
"analyzer_name": kind,
@@ -76,8 +86,11 @@ func (HpaAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {
}

if podInfo == nil {
doc := apiDoc.GetApiDocV2("spec.scaleTargetRef")

failures = append(failures, common.Failure{
Text: fmt.Sprintf("HorizontalPodAutoscaler uses %s/%s as ScaleTargetRef which does not exist.", scaleTargetRef.Kind, scaleTargetRef.Name),
KubernetesDoc: doc,
Sensitive: []common.Sensitive{
{
Unmasked: scaleTargetRef.Name,
@@ -94,8 +107,11 @@ func (HpaAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {
}

if containers <= 0 {
doc := apiDoc.GetApiDocV2("spec.scaleTargetRef.kind")

failures = append(failures, common.Failure{
Text: fmt.Sprintf("%s %s/%s does not have resource configured.", scaleTargetRef.Kind, a.Namespace, scaleTargetRef.Name),
KubernetesDoc: doc,
Sensitive: []common.Sensitive{
{
Unmasked: scaleTargetRef.Name,
@@ -17,8 +17,10 @@ import (
"fmt"

"github.com/k8sgpt-ai/k8sgpt/pkg/common"
"github.com/k8sgpt-ai/k8sgpt/pkg/kubernetes"
"github.com/k8sgpt-ai/k8sgpt/pkg/util"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
)

type IngressAnalyzer struct{}
@@ -26,6 +28,14 @@ type IngressAnalyzer struct{}
func (IngressAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {

kind := "Ingress"
apiDoc := kubernetes.K8sApiReference{
Kind: kind,
ApiVersion: schema.GroupVersion{
Group: "networking",
Version: "v1",
},
OpenapiSchema: a.OpenapiSchema,
}

AnalyzerErrorsMetric.DeletePartialMatch(map[string]string{
"analyzer_name": kind,
@@ -46,8 +56,11 @@ func (IngressAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {
if ingressClassName == nil {
ingClassValue := ing.Annotations["kubernetes.io/ingress.class"]
if ingClassValue == "" {
doc := apiDoc.GetApiDocV2("spec.ingressClassName")

failures = append(failures, common.Failure{
Text: fmt.Sprintf("Ingress %s/%s does not specify an Ingress class.", ing.Namespace, ing.Name),
KubernetesDoc: doc,
Sensitive: []common.Sensitive{
{
Unmasked: ing.Namespace,
@@ -68,8 +81,11 @@ func (IngressAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {
if ingressClassName != nil {
_, err := a.Client.GetClient().NetworkingV1().IngressClasses().Get(a.Context, *ingressClassName, metav1.GetOptions{})
if err != nil {
doc := apiDoc.GetApiDocV2("spec.ingressClassName")

failures = append(failures, common.Failure{
Text: fmt.Sprintf("Ingress uses the ingress class %s which does not exist.", *ingressClassName),
KubernetesDoc: doc,
Sensitive: []common.Sensitive{
{
Unmasked: *ingressClassName,
@@ -86,8 +102,11 @@ func (IngressAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {
for _, path := range rule.HTTP.Paths {
_, err := a.Client.GetClient().CoreV1().Services(ing.Namespace).Get(a.Context, path.Backend.Service.Name, metav1.GetOptions{})
if err != nil {
doc := apiDoc.GetApiDocV2("spec.rules.http.paths.backend.service")

failures = append(failures, common.Failure{
Text: fmt.Sprintf("Ingress uses the service %s/%s which does not exist.", ing.Namespace, path.Backend.Service.Name),
KubernetesDoc: doc,
Sensitive: []common.Sensitive{
{
Unmasked: ing.Namespace,
@@ -106,8 +125,11 @@ func (IngressAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {
for _, tls := range ing.Spec.TLS {
_, err := a.Client.GetClient().CoreV1().Secrets(ing.Namespace).Get(a.Context, tls.SecretName, metav1.GetOptions{})
if err != nil {
doc := apiDoc.GetApiDocV2("spec.tls.secretName")

failures = append(failures, common.Failure{
Text: fmt.Sprintf("Ingress uses the secret %s/%s as a TLS certificate which does not exist.", ing.Namespace, tls.SecretName),
KubernetesDoc: doc,
Sensitive: []common.Sensitive{
{
Unmasked: ing.Namespace,
@@ -17,8 +17,10 @@ import (
"fmt"

"github.com/k8sgpt-ai/k8sgpt/pkg/common"
"github.com/k8sgpt-ai/k8sgpt/pkg/kubernetes"
"github.com/k8sgpt-ai/k8sgpt/pkg/util"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
)

type NetworkPolicyAnalyzer struct{}
@@ -26,6 +28,14 @@ type NetworkPolicyAnalyzer struct{}
func (NetworkPolicyAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {

kind := "NetworkPolicy"
apiDoc := kubernetes.K8sApiReference{
Kind: kind,
ApiVersion: schema.GroupVersion{
Group: "networking",
Version: "v1",
},
OpenapiSchema: a.OpenapiSchema,
}

AnalyzerErrorsMetric.DeletePartialMatch(map[string]string{
"analyzer_name": kind,
@@ -45,8 +55,11 @@ func (NetworkPolicyAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error)

// Check if policy allows traffic to all pods in the namespace
if len(policy.Spec.PodSelector.MatchLabels) == 0 {
doc := apiDoc.GetApiDocV2("spec.podSelector.matchLabels")

failures = append(failures, common.Failure{
Text: fmt.Sprintf("Network policy allows traffic to all pods: %s", policy.Name),
KubernetesDoc: doc,
Sensitive: []common.Sensitive{
{
Unmasked: policy.Name,
@@ -17,8 +17,10 @@ import (
"fmt"

"github.com/k8sgpt-ai/k8sgpt/pkg/common"
"github.com/k8sgpt-ai/k8sgpt/pkg/kubernetes"
"github.com/k8sgpt-ai/k8sgpt/pkg/util"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
)

type PdbAnalyzer struct{}
@@ -26,6 +28,14 @@ type PdbAnalyzer struct{}
func (PdbAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {

kind := "PodDisruptionBudget"
apiDoc := kubernetes.K8sApiReference{
Kind: kind,
ApiVersion: schema.GroupVersion{
Group: "policy",
Version: "v1",
},
OpenapiSchema: a.OpenapiSchema,
}

AnalyzerErrorsMetric.DeletePartialMatch(map[string]string{
"analyzer_name": kind,
@@ -49,8 +59,11 @@ func (PdbAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {
if evt.Reason == "NoPods" && evt.Message != "" {
if pdb.Spec.Selector != nil {
for k, v := range pdb.Spec.Selector.MatchLabels {
doc := apiDoc.GetApiDocV2("spec.selector.matchLabels")

failures = append(failures, common.Failure{
Text: fmt.Sprintf("%s, expected label %s=%s", evt.Message, k, v),
KubernetesDoc: doc,
Sensitive: []common.Sensitive{
{
Unmasked: k,
@@ -64,15 +77,21 @@ func (PdbAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {
})
}
for _, v := range pdb.Spec.Selector.MatchExpressions {
doc := apiDoc.GetApiDocV2("spec.selector.matchExpressions")

failures = append(failures, common.Failure{
Text: fmt.Sprintf("%s, expected expression %s", evt.Message, v),
Sensitive: []common.Sensitive{},
Text: fmt.Sprintf("%s, expected expression %s", evt.Message, v),
KubernetesDoc: doc,
Sensitive: []common.Sensitive{},
})
}
} else {
doc := apiDoc.GetApiDocV2("spec.selector")

failures = append(failures, common.Failure{
Text: fmt.Sprintf("%s, selector is nil", evt.Message),
Sensitive: []common.Sensitive{},
Text: fmt.Sprintf("%s, selector is nil", evt.Message),
KubernetesDoc: doc,
Sensitive: []common.Sensitive{},
})
}
}
@@ -18,8 +18,10 @@ import (

"github.com/fatih/color"
"github.com/k8sgpt-ai/k8sgpt/pkg/common"
"github.com/k8sgpt-ai/k8sgpt/pkg/kubernetes"
"github.com/k8sgpt-ai/k8sgpt/pkg/util"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
)

type ServiceAnalyzer struct{}
@@ -27,6 +29,14 @@ type ServiceAnalyzer struct{}
func (ServiceAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {

kind := "Service"
apiDoc := kubernetes.K8sApiReference{
Kind: kind,
ApiVersion: schema.GroupVersion{
Group: "",
Version: "v1",
},
OpenapiSchema: a.OpenapiSchema,
}

AnalyzerErrorsMetric.DeletePartialMatch(map[string]string{
"analyzer_name": kind,
@@ -52,8 +62,11 @@ func (ServiceAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {
}

for k, v := range svc.Spec.Selector {
doc := apiDoc.GetApiDocV2("spec.selector")

failures = append(failures, common.Failure{
Text: fmt.Sprintf("Service has no endpoints, expected label %s=%s", k, v),
KubernetesDoc: doc,
Sensitive: []common.Sensitive{
{
Unmasked: k,
@@ -72,14 +85,20 @@ func (ServiceAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {

// Check through container status to check for crashes
for _, epSubset := range ep.Subsets {
apiDoc.Kind = "Endpoints"

if len(epSubset.NotReadyAddresses) > 0 {
for _, addresses := range epSubset.NotReadyAddresses {
count++
pods = append(pods, addresses.TargetRef.Kind+"/"+addresses.TargetRef.Name)
}

doc := apiDoc.GetApiDocV2("subsets.notReadyAddresses")

failures = append(failures, common.Failure{
Text: fmt.Sprintf("Service has not ready endpoints, pods: %s, expected %d", pods, count),
Sensitive: []common.Sensitive{},
Text: fmt.Sprintf("Service has not ready endpoints, pods: %s, expected %d", pods, count),
KubernetesDoc: doc,
Sensitive: []common.Sensitive{},
})
}
}
@@ -17,8 +17,10 @@ import (
"fmt"

"github.com/k8sgpt-ai/k8sgpt/pkg/common"
"github.com/k8sgpt-ai/k8sgpt/pkg/kubernetes"
"github.com/k8sgpt-ai/k8sgpt/pkg/util"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
)

type StatefulSetAnalyzer struct{}
@@ -26,6 +28,14 @@ type StatefulSetAnalyzer struct{}
func (StatefulSetAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {

kind := "StatefulSet"
apiDoc := kubernetes.K8sApiReference{
Kind: kind,
ApiVersion: schema.GroupVersion{
Group: "apps",
Version: "v1",
},
OpenapiSchema: a.OpenapiSchema,
}

AnalyzerErrorsMetric.DeletePartialMatch(map[string]string{
"analyzer_name": kind,
@@ -44,8 +54,15 @@ func (StatefulSetAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {
serviceName := sts.Spec.ServiceName
_, err := a.Client.GetClient().CoreV1().Services(sts.Namespace).Get(a.Context, serviceName, metav1.GetOptions{})
if err != nil {
doc := apiDoc.GetApiDocV2("spec.serviceName")

failures = append(failures, common.Failure{
Text: fmt.Sprintf("StatefulSet uses the service %s/%s which does not exist.", sts.Namespace, serviceName),
Text: fmt.Sprintf(
"StatefulSet uses the service %s/%s which does not exist.",
sts.Namespace,
serviceName,
),
KubernetesDoc: doc,
Sensitive: []common.Sensitive{
{
Unmasked: sts.Namespace,
@@ -17,6 +17,7 @@ import (
"context"

trivy "github.com/aquasecurity/trivy-operator/pkg/apis/aquasecurity/v1alpha1"
openapi_v2 "github.com/google/gnostic/openapiv2"
"github.com/k8sgpt-ai/k8sgpt/pkg/ai"
"github.com/k8sgpt-ai/k8sgpt/pkg/kubernetes"
appsv1 "k8s.io/api/apps/v1"
@@ -31,12 +32,13 @@ type IAnalyzer interface {
}

type Analyzer struct {
Client *kubernetes.Client
Context context.Context
Namespace string
AIClient ai.IAI
PreAnalysis map[string]PreAnalysis
Results []Result
Client *kubernetes.Client
Context context.Context
Namespace string
AIClient ai.IAI
PreAnalysis map[string]PreAnalysis
Results []Result
OpenapiSchema *openapi_v2.Document
}

type PreAnalysis struct {
@@ -65,8 +67,9 @@ type Result struct {
}

type Failure struct {
Text string
Sensitive []Sensitive
Text string
KubernetesDoc string
Sensitive []Sensitive
}

type Sensitive struct {
70 pkg/kubernetes/apireference.go (Normal file)
@@ -0,0 +1,70 @@
package kubernetes

import (
"fmt"
"strings"

openapi_v2 "github.com/google/gnostic/openapiv2"
)

func (k *K8sApiReference) GetApiDocV2(field string) string {
startPoint := ""
// the path must be formatted like "path1.path2.path3"
paths := strings.Split(field, ".")
group := strings.Split(k.ApiVersion.Group, ".")
definitions := k.OpenapiSchema.GetDefinitions().GetAdditionalProperties()

// extract the start point by searching the highest leaf corresponding to the requested group and kind
for _, prop := range definitions {
if strings.HasSuffix(prop.GetName(), fmt.Sprintf("%s.%s.%s", group[0], k.ApiVersion.Version, k.Kind)) {
startPoint = prop.GetName()

break
}
}

// recursively parse the definitions to find the description of the last part of the given path
description := k.recursePath(definitions, startPoint, paths)

return description
}

func (k *K8sApiReference) recursePath(definitions []*openapi_v2.NamedSchema, leaf string, paths []string) string {
description := ""

for _, prop := range definitions {
// search the requested leaf
if prop.GetName() == leaf {
for _, addProp := range prop.GetValue().GetProperties().GetAdditionalProperties() {
// search the additional property of the leaf corresponding to the current path
if addProp.GetName() == paths[0] {
// the last path element, or a string value: we take the description and stop
if len(paths) == 1 || addProp.GetValue().GetType().String() == "value:\"string\"" {
// extract the path description as we are at the end of the paths
description = addProp.GetValue().Description
} else {
// the path is an object, we extract the xref
if addProp.GetValue().GetXRef() != "" {
splitRef := strings.Split(addProp.GetValue().GetXRef(), "/")
reducedPaths := paths[1:]
description = k.recursePath(definitions, splitRef[len(splitRef)-1], reducedPaths)
}

// the path is an array, we take the first xref from the items
if len(addProp.GetValue().GetItems().GetSchema()) == 1 {
splitRef := strings.Split(addProp.GetValue().GetItems().GetSchema()[0].GetXRef(), "/")
reducedPaths := paths[1:]
description = k.recursePath(definitions, splitRef[len(splitRef)-1], reducedPaths)
}
}

break
}
}

break
}
}

return description
}
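For orientation, here is a minimal sketch of how this helper is meant to be consumed, mirroring the CronJob analyzer change above. The `describeSuspendField` wrapper and its `schemaDoc` argument are illustrative only; in k8sgpt itself the schema is fetched via the `Discovery().OpenAPISchema()` call added to `RunAnalysis`, and the returned description ends up in `common.Failure.KubernetesDoc`.

```go
package main

import (
	"fmt"

	openapi_v2 "github.com/google/gnostic/openapiv2"
	"github.com/k8sgpt-ai/k8sgpt/pkg/kubernetes"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// describeSuspendField is a hypothetical helper: given an OpenAPI v2 document
// (normally obtained from the Kubernetes Discovery client when --with-doc is set),
// it returns the official description of the CronJob spec.suspend field.
func describeSuspendField(schemaDoc *openapi_v2.Document) string {
	apiDoc := kubernetes.K8sApiReference{
		Kind: "CronJob",
		ApiVersion: schema.GroupVersion{
			Group:   "batch",
			Version: "v1",
		},
		OpenapiSchema: schemaDoc,
	}
	// Walks the schema definitions for batch/v1 CronJob and returns the
	// documentation string for the dotted field path.
	return apiDoc.GetApiDocV2("spec.suspend")
}

func main() {
	// With an empty document the lookup simply yields an empty string;
	// a real caller would pass the schema returned by the API server.
	fmt.Println(describeSuspendField(&openapi_v2.Document{}))
}
```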
@@ -22,12 +22,6 @@ import (
"k8s.io/kubectl/pkg/scheme"
)

type Client struct {
Client kubernetes.Interface
RestClient rest.Interface
Config *rest.Config
}

func (c *Client) GetConfig() *rest.Config {
return c.Config
}
@@ -74,9 +68,15 @@ func NewClient(kubecontext string, kubeconfig string) (*Client, error) {
return nil, err
}

serverVersion, err := clientSet.ServerVersion()
if err != nil {
return nil, err
}

return &Client{
Client: clientSet,
RestClient: restClient,
Config: config,
Client: clientSet,
RestClient: restClient,
Config: config,
ServerVersion: serverVersion,
}, nil
}
22 pkg/kubernetes/types.go (Normal file)
@@ -0,0 +1,22 @@
package kubernetes

import (
openapi_v2 "github.com/google/gnostic/openapiv2"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/version"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
)

type Client struct {
Client kubernetes.Interface
RestClient rest.Interface
Config *rest.Config
ServerVersion *version.Info
}

type K8sApiReference struct {
ApiVersion schema.GroupVersion
Kind string
OpenapiSchema *openapi_v2.Document
}
@@ -32,6 +32,7 @@ func (h *handler) Analyze(ctx context.Context, i *schemav1.AnalyzeRequest) (
i.Nocache,
i.Explain,
int(i.MaxConcurrency),
false, // Kubernetes Doc disabled in server mode
)
if err != nil {
return &schemav1.AnalyzeResponse{}, err