feat: dump (#1322)

* feat: reverting the cncf runners

Signed-off-by: AlexsJones <alexsimonjones@gmail.com>

* feat: creating a dump file for debugging

Signed-off-by: AlexsJones <alexsimonjones@gmail.com>

* chore: ran the linter

Signed-off-by: AlexsJones <alexsimonjones@gmail.com>

* chore: updated

Signed-off-by: AlexsJones <alexsimonjones@gmail.com>

* chore: updated

Signed-off-by: AlexsJones <alexsimonjones@gmail.com>

* chore: improved the function readme

Signed-off-by: AlexsJones <alexsimonjones@gmail.com>

* feat: added k8sgpt version info

Signed-off-by: AlexsJones <alexsimonjones@gmail.com>

* feat: added k8sgpt version info

Signed-off-by: AlexsJones <alexsimonjones@gmail.com>

* chore: added additional command

Signed-off-by: AlexsJones <alexsimonjones@gmail.com>

---------

Signed-off-by: AlexsJones <alexsimonjones@gmail.com>
Alex Jones 2024-11-12 12:04:01 +00:00 committed by GitHub
parent 896a53be83
commit da266b3c82
GPG Key ID: B5690EEEBB952194
9 changed files with 185 additions and 66 deletions

@@ -335,6 +335,13 @@ The stats mode allows for debugging and understanding the time taken by an analysis
- Analyzer Service took 38.583359166s
```
_Diagnostic information_
To collect diagnostic information, use the following command to create a `dump_<timestamp>.json` file in your local directory.
```
k8sgpt dump
```
</details>
## LLM AI Backends
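
As an illustration of what `k8sgpt dump` writes (not part of this diff; the values below are placeholders inferred from the `DumpOut` struct in `cmd/dump/dump.go`, and the exact fields serialized from `ai.AIConfiguration` and `version.Info` will vary with your configuration):
```
{
 "AIConfiguration": {
  "Providers": [
   { "Name": "openai", "Model": "gpt-4", "Password": "sk-a***" }
  ]
 },
 "ActiveFilters": ["Pod", "Deployment", "Service"],
 "KubernetesServerVersion": { "major": "1", "minor": "31" },
 "K8sGPTInfo": { "Version": "v0.3.0", "Commit": "da266b3c82", "Date": "2024-11-12" }
}
```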

cmd/dump/dump.go (new file, 113 lines)

@@ -0,0 +1,113 @@
/*
Copyright 2023 The K8sGPT Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dump

import (
	"encoding/json"
	"fmt"
	"net/http"
	"os"
	"time"

	"github.com/fatih/color"
	"github.com/k8sgpt-ai/k8sgpt/pkg/ai"
	"github.com/k8sgpt-ai/k8sgpt/pkg/kubernetes"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
	"k8s.io/apimachinery/pkg/version"
)

type K8sGPTInfo struct {
	Version string
	Commit  string
	Date    string
}

type DumpOut struct {
	AIConfiguration         ai.AIConfiguration
	ActiveFilters           []string
	KubernetesServerVersion *version.Info
	K8sGPTInfo              K8sGPTInfo
}

var DumpCmd = &cobra.Command{
	Use:   "dump",
	Short: "Creates a dump file for debugging issues with K8sGPT",
	Long:  `The dump command will create a dump_<timestamp>.json file containing K8sGPT non-sensitive configuration information.`,
	Run: func(cmd *cobra.Command, args []string) {
		// Fetch the configuration object(s): start with the AI configuration.
		var configAI ai.AIConfiguration
		err := viper.UnmarshalKey("ai", &configAI)
		if err != nil {
			color.Red("Error: %v", err)
			os.Exit(1)
		}
		var newProvider []ai.AIProvider
		for _, config := range configAI.Providers {
			// We blank out the custom headers for data-protection reasons.
			config.CustomHeaders = make([]http.Header, 0)
			// Mask the password, keeping at most a four-character prefix.
			if len(config.Password) > 4 {
				config.Password = config.Password[:4] + "***"
			} else {
				// The password is four characters or fewer.
				config.Password = "***"
			}
			newProvider = append(newProvider, config)
		}
		configAI.Providers = newProvider

		activeFilters := viper.GetStringSlice("active_filters")

		kubecontext := viper.GetString("kubecontext")
		kubeconfig := viper.GetString("kubeconfig")
		client, err := kubernetes.NewClient(kubecontext, kubeconfig)
		if err != nil {
			color.Red("Error: %v", err)
			os.Exit(1)
		}
		v, err := client.Client.Discovery().ServerVersion()
		if err != nil {
			color.Yellow("Could not find kubernetes server version")
		}

		dumpOut := DumpOut{
			AIConfiguration:         configAI,
			ActiveFilters:           activeFilters,
			KubernetesServerVersion: v,
			K8sGPTInfo: K8sGPTInfo{
				Version: viper.GetString("Version"),
				Commit:  viper.GetString("Commit"),
				Date:    viper.GetString("Date"),
			},
		}

		// Serialize dumpOut to JSON.
		jsonData, err := json.MarshalIndent(dumpOut, "", " ")
		if err != nil {
			color.Red("Error: %v", err)
			os.Exit(1)
		}

		// Write the JSON data to a timestamped file.
		f := fmt.Sprintf("dump_%s.json", time.Now().Format("20060102150405"))
		err = os.WriteFile(f, jsonData, 0644)
		if err != nil {
			color.Red("Error: %v", err)
			os.Exit(1)
		}
		color.Green("Dump created successfully: %s", f)
	},
}

func init() {
}
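
A note on the redaction above: custom headers are dropped entirely, and passwords keep at most a four-character prefix. A minimal standalone sketch of that rule, with hypothetical inputs for illustration:

```go
package main

import "fmt"

// mask mirrors the redaction rule in DumpCmd: keep a four-character
// prefix for longer passwords, otherwise redact everything.
func mask(password string) string {
	if len(password) > 4 {
		return password[:4] + "***"
	}
	return "***"
}

func main() {
	fmt.Println(mask("sk-abcdef123456")) // prints: sk-a***
	fmt.Println(mask("abc"))             // prints: ***
}
```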

@@ -23,6 +23,7 @@ import (
	"github.com/k8sgpt-ai/k8sgpt/cmd/auth"
	"github.com/k8sgpt-ai/k8sgpt/cmd/cache"
	customanalyzer "github.com/k8sgpt-ai/k8sgpt/cmd/customAnalyzer"
	"github.com/k8sgpt-ai/k8sgpt/cmd/dump"
	"github.com/k8sgpt-ai/k8sgpt/cmd/filters"
	"github.com/k8sgpt-ai/k8sgpt/cmd/generate"
	"github.com/k8sgpt-ai/k8sgpt/cmd/integration"
@@ -57,6 +58,9 @@ func Execute(v string, c string, d string) {
	Version = v
	Commit = c
	Date = d
	viper.Set("Version", Version)
	viper.Set("Commit", Commit)
	viper.Set("Date", Date)
	err := rootCmd.Execute()
	if err != nil {
		os.Exit(1)
@@ -70,6 +74,7 @@ func init() {
	rootCmd.AddCommand(auth.AuthCmd)
	rootCmd.AddCommand(analyze.AnalyzeCmd)
	rootCmd.AddCommand(dump.DumpCmd)
	rootCmd.AddCommand(filters.FiltersCmd)
	rootCmd.AddCommand(generate.GenerateCmd)
	rootCmd.AddCommand(integration.IntegrationCmd)
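
The `viper.Set` calls above are what let the new `dump` command read the build metadata back with `viper.GetString`. For context, here is a hypothetical entry point showing how `v`, `c`, and `d` are typically supplied; the project's real `main.go` may differ:

```go
package main

import "github.com/k8sgpt-ai/k8sgpt/cmd"

// Overridden at build time, e.g.:
//   go build -ldflags "-X main.version=v0.3.0 -X main.commit=$(git rev-parse --short HEAD) -X main.date=2024-11-12"
var (
	version = "dev"
	commit  = "none"
	date    = "unknown"
)

func main() {
	// Execute stores version, commit, and date in viper (see diff above),
	// making them available to subcommands such as dump.
	cmd.Execute(version, commit, date)
}
```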

@@ -20,8 +20,8 @@ type AmazonBedRockClient struct {
	client      *bedrockruntime.BedrockRuntime
	model       string
	temperature float32
	topP        float32
	maxTokens   int
}
// Amazon BedRock support region list US East (N. Virginia),US West (Oregon),Asia Pacific (Singapore),Asia Pacific (Tokyo),Europe (Frankfurt)
@@ -48,9 +48,9 @@ const (
	ModelAnthropicClaudeV2        = "anthropic.claude-v2"
	ModelAnthropicClaudeV1        = "anthropic.claude-v1"
	ModelAnthropicClaudeInstantV1 = "anthropic.claude-instant-v1"
	ModelA21J2UltraV1             = "ai21.j2-ultra-v1"
	ModelA21J2JumboInstruct       = "ai21.j2-jumbo-instruct"
	ModelAmazonTitanExpressV1     = "amazon.titan-text-express-v1"
)
var BEDROCK_MODELS = []string{
@@ -125,34 +125,33 @@ func (a *AmazonBedRockClient) GetCompletion(ctx context.Context, prompt string)
	// Prepare the input data for the model invocation based on the model & the Response Body per model as well.
	var request map[string]interface{}
	switch a.model {
	case ModelAnthropicClaudeV2, ModelAnthropicClaudeV1, ModelAnthropicClaudeInstantV1:
		request = map[string]interface{}{
			"prompt":               fmt.Sprintf("\n\nHuman: %s \n\nAssistant:", prompt),
			"max_tokens_to_sample": a.maxTokens,
			"temperature":          a.temperature,
			"top_p":                a.topP,
		}
	case ModelA21J2UltraV1, ModelA21J2JumboInstruct:
		request = map[string]interface{}{
			"prompt":      prompt,
			"maxTokens":   a.maxTokens,
			"temperature": a.temperature,
			"topP":        a.topP,
		}
	case ModelAmazonTitanExpressV1:
		request = map[string]interface{}{
			"inputText": fmt.Sprintf("\n\nUser: %s", prompt),
			"textGenerationConfig": map[string]interface{}{
				"maxTokenCount": a.maxTokens,
				"temperature":   a.temperature,
				"topP":          a.topP,
			},
		}
	default:
		return "", fmt.Errorf("model %s not supported", a.model)
	}
	body, err := json.Marshal(request)
	if err != nil {
		return "", err
@@ -171,8 +170,8 @@ func (a *AmazonBedRockClient) GetCompletion(ctx context.Context, prompt string)
	if err != nil {
		return "", err
	}
	// Response type changes as per model
	switch a.model {
	case ModelAnthropicClaudeV2, ModelAnthropicClaudeV1, ModelAnthropicClaudeInstantV1:
		type InvokeModelResponseBody struct {
@@ -184,8 +183,8 @@ func (a *AmazonBedRockClient) GetCompletion(ctx context.Context, prompt string)
		if err != nil {
			return "", err
		}
		return output.Completion, nil
	case ModelA21J2UltraV1, ModelA21J2JumboInstruct:
		type Data struct {
			Text string `json:"text"`
		}
@@ -194,33 +193,34 @@ func (a *AmazonBedRockClient) GetCompletion(ctx context.Context, prompt string)
		}
		type InvokeModelResponseBody struct {
			Completions []Completion `json:"completions"`
		}
		output := &InvokeModelResponseBody{}
		err = json.Unmarshal(resp.Body, output)
		if err != nil {
			return "", err
		}
		return output.Completions[0].Data.Text, nil
	case ModelAmazonTitanExpressV1:
		type Result struct {
			TokenCount       int    `json:"tokenCount"`
			OutputText       string `json:"outputText"`
			CompletionReason string `json:"completionReason"`
		}
		type InvokeModelResponseBody struct {
			InputTextTokenCount int      `json:"inputTextTokenCount"`
			Results             []Result `json:"results"`
		}
		output := &InvokeModelResponseBody{}
		err = json.Unmarshal(resp.Body, output)
		if err != nil {
			return "", err
		}
		return output.Results[0].OutputText, nil
	default:
		return "", fmt.Errorf("model %s not supported", a.model)
	}
}

// GetName returns the name of the AmazonBedRockClient.
func (a *AmazonBedRockClient) GetName() string {
	return amazonbedrockAIClientName
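
To make the per-model request shape concrete, here is a self-contained sketch (not part of the commit) that marshals the Amazon Titan request body built in the switch above, with placeholder values standing in for the client's prompt and tuning parameters:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Placeholder values standing in for prompt, a.maxTokens, a.temperature, a.topP.
	request := map[string]interface{}{
		"inputText": "\n\nUser: why is my pod crash-looping?",
		"textGenerationConfig": map[string]interface{}{
			"maxTokenCount": 100,
			"temperature":   0.7,
			"topP":          0.9,
		},
	}
	body, err := json.Marshal(request)
	if err != nil {
		panic(err)
	}
	// Prints the JSON body that would be sent to the Bedrock runtime:
	// {"inputText":"\n\nUser: why is my pod crash-looping?","textGenerationConfig":{"maxTokenCount":100,"temperature":0.7,"topP":0.9}}
	fmt.Println(string(body))
}
```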

@@ -14,9 +14,9 @@ const azureAIClientName = "azureopenai"
type AzureAIClient struct {
	nopCloser

	client      *openai.Client
	model       string
	temperature float32
	// organizationId string
}

@@ -27,10 +27,10 @@ const openAIClientName = "openai"
type OpenAIClient struct {
	nopCloser

	client      *openai.Client
	model       string
	temperature float32
	topP        float32
	// organizationId string
}

@@ -53,7 +53,6 @@ func (HpaAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {
	for _, hpa := range list.Items {
		var failures []common.Failure
		// check the error from status field
		conditions := hpa.Status.Conditions
		for _, condition := range conditions {

@@ -566,7 +566,6 @@ func TestHPAAnalyzerLabelSelectorFiltering(t *testing.T) {
	assert.Equal(t, len(analysisResults), 1)
}

func TestHPAAnalyzerStatusFieldAbleToScale(t *testing.T) {
	clientset := fake.NewSimpleClientset(
		&autoscalingv2.HorizontalPodAutoscaler{
@@ -584,8 +583,8 @@ func TestHPAAnalyzerStatusFieldAbleToScale(t *testing.T) {
			Status: autoscalingv2.HorizontalPodAutoscalerStatus{
				Conditions: []autoscalingv2.HorizontalPodAutoscalerCondition{
					{
						Type:    "AbleToScale",
						Status:  "False",
						Message: "test reason",
					},
				},
@@ -607,7 +606,6 @@ func TestHPAAnalyzerStatusFieldAbleToScale(t *testing.T) {
}

func TestHPAAnalyzerStatusFieldScalingActive(t *testing.T) {
	clientset := fake.NewSimpleClientset(
		&autoscalingv2.HorizontalPodAutoscaler{
@@ -625,8 +623,8 @@ func TestHPAAnalyzerStatusFieldScalingActive(t *testing.T) {
			Status: autoscalingv2.HorizontalPodAutoscalerStatus{
				Conditions: []autoscalingv2.HorizontalPodAutoscalerCondition{
					{
						Type:    autoscalingv2.ScalingActive,
						Status:  "False",
						Message: "test reason",
					},
				},
@@ -648,8 +646,6 @@ func TestHPAAnalyzerStatusFieldScalingActive(t *testing.T) {
}

func TestHPAAnalyzerStatusFieldScalingLimited(t *testing.T) {
	clientset := fake.NewSimpleClientset(
		&autoscalingv2.HorizontalPodAutoscaler{
@@ -667,8 +663,8 @@ func TestHPAAnalyzerStatusFieldScalingLimited(t *testing.T) {
			Status: autoscalingv2.HorizontalPodAutoscalerStatus{
				Conditions: []autoscalingv2.HorizontalPodAutoscalerCondition{
					{
						Type:    autoscalingv2.ScalingLimited,
						Status:  "False",
						Message: "test reason",
					},
				},
@@ -690,7 +686,6 @@ func TestHPAAnalyzerStatusFieldScalingLimited(t *testing.T) {
}

func TestHPAAnalyzerStatusField(t *testing.T) {
	clientset := fake.NewSimpleClientset(
		&autoscalingv2.HorizontalPodAutoscaler{
@@ -708,18 +703,18 @@ func TestHPAAnalyzerStatusField(t *testing.T) {
			Status: autoscalingv2.HorizontalPodAutoscalerStatus{
				Conditions: []autoscalingv2.HorizontalPodAutoscalerCondition{
					{
						Type:    autoscalingv2.AbleToScale,
						Status:  "True",
						Message: "recommended size matches current size",
					},
					{
						Type:    autoscalingv2.ScalingActive,
						Status:  "True",
						Message: "the HPA was able to successfully calculate a replica count",
					},
					{
						Type:    autoscalingv2.ScalingLimited,
						Status:  "True",
						Message: "the desired replica count is less than the minimum replica count",
					},
				},
@@ -739,4 +734,4 @@ func TestHPAAnalyzerStatusField(t *testing.T) {
	}
	assert.Equal(t, len(analysisResults), 1)
}

@@ -384,7 +384,7 @@ func TestStatefulSetAnalyzerUnavailableReplicaWithPodInitialized(t *testing.T) {
	if err != nil {
		t.Error(err)
	}
	var errorFound bool
	want := "Statefulset pod example-1 in the namespace default is not in running state."