Mirror of https://github.com/k8sgpt-ai/k8sgpt.git (synced 2025-09-10 11:39:40 +00:00)
feat: first version of serve
Signed-off-by: Thomas Schuetz <thomas.schuetz@t-sc.eu>
@@ -1,6 +1,7 @@
 package cmd

 import (
+    "github.com/k8sgpt-ai/k8sgpt/cmd/serve"
     "os"
     "path/filepath"

@@ -53,6 +54,7 @@ func init() {
     rootCmd.AddCommand(filters.FiltersCmd)
     rootCmd.AddCommand(generate.GenerateCmd)
     rootCmd.AddCommand(integration.IntegrationCmd)
+    rootCmd.AddCommand(serve.ServeCmd)
     rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.k8sgpt.yaml)")
     rootCmd.PersistentFlags().StringVar(&kubecontext, "kubecontext", "", "Kubernetes context to use. Only required if out-of-cluster.")
     rootCmd.PersistentFlags().StringVar(&kubeconfig, "kubeconfig", kubeconfigPath, "Path to a kubeconfig. Only required if out-of-cluster.")
cmd/serve/serve.go (new file, 60 lines added)
@@ -0,0 +1,60 @@
package serve

import (
    "fmt"
    "github.com/fatih/color"
    server2 "github.com/k8sgpt-ai/k8sgpt/pkg/server"
    "github.com/spf13/cobra"
    "github.com/spf13/viper"
    "os"
)

var (
    port    string
    backend string
    token   string
)

var ServeCmd = &cobra.Command{
    Use:   "serve",
    Short: "Runs k8sgpt as a server",
    Long:  `Runs k8sgpt as a server to allow for easy integration with other applications.`,
    Run: func(cmd *cobra.Command, args []string) {

        backendType := viper.GetString("backend_type")
        if backendType == "" {
            color.Red("No backend set. Please run k8sgpt auth")
            os.Exit(1)
        }

        // override the default backend if a flag is provided
        if backend != "" {
            backendType = backend
        }

        token := viper.GetString(fmt.Sprintf("%s_key", backendType))
        // check that a key is configured for the selected backend
        if token == "" {
            color.Red("No %s key set. Please run k8sgpt auth", backendType)
            os.Exit(1)
        }

        server := server2.K8sGPTServer{
            Backend: backend,
            Port:    port,
            Token:   token,
        }

        err := server.Serve()
        if err != nil {
            color.Red("Error: %v", err)
            os.Exit(1)
        }
    },
}

func init() {
    // flags for port and backend
    ServeCmd.Flags().StringVarP(&port, "port", "p", "8080", "Port to run the server on")
    ServeCmd.Flags().StringVarP(&backend, "backend", "b", "openai", "Backend AI provider")
}
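The serve command resolves its settings from the saved viper configuration rather than from flags alone: the provider name is read from the backend_type key (overridable with --backend) and the API key from "<backend>_key". The sketch below is a minimal, illustrative reproduction of that lookup; only the key-naming convention comes from the code above, and the placeholder values stand in for what k8sgpt auth would normally write.

package main

import (
    "fmt"

    "github.com/spf13/viper"
)

func main() {
    // Placeholder values; in the real CLI these keys are populated by
    // `k8sgpt auth` and read back by the serve command above.
    viper.Set("backend_type", "openai")
    viper.Set("openai_key", "sk-placeholder")

    // Same lookup pattern as ServeCmd: provider first, then "<provider>_key".
    backendType := viper.GetString("backend_type")
    token := viper.GetString(fmt.Sprintf("%s_key", backendType))
    fmt.Printf("backend=%s, key configured=%v\n", backendType, token != "")
}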
pkg/server/main.go (new file, 118 lines added)
@@ -0,0 +1,118 @@
package server

import (
    "context"
    "encoding/json"
    "fmt"
    "github.com/fatih/color"
    "github.com/k8sgpt-ai/k8sgpt/pkg/ai"
    "github.com/k8sgpt-ai/k8sgpt/pkg/analyzer"
    "github.com/k8sgpt-ai/k8sgpt/pkg/kubernetes"
    "github.com/schollz/progressbar/v3"
    "github.com/spf13/viper"
    "net/http"
    "os"
    "strings"
)

type K8sGPTServer struct {
    Port    string
    Backend string
    Key     string
    Token   string
}

type Result struct {
    Analysis []analyzer.Analysis `json:"analysis"`
}

// analyzeHandler runs the analyzers for the requested namespace and writes
// the results back to the client as JSON.
func (s *K8sGPTServer) analyzeHandler(w http.ResponseWriter, r *http.Request) {
    namespace := r.URL.Query().Get("namespace")
    ex := r.URL.Query().Get("explain")

    explain := false
    if ex == "true" {
        explain = true
    }

    output := Result{}

    var aiClient ai.IAI
    switch s.Backend {
    case "openai":
        aiClient = &ai.OpenAIClient{}
        if err := aiClient.Configure(s.Token, "english"); err != nil {
            color.Red("Error: %v", err)
            os.Exit(1)
        }
    default:
        color.Red("Backend not supported")
        os.Exit(1)
    }

    ctx := context.Background()
    // Get kubernetes client from viper
    client := viper.Get("kubernetesClient").(*kubernetes.Client)
    // Analysis configuration
    config := &analyzer.AnalysisConfiguration{
        Namespace: namespace,
        Explain:   explain,
    }

    var analysisResults *[]analyzer.Analysis = &[]analyzer.Analysis{}
    if err := analyzer.RunAnalysis(ctx, []string{}, config, client,
        aiClient, analysisResults); err != nil {
        color.Red("Error: %v", err)
    }

    fmt.Println(analysisResults)
    if len(*analysisResults) == 0 {
        fmt.Fprintf(w, "{ \"status\": \"OK\" }")
        // nothing to report; return before writing the analysis JSON below
        return
    }

    var bar = progressbar.Default(int64(len(*analysisResults)))
    if !explain {
        bar.Clear()
    }
    var printOutput []analyzer.Analysis

    for _, analysis := range *analysisResults {

        if explain {
            parsedText, err := analyzer.ParseViaAI(ctx, config, aiClient, analysis.Error)
            if err != nil {
                // Check for exhaustion
                if strings.Contains(err.Error(), "status code: 429") {
                    fmt.Fprintf(w, "Exhausted API quota. Please try again later")
                    os.Exit(1)
                }
                color.Red("Error: %v", err)
                continue
            }
            analysis.Details = parsedText
            bar.Add(1)
        }
        printOutput = append(printOutput, analysis)

        analysis.Error = analysis.Error[0:]
        output.Analysis = append(output.Analysis, analysis)
    }

    j, err := json.MarshalIndent(output, "", "  ")
    if err != nil {
        color.Red("Error: %v", err)
        os.Exit(1)
    }
    fmt.Fprintf(w, "%s", j)
}

// Serve registers the analyze endpoint and blocks, serving HTTP on the
// configured port.
func (s *K8sGPTServer) Serve() error {
    http.HandleFunc("/analyze", s.analyzeHandler)
    err := http.ListenAndServe(":"+s.Port, nil)
    if err != nil {
        fmt.Printf("error starting server: %s\n", err)
        return err
    }
    return nil
}
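Once the server is running (k8sgpt serve, port 8080 by default), the endpoint can be exercised with any HTTP client. The sketch below is a minimal Go client for illustration only: the host, port, and namespace value are assumptions, while the /analyze path and the namespace/explain query parameters come from analyzeHandler above.

package main

import (
    "fmt"
    "io"
    "net/http"
)

func main() {
    // Assumes a local `k8sgpt serve` instance on the default port 8080.
    resp, err := http.Get("http://localhost:8080/analyze?namespace=default&explain=false")
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()

    body, err := io.ReadAll(resp.Body)
    if err != nil {
        panic(err)
    }

    // The handler writes { "status": "OK" } when nothing is found, otherwise
    // a JSON object whose "analysis" field holds the list of results.
    fmt.Println(string(body))
}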