mirror of https://github.com/k8sgpt-ai/k8sgpt.git (synced 2026-01-24 06:01:30 +00:00)
feat: add MCP support (#1471)
* feat: first mcp impl
* chore: update
* chore: wip
* chore: switched to stdio transport
* chore: readme
* feat: fix the linter 🤖
* feat(mcp): implement MCP server and handler
  - Add MCP server to serve
  - Add MCP handler to handle MCP requests
* feat: consolidating code duplication
* feat: added http sse support
* chore: fixed broken tests
* chore: updated and fixed linter
* chore: updated the linter issues

Signed-off-by: Alex Jones <alexsimonjones@gmail.com>
1
.gitignore
vendored
@@ -7,3 +7,4 @@ k8sgpt*
dist/

bin/
pkg/server/example/example
70
README.md
@@ -165,6 +165,76 @@ _This mode of operation is ideal for continuous monitoring of your cluster and c
- And use `k8sgpt analyze --explain` to get a more detailed explanation of the issues.
- You can also run `k8sgpt analyze --with-doc` (with or without the explain flag) to get the official documentation from Kubernetes.

# Using with Claude Desktop

K8sGPT can be integrated with Claude Desktop to provide AI-powered Kubernetes cluster analysis. This integration requires K8sGPT v0.4.14 or later.

## Prerequisites

1. Install K8sGPT v0.4.14 or later:
```sh
brew install k8sgpt
```

2. Install Claude Desktop from the official website

3. Configure K8sGPT with your preferred AI backend:
```sh
k8sgpt auth
```

## Setup

1. Start the K8sGPT MCP server:
```sh
k8sgpt serve --mcp
```

2. In Claude Desktop:
   - Open Settings
   - Navigate to the Integrations section
   - Add K8sGPT as a new integration
   - The MCP server will be automatically detected

3. Configure Claude Desktop with the following JSON:

```json
{
  "mcpServers": {
    "k8sgpt": {
      "command": "k8sgpt",
      "args": [
        "serve",
        "--mcp"
      ]
    }
  }
}
```

## Usage

Once connected, you can use Claude Desktop to:
- Analyze your Kubernetes cluster
- Get detailed insights about cluster health
- Receive recommendations for fixing issues
- Query cluster information

Example commands in Claude Desktop:
- "Analyze my Kubernetes cluster"
- "What's the health status of my cluster?"
- "Show me any issues in the default namespace"

## Troubleshooting

If you encounter connection issues:
1. Ensure K8sGPT is running with the MCP server enabled
2. Verify your Kubernetes cluster is accessible
3. Check that your AI backend is properly configured
4. Restart both K8sGPT and Claude Desktop

For more information, visit our [documentation](https://docs.k8sgpt.ai).
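
If you run the server in HTTP mode instead (`k8sgpt serve --mcp --mcp-http`), the analysis added in this PR can also be exercised without Claude Desktop by POSTing to the `/mcp/analyze` endpoint. The following is only an editor's sketch of such a client: the field names follow the `AnalyzeRequest` struct in `pkg/server/mcp.go`, and 8089 is the default `--mcp-port`.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"log"
	"net/http"
)

// analyzeRequest mirrors (a subset of) the AnalyzeRequest struct in pkg/server/mcp.go.
type analyzeRequest struct {
	Namespace string `json:"namespace,omitempty"`
	Explain   bool   `json:"explain,omitempty"`
}

func main() {
	payload, err := json.Marshal(analyzeRequest{Namespace: "default", Explain: true})
	if err != nil {
		log.Fatalf("marshalling request failed: %v", err)
	}

	// 8089 is the default --mcp-port; adjust if you changed it.
	resp, err := http.Post("http://localhost:8089/mcp/analyze", "application/json", bytes.NewReader(payload))
	if err != nil {
		log.Fatalf("request failed: %v", err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatalf("reading response failed: %v", err)
	}
	fmt.Println(string(body))
}
```

A fuller version of this client, with more request fields and command-line flags, is added in `pkg/server/client_example` further down in this PR.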

## Analyzers

K8sGPT uses analyzers to triage and diagnose issues in your cluster. It has a set of analyzers that are built in, but
@@ -38,6 +38,9 @@ var (
    metricsPort string
    backend     string
    enableHttp  bool
    enableMCP   bool
    mcpPort     string
    mcpHTTP     bool
)

var ServeCmd = &cobra.Command{
@@ -183,6 +186,21 @@ var ServeCmd = &cobra.Command{
            }
        }()

        if enableMCP {
            // Create and start MCP server
            mcpServer, err := k8sgptserver.NewMCPServer(mcpPort, aiProvider, mcpHTTP, logger)
            if err != nil {
                color.Red("Error creating MCP server: %v", err)
                os.Exit(1)
            }
            go func() {
                if err := mcpServer.Start(); err != nil {
                    color.Red("Error starting MCP server: %v", err)
                    os.Exit(1)
                }
            }()
        }

        server := k8sgptserver.Config{
            Backend: aiProvider.Name,
            Port:    port,
@@ -216,4 +234,7 @@ func init() {
    ServeCmd.Flags().StringVarP(&metricsPort, "metrics-port", "", "8081", "Port to run the metrics-server on")
    ServeCmd.Flags().StringVarP(&backend, "backend", "b", "openai", "Backend AI provider")
    ServeCmd.Flags().BoolVarP(&enableHttp, "http", "", false, "Enable REST/http using gppc-gateway")
    ServeCmd.Flags().BoolVarP(&enableMCP, "mcp", "", false, "Enable Mission Control Protocol server")
    ServeCmd.Flags().StringVarP(&mcpPort, "mcp-port", "", "8089", "Port to run the MCP server on")
    ServeCmd.Flags().BoolVarP(&mcpHTTP, "mcp-http", "", false, "Enable HTTP mode for MCP server")
}

10
go.mod
@@ -40,9 +40,11 @@ require (
|
||||
github.com/cohere-ai/cohere-go/v2 v2.12.2
|
||||
github.com/go-logr/zapr v1.3.0
|
||||
github.com/google/generative-ai-go v0.19.0
|
||||
github.com/google/martian v2.1.0+incompatible
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1
|
||||
github.com/hupe1980/go-huggingface v0.0.15
|
||||
github.com/kyverno/policy-reporter-kyverno-plugin v1.6.4
|
||||
github.com/metoro-io/mcp-golang v0.11.0
|
||||
github.com/olekukonko/tablewriter v0.0.5
|
||||
github.com/oracle/oci-go-sdk/v65 v65.79.0
|
||||
github.com/prometheus/prometheus v0.302.1
|
||||
@@ -78,7 +80,9 @@ require (
|
||||
github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b // indirect
|
||||
github.com/aws/aws-sdk-go-v2 v1.32.3 // indirect
|
||||
github.com/aws/smithy-go v1.22.0 // indirect
|
||||
github.com/bahlo/generic-list-go v0.2.0 // indirect
|
||||
github.com/blang/semver/v4 v4.0.0 // indirect
|
||||
github.com/buger/jsonparser v1.1.1 // indirect
|
||||
github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect
|
||||
github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 // indirect
|
||||
github.com/containerd/console v1.0.4 // indirect
|
||||
@@ -105,6 +109,7 @@ require (
|
||||
github.com/gookit/color v1.5.4 // indirect
|
||||
github.com/gorilla/websocket v1.5.1 // indirect
|
||||
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect
|
||||
github.com/invopop/jsonschema v0.12.0 // indirect
|
||||
github.com/jmespath/go-jmespath v0.4.0 // indirect
|
||||
github.com/jpillora/backoff v1.0.0 // indirect
|
||||
github.com/kylelemons/godebug v1.1.0 // indirect
|
||||
@@ -122,6 +127,11 @@ require (
|
||||
github.com/sony/gobreaker v0.5.0 // indirect
|
||||
github.com/sourcegraph/conc v0.3.0 // indirect
|
||||
github.com/stretchr/objx v0.5.2 // indirect
|
||||
github.com/tidwall/gjson v1.18.0 // indirect
|
||||
github.com/tidwall/match v1.1.1 // indirect
|
||||
github.com/tidwall/pretty v1.2.1 // indirect
|
||||
github.com/tidwall/sjson v1.2.5 // indirect
|
||||
github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
|
||||
go.opencensus.io v0.24.0 // indirect
|
||||
|
||||
24
go.sum
@@ -741,6 +741,8 @@ github.com/aws/aws-sdk-go-v2 v1.32.3 h1:T0dRlFBKcdaUPGNtkBSwHZxrtis8CQU17UpNBZYd
|
||||
github.com/aws/aws-sdk-go-v2 v1.32.3/go.mod h1:2SK5n0a2karNTv5tbP1SjsX0uhttou00v/HpXKM1ZUo=
|
||||
github.com/aws/smithy-go v1.22.0 h1:uunKnWlcoL3zO7q+gG2Pk53joueEOsnNB28QdMsmiMM=
|
||||
github.com/aws/smithy-go v1.22.0/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
|
||||
github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk=
|
||||
github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
@@ -751,6 +753,7 @@ github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl
|
||||
github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
|
||||
github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70=
|
||||
github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
|
||||
github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs=
|
||||
github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
|
||||
github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd h1:rFt+Y/IK1aEZkEHchZRSq9OQbsSzIT/OrI8YFFmRIng=
|
||||
github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
|
||||
@@ -870,8 +873,6 @@ github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0
|
||||
github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
|
||||
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4=
|
||||
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSYXu++VVOHnXeitef/D8n/6y4QV8uLHSFXX4NeXMGc=
|
||||
github.com/expr-lang/expr v1.16.9 h1:WUAzmR0JNI9JCiF0/ewwHB1gmcGw5wW7nWt8gc6PpCI=
|
||||
github.com/expr-lang/expr v1.16.9/go.mod h1:8/vRC7+7HBzESEqt5kKpYXxrxkr31SaO8r40VO/1IT4=
|
||||
github.com/expr-lang/expr v1.17.2 h1:o0A99O/Px+/DTjEnQiodAgOIK9PPxL8DtXhBRKC+Iso=
|
||||
github.com/expr-lang/expr v1.17.2/go.mod h1:8/vRC7+7HBzESEqt5kKpYXxrxkr31SaO8r40VO/1IT4=
|
||||
github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
|
||||
@@ -940,9 +941,6 @@ github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeH
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
|
||||
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
|
||||
@@ -1142,6 +1140,8 @@ github.com/imdario/mergo v1.0.1 h1:lFIgOs30GMaV/2+qQ+eEBLbUL6h1YosdohE3ODy4hTs=
|
||||
github.com/imdario/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||
github.com/invopop/jsonschema v0.12.0 h1:6ovsNSuvn9wEQVOyc72aycBMVQFKz7cPdMJn10CvzRI=
|
||||
github.com/invopop/jsonschema v0.12.0/go.mod h1:ffZ5Km5SWWRAIN6wbDXItl95euhFz2uON45H2qjYt+0=
|
||||
github.com/ionos-cloud/sdk-go/v6 v6.3.2 h1:2mUmrZZz6cPyT9IRX0T8fBLc/7XU/eTxP2Y5tS7/09k=
|
||||
github.com/ionos-cloud/sdk-go/v6 v6.3.2/go.mod h1:SXrO9OGyWjd2rZhAhEpdYN6VUAODzzqRdqA9BCviQtI=
|
||||
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
|
||||
@@ -1230,6 +1230,8 @@ github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4
|
||||
github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU=
|
||||
github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/metoro-io/mcp-golang v0.11.0 h1:1k+VSE9QaeMTLn0gJ3FgE/DcjsCBsLFnz5eSFbgXUiI=
|
||||
github.com/metoro-io/mcp-golang v0.11.0/go.mod h1:ifLP9ZzKpN1UqFWNTpAHOqSvNkMK6b7d1FSZ5Lu0lN0=
|
||||
github.com/miekg/dns v1.1.63 h1:8M5aAw6OMZfFXTT7K5V0Eu5YiiL8l7nUAkyN6C9YwaY=
|
||||
github.com/miekg/dns v1.1.63/go.mod h1:6NGHfjhpmr5lt3XPLuyfDJi5AXbNIPM9PY6H6sF1Nfs=
|
||||
github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY=
|
||||
@@ -1433,8 +1435,20 @@ github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOf
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
|
||||
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
|
||||
github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
|
||||
github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY=
|
||||
github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
|
||||
github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
|
||||
github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
|
||||
github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
|
||||
github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4=
|
||||
github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
|
||||
github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY=
|
||||
github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28=
|
||||
github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs=
|
||||
github.com/vultr/govultr/v2 v2.17.2/go.mod h1:ZFOKGWmgjytfyjeyAdhQlSWwTjh2ig+X49cAp50dzXI=
|
||||
github.com/wk8/go-ordered-map/v2 v2.1.8 h1:5h/BUHu93oj4gIdvHHHGsScSTMijfx5PeYkE/fJgbpc=
|
||||
github.com/wk8/go-ordered-map/v2 v2.1.8/go.mod h1:5nJHM5DyteebpVlHnWMV0rPz6Zp7+xBAnxjb1X5vnTw=
|
||||
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
|
||||
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
|
||||
|
||||
@@ -323,7 +323,16 @@ func (a *Analysis) RunAnalysis() {
        OpenapiSchema: openapiSchema,
    }

    semaphore := make(chan struct{}, a.MaxConcurrency)
    // Set a reasonable maximum for concurrency to prevent excessive memory allocation
    const maxAllowedConcurrency = 100
    concurrency := a.MaxConcurrency
    if concurrency <= 0 {
        concurrency = 10 // Default value if not set
    } else if concurrency > maxAllowedConcurrency {
        concurrency = maxAllowedConcurrency // Cap at a reasonable maximum
    }

    semaphore := make(chan struct{}, concurrency)
    var wg sync.WaitGroup
    var mutex sync.Mutex
    // if there are no filters selected and no active_filters then run coreAnalyzer

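The hunk above replaces the unbounded `make(chan struct{}, a.MaxConcurrency)` with a clamped capacity. For readers unfamiliar with the pattern, here is a standalone sketch of the same semaphore technique; it is not the k8sgpt code itself, just an illustration of how a buffered channel bounds the number of goroutines in flight.

```go
package main

import (
	"fmt"
	"sync"
)

// clampConcurrency mirrors the bounds applied in RunAnalysis:
// non-positive values fall back to a default, large values are capped.
func clampConcurrency(requested int) int {
	const defaultConcurrency, maxAllowed = 10, 100
	if requested <= 0 {
		return defaultConcurrency
	}
	if requested > maxAllowed {
		return maxAllowed
	}
	return requested
}

func main() {
	tasks := []string{"Pod", "Service", "Ingress", "Deployment"}
	semaphore := make(chan struct{}, clampConcurrency(2)) // at most 2 tasks in flight
	var wg sync.WaitGroup

	for _, t := range tasks {
		wg.Add(1)
		semaphore <- struct{}{} // acquire a slot before launching the goroutine
		go func(name string) {
			defer wg.Done()
			defer func() { <-semaphore }() // release the slot when done
			fmt.Println("analyzing", name)
		}(t)
	}
	wg.Wait()
}
```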
@@ -15,116 +15,45 @@ package analyzer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"github.com/k8sgpt-ai/k8sgpt/pkg/common"
|
||||
"github.com/k8sgpt-ai/k8sgpt/pkg/kubernetes"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
networkingv1 "k8s.io/api/networking/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
)
|
||||
|
||||
func TestIngressAnalyzer(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
config common.Analyzer
|
||||
expectations []struct {
|
||||
name string
|
||||
failuresCount int
|
||||
}
|
||||
// Create test cases
|
||||
testCases := []struct {
|
||||
name string
|
||||
ingress *networkingv1.Ingress
|
||||
expectedIssues []string
|
||||
}{
|
||||
{
|
||||
name: "Missing ingress class",
|
||||
config: common.Analyzer{
|
||||
Client: &kubernetes.Client{
|
||||
Client: fake.NewSimpleClientset(
|
||||
&networkingv1.Ingress{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "no-class",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: networkingv1.IngressSpec{
|
||||
// No ingress class specified
|
||||
},
|
||||
},
|
||||
),
|
||||
},
|
||||
Context: context.Background(),
|
||||
Namespace: "default",
|
||||
},
|
||||
expectations: []struct {
|
||||
name string
|
||||
failuresCount int
|
||||
}{
|
||||
{
|
||||
name: "default/no-class",
|
||||
failuresCount: 1, // One failure for missing ingress class
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Non-existent ingress class",
|
||||
config: common.Analyzer{
|
||||
Client: &kubernetes.Client{
|
||||
Client: fake.NewSimpleClientset(
|
||||
&networkingv1.Ingress{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "bad-class",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: networkingv1.IngressSpec{
|
||||
IngressClassName: strPtr("non-existent"),
|
||||
},
|
||||
},
|
||||
),
|
||||
},
|
||||
Context: context.Background(),
|
||||
Namespace: "default",
|
||||
},
|
||||
expectations: []struct {
|
||||
name string
|
||||
failuresCount int
|
||||
}{
|
||||
{
|
||||
name: "default/bad-class",
|
||||
failuresCount: 1, // One failure for non-existent ingress class
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Non-existent backend service",
|
||||
config: common.Analyzer{
|
||||
Client: &kubernetes.Client{
|
||||
Client: fake.NewSimpleClientset(
|
||||
&networkingv1.Ingress{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "bad-backend",
|
||||
Namespace: "default",
|
||||
Annotations: map[string]string{
|
||||
"kubernetes.io/ingress.class": "nginx",
|
||||
},
|
||||
},
|
||||
Spec: networkingv1.IngressSpec{
|
||||
Rules: []networkingv1.IngressRule{
|
||||
{
|
||||
Host: "example.com",
|
||||
IngressRuleValue: networkingv1.IngressRuleValue{
|
||||
HTTP: &networkingv1.HTTPIngressRuleValue{
|
||||
Paths: []networkingv1.HTTPIngressPath{
|
||||
{
|
||||
Path: "/",
|
||||
PathType: pathTypePtr(networkingv1.PathTypePrefix),
|
||||
Backend: networkingv1.IngressBackend{
|
||||
Service: &networkingv1.IngressServiceBackend{
|
||||
Name: "non-existent-service",
|
||||
Port: networkingv1.ServiceBackendPort{
|
||||
Number: 80,
|
||||
},
|
||||
},
|
||||
},
|
||||
ingress: &networkingv1.Ingress{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-ingress",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: networkingv1.IngressSpec{
|
||||
Rules: []networkingv1.IngressRule{
|
||||
{
|
||||
Host: "example.com",
|
||||
IngressRuleValue: networkingv1.IngressRuleValue{
|
||||
HTTP: &networkingv1.HTTPIngressRuleValue{
|
||||
Paths: []networkingv1.HTTPIngressPath{
|
||||
{
|
||||
Path: "/",
|
||||
Backend: networkingv1.IngressBackend{
|
||||
Service: &networkingv1.IngressServiceBackend{
|
||||
Name: "non-existent-service",
|
||||
Port: networkingv1.ServiceBackendPort{
|
||||
Number: 80,
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -133,177 +62,144 @@ func TestIngressAnalyzer(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
),
|
||||
},
|
||||
},
|
||||
Context: context.Background(),
|
||||
Namespace: "default",
|
||||
},
|
||||
expectations: []struct {
|
||||
name string
|
||||
failuresCount int
|
||||
}{
|
||||
{
|
||||
name: "default/bad-backend",
|
||||
failuresCount: 2, // Two failures: non-existent ingress class and non-existent service
|
||||
},
|
||||
expectedIssues: []string{
|
||||
"Ingress default/test-ingress does not specify an Ingress class.",
|
||||
"Ingress uses the service default/non-existent-service which does not exist.",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Non-existent TLS secret",
|
||||
config: common.Analyzer{
|
||||
Client: &kubernetes.Client{
|
||||
Client: fake.NewSimpleClientset(
|
||||
&networkingv1.Ingress{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "bad-tls",
|
||||
Namespace: "default",
|
||||
Annotations: map[string]string{
|
||||
"kubernetes.io/ingress.class": "nginx",
|
||||
},
|
||||
},
|
||||
Spec: networkingv1.IngressSpec{
|
||||
TLS: []networkingv1.IngressTLS{
|
||||
{
|
||||
Hosts: []string{"example.com"},
|
||||
SecretName: "non-existent-secret",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
),
|
||||
ingress: &networkingv1.Ingress{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-ingress-tls",
|
||||
Namespace: "default",
|
||||
},
|
||||
Context: context.Background(),
|
||||
Namespace: "default",
|
||||
},
|
||||
expectations: []struct {
|
||||
name string
|
||||
failuresCount int
|
||||
}{
|
||||
{
|
||||
name: "default/bad-tls",
|
||||
failuresCount: 2, // Two failures: non-existent ingress class and non-existent TLS secret
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Valid ingress with all components",
|
||||
config: common.Analyzer{
|
||||
Client: &kubernetes.Client{
|
||||
Client: fake.NewSimpleClientset(
|
||||
&networkingv1.Ingress{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "valid-ingress",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: networkingv1.IngressSpec{
|
||||
IngressClassName: strPtr("nginx"),
|
||||
},
|
||||
Spec: networkingv1.IngressSpec{
|
||||
TLS: []networkingv1.IngressTLS{
|
||||
{
|
||||
Hosts: []string{"example.com"},
|
||||
SecretName: "non-existent-secret",
|
||||
},
|
||||
&networkingv1.IngressClass{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "nginx",
|
||||
},
|
||||
},
|
||||
&v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "backend-service",
|
||||
Namespace: "default",
|
||||
},
|
||||
},
|
||||
&v1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "tls-secret",
|
||||
Namespace: "default",
|
||||
},
|
||||
Type: v1.SecretTypeTLS,
|
||||
},
|
||||
),
|
||||
},
|
||||
Context: context.Background(),
|
||||
Namespace: "default",
|
||||
},
|
||||
expectations: []struct {
|
||||
name string
|
||||
failuresCount int
|
||||
}{
|
||||
// No expectations for valid ingress
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Multiple issues",
|
||||
config: common.Analyzer{
|
||||
Client: &kubernetes.Client{
|
||||
Client: fake.NewSimpleClientset(
|
||||
&networkingv1.Ingress{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "multiple-issues",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: networkingv1.IngressSpec{
|
||||
IngressClassName: strPtr("non-existent"),
|
||||
Rules: []networkingv1.IngressRule{
|
||||
{
|
||||
Host: "example.com",
|
||||
IngressRuleValue: networkingv1.IngressRuleValue{
|
||||
HTTP: &networkingv1.HTTPIngressRuleValue{
|
||||
Paths: []networkingv1.HTTPIngressPath{
|
||||
{
|
||||
Path: "/",
|
||||
PathType: pathTypePtr(networkingv1.PathTypePrefix),
|
||||
Backend: networkingv1.IngressBackend{
|
||||
Service: &networkingv1.IngressServiceBackend{
|
||||
Name: "non-existent-service",
|
||||
Port: networkingv1.ServiceBackendPort{
|
||||
Number: 80,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Rules: []networkingv1.IngressRule{
|
||||
{
|
||||
Host: "example.com",
|
||||
IngressRuleValue: networkingv1.IngressRuleValue{
|
||||
HTTP: &networkingv1.HTTPIngressRuleValue{
|
||||
Paths: []networkingv1.HTTPIngressPath{
|
||||
{
|
||||
Path: "/",
|
||||
Backend: networkingv1.IngressBackend{
|
||||
Service: &networkingv1.IngressServiceBackend{
|
||||
Name: "test-service",
|
||||
Port: networkingv1.ServiceBackendPort{
|
||||
Number: 80,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
TLS: []networkingv1.IngressTLS{
|
||||
{
|
||||
Hosts: []string{"example.com"},
|
||||
SecretName: "non-existent-secret",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedIssues: []string{
|
||||
"Ingress default/test-ingress-tls does not specify an Ingress class.",
|
||||
"Ingress uses the service default/test-service which does not exist.",
|
||||
"Ingress uses the secret default/non-existent-secret as a TLS certificate which does not exist.",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Multiple issues",
|
||||
ingress: &networkingv1.Ingress{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-ingress-multi",
|
||||
Namespace: "default",
|
||||
},
|
||||
Spec: networkingv1.IngressSpec{
|
||||
TLS: []networkingv1.IngressTLS{
|
||||
{
|
||||
Hosts: []string{"example.com"},
|
||||
SecretName: "non-existent-secret",
|
||||
},
|
||||
},
|
||||
Rules: []networkingv1.IngressRule{
|
||||
{
|
||||
Host: "example.com",
|
||||
IngressRuleValue: networkingv1.IngressRuleValue{
|
||||
HTTP: &networkingv1.HTTPIngressRuleValue{
|
||||
Paths: []networkingv1.HTTPIngressPath{
|
||||
{
|
||||
Path: "/",
|
||||
Backend: networkingv1.IngressBackend{
|
||||
Service: &networkingv1.IngressServiceBackend{
|
||||
Name: "non-existent-service",
|
||||
Port: networkingv1.ServiceBackendPort{
|
||||
Number: 80,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
),
|
||||
},
|
||||
},
|
||||
Context: context.Background(),
|
||||
Namespace: "default",
|
||||
},
|
||||
expectations: []struct {
|
||||
name string
|
||||
failuresCount int
|
||||
}{
|
||||
{
|
||||
name: "default/multiple-issues",
|
||||
failuresCount: 3, // Three failures: ingress class, service, and TLS secret
|
||||
},
|
||||
expectedIssues: []string{
|
||||
"Ingress default/test-ingress-multi does not specify an Ingress class.",
|
||||
"Ingress uses the service default/non-existent-service which does not exist.",
|
||||
"Ingress uses the secret default/non-existent-secret as a TLS certificate which does not exist.",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// Run test cases
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Create a new context and clientset for each test case
|
||||
ctx := context.Background()
|
||||
clientset := fake.NewSimpleClientset()
|
||||
|
||||
// Create the ingress in the fake clientset
|
||||
_, err := clientset.NetworkingV1().Ingresses(tc.ingress.Namespace).Create(ctx, tc.ingress, metav1.CreateOptions{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Create the analyzer configuration
|
||||
config := common.Analyzer{
|
||||
Client: &kubernetes.Client{
|
||||
Client: clientset,
|
||||
},
|
||||
Context: ctx,
|
||||
Namespace: tc.ingress.Namespace,
|
||||
}
|
||||
|
||||
// Create the analyzer and run analysis
|
||||
analyzer := IngressAnalyzer{}
|
||||
results, err := analyzer.Analyze(tt.config)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, results, len(tt.expectations))
|
||||
results, err := analyzer.Analyze(config)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Sort results by name for consistent comparison
|
||||
sort.Slice(results, func(i, j int) bool {
|
||||
return results[i].Name < results[j].Name
|
||||
})
|
||||
// Check that we got the expected number of issues
|
||||
assert.Len(t, results, 1, "Expected 1 result")
|
||||
result := results[0]
|
||||
assert.Len(t, result.Error, len(tc.expectedIssues), "Expected %d issues, got %d", len(tc.expectedIssues), len(result.Error))
|
||||
|
||||
for i, expectation := range tt.expectations {
|
||||
require.Equal(t, expectation.name, results[i].Name)
|
||||
require.Len(t, results[i].Error, expectation.failuresCount)
|
||||
// Check that each expected issue is present
|
||||
for _, expectedIssue := range tc.expectedIssues {
|
||||
found := false
|
||||
for _, failure := range result.Error {
|
||||
if failure.Text == expectedIssue {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
assert.True(t, found, "Expected to find issue: %s", expectedIssue)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
53
pkg/cache/interplex_based.go
vendored
@@ -1,12 +1,15 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
rpc "buf.build/gen/go/interplex-ai/schemas/grpc/go/protobuf/schema/v1/schemav1grpc"
|
||||
schemav1 "buf.build/gen/go/interplex-ai/schemas/protocolbuffers/go/protobuf/schema/v1"
|
||||
"context"
|
||||
"errors"
|
||||
"google.golang.org/grpc"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
rpc "buf.build/gen/go/interplex-ai/schemas/grpc/go/protobuf/schema/v1/schemav1grpc"
|
||||
schemav1 "buf.build/gen/go/interplex-ai/schemas/protocolbuffers/go/protobuf/schema/v1"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
)
|
||||
|
||||
var _ ICache = (*InterplexCache)(nil)
|
||||
@@ -59,6 +62,10 @@ func (c *InterplexCache) Store(key string, data string) error {
|
||||
}
|
||||
|
||||
func (c *InterplexCache) Load(key string) (string, error) {
|
||||
if os.Getenv("INTERPLEX_LOCAL_MODE") != "" {
|
||||
c.configuration.ConnectionString = "localhost:8084"
|
||||
}
|
||||
|
||||
conn, err := grpc.NewClient(c.configuration.ConnectionString, grpc.WithInsecure(), grpc.WithBlock())
|
||||
defer conn.Close()
|
||||
if err != nil {
|
||||
@@ -70,36 +77,52 @@ func (c *InterplexCache) Load(key string) (string, error) {
|
||||
Key: key,
|
||||
}
|
||||
resp, err := c.cacheServiceClient.Get(context.Background(), &req)
|
||||
// check if response is cache error not found
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return resp.Value, nil
|
||||
}
|
||||
|
||||
func (InterplexCache) List() ([]CacheObjectDetails, error) {
|
||||
//TODO implement me
|
||||
return nil, errors.New("not implemented")
|
||||
func (c *InterplexCache) List() ([]CacheObjectDetails, error) {
|
||||
// Not implemented for Interplex cache
|
||||
return []CacheObjectDetails{}, nil
|
||||
}
|
||||
|
||||
func (InterplexCache) Remove(key string) error {
|
||||
func (c *InterplexCache) Remove(key string) error {
|
||||
if os.Getenv("INTERPLEX_LOCAL_MODE") != "" {
|
||||
c.configuration.ConnectionString = "localhost:8084"
|
||||
}
|
||||
|
||||
return errors.New("not implemented")
|
||||
conn, err := grpc.NewClient(c.configuration.ConnectionString, grpc.WithTransportCredentials(insecure.NewCredentials()))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if err := conn.Close(); err != nil {
|
||||
// Log the error but don't return it since this is a deferred function
|
||||
fmt.Printf("Error closing connection: %v\n", err)
|
||||
}
|
||||
}()
|
||||
|
||||
serviceClient := rpc.NewCacheServiceClient(conn)
|
||||
c.cacheServiceClient = serviceClient
|
||||
req := schemav1.DeleteRequest{
|
||||
Key: key,
|
||||
}
|
||||
_, err = c.cacheServiceClient.Delete(context.Background(), &req)
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *InterplexCache) Exists(key string) bool {
|
||||
if _, err := c.Load(key); err != nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
_, err := c.Load(key)
|
||||
return err == nil
|
||||
}
|
||||
|
||||
func (c *InterplexCache) IsCacheDisabled() bool {
|
||||
return c.noCache
|
||||
}
|
||||
|
||||
func (InterplexCache) GetName() string {
|
||||
//TODO implement me
|
||||
func (c *InterplexCache) GetName() string {
|
||||
return "interplex"
|
||||
}
|
||||
|
||||
|
||||
@@ -1,30 +1,49 @@
# serve
# K8sGPT MCP Server

The serve commands allow you to run k8sgpt in a grpc server mode.
This would be enabled typically through `k8sgpt serve` and is how the in-cluster k8sgpt deployment functions when managed by the [k8sgpt-operator](https://github.com/k8sgpt-ai/k8sgpt-operator)
This directory contains the implementation of the Mission Control Protocol (MCP) server for K8sGPT. The MCP server allows K8sGPT to be integrated with other tools that support the MCP protocol.

The grpc interface that is served is hosted on [buf](https://buf.build/k8sgpt-ai/schemas) and the repository for this is [here](https://github.com/k8sgpt-ai/schemas)
## Components

## grpcurl
- `mcp.go`: The main MCP server implementation
- `server.go`: The HTTP server implementation
- `tools.go`: Tool definitions for the MCP server

A fantastic tool for local debugging and development is `grpcurl`
It allows you to form curl like requests that are http2
e.g.
## Features

```
grpcurl -plaintext -d '{"namespace": "k8sgpt", "explain" : "true"}' localhost:8080 schema.v1.ServiceAnalyzeService/Analyze
```
The MCP server provides the following features:

```
grpcurl -plaintext localhost:8080 schema.v1.ServiceConfigService/ListIntegrations
{
  "integrations": [
    "prometheus"
  ]
1. **Analyze Kubernetes Resources**: Analyze Kubernetes resources in a cluster
2. **Get Cluster Information**: Retrieve information about the Kubernetes cluster

## Usage

To use the MCP server, you need to:

1. Initialize the MCP server with a port, an AI provider, and a logger
2. Start the server
3. Connect to the server using an MCP client

Example:

```go
logger, _ := zap.NewProduction()

aiProvider := &ai.AIProvider{
    Name:     "openai",
    Password: os.Getenv("OPENAI_API_KEY"),
    Model:    "gpt-3.5-turbo",
}

mcpServer, err := server.NewMCPServer("8089", aiProvider, false, logger)
if err != nil {
    log.Fatalf("Failed to create MCP server: %v", err)
}

if err := mcpServer.Start(); err != nil {
    log.Fatalf("Failed to start MCP server: %v", err)
}
```

```
grpcurl -plaintext -d '{"integrations":{"prometheus":{"enabled":"true","namespace":"default","skipInstall":"false"}}}' localhost:8080 schema.v1.ServiceConfigService/AddConfig
```
## Integration

The MCP server can be integrated with other tools that support the MCP protocol, such as:

- Mission Control
- Other MCP-compatible tools

## License

This code is licensed under the Apache License 2.0.

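For anyone building additional tools on top of this server, the sketch below shows the registration flow used by `MCPServer.Start` in `mcp.go` (`NewServer` over a stdio transport, `RegisterTool`, then `Serve`). The `echo` tool and its request type are hypothetical examples, not part of this PR.

```go
package main

import (
	"context"
	"fmt"
	"log"

	mcp_golang "github.com/metoro-io/mcp-golang"
	"github.com/metoro-io/mcp-golang/transport/stdio"
)

// EchoRequest is a hypothetical tool input; mcp-golang builds the tool's
// input schema from this struct and its JSON tags.
type EchoRequest struct {
	Message string `json:"message"`
}

func main() {
	// Same construction as MCPServer in pkg/server/mcp.go: stdio transport + server.
	server := mcp_golang.NewServer(stdio.NewStdioServerTransport())

	// Handlers take a typed request pointer and return a ToolResponse,
	// mirroring handleAnalyze / handleClusterInfo / handleConfig.
	echo := func(ctx context.Context, req *EchoRequest) (*mcp_golang.ToolResponse, error) {
		return mcp_golang.NewToolResponse(mcp_golang.NewTextContent(fmt.Sprintf("echo: %s", req.Message))), nil
	}
	if err := server.RegisterTool("echo", "Echo a message back", echo); err != nil {
		log.Fatalf("failed to register echo tool: %v", err)
	}

	// Serve blocks, reading MCP requests from stdin and writing responses to stdout.
	if err := server.Serve(); err != nil {
		log.Fatalf("MCP server exited: %v", err)
	}
}
```

The real server registers its `analyze`, `cluster-info`, and `config` handlers in exactly this way.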
60
pkg/server/client_example/README.md
Normal file
@@ -0,0 +1,60 @@
# K8sGPT MCP Client Example

This directory contains an example of how to use the K8sGPT MCP client in a real-world scenario.

## Prerequisites

- Go 1.16 or later
- Access to a Kubernetes cluster
- `kubectl` configured to access your cluster

## Building the Example

To build the example, run:

```bash
go build -o mcp-client-example
```

## Running the Example

To run the example, use the following command:

```bash
./mcp-client-example --port=8089 --namespace=default
```

### Command-line Flags

- `--port`: Port of the MCP server (optional, defaults to 8089)
- `--namespace`: Kubernetes namespace to analyze (optional)
- `--backend`: AI backend to use (optional)
- `--language`: Language for the analysis (optional, defaults to english)

## Example Output

When you run the example, you should see output similar to the following:

```
Raw response: ...
Analysis Results:
...
```

The client sends a single analyze request to the MCP server, prints the results, and exits.

## Integration with Mission Control

To integrate this example with Mission Control, you need to:

1. Start the MCP client using the example
2. Configure Mission Control to connect to the MCP client
3. Use Mission Control to analyze your Kubernetes cluster

## Troubleshooting

If you encounter any issues, check the following:

1. Ensure that your Kubernetes cluster is accessible
2. Verify that your kubeconfig file is valid
3. Check that the namespace you specified exists

## License

This code is licensed under the Apache License 2.0.
114
pkg/server/client_example/main.go
Normal file
@@ -0,0 +1,114 @@
|
||||
/*
|
||||
Copyright 2024 The K8sGPT Authors.
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
// AnalyzeRequest represents the input parameters for the analyze tool
|
||||
type AnalyzeRequest struct {
|
||||
Namespace string `json:"namespace,omitempty"`
|
||||
Backend string `json:"backend,omitempty"`
|
||||
Language string `json:"language,omitempty"`
|
||||
Filters []string `json:"filters,omitempty"`
|
||||
LabelSelector string `json:"labelSelector,omitempty"`
|
||||
NoCache bool `json:"noCache,omitempty"`
|
||||
Explain bool `json:"explain,omitempty"`
|
||||
MaxConcurrency int `json:"maxConcurrency,omitempty"`
|
||||
WithDoc bool `json:"withDoc,omitempty"`
|
||||
InteractiveMode bool `json:"interactiveMode,omitempty"`
|
||||
CustomHeaders []string `json:"customHeaders,omitempty"`
|
||||
WithStats bool `json:"withStats,omitempty"`
|
||||
}
|
||||
|
||||
// AnalyzeResponse represents the output of the analyze tool
|
||||
type AnalyzeResponse struct {
|
||||
Content []struct {
|
||||
Text string `json:"text"`
|
||||
Type string `json:"type"`
|
||||
} `json:"content"`
|
||||
}
|
||||
|
||||
func main() {
|
||||
// Parse command line flags
|
||||
serverPort := flag.String("port", "8089", "Port of the MCP server")
|
||||
namespace := flag.String("namespace", "", "Kubernetes namespace to analyze")
|
||||
backend := flag.String("backend", "", "AI backend to use")
|
||||
language := flag.String("language", "english", "Language for analysis")
|
||||
flag.Parse()
|
||||
|
||||
// Create analyze request
|
||||
req := AnalyzeRequest{
|
||||
Namespace: *namespace,
|
||||
Backend: *backend,
|
||||
Language: *language,
|
||||
Explain: true,
|
||||
MaxConcurrency: 10,
|
||||
}
|
||||
|
||||
// Convert request to JSON
|
||||
reqJSON, err := json.Marshal(req)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to marshal request: %v", err)
|
||||
}
|
||||
|
||||
// Create HTTP client with timeout
|
||||
client := &http.Client{
|
||||
Timeout: 5 * time.Minute,
|
||||
}
|
||||
|
||||
// Send request to MCP server
|
||||
resp, err := client.Post(
|
||||
fmt.Sprintf("http://localhost:%s/mcp/analyze", *serverPort),
|
||||
"application/json",
|
||||
bytes.NewBuffer(reqJSON),
|
||||
)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to send request: %v", err)
|
||||
}
|
||||
defer func() {
|
||||
if err := resp.Body.Close(); err != nil {
|
||||
log.Printf("Error closing response body: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
// Read and print raw response for debugging
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to read response body: %v", err)
|
||||
}
|
||||
fmt.Printf("Raw response: %s\n", string(body))
|
||||
|
||||
// Parse response
|
||||
var analyzeResp AnalyzeResponse
|
||||
if err := json.Unmarshal(body, &analyzeResp); err != nil {
|
||||
log.Fatalf("Failed to decode response: %v", err)
|
||||
}
|
||||
|
||||
// Print results
|
||||
fmt.Println("Analysis Results:")
|
||||
if len(analyzeResp.Content) > 0 {
|
||||
fmt.Println(analyzeResp.Content[0].Text)
|
||||
} else {
|
||||
fmt.Println("No results returned")
|
||||
}
|
||||
}
|
||||
@@ -1,8 +1,9 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
schemav1 "buf.build/gen/go/k8sgpt-ai/k8sgpt/protocolbuffers/go/schema/v1"
|
||||
"context"
|
||||
|
||||
schemav1 "buf.build/gen/go/k8sgpt-ai/k8sgpt/protocolbuffers/go/schema/v1"
|
||||
"github.com/k8sgpt-ai/k8sgpt/pkg/cache"
|
||||
"github.com/k8sgpt-ai/k8sgpt/pkg/custom"
|
||||
"github.com/spf13/viper"
|
||||
@@ -20,19 +21,13 @@ const (
|
||||
notUsedInsecure = false
|
||||
)
|
||||
|
||||
func (h *Handler) AddConfig(ctx context.Context, i *schemav1.AddConfigRequest) (*schemav1.AddConfigResponse, error,
|
||||
) {
|
||||
|
||||
resp, err := h.syncIntegration(ctx, i)
|
||||
if err != nil {
|
||||
return resp, err
|
||||
}
|
||||
|
||||
// ApplyConfig applies the configuration changes from the request
|
||||
func (h *Handler) ApplyConfig(ctx context.Context, i *schemav1.AddConfigRequest) error {
|
||||
if i.CustomAnalyzers != nil {
|
||||
// We need to add the custom analyzers to the viper config and save them
|
||||
var customAnalyzers = make([]custom.CustomAnalyzer, 0)
|
||||
if err := viper.UnmarshalKey("custom_analyzers", &customAnalyzers); err != nil {
|
||||
return resp, err
|
||||
return err
|
||||
} else {
|
||||
// If there are analyzers are already in the config we will append the ones with new names
|
||||
for _, ca := range i.CustomAnalyzers {
|
||||
@@ -56,7 +51,7 @@ func (h *Handler) AddConfig(ctx context.Context, i *schemav1.AddConfigRequest) (
|
||||
// save the config
|
||||
viper.Set("custom_analyzers", customAnalyzers)
|
||||
if err := viper.WriteConfig(); err != nil {
|
||||
return resp, err
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -74,18 +69,30 @@ func (h *Handler) AddConfig(ctx context.Context, i *schemav1.AddConfigRequest) (
|
||||
case *schemav1.Cache_InterplexCache:
|
||||
remoteCache, err = cache.NewCacheProvider("interplex", notUsedBucket, notUsedRegion, i.Cache.GetInterplexCache().Endpoint, notUsedStorageAcc, notUsedContainerName, notUsedProjectId, notUsedInsecure)
|
||||
default:
|
||||
return resp, status.Error(codes.InvalidArgument, "Invalid cache configuration")
|
||||
return status.Error(codes.InvalidArgument, "Invalid cache configuration")
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return resp, err
|
||||
return err
|
||||
}
|
||||
err = cache.AddRemoteCache(remoteCache)
|
||||
if err != nil {
|
||||
return resp, err
|
||||
return err
|
||||
}
|
||||
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *Handler) AddConfig(ctx context.Context, i *schemav1.AddConfigRequest) (*schemav1.AddConfigResponse, error) {
|
||||
resp, err := h.syncIntegration(ctx, i)
|
||||
if err != nil {
|
||||
return resp, err
|
||||
}
|
||||
|
||||
if err := h.ApplyConfig(ctx, i); err != nil {
|
||||
return resp, err
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -1,18 +1,15 @@
package config

import (
    schemav1 "buf.build/gen/go/k8sgpt-ai/k8sgpt/protocolbuffers/go/schema/v1"
    "context"
    "fmt"

    schemav1 "buf.build/gen/go/k8sgpt-ai/k8sgpt/protocolbuffers/go/schema/v1"
    "github.com/k8sgpt-ai/k8sgpt/pkg/integration"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
)

//const (
//	trivyName = "trivy"
//)

// syncIntegration is aware of the following events
// A new integration added
// An integration removed from the Integration block

74
pkg/server/example/main.go
Normal file
@@ -0,0 +1,74 @@
|
||||
/*
|
||||
Copyright 2024 The K8sGPT Authors.
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"github.com/k8sgpt-ai/k8sgpt/pkg/ai"
|
||||
"github.com/k8sgpt-ai/k8sgpt/pkg/server"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// Parse command line flags
|
||||
port := flag.String("port", "8089", "Port to run the MCP server on")
|
||||
useHTTP := flag.Bool("http", false, "Enable HTTP mode for MCP server")
|
||||
flag.Parse()
|
||||
|
||||
// Initialize zap logger
|
||||
logger, err := zap.NewProduction()
|
||||
if err != nil {
|
||||
log.Fatalf("Error creating logger: %v", err)
|
||||
}
|
||||
defer func() {
|
||||
if err := logger.Sync(); err != nil {
|
||||
log.Printf("Error syncing logger: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
// Create AI provider
|
||||
aiProvider := &ai.AIProvider{
|
||||
Name: "openai",
|
||||
Password: os.Getenv("OPENAI_API_KEY"),
|
||||
Model: "gpt-3.5-turbo",
|
||||
}
|
||||
|
||||
// Create and start MCP server
|
||||
mcpServer, err := server.NewMCPServer(*port, aiProvider, *useHTTP, logger)
|
||||
if err != nil {
|
||||
log.Fatalf("Error creating MCP server: %v", err)
|
||||
}
|
||||
|
||||
// Start the server in a goroutine
|
||||
go func() {
|
||||
if err := mcpServer.Start(); err != nil {
|
||||
log.Fatalf("Error starting MCP server: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
// Handle graceful shutdown
|
||||
sigChan := make(chan os.Signal, 1)
|
||||
signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
|
||||
<-sigChan
|
||||
|
||||
// Cleanup
|
||||
if err := mcpServer.Close(); err != nil {
|
||||
log.Printf("Error closing MCP server: %v", err)
|
||||
}
|
||||
}
|
||||
416
pkg/server/mcp.go
Normal file
@@ -0,0 +1,416 @@
|
||||
/*
|
||||
Copyright 2024 The K8sGPT Authors.
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package server
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
schemav1 "buf.build/gen/go/k8sgpt-ai/k8sgpt/protocolbuffers/go/schema/v1"
|
||||
"github.com/k8sgpt-ai/k8sgpt/pkg/ai"
|
||||
"github.com/k8sgpt-ai/k8sgpt/pkg/analysis"
|
||||
"github.com/k8sgpt-ai/k8sgpt/pkg/kubernetes"
|
||||
"github.com/k8sgpt-ai/k8sgpt/pkg/server/config"
|
||||
mcp_golang "github.com/metoro-io/mcp-golang"
|
||||
"github.com/metoro-io/mcp-golang/transport/stdio"
|
||||
"github.com/spf13/viper"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// MCPServer represents an MCP server for k8sgpt
|
||||
type MCPServer struct {
|
||||
server *mcp_golang.Server
|
||||
port string
|
||||
aiProvider *ai.AIProvider
|
||||
useHTTP bool
|
||||
logger *zap.Logger
|
||||
}
|
||||
|
||||
// NewMCPServer creates a new MCP server
|
||||
func NewMCPServer(port string, aiProvider *ai.AIProvider, useHTTP bool, logger *zap.Logger) (*MCPServer, error) {
|
||||
// Create MCP server with stdio transport
|
||||
transport := stdio.NewStdioServerTransport()
|
||||
|
||||
server := mcp_golang.NewServer(transport)
|
||||
|
||||
return &MCPServer{
|
||||
server: server,
|
||||
port: port,
|
||||
aiProvider: aiProvider,
|
||||
useHTTP: useHTTP,
|
||||
logger: logger,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Start starts the MCP server
|
||||
func (s *MCPServer) Start() error {
|
||||
if s.server == nil {
|
||||
return fmt.Errorf("server not initialized")
|
||||
}
|
||||
|
||||
// Register analyze tool
|
||||
if err := s.server.RegisterTool("analyze", "Analyze Kubernetes resources", s.handleAnalyze); err != nil {
|
||||
return fmt.Errorf("failed to register analyze tool: %v", err)
|
||||
}
|
||||
|
||||
// Register cluster info tool
|
||||
if err := s.server.RegisterTool("cluster-info", "Get Kubernetes cluster information", s.handleClusterInfo); err != nil {
|
||||
return fmt.Errorf("failed to register cluster-info tool: %v", err)
|
||||
}
|
||||
|
||||
// Register config tool
|
||||
if err := s.server.RegisterTool("config", "Configure K8sGPT settings", s.handleConfig); err != nil {
|
||||
return fmt.Errorf("failed to register config tool: %v", err)
|
||||
}
|
||||
|
||||
// Register resources
|
||||
if err := s.registerResources(); err != nil {
|
||||
return fmt.Errorf("failed to register resources: %v", err)
|
||||
}
|
||||
|
||||
// Register prompts
|
||||
if err := s.registerPrompts(); err != nil {
|
||||
return fmt.Errorf("failed to register prompts: %v", err)
|
||||
}
|
||||
|
||||
if s.useHTTP {
|
||||
// Start HTTP server
|
||||
go func() {
|
||||
http.HandleFunc("/mcp/analyze", s.handleAnalyzeHTTP)
|
||||
http.HandleFunc("/mcp", s.handleSSE)
|
||||
s.logger.Info("Starting MCP server on port", zap.String("port", s.port))
|
||||
if err := http.ListenAndServe(fmt.Sprintf(":%s", s.port), nil); err != nil {
|
||||
s.logger.Error("Error starting HTTP server", zap.Error(err))
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// Start the server
|
||||
return s.server.Serve()
|
||||
}
|
||||
|
||||
// AnalyzeRequest represents the input parameters for the analyze tool
|
||||
type AnalyzeRequest struct {
|
||||
Namespace string `json:"namespace,omitempty"`
|
||||
Backend string `json:"backend,omitempty"`
|
||||
Language string `json:"language,omitempty"`
|
||||
Filters []string `json:"filters,omitempty"`
|
||||
LabelSelector string `json:"labelSelector,omitempty"`
|
||||
NoCache bool `json:"noCache,omitempty"`
|
||||
Explain bool `json:"explain,omitempty"`
|
||||
MaxConcurrency int `json:"maxConcurrency,omitempty"`
|
||||
WithDoc bool `json:"withDoc,omitempty"`
|
||||
InteractiveMode bool `json:"interactiveMode,omitempty"`
|
||||
CustomHeaders []string `json:"customHeaders,omitempty"`
|
||||
WithStats bool `json:"withStats,omitempty"`
|
||||
}
|
||||
|
||||
// AnalyzeResponse represents the output of the analyze tool
|
||||
type AnalyzeResponse struct {
|
||||
Results string `json:"results"`
|
||||
}
|
||||
|
||||
// ClusterInfoRequest represents the input parameters for the cluster-info tool
|
||||
type ClusterInfoRequest struct {
|
||||
// Empty struct as we don't need any input parameters
|
||||
}
|
||||
|
||||
// ClusterInfoResponse represents the output of the cluster-info tool
|
||||
type ClusterInfoResponse struct {
|
||||
Info string `json:"info"`
|
||||
}
|
||||
|
||||
// ConfigRequest represents the input parameters for the config tool
|
||||
type ConfigRequest struct {
|
||||
CustomAnalyzers []struct {
|
||||
Name string `json:"name"`
|
||||
Connection struct {
|
||||
Url string `json:"url"`
|
||||
Port int `json:"port"`
|
||||
} `json:"connection"`
|
||||
} `json:"customAnalyzers,omitempty"`
|
||||
Cache struct {
|
||||
Type string `json:"type"`
|
||||
// S3 specific fields
|
||||
BucketName string `json:"bucketName,omitempty"`
|
||||
Region string `json:"region,omitempty"`
|
||||
Endpoint string `json:"endpoint,omitempty"`
|
||||
Insecure bool `json:"insecure,omitempty"`
|
||||
// Azure specific fields
|
||||
StorageAccount string `json:"storageAccount,omitempty"`
|
||||
ContainerName string `json:"containerName,omitempty"`
|
||||
// GCS specific fields
|
||||
ProjectId string `json:"projectId,omitempty"`
|
||||
} `json:"cache,omitempty"`
|
||||
}
|
||||
|
||||
// ConfigResponse represents the output of the config tool
|
||||
type ConfigResponse struct {
|
||||
Status string `json:"status"`
|
||||
}
|
||||
|
||||
// handleAnalyze handles the analyze tool
|
||||
func (s *MCPServer) handleAnalyze(ctx context.Context, request *AnalyzeRequest) (*mcp_golang.ToolResponse, error) {
|
||||
// Get stored configuration
|
||||
var configAI ai.AIConfiguration
|
||||
if err := viper.UnmarshalKey("ai", &configAI); err != nil {
|
||||
return mcp_golang.NewToolResponse(mcp_golang.NewTextContent(fmt.Sprintf("Failed to load AI configuration: %v", err))), nil
|
||||
}
|
||||
// Use stored configuration if not specified in request
|
||||
if request.Backend == "" {
|
||||
if configAI.DefaultProvider != "" {
|
||||
request.Backend = configAI.DefaultProvider
|
||||
} else if len(configAI.Providers) > 0 {
|
||||
request.Backend = configAI.Providers[0].Name
|
||||
} else {
|
||||
request.Backend = "openai" // fallback default
|
||||
}
|
||||
}
|
||||
|
||||
request.Explain = true
|
||||
// Get stored filters if not specified
|
||||
if len(request.Filters) == 0 {
|
||||
request.Filters = viper.GetStringSlice("active_filters")
|
||||
}
|
||||
|
||||
// Validate MaxConcurrency to prevent excessive memory allocation
|
||||
request.MaxConcurrency = validateMaxConcurrency(request.MaxConcurrency)
|
||||
|
||||
// Create a new analysis with the request parameters
|
||||
analysis, err := analysis.NewAnalysis(
|
||||
request.Backend,
|
||||
request.Language,
|
||||
request.Filters,
|
||||
request.Namespace,
|
||||
request.LabelSelector,
|
||||
request.NoCache,
|
||||
request.Explain,
|
||||
request.MaxConcurrency,
|
||||
request.WithDoc,
|
||||
request.InteractiveMode,
|
||||
request.CustomHeaders,
|
||||
request.WithStats,
|
||||
)
|
||||
if err != nil {
|
||||
return mcp_golang.NewToolResponse(mcp_golang.NewTextContent(fmt.Sprintf("Failed to create analysis: %v", err))), nil
|
||||
}
|
||||
defer analysis.Close()
|
||||
|
||||
// Run the analysis
|
||||
analysis.RunAnalysis()
|
||||
|
||||
// Get the output
|
||||
output, err := analysis.PrintOutput("json")
|
||||
if err != nil {
|
||||
return mcp_golang.NewToolResponse(mcp_golang.NewTextContent(fmt.Sprintf("Failed to print output: %v", err))), nil
|
||||
}
|
||||
|
||||
return mcp_golang.NewToolResponse(mcp_golang.NewTextContent(string(output))), nil
|
||||
}
|
||||
|
||||
// validateMaxConcurrency validates and bounds the MaxConcurrency parameter
|
||||
func validateMaxConcurrency(maxConcurrency int) int {
|
||||
const maxAllowedConcurrency = 100
|
||||
if maxConcurrency <= 0 {
|
||||
return 10 // Default value if not set
|
||||
} else if maxConcurrency > maxAllowedConcurrency {
|
||||
return maxAllowedConcurrency // Cap at a reasonable maximum
|
||||
}
|
||||
return maxConcurrency
|
||||
}
|
||||
|
||||
// handleClusterInfo handles the cluster-info tool
|
||||
func (s *MCPServer) handleClusterInfo(ctx context.Context, request *ClusterInfoRequest) (*mcp_golang.ToolResponse, error) {
|
||||
// Create a new Kubernetes client
|
||||
client, err := kubernetes.NewClient("", "")
|
||||
if err != nil {
|
||||
return mcp_golang.NewToolResponse(mcp_golang.NewTextContent(fmt.Sprintf("failed to create Kubernetes client: %v", err))), nil
|
||||
}
|
||||
|
||||
// Get cluster info from the client
|
||||
version, err := client.Client.Discovery().ServerVersion()
|
||||
if err != nil {
|
||||
return mcp_golang.NewToolResponse(mcp_golang.NewTextContent(fmt.Sprintf("failed to get cluster version: %v", err))), nil
|
||||
}
|
||||
|
||||
info := fmt.Sprintf("Kubernetes %s", version.GitVersion)
|
||||
return mcp_golang.NewToolResponse(mcp_golang.NewTextContent(info)), nil
|
||||
}
|
||||
|
||||
// handleConfig handles the config tool
|
||||
func (s *MCPServer) handleConfig(ctx context.Context, request *ConfigRequest) (*mcp_golang.ToolResponse, error) {
|
||||
// Create a new config handler
|
||||
handler := &config.Handler{}
|
||||
|
||||
// Convert request to AddConfigRequest
|
||||
addConfigReq := &schemav1.AddConfigRequest{
|
||||
CustomAnalyzers: make([]*schemav1.CustomAnalyzer, 0),
|
||||
}
|
||||
|
||||
// Add custom analyzers if present
|
||||
if len(request.CustomAnalyzers) > 0 {
|
||||
for _, ca := range request.CustomAnalyzers {
|
||||
addConfigReq.CustomAnalyzers = append(addConfigReq.CustomAnalyzers, &schemav1.CustomAnalyzer{
|
||||
Name: ca.Name,
|
||||
Connection: &schemav1.Connection{
|
||||
Url: ca.Connection.Url,
|
||||
Port: fmt.Sprintf("%d", ca.Connection.Port),
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Add cache configuration if present
|
||||
if request.Cache.Type != "" {
|
||||
cacheConfig := &schemav1.Cache{}
|
||||
switch request.Cache.Type {
|
||||
case "s3":
|
||||
cacheConfig.CacheType = &schemav1.Cache_S3Cache{
|
||||
S3Cache: &schemav1.S3Cache{
|
||||
BucketName: request.Cache.BucketName,
|
||||
Region: request.Cache.Region,
|
||||
Endpoint: request.Cache.Endpoint,
|
||||
Insecure: request.Cache.Insecure,
|
||||
},
|
||||
}
|
||||
case "azure":
|
||||
cacheConfig.CacheType = &schemav1.Cache_AzureCache{
|
||||
AzureCache: &schemav1.AzureCache{
|
||||
StorageAccount: request.Cache.StorageAccount,
|
||||
ContainerName: request.Cache.ContainerName,
|
||||
},
|
||||
}
|
||||
case "gcs":
|
||||
cacheConfig.CacheType = &schemav1.Cache_GcsCache{
|
||||
GcsCache: &schemav1.GCSCache{
|
||||
BucketName: request.Cache.BucketName,
|
||||
Region: request.Cache.Region,
|
||||
ProjectId: request.Cache.ProjectId,
|
||||
},
|
||||
}
|
||||
}
|
||||
addConfigReq.Cache = cacheConfig
|
||||
}
|
||||
|
||||
// Apply the configuration using the shared function
|
||||
if err := handler.ApplyConfig(ctx, addConfigReq); err != nil {
|
||||
return mcp_golang.NewToolResponse(mcp_golang.NewTextContent(fmt.Sprintf("Failed to add config: %v", err))), nil
|
||||
}
|
||||
|
||||
return mcp_golang.NewToolResponse(mcp_golang.NewTextContent("Successfully added configuration")), nil
|
||||
}
|
||||
|
||||
// registerPrompts registers the prompts for the MCP server
|
||||
func (s *MCPServer) registerPrompts() error {
|
||||
// Register any prompts needed for the MCP server
|
||||
return nil
|
||||
}
|
||||
|
||||
// registerResources registers the resources for the MCP server
|
||||
func (s *MCPServer) registerResources() error {
|
||||
if err := s.server.RegisterResource("cluster-info", "Get cluster information", "Get information about the Kubernetes cluster", "text", s.getClusterInfo); err != nil {
|
||||
return fmt.Errorf("failed to register cluster-info resource: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *MCPServer) getClusterInfo(ctx context.Context) (interface{}, error) {
|
||||
// Create a new Kubernetes client
|
||||
client, err := kubernetes.NewClient("", "")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create Kubernetes client: %v", err)
|
||||
}
|
||||
|
||||
// Get cluster info from the client
|
||||
version, err := client.Client.Discovery().ServerVersion()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get cluster version: %v", err)
|
||||
}
|
||||
|
||||
return map[string]string{
|
||||
"version": version.String(),
|
||||
"platform": version.Platform,
|
||||
"gitVersion": version.GitVersion,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// handleSSE handles Server-Sent Events for MCP
|
||||
func (s *MCPServer) handleSSE(w http.ResponseWriter, r *http.Request) {
|
||||
// Set headers for SSE
|
||||
w.Header().Set("Content-Type", "text/event-stream")
|
||||
w.Header().Set("Cache-Control", "no-cache")
|
||||
w.Header().Set("Connection", "keep-alive")
|
||||
w.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
|
||||
// Create a channel to receive messages
|
||||
msgChan := make(chan string)
|
||||
defer close(msgChan)
|
||||
|
||||
// Start a goroutine to handle the stdio transport
|
||||
go func() {
|
||||
// TODO: Implement message handling between HTTP and stdio transport
|
||||
// This would require implementing a custom transport that bridges HTTP and stdio
|
||||
|
||||
}()
|
||||
|
||||
// Send messages to the client
|
||||
for msg := range msgChan {
|
||||
if _, err := fmt.Fprintf(w, "data: %s\n\n", msg); err != nil {
|
||||
s.logger.Error("Failed to write SSE message", zap.Error(err))
|
||||
return
|
||||
}
|
||||
w.(http.Flusher).Flush()
|
||||
}
|
||||
}
|
||||
|
||||
// handleAnalyzeHTTP handles HTTP requests for the analyze endpoint
|
||||
func (s *MCPServer) handleAnalyzeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Method != http.MethodPost {
|
||||
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
|
||||
// Parse the request body
|
||||
var req AnalyzeRequest
|
||||
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
|
||||
http.Error(w, fmt.Sprintf("Failed to decode request: %v", err), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
|
||||
// Validate MaxConcurrency to prevent excessive memory allocation
|
||||
req.MaxConcurrency = validateMaxConcurrency(req.MaxConcurrency)
|
||||
|
||||
// Call the analyze handler
|
||||
resp, err := s.handleAnalyze(r.Context(), &req)
|
||||
if err != nil {
|
||||
http.Error(w, fmt.Sprintf("Failed to analyze: %v", err), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
// Set response headers
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
|
||||
// Write the response
|
||||
if err := json.NewEncoder(w).Encode(resp); err != nil {
|
||||
s.logger.Error("Failed to encode response", zap.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
// Close closes the MCP server and releases resources
|
||||
func (s *MCPServer) Close() error {
|
||||
return nil
|
||||
}
|
||||