mirror of https://github.com/k8sgpt-ai/k8sgpt.git synced 2025-05-10 17:16:06 +00:00

Compare commits


20 Commits

Author SHA1 Message Date
renovate[bot]
d0f03641ae
fix(deps): update module gopkg.in/yaml.v2 to v3 ()
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-05-09 15:30:11 +01:00
renovate[bot]
e76bdb0c23
chore(deps): update actions/setup-go digest to d35c59a ()
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-05-09 13:17:19 +01:00
typeid
cae94e7b6d
fix: panic in k8sgpt auth update ()
Signed-off-by: Claudio Busse <cbusse@redhat.com>
Co-authored-by: Alex Jones <alexsimonjones@gmail.com>
2025-05-09 13:02:37 +01:00
typeid
7e375a30be
fix: align documentation to reflect default analyzers properly ()
Signed-off-by: Claudio Busse <cbusse@redhat.com>
2025-05-09 12:58:29 +01:00
github-actions[bot]
34ff645fa0
chore(main): release 0.4.16 ()
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-05-06 19:24:25 +01:00
Naveen Thangaraj
61b60d5768
feat: enhancement of deployment analyzer ()
* Updated the deployment analyzer

Signed-off-by: naveenthangaraj03 <tnaveen3402@gmail.com>

* Enhanced the deployment analyzer

Signed-off-by: naveenthangaraj03 <tnaveen3402@gmail.com>

---------

Signed-off-by: naveenthangaraj03 <tnaveen3402@gmail.com>
Co-authored-by: Alex Jones <alexsimonjones@gmail.com>
2025-05-06 16:30:16 +01:00
renovate[bot]
6a81d2c140
fix(deps): update k8s.io/utils digest to 0f33e8f ()
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-05-06 11:30:48 +01:00
rkarthikr
21bc76e5b7
feat: add support for Amazon Bedrock Inference Profiles ()
Signed-off-by: rkarthikr <38294804+rkarthikr@users.noreply.github.com>
Co-authored-by: Alex Jones <alexsimonjones@gmail.com>
2025-05-06 11:18:40 +01:00
renovate[bot]
d5341f3c00
chore(deps): update golangci/golangci-lint-action digest to 9fae48a ()
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-05-06 09:23:12 +01:00
Alex Jones
752a16c407
feat: supported regions govcloud ()
* feat: added token for goreleaser

Signed-off-by: Alex Jones <alexsimonjones@gmail.com>

* feat: updated the bedrock supported regions

Signed-off-by: Alex Jones <alexsimonjones@gmail.com>

---------

Signed-off-by: Alex Jones <alexsimonjones@gmail.com>
2025-05-01 09:01:25 +01:00
renovate[bot]
81da402d46
chore(deps): update docker/build-push-action digest to 14487ce ()
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-04-29 13:58:23 +01:00
github-actions[bot]
f2f25edef7
chore(main): release 0.4.15 ()
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-04-29 12:57:30 +01:00
Alex Jones
85935a46d8
feat: added token for goreleaser ()
Signed-off-by: Alex Jones <alexsimonjones@gmail.com>
2025-04-29 12:49:44 +01:00
github-actions[bot]
a56e663169
chore(main): release 0.4.14 ()
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-04-29 09:26:48 +01:00
Alex Jones
e41ffd80d0
feat: add MCP support ()
* feat: first mcp impl

Signed-off-by: Alex Jones <alexsimonjones@gmail.com>

* chore: update

Signed-off-by: Alex Jones <alexsimonjones@gmail.com>

* chore: wip

Signed-off-by: Alex Jones <alexsimonjones@gmail.com>

* chore: switched to stdio transport

Signed-off-by: Alex Jones <alexsimonjones@gmail.com>

* chore: readme

Signed-off-by: Alex Jones <alexsimonjones@gmail.com>

* feat: fix the linter 🤖

Signed-off-by: Alex Jones <alexsimonjones@gmail.com>

* feat: fix the linter 🤖

Signed-off-by: Alex Jones <alexsimonjones@gmail.com>

* feat(mcp): implement MCP server and handler

- Implement MCP server and handler
- Add MCP server to serve
- Add MCP handler to handle MCP requests
- Add MCP server to serve
- Add MCP handler to handle MCP requests

Signed-off-by: Alex Jones <alexsimonjones@gmail.com>

* feat: consolidating code duplication

Signed-off-by: Alex Jones <alexsimonjones@gmail.com>

* feat: added http sse support

Signed-off-by: Alex Jones <alexsimonjones@gmail.com>

* chore: fixed broken tests

Signed-off-by: Alex Jones <alexsimonjones@gmail.com>

* chore: updated and fixed linter

Signed-off-by: Alex Jones <alexsimonjones@gmail.com>

* chore: updated and fixed linter

Signed-off-by: Alex Jones <alexsimonjones@gmail.com>

* chore: updated the linter issues

Signed-off-by: Alex Jones <alexsimonjones@gmail.com>

---------

Signed-off-by: Alex Jones <alexsimonjones@gmail.com>
2025-04-29 09:22:44 +01:00
ju187
f603948935
feat: using modelName when calling completion ()
* using modelName when calling completion

Signed-off-by: Tony Chen <tony_chen@discovery.com>

* sign

Signed-off-by: Tony Chen <tony_chen@discovery.com>

---------

Signed-off-by: Tony Chen <tony_chen@discovery.com>
2025-04-24 09:15:17 +01:00
github-actions[bot]
67f5855695
chore(main): release 0.4.13 ()
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2025-04-22 11:28:20 +01:00
Antoine Deschênes
ebb0373f69
fix: reverse hpa ScalingLimited error condition ()
* fix: reverse hpa ScalingLimited error condition

Signed-off-by: Antoine Deschênes <antoine.deschenes@linux.com>

* chore: removed break

Signed-off-by: Alex Jones <alexsimonjones@gmail.com>

---------

Signed-off-by: Antoine Deschênes <antoine.deschenes@linux.com>
Signed-off-by: Alex Jones <alexsimonjones@gmail.com>
Co-authored-by: Alex Jones <alexsimonjones@gmail.com>
2025-04-22 11:27:02 +01:00
Alex Jones
3b6ad06de1
feat: slack announce ()
* chore: added slack integration on release

Signed-off-by: Alex Jones <alexsimonjones@gmail.com>

* chore: patched two go dep security warnings

Signed-off-by: Alex Jones <alexsimonjones@gmail.com>

---------

Signed-off-by: Alex Jones <alexsimonjones@gmail.com>
2025-04-22 10:49:52 +01:00
renovate[bot]
443469960a
chore(deps): update softprops/action-gh-release digest to da05d55 ()
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
2025-04-19 20:27:30 +01:00
32 changed files with 1772 additions and 393 deletions

View File

@ -96,7 +96,7 @@ jobs:
uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3
- name: Build and push multi-arch image
uses: docker/build-push-action@471d1dc4e07e5cdedd4c2171150001c434f0b7a4 # v6
uses: docker/build-push-action@14487ce63c7a62a4a324b0bfb37086795e31c6c1 # v6
with:
context: .
file: ./container/Dockerfile

View File

@ -12,7 +12,7 @@ jobs:
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: golangci-lint
uses: golangci/golangci-lint-action@1481404843c368bc19ca9406f87d6e0fc97bdcfd # v7
uses: golangci/golangci-lint-action@9fae48acfc02a90574d7c304a1758ef9895495fa # v7
with:
version: v2.0
only-new-issues: true

View File

@ -59,7 +59,7 @@ jobs:
with:
fetch-depth: 0
- name: Set up Go
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5
uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5
with:
go-version: '1.22'
- name: Download Syft
@ -73,6 +73,7 @@ jobs:
args: release --clean
env:
GITHUB_TOKEN: ${{ secrets.K8SGPT_BOT_SECRET }}
SLACK_TOKEN: ${{ secrets.SLACK_TOKEN }}
# - name: Update new version in krew-index
# uses: rajatjindal/krew-release-bot@3d9faef30a82761d610544f62afddca00993eef9 # v0.0.47
@ -106,7 +107,7 @@ jobs:
password: ${{ secrets.K8SGPT_BOT_SECRET }}
- name: Build Docker Image
uses: docker/build-push-action@471d1dc4e07e5cdedd4c2171150001c434f0b7a4 # v6
uses: docker/build-push-action@14487ce63c7a62a4a324b0bfb37086795e31c6c1 # v6
with:
context: .
file: ./container/Dockerfile
@ -127,7 +128,7 @@ jobs:
output-file: ./sbom-${{ env.IMAGE_NAME }}.spdx.json
- name: Attach SBOM to release
uses: softprops/action-gh-release@c95fe1489396fe8a9eb87c0abf8aa5b2ef267fda # v2
uses: softprops/action-gh-release@da05d552573ad5aba039eaac05058a918a7bf631 # v2
with:
tag_name: ${{ needs.release-please.outputs.tag_name }}
files: ./sbom-${{ env.IMAGE_NAME }}.spdx.json

View File

@ -18,7 +18,7 @@ jobs:
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
- name: Set up Go
uses: actions/setup-go@0aaccfd150d50ccaeb58ebd88d36e91967a5f35b # v5
uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5
with:
go-version: ${{ env.GO_VERSION }}

.gitignore vendored
View File

@ -7,3 +7,4 @@ k8sgpt*
dist/
bin/
pkg/server/example/example

View File

@ -70,8 +70,28 @@ checksum:
snapshot:
name_template: "{{ incpatch .Version }}-next"
# skip: true
# The lines beneath this are called `modelines`. See `:help modeline`
# Feel free to remove those if you don't want/use them.
# yaml-language-server: $schema=https://goreleaser.com/static/schema.json
# vim: set ts=2 sw=2 tw=0 fo=cnqoj
announce:
slack:
# Whether it's enabled or not.
#
# Templates: allowed (since v2.6).
enabled: true
# Message template to use while publishing.
#
# Default: '{{ .ProjectName }} {{ .Tag }} is out! Check it out at {{ .ReleaseURL }}'.
# Templates: allowed.
message_template: "{{ .ProjectName }} release {{.Tag}} is out!"
# The name of the channel that the user selected as a destination for webhook messages.
channel: "#general"
# Set your Webhook's user name.
username: "K8sGPT"
# Emoji to use as the icon for this message. Overrides icon_url.
icon_emoji: ""
# URL to an image to use as the icon for this message.
icon_url: ""

View File

@ -1 +1 @@
{".":"0.4.12"}
{".":"0.4.16"}

View File

@ -1,5 +1,57 @@
# Changelog
## [0.4.16](https://github.com/k8sgpt-ai/k8sgpt/compare/v0.4.15...v0.4.16) (2025-05-06)
### Features
* add support for Amazon Bedrock Inference Profiles ([#1492](https://github.com/k8sgpt-ai/k8sgpt/issues/1492)) ([21bc76e](https://github.com/k8sgpt-ai/k8sgpt/commit/21bc76e5b77524b48f09ef6707204742dcd879a7))
* enhancement of deployment analyzer ([#1406](https://github.com/k8sgpt-ai/k8sgpt/issues/1406)) ([61b60d5](https://github.com/k8sgpt-ai/k8sgpt/commit/61b60d5768b54f98232dcc415e89aa38987dc6e3))
* supported regions govcloud ([#1483](https://github.com/k8sgpt-ai/k8sgpt/issues/1483)) ([752a16c](https://github.com/k8sgpt-ai/k8sgpt/commit/752a16c40728f42f10ab6c3177cb7e24f44db339))
### Bug Fixes
* **deps:** update k8s.io/utils digest to 0f33e8f ([#1484](https://github.com/k8sgpt-ai/k8sgpt/issues/1484)) ([6a81d2c](https://github.com/k8sgpt-ai/k8sgpt/commit/6a81d2c140f00a405b651d6c6dae5e343ffddb4f))
### Other
* **deps:** update docker/build-push-action digest to 14487ce ([#1472](https://github.com/k8sgpt-ai/k8sgpt/issues/1472)) ([81da402](https://github.com/k8sgpt-ai/k8sgpt/commit/81da402d46e1a1db83a41b717dfb23eb07d2e919))
* **deps:** update golangci/golangci-lint-action digest to 9fae48a ([#1489](https://github.com/k8sgpt-ai/k8sgpt/issues/1489)) ([d5341f3](https://github.com/k8sgpt-ai/k8sgpt/commit/d5341f3c0019c1114254ac05f00c743a0354ec0b))
## [0.4.15](https://github.com/k8sgpt-ai/k8sgpt/compare/v0.4.14...v0.4.15) (2025-04-29)
### Features
* added token for goreleaser ([#1476](https://github.com/k8sgpt-ai/k8sgpt/issues/1476)) ([85935a4](https://github.com/k8sgpt-ai/k8sgpt/commit/85935a46d8f137b0339435cf19ce7f83ead97f8c))
## [0.4.14](https://github.com/k8sgpt-ai/k8sgpt/compare/v0.4.13...v0.4.14) (2025-04-29)
### Features
* add MCP support ([#1471](https://github.com/k8sgpt-ai/k8sgpt/issues/1471)) ([e41ffd8](https://github.com/k8sgpt-ai/k8sgpt/commit/e41ffd80d01ce7ae1fac9ce7e07344020d8bf914))
* using modelName when calling completion ([#1469](https://github.com/k8sgpt-ai/k8sgpt/issues/1469)) ([f603948](https://github.com/k8sgpt-ai/k8sgpt/commit/f603948935f1c4cb171378634714577205de7b08))
## [0.4.13](https://github.com/k8sgpt-ai/k8sgpt/compare/v0.4.12...v0.4.13) (2025-04-22)
### Features
* slack announce ([#1466](https://github.com/k8sgpt-ai/k8sgpt/issues/1466)) ([3b6ad06](https://github.com/k8sgpt-ai/k8sgpt/commit/3b6ad06de1121c870fb486e0fe2bd1f87be16627))
### Bug Fixes
* reverse hpa ScalingLimited error condition ([#1366](https://github.com/k8sgpt-ai/k8sgpt/issues/1366)) ([ebb0373](https://github.com/k8sgpt-ai/k8sgpt/commit/ebb0373f69ad64a6cc43d0695d07e1d076c6366e))
### Other
* **deps:** update softprops/action-gh-release digest to da05d55 ([#1464](https://github.com/k8sgpt-ai/k8sgpt/issues/1464)) ([4434699](https://github.com/k8sgpt-ai/k8sgpt/commit/443469960a6b6791e358ee0a97e4c1dc5c3018e6))
## [0.4.12](https://github.com/k8sgpt-ai/k8sgpt/compare/v0.4.11...v0.4.12) (2025-04-17)

README.md
View File

@ -62,7 +62,7 @@ brew install k8sgpt
<!---x-release-please-start-version-->
```
sudo rpm -ivh https://github.com/k8sgpt-ai/k8sgpt/releases/download/v0.4.12/k8sgpt_386.rpm
sudo rpm -ivh https://github.com/k8sgpt-ai/k8sgpt/releases/download/v0.4.16/k8sgpt_386.rpm
```
<!---x-release-please-end-->
@ -70,7 +70,7 @@ brew install k8sgpt
<!---x-release-please-start-version-->
```
sudo rpm -ivh https://github.com/k8sgpt-ai/k8sgpt/releases/download/v0.4.12/k8sgpt_amd64.rpm
sudo rpm -ivh https://github.com/k8sgpt-ai/k8sgpt/releases/download/v0.4.16/k8sgpt_amd64.rpm
```
<!---x-release-please-end-->
</details>
@ -83,7 +83,7 @@ brew install k8sgpt
<!---x-release-please-start-version-->
```
curl -LO https://github.com/k8sgpt-ai/k8sgpt/releases/download/v0.4.12/k8sgpt_386.deb
curl -LO https://github.com/k8sgpt-ai/k8sgpt/releases/download/v0.4.16/k8sgpt_386.deb
sudo dpkg -i k8sgpt_386.deb
```
@ -94,7 +94,7 @@ sudo dpkg -i k8sgpt_386.deb
<!---x-release-please-start-version-->
```
curl -LO https://github.com/k8sgpt-ai/k8sgpt/releases/download/v0.4.12/k8sgpt_amd64.deb
curl -LO https://github.com/k8sgpt-ai/k8sgpt/releases/download/v0.4.16/k8sgpt_amd64.deb
sudo dpkg -i k8sgpt_amd64.deb
```
@ -109,7 +109,7 @@ sudo dpkg -i k8sgpt_amd64.deb
<!---x-release-please-start-version-->
```
wget https://github.com/k8sgpt-ai/k8sgpt/releases/download/v0.4.12/k8sgpt_386.apk
wget https://github.com/k8sgpt-ai/k8sgpt/releases/download/v0.4.16/k8sgpt_386.apk
apk add --allow-untrusted k8sgpt_386.apk
```
<!---x-release-please-end-->
@ -118,7 +118,7 @@ sudo dpkg -i k8sgpt_amd64.deb
<!---x-release-please-start-version-->
```
wget https://github.com/k8sgpt-ai/k8sgpt/releases/download/v0.4.12/k8sgpt_amd64.apk
wget https://github.com/k8sgpt-ai/k8sgpt/releases/download/v0.4.16/k8sgpt_amd64.apk
apk add --allow-untrusted k8sgpt_amd64.apk
```
<!---x-release-please-end-->
@ -165,6 +165,76 @@ _This mode of operation is ideal for continuous monitoring of your cluster and c
- And use `k8sgpt analyze --explain` to get a more detailed explanation of the issues.
- You can also run `k8sgpt analyze --with-doc` (with or without the explain flag) to get the official documentation from Kubernetes, as shown below.
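For instance, both flags can be combined in a single invocation (a minimal illustration using only the flags documented above):
```sh
k8sgpt analyze --explain --with-doc
```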
# Using with Claude Desktop
K8sGPT can be integrated with Claude Desktop to provide AI-powered Kubernetes cluster analysis. This integration requires K8sGPT v0.4.14 or later.
## Prerequisites
1. Install K8sGPT v0.4.14 or later:
```sh
brew install k8sgpt
```
2. Install Claude Desktop from the official website
3. Configure K8sGPT with your preferred AI backend:
```sh
k8sgpt auth
```
## Setup
1. Start the K8sGPT MCP server:
```sh
k8sgpt serve --mcp
```
2. In Claude Desktop:
- Open Settings
- Navigate to the Integrations section
- Add K8sGPT as a new integration
- The MCP server will be automatically detected
3. Configure Claude Desktop with the following JSON:
```json
{
"mcpServers": {
"k8sgpt": {
"command": "k8sgpt",
"args": [
"serve",
"--mcp"
]
}
}
}
```
## Usage
Once connected, you can use Claude Desktop to:
- Analyze your Kubernetes cluster
- Get detailed insights about cluster health
- Receive recommendations for fixing issues
- Query cluster information
Example commands in Claude Desktop:
- "Analyze my Kubernetes cluster"
- "What's the health status of my cluster?"
- "Show me any issues in the default namespace"
## Troubleshooting
If you encounter connection issues:
1. Ensure K8sGPT is running with the MCP server enabled
2. Verify your Kubernetes cluster is accessible
3. Check that your AI backend is properly configured
4. Restart both K8sGPT and Claude Desktop
For more information, visit our [documentation](https://docs.k8sgpt.ai).
## Analyzers
K8sGPT uses analyzers to triage and diagnose issues in your cluster. It has a set of analyzers that are built in, but
@ -186,6 +256,7 @@ you will be able to write your own analyzers.
- [x] nodeAnalyzer
- [x] mutatingWebhookAnalyzer
- [x] validatingWebhookAnalyzer
- [x] configMapAnalyzer
#### Optional
@ -198,7 +269,6 @@ you will be able to write your own analyzers.
- [x] logAnalyzer
- [x] storageAnalyzer
- [x] securityAnalyzer
- [x] configMapAnalyzer
## Examples
@ -396,6 +466,22 @@ k8sgpt auth default -p azureopenai
Default provider set to azureopenai
```
_Using Amazon Bedrock with inference profiles_
_System Inference Profile_
```
k8sgpt auth add --backend amazonbedrock --providerRegion us-east-1 --model arn:aws:bedrock:us-east-1:123456789012:inference-profile/my-inference-profile
```
_Application Inference Profile_
```
k8sgpt auth add --backend amazonbedrock --providerRegion us-east-1 --model arn:aws:bedrock:us-east-1:123456789012:application-inference-profile/2uzp4s0w39t6
```
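Once the backend is registered, an analysis can be run against it as with any other provider (a minimal sketch; the `-b` flag selects the backend added above):
```
k8sgpt analyze --explain -b amazonbedrock
```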
## Key Features
<details>

View File

@ -90,7 +90,7 @@ var updateCmd = &cobra.Command{
}
}
if !foundBackend {
color.Red("Error: %s does not exist in configuration file. Please use k8sgpt auth new.", args[0])
color.Red("Error: %s does not exist in configuration file. Please use k8sgpt auth new.", backend)
os.Exit(1)
}

View File

@ -38,6 +38,9 @@ var (
metricsPort string
backend string
enableHttp bool
enableMCP bool
mcpPort string
mcpHTTP bool
)
var ServeCmd = &cobra.Command{
@ -183,6 +186,21 @@ var ServeCmd = &cobra.Command{
}
}()
if enableMCP {
// Create and start MCP server
mcpServer, err := k8sgptserver.NewMCPServer(mcpPort, aiProvider, mcpHTTP, logger)
if err != nil {
color.Red("Error creating MCP server: %v", err)
os.Exit(1)
}
go func() {
if err := mcpServer.Start(); err != nil {
color.Red("Error starting MCP server: %v", err)
os.Exit(1)
}
}()
}
server := k8sgptserver.Config{
Backend: aiProvider.Name,
Port: port,
@ -216,4 +234,7 @@ func init() {
ServeCmd.Flags().StringVarP(&metricsPort, "metrics-port", "", "8081", "Port to run the metrics-server on")
ServeCmd.Flags().StringVarP(&backend, "backend", "b", "openai", "Backend AI provider")
ServeCmd.Flags().BoolVarP(&enableHttp, "http", "", false, "Enable REST/http using grpc-gateway")
ServeCmd.Flags().BoolVarP(&enableMCP, "mcp", "", false, "Enable Mission Control Protocol server")
ServeCmd.Flags().StringVarP(&mcpPort, "mcp-port", "", "8089", "Port to run the MCP server on")
ServeCmd.Flags().BoolVarP(&mcpHTTP, "mcp-http", "", false, "Enable HTTP mode for MCP server")
}
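Taken together, these flags let the MCP server run alongside the existing gRPC server. A typical invocation might look like the following (a sketch based only on the flags registered above):
```sh
k8sgpt serve --mcp --mcp-port 8089 --mcp-http
```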

go.mod
View File

@ -36,19 +36,25 @@ require (
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.5.0
github.com/IBM/watsonx-go v1.0.1
github.com/agiledragon/gomonkey/v2 v2.13.0
github.com/aws/aws-sdk-go v1.55.6
github.com/aws/aws-sdk-go v1.55.7
github.com/aws/aws-sdk-go-v2 v1.36.3
github.com/aws/aws-sdk-go-v2/config v1.29.14
github.com/aws/aws-sdk-go-v2/service/bedrock v1.33.0
github.com/aws/aws-sdk-go-v2/service/bedrockruntime v1.30.0
github.com/cohere-ai/cohere-go/v2 v2.12.2
github.com/go-logr/zapr v1.3.0
github.com/google/generative-ai-go v0.19.0
github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1
github.com/hupe1980/go-huggingface v0.0.15
github.com/kyverno/policy-reporter-kyverno-plugin v1.6.4
github.com/metoro-io/mcp-golang v0.11.0
github.com/olekukonko/tablewriter v0.0.5
github.com/oracle/oci-go-sdk/v65 v65.79.0
github.com/prometheus/prometheus v0.302.1
github.com/pterm/pterm v0.12.80
google.golang.org/api v0.218.0
gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.1
sigs.k8s.io/controller-runtime v0.19.3
sigs.k8s.io/gateway-api v1.2.1
)
@ -76,9 +82,21 @@ require (
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1 // indirect
github.com/Microsoft/hcsshim v0.12.4 // indirect
github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b // indirect
github.com/aws/aws-sdk-go-v2 v1.32.3 // indirect
github.com/aws/smithy-go v1.22.0 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.17.67 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.33.19 // indirect
github.com/aws/smithy-go v1.22.2 // indirect
github.com/bahlo/generic-list-go v0.2.0 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/buger/jsonparser v1.1.1 // indirect
github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect
github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 // indirect
github.com/containerd/console v1.0.4 // indirect
@ -92,11 +110,11 @@ require (
github.com/envoyproxy/go-control-plane v0.13.1 // indirect
github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect
github.com/evanphx/json-patch/v5 v5.9.0 // indirect
github.com/expr-lang/expr v1.16.9 // indirect
github.com/expr-lang/expr v1.17.2 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/gofrs/flock v0.12.1 // indirect
github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
github.com/golang-jwt/jwt/v5 v5.2.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect
github.com/google/s2a-go v0.1.9 // indirect
@ -105,6 +123,7 @@ require (
github.com/gookit/color v1.5.4 // indirect
github.com/gorilla/websocket v1.5.1 // indirect
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect
github.com/invopop/jsonschema v0.12.0 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/jpillora/backoff v1.0.0 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
@ -122,6 +141,11 @@ require (
github.com/sony/gobreaker v0.5.0 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/stretchr/objx v0.5.2 // indirect
github.com/tidwall/gjson v1.18.0 // indirect
github.com/tidwall/match v1.1.1 // indirect
github.com/tidwall/pretty v1.2.1 // indirect
github.com/tidwall/sjson v1.2.5 // indirect
github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect
github.com/x448/float16 v0.8.4 // indirect
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
go.opencensus.io v0.24.0 // indirect
@ -137,7 +161,6 @@ require (
google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
knative.dev/pkg v0.0.0-20241026180704-25f6002b00f3 // indirect
)
@ -261,7 +284,7 @@ require (
k8s.io/component-base v0.32.2 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect
k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e
k8s.io/utils v0.0.0-20250502105355-0f33e8f1c979
oras.land/oras-go v1.2.5 // indirect
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
sigs.k8s.io/kustomize/api v0.18.0 // indirect

go.sum
View File

@ -735,12 +735,42 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkY
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/atomicgo/cursor v0.0.1/go.mod h1:cBON2QmmrysudxNBFthvMtN32r3jxVRIvzkUiF/RuIk=
github.com/aws/aws-sdk-go v1.55.6 h1:cSg4pvZ3m8dgYcgqB97MrcdjUmZ1BeMYKUxMMB89IPk=
github.com/aws/aws-sdk-go v1.55.6/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
github.com/aws/aws-sdk-go-v2 v1.32.3 h1:T0dRlFBKcdaUPGNtkBSwHZxrtis8CQU17UpNBZYd0wk=
github.com/aws/aws-sdk-go-v2 v1.32.3/go.mod h1:2SK5n0a2karNTv5tbP1SjsX0uhttou00v/HpXKM1ZUo=
github.com/aws/smithy-go v1.22.0 h1:uunKnWlcoL3zO7q+gG2Pk53joueEOsnNB28QdMsmiMM=
github.com/aws/smithy-go v1.22.0/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
github.com/aws/aws-sdk-go v1.55.7 h1:UJrkFq7es5CShfBwlWAC8DA077vp8PyVbQd3lqLiztE=
github.com/aws/aws-sdk-go v1.55.7/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
github.com/aws/aws-sdk-go-v2 v1.36.3 h1:mJoei2CxPutQVxaATCzDUjcZEjVRdpsiiXi2o38yqWM=
github.com/aws/aws-sdk-go-v2 v1.36.3/go.mod h1:LLXuLpgzEbD766Z5ECcRmi8AzSwfZItDtmABVkRLGzg=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 h1:zAybnyUQXIZ5mok5Jqwlf58/TFE7uvd3IAsa1aF9cXs=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10/go.mod h1:qqvMj6gHLR/EXWZw4ZbqlPbQUyenf4h82UQUlKc+l14=
github.com/aws/aws-sdk-go-v2/config v1.29.14 h1:f+eEi/2cKCg9pqKBoAIwRGzVb70MRKqWX4dg1BDcSJM=
github.com/aws/aws-sdk-go-v2/config v1.29.14/go.mod h1:wVPHWcIFv3WO89w0rE10gzf17ZYy+UVS1Geq8Iei34g=
github.com/aws/aws-sdk-go-v2/credentials v1.17.67 h1:9KxtdcIA/5xPNQyZRgUSpYOE6j9Bc4+D7nZua0KGYOM=
github.com/aws/aws-sdk-go-v2/credentials v1.17.67/go.mod h1:p3C44m+cfnbv763s52gCqrjaqyPikj9Sg47kUVaNZQQ=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 h1:x793wxmUWVDhshP8WW2mlnXuFrO4cOd3HLBroh1paFw=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30/go.mod h1:Jpne2tDnYiFascUEs2AWHJL9Yp7A5ZVy3TNyxaAjD6M=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 h1:ZK5jHhnrioRkUNOc+hOgQKlUL5JeC3S6JgLxtQ+Rm0Q=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34/go.mod h1:p4VfIceZokChbA9FzMbRGz5OV+lekcVtHlPKEO0gSZY=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 h1:SZwFm17ZUNNg5Np0ioo/gq8Mn6u9w19Mri8DnJ15Jf0=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34/go.mod h1:dFZsC0BLo346mvKQLWmoJxT+Sjp+qcVR1tRVHQGOH9Q=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo=
github.com/aws/aws-sdk-go-v2/service/bedrock v1.33.0 h1:2P70khV5KDzoRs8UuplU3rAzzyLaj5kzND33Jutwpbg=
github.com/aws/aws-sdk-go-v2/service/bedrock v1.33.0/go.mod h1:rZOgAxQVRg9v5ZEQHrrKw0Gkb9DBAASeeRiwUmmXcG0=
github.com/aws/aws-sdk-go-v2/service/bedrockruntime v1.30.0 h1:eMOwQ8ZZK+76+08RfxeaGUtRFN6wxmD1rvqovc2kq2w=
github.com/aws/aws-sdk-go-v2/service/bedrockruntime v1.30.0/go.mod h1:0b5Rq7rUvSQFYHI1UO0zFTV/S6j6DUyuykXA80C+YOI=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 h1:eAh2A4b5IzM/lum78bZ590jy36+d/aFLgKF/4Vd1xPE=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3/go.mod h1:0yKJC/kb8sAnmlYa6Zs3QVYqaC8ug2AbnNChv5Ox3uA=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 h1:dM9/92u2F1JbDaGooxTq18wmmFzbJRfXfVfy96/1CXM=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15/go.mod h1:SwFBy2vjtA0vZbjjaFtfN045boopadnoVPhu4Fv66vY=
github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 h1:1Gw+9ajCV1jogloEv1RRnvfRFia2cL6c9cuKV2Ps+G8=
github.com/aws/aws-sdk-go-v2/service/sso v1.25.3/go.mod h1:qs4a9T5EMLl/Cajiw2TcbNt2UNo/Hqlyp+GiuG4CFDI=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 h1:hXmVKytPfTy5axZ+fYbR5d0cFmC3JvwLm5kM83luako=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1/go.mod h1:MlYRNmYu/fGPoxBQVvBYr9nyr948aY/WLUvwBMBJubs=
github.com/aws/aws-sdk-go-v2/service/sts v1.33.19 h1:1XuUZ8mYJw9B6lzAkXhqHlJd/XvaX32evhproijJEZY=
github.com/aws/aws-sdk-go-v2/service/sts v1.33.19/go.mod h1:cQnB8CUnxbMU82JvlqjKR2HBOm3fe9pWorWBza6MBJ4=
github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ=
github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk=
github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
@ -751,6 +781,7 @@ github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl
github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70=
github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs=
github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd h1:rFt+Y/IK1aEZkEHchZRSq9OQbsSzIT/OrI8YFFmRIng=
github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
@ -870,8 +901,8 @@ github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0
github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ=
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4=
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSYXu++VVOHnXeitef/D8n/6y4QV8uLHSFXX4NeXMGc=
github.com/expr-lang/expr v1.16.9 h1:WUAzmR0JNI9JCiF0/ewwHB1gmcGw5wW7nWt8gc6PpCI=
github.com/expr-lang/expr v1.16.9/go.mod h1:8/vRC7+7HBzESEqt5kKpYXxrxkr31SaO8r40VO/1IT4=
github.com/expr-lang/expr v1.17.2 h1:o0A99O/Px+/DTjEnQiodAgOIK9PPxL8DtXhBRKC+Iso=
github.com/expr-lang/expr v1.17.2/go.mod h1:8/vRC7+7HBzESEqt5kKpYXxrxkr31SaO8r40VO/1IT4=
github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
@ -938,8 +969,8 @@ github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeH
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
@ -1137,6 +1168,8 @@ github.com/imdario/mergo v1.0.1 h1:lFIgOs30GMaV/2+qQ+eEBLbUL6h1YosdohE3ODy4hTs=
github.com/imdario/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/invopop/jsonschema v0.12.0 h1:6ovsNSuvn9wEQVOyc72aycBMVQFKz7cPdMJn10CvzRI=
github.com/invopop/jsonschema v0.12.0/go.mod h1:ffZ5Km5SWWRAIN6wbDXItl95euhFz2uON45H2qjYt+0=
github.com/ionos-cloud/sdk-go/v6 v6.3.2 h1:2mUmrZZz6cPyT9IRX0T8fBLc/7XU/eTxP2Y5tS7/09k=
github.com/ionos-cloud/sdk-go/v6 v6.3.2/go.mod h1:SXrO9OGyWjd2rZhAhEpdYN6VUAODzzqRdqA9BCviQtI=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
@ -1225,6 +1258,8 @@ github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4
github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU=
github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/metoro-io/mcp-golang v0.11.0 h1:1k+VSE9QaeMTLn0gJ3FgE/DcjsCBsLFnz5eSFbgXUiI=
github.com/metoro-io/mcp-golang v0.11.0/go.mod h1:ifLP9ZzKpN1UqFWNTpAHOqSvNkMK6b7d1FSZ5Lu0lN0=
github.com/miekg/dns v1.1.63 h1:8M5aAw6OMZfFXTT7K5V0Eu5YiiL8l7nUAkyN6C9YwaY=
github.com/miekg/dns v1.1.63/go.mod h1:6NGHfjhpmr5lt3XPLuyfDJi5AXbNIPM9PY6H6sF1Nfs=
github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY=
@ -1428,8 +1463,20 @@ github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOf
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY=
github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4=
github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY=
github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28=
github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs=
github.com/vultr/govultr/v2 v2.17.2/go.mod h1:ZFOKGWmgjytfyjeyAdhQlSWwTjh2ig+X49cAp50dzXI=
github.com/wk8/go-ordered-map/v2 v2.1.8 h1:5h/BUHu93oj4gIdvHHHGsScSTMijfx5PeYkE/fJgbpc=
github.com/wk8/go-ordered-map/v2 v2.1.8/go.mod h1:5nJHM5DyteebpVlHnWMV0rPz6Zp7+xBAnxjb1X5vnTw=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
@ -2209,8 +2256,8 @@ k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJ
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4=
k8s.io/kubectl v0.32.2 h1:TAkag6+XfSBgkqK9I7ZvwtF0WVtUAvK8ZqTt+5zi1Us=
k8s.io/kubectl v0.32.2/go.mod h1:+h/NQFSPxiDZYX/WZaWw9fwYezGLISP0ud8nQKg+3g8=
k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e h1:KqK5c/ghOm8xkHYhlodbp6i6+r+ChV2vuAuVRdFbLro=
k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
k8s.io/utils v0.0.0-20250502105355-0f33e8f1c979 h1:jgJW5IePPXLGB8e/1wvd0Ich9QE97RvvF3a8J3fP/Lg=
k8s.io/utils v0.0.0-20250502105355-0f33e8f1c979/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
knative.dev/pkg v0.0.0-20241026180704-25f6002b00f3 h1:uUSDGlOIkdPT4svjlhi+JEnP2Ufw7AM/F5QDYiEL02U=
knative.dev/pkg v0.0.0-20241026180704-25f6002b00f3/go.mod h1:FeMbTLlxQqSASwlRCrYEOsZ0OKUgSj52qxhECwYCJsw=
lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=

View File

@ -8,22 +8,22 @@ import (
"regexp"
"strings"
"github.com/aws/aws-sdk-go/service/bedrockruntime/bedrockruntimeiface"
"github.com/k8sgpt-ai/k8sgpt/pkg/ai/bedrock_support"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/bedrockruntime"
"github.com/aws/aws-sdk-go-v2/aws"
awsconfig "github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/service/bedrock"
"github.com/aws/aws-sdk-go-v2/service/bedrockruntime"
)
const amazonbedrockAIClientName = "amazonbedrock"
// AmazonBedRockClient represents the client for interacting with the AmazonCompletion Bedrock service.
// AmazonBedRockClient represents the client for interacting with the Amazon Bedrock service.
type AmazonBedRockClient struct {
nopCloser
client bedrockruntimeiface.BedrockRuntimeAPI
client BedrockRuntimeAPI
mgmtClient BedrockManagementAPI
model *bedrock_support.BedrockModel
temperature float32
topP float32
@ -42,6 +42,8 @@ const (
AP_Northeast_1 = "ap-northeast-1"
EU_Central_1 = "eu-central-1"
AP_South_1 = "ap-south-1"
US_Gov_West_1 = "us-gov-west-1"
US_Gov_East_1 = "us-gov-east-1"
)
var BEDROCKER_SUPPORTED_REGION = []string{
@ -51,13 +53,39 @@ var BEDROCKER_SUPPORTED_REGION = []string{
AP_Northeast_1,
EU_Central_1,
AP_South_1,
US_Gov_West_1,
US_Gov_East_1,
}
var defaultModels = []bedrock_support.BedrockModel{
{
Name: "anthropic.claude-3-5-sonnet-20240620-v1:0",
Name: "us.anthropic.claude-3-7-sonnet-20250219-v1:0",
Completion: &bedrock_support.CohereMessagesCompletion{},
Response: &bedrock_support.CohereMessagesResponse{},
Config: bedrock_support.BedrockModelConfig{
// sensible defaults
MaxTokens: 100,
Temperature: 0.5,
TopP: 0.9,
ModelName: "us.anthropic.claude-3-7-sonnet-20250219-v1:0",
},
},
{
Name: "eu.anthropic.claude-3-7-sonnet-20250219-v1:0",
Completion: &bedrock_support.CohereMessagesCompletion{},
Response: &bedrock_support.CohereMessagesResponse{},
Config: bedrock_support.BedrockModelConfig{
// sensible defaults
MaxTokens: 100,
Temperature: 0.5,
TopP: 0.9,
ModelName: "eu.anthropic.claude-3-7-sonnet-20250219-v1:0",
},
},
{
Name: "anthropic.claude-3-5-sonnet-20240620-v1:0",
Completion: &bedrock_support.CohereCompletion{},
Response: &bedrock_support.CohereResponse{},
Config: bedrock_support.BedrockModelConfig{
// sensible defaults
MaxTokens: 100,
@ -250,7 +278,6 @@ func NewAmazonBedRockClient(models []bedrock_support.BedrockModel) *AmazonBedRoc
// GetModelOrDefault check config region
func GetRegionOrDefault(region string) string {
if os.Getenv("AWS_DEFAULT_REGION") != "" {
region = os.Getenv("AWS_DEFAULT_REGION")
}
@ -265,6 +292,17 @@ func GetRegionOrDefault(region string) string {
return BEDROCK_DEFAULT_REGION
}
func validateModelArn(model string) bool {
var re = regexp.MustCompile(`(?m)^arn:(?P<Partition>[^:\n]*):bedrock:(?P<Region>[^:\n]*):(?P<AccountID>[^:\n]*):(?P<Ignore>(?P<ResourceType>[^:\/\n]*)[:\/])?(?P<Resource>.*)$`)
return re.MatchString(model)
}
func validateInferenceProfileArn(inferenceProfile string) bool {
// Support both inference-profile and application-inference-profile formats
var re = regexp.MustCompile(`(?m)^arn:(?P<Partition>[^:\n]*):bedrock:(?P<Region>[^:\n]*):(?P<AccountID>[^:\n]*):(?:inference-profile|application-inference-profile)\/(?P<ProfileName>.+)$`)
return re.MatchString(inferenceProfile)
}
// Get model from string
func (a *AmazonBedRockClient) getModelFromString(model string) (*bedrock_support.BedrockModel, error) {
if model == "" {
@ -306,11 +344,6 @@ func (a *AmazonBedRockClient) getModelFromString(model string) (*bedrock_support
return nil, fmt.Errorf("model '%s' not found in supported models", model)
}
func validateModelArn(model string) bool {
var re = regexp.MustCompile(`(?m)^arn:(?P<Partition>[^:\n]*):bedrock:(?P<Region>[^:\n]*):(?P<AccountID>[^:\n]*):(?P<Ignore>(?P<ResourceType>[^:\/\n]*)[:\/])?(?P<Resource>.*)$`)
return re.MatchString(model)
}
// Configure configures the AmazonBedRockClient with the provided configuration.
func (a *AmazonBedRockClient) Configure(config IAIConfig) error {
// Initialize models if not already initialized
@ -318,26 +351,77 @@ func (a *AmazonBedRockClient) Configure(config IAIConfig) error {
a.models = defaultModels
}
// Create a new AWS session
providerRegion := GetRegionOrDefault(config.GetProviderRegion())
sess, err := session.NewSession(&aws.Config{
Region: aws.String(providerRegion),
})
if err != nil {
return err
// Get the model input
modelInput := config.GetModel()
// Determine the appropriate region to use
var region string
// Check if the model input is actually an inference profile ARN
if validateInferenceProfileArn(modelInput) {
// Extract the region from the inference profile ARN
arnParts := strings.Split(modelInput, ":")
if len(arnParts) >= 4 {
region = arnParts[3]
} else {
return fmt.Errorf("could not extract region from inference profile ARN: %s", modelInput)
}
} else {
// Use the provided region or default
region = GetRegionOrDefault(config.GetProviderRegion())
}
// Only create AWS clients if they haven't been injected (for testing)
if a.client == nil || a.mgmtClient == nil {
// Create a new AWS config with the determined region
cfg, err := awsconfig.LoadDefaultConfig(context.Background(),
awsconfig.WithRegion(region),
)
if err != nil {
return fmt.Errorf("failed to load AWS config for region %s: %w", region, err)
}
foundModel, err := a.getModelFromString(config.GetModel())
if err != nil {
return err
// Create clients with the config
a.client = bedrockruntime.NewFromConfig(cfg)
a.mgmtClient = bedrock.NewFromConfig(cfg)
}
// Create a new BedrockRuntime client
a.client = bedrockruntime.New(sess)
a.model = foundModel
a.model.Config.ModelName = foundModel.Name
// Handle model selection based on input type
if validateInferenceProfileArn(modelInput) {
// Get the inference profile details
profile, err := a.getInferenceProfile(context.Background(), modelInput)
if err != nil {
// Instead of using a fallback model, throw an error
return fmt.Errorf("failed to get inference profile: %v", err)
} else {
// Extract the model ID from the inference profile
modelID, err := a.extractModelFromInferenceProfile(profile)
if err != nil {
return fmt.Errorf("failed to extract model ID from inference profile: %v", err)
}
// Find the model configuration for the extracted model ID
foundModel, err := a.getModelFromString(modelID)
if err != nil {
// Instead of using a fallback model, throw an error
return fmt.Errorf("failed to find model configuration for %s: %v", modelID, err)
}
a.model = foundModel
// Use the inference profile ARN as the model ID for API calls
a.model.Config.ModelName = modelInput
}
} else {
// Regular model ID provided
foundModel, err := a.getModelFromString(modelInput)
if err != nil {
return err
}
a.model = foundModel
a.model.Config.ModelName = foundModel.Config.ModelName
}
// Set common configuration parameters
a.temperature = config.GetTemperature()
a.topP = config.GetTopP()
a.maxTokens = config.GetMaxTokens()
@ -345,9 +429,62 @@ func (a *AmazonBedRockClient) Configure(config IAIConfig) error {
return nil
}
// getInferenceProfile retrieves the inference profile details from Amazon Bedrock
func (a *AmazonBedRockClient) getInferenceProfile(ctx context.Context, inferenceProfileARN string) (*bedrock.GetInferenceProfileOutput, error) {
// Extract the profile ID from the ARN
// ARN format: arn:aws:bedrock:region:account-id:inference-profile/profile-id
// or arn:aws:bedrock:region:account-id:application-inference-profile/profile-id
parts := strings.Split(inferenceProfileARN, "/")
if len(parts) != 2 {
return nil, fmt.Errorf("invalid inference profile ARN format: %s", inferenceProfileARN)
}
profileID := parts[1]
// Create the input for the GetInferenceProfile API call
input := &bedrock.GetInferenceProfileInput{
InferenceProfileIdentifier: aws.String(profileID),
}
// Call the GetInferenceProfile API
output, err := a.mgmtClient.GetInferenceProfile(ctx, input)
if err != nil {
return nil, fmt.Errorf("failed to get inference profile: %w", err)
}
return output, nil
}
// extractModelFromInferenceProfile extracts the model ID from the inference profile
func (a *AmazonBedRockClient) extractModelFromInferenceProfile(profile *bedrock.GetInferenceProfileOutput) (string, error) {
if profile == nil || len(profile.Models) == 0 {
return "", fmt.Errorf("inference profile does not contain any models")
}
// Check if the first model has a non-nil ModelArn
if profile.Models[0].ModelArn == nil {
return "", fmt.Errorf("model information is missing in inference profile")
}
// Get the first model ARN from the profile
modelARN := aws.ToString(profile.Models[0].ModelArn)
if modelARN == "" {
return "", fmt.Errorf("model ARN is empty in inference profile")
}
// Extract the model ID from the ARN
// ARN format: arn:aws:bedrock:region::foundation-model/model-id
parts := strings.Split(modelARN, "/")
if len(parts) != 2 {
return "", fmt.Errorf("invalid model ARN format: %s", modelARN)
}
modelID := parts[1]
return modelID, nil
}
// GetCompletion sends a request to the model for generating completion based on the provided prompt.
func (a *AmazonBedRockClient) GetCompletion(ctx context.Context, prompt string) (string, error) {
// override config defaults
a.model.Config.MaxTokens = a.maxTokens
a.model.Config.Temperature = a.temperature
@ -357,23 +494,23 @@ func (a *AmazonBedRockClient) GetCompletion(ctx context.Context, prompt string)
if err != nil {
return "", err
}
// Build the parameters for the model invocation
params := &bedrockruntime.InvokeModelInput{
Body: body,
ModelId: aws.String(a.model.Name),
ModelId: aws.String(a.model.Config.ModelName),
ContentType: aws.String("application/json"),
Accept: aws.String("application/json"),
}
// Invoke the model
resp, err := a.client.InvokeModelWithContext(ctx, params)
resp, err := a.client.InvokeModel(ctx, params)
if err != nil {
return "", err
}
// Parse the response
return a.model.Response.ParseResponse(resp.Body)
}
// GetName returns the name of the AmazonBedRockClient.

View File

@ -0,0 +1,103 @@
package ai
import (
"context"
"testing"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/service/bedrock"
"github.com/aws/aws-sdk-go-v2/service/bedrock/types"
"github.com/aws/aws-sdk-go-v2/service/bedrockruntime"
"github.com/k8sgpt-ai/k8sgpt/pkg/ai/bedrock_support"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
)
// Mock for Bedrock Management Client
type MockBedrockClient struct {
mock.Mock
}
func (m *MockBedrockClient) GetInferenceProfile(ctx context.Context, params *bedrock.GetInferenceProfileInput, optFns ...func(*bedrock.Options)) (*bedrock.GetInferenceProfileOutput, error) {
args := m.Called(ctx, params)
if args.Get(0) == nil {
return nil, args.Error(1)
}
return args.Get(0).(*bedrock.GetInferenceProfileOutput), args.Error(1)
}
// Mock for Bedrock Runtime Client
type MockBedrockRuntimeClient struct {
mock.Mock
}
func (m *MockBedrockRuntimeClient) InvokeModel(ctx context.Context, params *bedrockruntime.InvokeModelInput, optFns ...func(*bedrockruntime.Options)) (*bedrockruntime.InvokeModelOutput, error) {
args := m.Called(ctx, params)
if args.Get(0) == nil {
return nil, args.Error(1)
}
return args.Get(0).(*bedrockruntime.InvokeModelOutput), args.Error(1)
}
// TestBedrockInferenceProfileARNWithMocks tests the inference profile ARN validation with mocks
func TestBedrockInferenceProfileARNWithMocks(t *testing.T) {
// Create test models
testModels := []bedrock_support.BedrockModel{
{
Name: "anthropic.claude-3-5-sonnet-20240620-v1:0",
Completion: &bedrock_support.CohereMessagesCompletion{},
Response: &bedrock_support.CohereMessagesResponse{},
Config: bedrock_support.BedrockModelConfig{
MaxTokens: 100,
Temperature: 0.5,
TopP: 0.9,
ModelName: "anthropic.claude-3-5-sonnet-20240620-v1:0",
},
},
}
// Create a client with test models
client := &AmazonBedRockClient{models: testModels}
// Create mock clients
mockMgmtClient := new(MockBedrockClient)
mockRuntimeClient := new(MockBedrockRuntimeClient)
// Inject mock clients into the AmazonBedRockClient
client.mgmtClient = mockMgmtClient
client.client = mockRuntimeClient
// Test with a valid inference profile ARN
inferenceProfileARN := "arn:aws:bedrock:us-east-1:123456789012:inference-profile/my-profile"
// Setup mock response for GetInferenceProfile
mockMgmtClient.On("GetInferenceProfile", mock.Anything, &bedrock.GetInferenceProfileInput{
InferenceProfileIdentifier: aws.String("my-profile"),
}).Return(&bedrock.GetInferenceProfileOutput{
Models: []types.InferenceProfileModel{
{
ModelArn: aws.String("arn:aws:bedrock:us-east-1::foundation-model/anthropic.claude-3-5-sonnet-20240620-v1:0"),
},
},
}, nil)
// Configure the client with the inference profile ARN
config := AIProvider{
Model: inferenceProfileARN,
ProviderRegion: "us-east-1",
}
// Test the Configure method with the inference profile ARN
err := client.Configure(&config)
// Verify that the configuration was successful
assert.NoError(t, err)
assert.Equal(t, inferenceProfileARN, client.model.Config.ModelName)
// Verify that the mock was called
mockMgmtClient.AssertExpectations(t)
}

View File

@ -31,6 +31,17 @@ var testModels = []bedrock_support.BedrockModel{
ModelName: "anthropic.claude-3-5-sonnet-20241022-v2:0",
},
},
{
Name: "anthropic.claude-3-7-sonnet-20250219-v1:0",
Completion: &bedrock_support.CohereCompletion{},
Response: &bedrock_support.CohereResponse{},
Config: bedrock_support.BedrockModelConfig{
MaxTokens: 100,
Temperature: 0.5,
TopP: 0.9,
ModelName: "anthropic.claude-3-7-sonnet-20250219-v1:0",
},
},
}
func TestBedrockModelConfig(t *testing.T) {
@ -52,6 +63,70 @@ func TestBedrockInvalidModel(t *testing.T) {
assert.Equal(t, foundModel.Config.MaxTokens, 100)
}
func TestBedrockInferenceProfileARN(t *testing.T) {
// Create a mock client with test models
client := &AmazonBedRockClient{models: testModels}
// Test with a valid inference profile ARN
inferenceProfileARN := "arn:aws:bedrock:us-east-1:123456789012:inference-profile/my-profile"
config := AIProvider{
Model: inferenceProfileARN,
ProviderRegion: "us-east-1",
}
// This will fail in a real environment without mocks, but we're just testing the validation logic
err := client.Configure(&config)
// We expect an error since we can't actually call AWS in tests
assert.NotNil(t, err, "Error should not be nil without AWS mocks")
// Test with a valid application inference profile ARN
appInferenceProfileARN := "arn:aws:bedrock:us-east-1:123456789012:application-inference-profile/my-profile"
config = AIProvider{
Model: appInferenceProfileARN,
ProviderRegion: "us-east-1",
}
// This will fail in a real environment without mocks, but we're just testing the validation logic
err = client.Configure(&config)
// We expect an error since we can't actually call AWS in tests
assert.NotNil(t, err, "Error should not be nil without AWS mocks")
// Test with an invalid inference profile ARN format
invalidARN := "arn:aws:bedrock:us-east-1:123456789012:invalid-resource/my-profile"
config = AIProvider{
Model: invalidARN,
ProviderRegion: "us-east-1",
}
err = client.Configure(&config)
assert.NotNil(t, err, "Error should not be nil for invalid inference profile ARN format")
}
func TestBedrockGetCompletionInferenceProfile(t *testing.T) {
modelName := "arn:aws:bedrock:us-east-1:*:inference-policy/anthropic.claude-3-5-sonnet-20240620-v1:0"
var inferenceModelModels = []bedrock_support.BedrockModel{
{
Name: "anthropic.claude-3-5-sonnet-20240620-v1:0",
Completion: &bedrock_support.CohereMessagesCompletion{},
Response: &bedrock_support.CohereMessagesResponse{},
Config: bedrock_support.BedrockModelConfig{
MaxTokens: 100,
Temperature: 0.5,
TopP: 0.9,
ModelName: modelName,
},
},
}
client := &AmazonBedRockClient{models: inferenceModelModels}
config := AIProvider{
Model: modelName,
}
err := client.Configure(&config)
assert.Nil(t, err, "Error should be nil")
assert.Equal(t, modelName, client.model.Config.ModelName, "Model name should match")
}
func TestGetModelFromString(t *testing.T) {
client := &AmazonBedRockClient{models: testModels}
@ -137,3 +212,54 @@ func TestDefaultModels(t *testing.T) {
assert.NoError(t, err, "Should find the model")
assert.Equal(t, "anthropic.claude-v2", model.Name, "Should find the correct model")
}
func TestValidateInferenceProfileArn(t *testing.T) {
tests := []struct {
name string
arn string
valid bool
}{
{
name: "valid inference profile ARN",
arn: "arn:aws:bedrock:us-east-1:123456789012:inference-profile/my-profile",
valid: true,
},
{
name: "valid application inference profile ARN",
arn: "arn:aws:bedrock:us-east-1:123456789012:application-inference-profile/my-profile",
valid: true,
},
{
name: "invalid service in ARN",
arn: "arn:aws:s3:us-east-1:123456789012:inference-profile/my-profile",
valid: false,
},
{
name: "invalid resource type in ARN",
arn: "arn:aws:bedrock:us-east-1:123456789012:model/my-profile",
valid: false,
},
{
name: "malformed ARN",
arn: "arn:aws:bedrock:us-east-1:inference-profile/my-profile",
valid: false,
},
{
name: "not an ARN",
arn: "not-an-arn",
valid: false,
},
{
name: "empty string",
arn: "",
valid: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := validateInferenceProfileArn(tt.arn)
assert.Equal(t, tt.valid, result, "validateInferenceProfileArn() result should match expected")
})
}
}

View File

@ -0,0 +1,18 @@
package ai
import (
"context"
"github.com/aws/aws-sdk-go-v2/service/bedrock"
"github.com/aws/aws-sdk-go-v2/service/bedrockruntime"
)
// BedrockManagementAPI defines the interface for Bedrock management operations
type BedrockManagementAPI interface {
GetInferenceProfile(ctx context.Context, params *bedrock.GetInferenceProfileInput, optFns ...func(*bedrock.Options)) (*bedrock.GetInferenceProfileOutput, error)
}
// BedrockRuntimeAPI defines the interface for Bedrock runtime operations
type BedrockRuntimeAPI interface {
InvokeModel(ctx context.Context, params *bedrockruntime.InvokeModelInput, optFns ...func(*bedrockruntime.Options)) (*bedrockruntime.InvokeModelOutput, error)
}

View File

@ -173,6 +173,20 @@ func TestAmazonCompletion_GetCompletion_UnsupportedModel(t *testing.T) {
assert.Contains(t, err.Error(), "model unsupported-model is not supported")
}
func TestAmazonCompletion_GetCompletion_Inference_Profile(t *testing.T) {
completion := &AmazonCompletion{}
modelConfig := BedrockModelConfig{
MaxTokens: 200,
Temperature: 0.5,
TopP: 0.7,
ModelName: "arn:aws:bedrock:us-east-1:*:inference-policy/anthropic.claude-3-5-sonnet-20240620-v1:0",
}
prompt := "Test prompt"
_, err := completion.GetCompletion(context.Background(), prompt, modelConfig)
assert.NoError(t, err)
}
func Test_isModelSupported(t *testing.T) {
assert.True(t, isModelSupported("anthropic.claude-v2"))
assert.False(t, isModelSupported("unsupported-model"))

View File

@ -323,7 +323,16 @@ func (a *Analysis) RunAnalysis() {
OpenapiSchema: openapiSchema,
}
semaphore := make(chan struct{}, a.MaxConcurrency)
// Set a reasonable maximum for concurrency to prevent excessive memory allocation
const maxAllowedConcurrency = 100
concurrency := a.MaxConcurrency
if concurrency <= 0 {
concurrency = 10 // Default value if not set
} else if concurrency > maxAllowedConcurrency {
concurrency = maxAllowedConcurrency // Cap at a reasonable maximum
}
semaphore := make(chan struct{}, concurrency)
var wg sync.WaitGroup
var mutex sync.Mutex
// if there are no filters selected and no active_filters then run coreAnalyzer

View File

@ -47,7 +47,7 @@ var coreAnalyzerMap = map[string]common.IAnalyzer{
}
var additionalAnalyzerMap = map[string]common.IAnalyzer{
"HorizontalPodAutoScaler": HpaAnalyzer{},
"HorizontalPodAutoscaler": HpaAnalyzer{},
"PodDisruptionBudget": PdbAnalyzer{},
"NetworkPolicy": NetworkPolicyAnalyzer{},
"Log": LogAnalyzer{},

View File

@ -54,22 +54,41 @@ func (d DeploymentAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error)
for _, deployment := range deployments.Items {
var failures []common.Failure
if *deployment.Spec.Replicas != deployment.Status.Replicas {
doc := apiDoc.GetApiDocV2("spec.replicas")
if *deployment.Spec.Replicas != deployment.Status.ReadyReplicas {
if deployment.Status.Replicas > *deployment.Spec.Replicas {
doc := apiDoc.GetApiDocV2("spec.replicas")
failures = append(failures, common.Failure{
Text: fmt.Sprintf("Deployment %s/%s has %d replicas but %d are available", deployment.Namespace, deployment.Name, *deployment.Spec.Replicas, deployment.Status.Replicas),
KubernetesDoc: doc,
Sensitive: []common.Sensitive{
{
Unmasked: deployment.Namespace,
Masked: util.MaskString(deployment.Namespace),
},
{
Unmasked: deployment.Name,
Masked: util.MaskString(deployment.Name),
},
}})
failures = append(failures, common.Failure{
Text: fmt.Sprintf("Deployment %s/%s has %d replicas in spec but %d replicas in status because status field is not updated yet after scaling and %d replicas are available with status running", deployment.Namespace, deployment.Name, *deployment.Spec.Replicas, deployment.Status.Replicas, deployment.Status.ReadyReplicas),
KubernetesDoc: doc,
Sensitive: []common.Sensitive{
{
Unmasked: deployment.Namespace,
Masked: util.MaskString(deployment.Namespace),
},
{
Unmasked: deployment.Name,
Masked: util.MaskString(deployment.Name),
},
}})
} else {
doc := apiDoc.GetApiDocV2("spec.replicas")
failures = append(failures, common.Failure{
Text: fmt.Sprintf("Deployment %s/%s has %d replicas but %d are available with status running", deployment.Namespace, deployment.Name, *deployment.Spec.Replicas, deployment.Status.ReadyReplicas),
KubernetesDoc: doc,
Sensitive: []common.Sensitive{
{
Unmasked: deployment.Namespace,
Masked: util.MaskString(deployment.Namespace),
},
{
Unmasked: deployment.Name,
Masked: util.MaskString(deployment.Name),
},
}})
}
}
if len(failures) > 0 {
preAnalysis[fmt.Sprintf("%s/%s", deployment.Namespace, deployment.Name)] = common.PreAnalysis{

View File

@ -20,6 +20,7 @@ import (
"github.com/k8sgpt-ai/k8sgpt/pkg/kubernetes"
"github.com/k8sgpt-ai/k8sgpt/pkg/util"
appsv1 "k8s.io/api/apps/v1"
autoscalingv2 "k8s.io/api/autoscaling/v2"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
@ -34,7 +35,7 @@ func (HpaAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {
Kind: kind,
ApiVersion: schema.GroupVersion{
Group: "autoscaling",
Version: "v1",
Version: "v2",
},
OpenapiSchema: a.OpenapiSchema,
}
@ -56,11 +57,22 @@ func (HpaAnalyzer) Analyze(a common.Analyzer) ([]common.Result, error) {
//check the error from status field
conditions := hpa.Status.Conditions
for _, condition := range conditions {
if condition.Status != "True" {
failures = append(failures, common.Failure{
Text: condition.Message,
Sensitive: []common.Sensitive{},
})
// https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale-walkthrough/#appendix-horizontal-pod-autoscaler-status-conditions
switch condition.Type {
case autoscalingv2.ScalingLimited:
if condition.Status == corev1.ConditionTrue {
failures = append(failures, common.Failure{
Text: condition.Message,
Sensitive: []common.Sensitive{},
})
}
default:
if condition.Status == corev1.ConditionFalse {
failures = append(failures, common.Failure{
Text: condition.Message,
Sensitive: []common.Sensitive{},
})
}
}
}

View File

@ -735,3 +735,87 @@ func TestHPAAnalyzerStatusField(t *testing.T) {
assert.Equal(t, len(analysisResults), 1)
}
func TestHPAAnalyzerStatusScalingLimitedError(t *testing.T) {
clientset := fake.NewSimpleClientset(
&autoscalingv2.HorizontalPodAutoscaler{
ObjectMeta: metav1.ObjectMeta{
Name: "example",
Namespace: "default",
Annotations: map[string]string{},
},
Spec: autoscalingv2.HorizontalPodAutoscalerSpec{
ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{
Kind: "Deployment",
Name: "example",
},
},
Status: autoscalingv2.HorizontalPodAutoscalerStatus{
Conditions: []autoscalingv2.HorizontalPodAutoscalerCondition{
{
Type: autoscalingv2.AbleToScale,
Status: "True",
Message: "recommended size matches current size",
},
{
Type: autoscalingv2.ScalingActive,
Status: "True",
Message: "the HPA was able to successfully calculate a replica count",
},
{
Type: autoscalingv2.ScalingLimited,
Status: "True",
Message: "the desired replica count is less than the minimum replica count",
},
},
},
},
&appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: "example",
Namespace: "default",
Annotations: map[string]string{},
},
Spec: appsv1.DeploymentSpec{
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "example",
Image: "nginx",
},
},
},
},
},
},
)
hpaAnalyzer := HpaAnalyzer{}
config := common.Analyzer{
Client: &kubernetes.Client{
Client: clientset,
},
Context: context.Background(),
Namespace: "default",
}
analysisResults, err := hpaAnalyzer.Analyze(config)
if err != nil {
t.Error(err)
}
var errorFound bool
want := "the desired replica count is less than the minimum replica count"
for _, analysis := range analysisResults {
for _, got := range analysis.Error {
if want == got.Text {
errorFound = true
}
}
if errorFound {
break
}
}
if !errorFound {
t.Errorf("Expected message, <%v> , not found in HorizontalPodAutoscaler's analysis results", want)
}
}

View File

@ -15,116 +15,45 @@ package analyzer
import (
"context"
"sort"
"testing"
"github.com/k8sgpt-ai/k8sgpt/pkg/common"
"github.com/k8sgpt-ai/k8sgpt/pkg/kubernetes"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
v1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/fake"
)
func TestIngressAnalyzer(t *testing.T) {
tests := []struct {
name string
config common.Analyzer
expectations []struct {
name string
failuresCount int
}
// Create test cases
testCases := []struct {
name string
ingress *networkingv1.Ingress
expectedIssues []string
}{
{
name: "Missing ingress class",
config: common.Analyzer{
Client: &kubernetes.Client{
Client: fake.NewSimpleClientset(
&networkingv1.Ingress{
ObjectMeta: metav1.ObjectMeta{
Name: "no-class",
Namespace: "default",
},
Spec: networkingv1.IngressSpec{
// No ingress class specified
},
},
),
},
Context: context.Background(),
Namespace: "default",
},
expectations: []struct {
name string
failuresCount int
}{
{
name: "default/no-class",
failuresCount: 1, // One failure for missing ingress class
},
},
},
{
name: "Non-existent ingress class",
config: common.Analyzer{
Client: &kubernetes.Client{
Client: fake.NewSimpleClientset(
&networkingv1.Ingress{
ObjectMeta: metav1.ObjectMeta{
Name: "bad-class",
Namespace: "default",
},
Spec: networkingv1.IngressSpec{
IngressClassName: strPtr("non-existent"),
},
},
),
},
Context: context.Background(),
Namespace: "default",
},
expectations: []struct {
name string
failuresCount int
}{
{
name: "default/bad-class",
failuresCount: 1, // One failure for non-existent ingress class
},
},
},
{
name: "Non-existent backend service",
config: common.Analyzer{
Client: &kubernetes.Client{
Client: fake.NewSimpleClientset(
&networkingv1.Ingress{
ObjectMeta: metav1.ObjectMeta{
Name: "bad-backend",
Namespace: "default",
Annotations: map[string]string{
"kubernetes.io/ingress.class": "nginx",
},
},
Spec: networkingv1.IngressSpec{
Rules: []networkingv1.IngressRule{
{
Host: "example.com",
IngressRuleValue: networkingv1.IngressRuleValue{
HTTP: &networkingv1.HTTPIngressRuleValue{
Paths: []networkingv1.HTTPIngressPath{
{
Path: "/",
PathType: pathTypePtr(networkingv1.PathTypePrefix),
Backend: networkingv1.IngressBackend{
Service: &networkingv1.IngressServiceBackend{
Name: "non-existent-service",
Port: networkingv1.ServiceBackendPort{
Number: 80,
},
},
},
ingress: &networkingv1.Ingress{
ObjectMeta: metav1.ObjectMeta{
Name: "test-ingress",
Namespace: "default",
},
Spec: networkingv1.IngressSpec{
Rules: []networkingv1.IngressRule{
{
Host: "example.com",
IngressRuleValue: networkingv1.IngressRuleValue{
HTTP: &networkingv1.HTTPIngressRuleValue{
Paths: []networkingv1.HTTPIngressPath{
{
Path: "/",
Backend: networkingv1.IngressBackend{
Service: &networkingv1.IngressServiceBackend{
Name: "non-existent-service",
Port: networkingv1.ServiceBackendPort{
Number: 80,
},
},
},
@ -133,177 +62,144 @@ func TestIngressAnalyzer(t *testing.T) {
},
},
},
),
},
},
Context: context.Background(),
Namespace: "default",
},
expectations: []struct {
name string
failuresCount int
}{
{
name: "default/bad-backend",
failuresCount: 2, // Two failures: non-existent ingress class and non-existent service
},
expectedIssues: []string{
"Ingress default/test-ingress does not specify an Ingress class.",
"Ingress uses the service default/non-existent-service which does not exist.",
},
},
{
name: "Non-existent TLS secret",
config: common.Analyzer{
Client: &kubernetes.Client{
Client: fake.NewSimpleClientset(
&networkingv1.Ingress{
ObjectMeta: metav1.ObjectMeta{
Name: "bad-tls",
Namespace: "default",
Annotations: map[string]string{
"kubernetes.io/ingress.class": "nginx",
},
},
Spec: networkingv1.IngressSpec{
TLS: []networkingv1.IngressTLS{
{
Hosts: []string{"example.com"},
SecretName: "non-existent-secret",
},
},
},
},
),
ingress: &networkingv1.Ingress{
ObjectMeta: metav1.ObjectMeta{
Name: "test-ingress-tls",
Namespace: "default",
},
Context: context.Background(),
Namespace: "default",
},
expectations: []struct {
name string
failuresCount int
}{
{
name: "default/bad-tls",
failuresCount: 2, // Two failures: non-existent ingress class and non-existent TLS secret
},
},
},
{
name: "Valid ingress with all components",
config: common.Analyzer{
Client: &kubernetes.Client{
Client: fake.NewSimpleClientset(
&networkingv1.Ingress{
ObjectMeta: metav1.ObjectMeta{
Name: "valid-ingress",
Namespace: "default",
},
Spec: networkingv1.IngressSpec{
IngressClassName: strPtr("nginx"),
},
Spec: networkingv1.IngressSpec{
TLS: []networkingv1.IngressTLS{
{
Hosts: []string{"example.com"},
SecretName: "non-existent-secret",
},
&networkingv1.IngressClass{
ObjectMeta: metav1.ObjectMeta{
Name: "nginx",
},
},
&v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "backend-service",
Namespace: "default",
},
},
&v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "tls-secret",
Namespace: "default",
},
Type: v1.SecretTypeTLS,
},
),
},
Context: context.Background(),
Namespace: "default",
},
expectations: []struct {
name string
failuresCount int
}{
// No expectations for valid ingress
},
},
{
name: "Multiple issues",
config: common.Analyzer{
Client: &kubernetes.Client{
Client: fake.NewSimpleClientset(
&networkingv1.Ingress{
ObjectMeta: metav1.ObjectMeta{
Name: "multiple-issues",
Namespace: "default",
},
Spec: networkingv1.IngressSpec{
IngressClassName: strPtr("non-existent"),
Rules: []networkingv1.IngressRule{
{
Host: "example.com",
IngressRuleValue: networkingv1.IngressRuleValue{
HTTP: &networkingv1.HTTPIngressRuleValue{
Paths: []networkingv1.HTTPIngressPath{
{
Path: "/",
PathType: pathTypePtr(networkingv1.PathTypePrefix),
Backend: networkingv1.IngressBackend{
Service: &networkingv1.IngressServiceBackend{
Name: "non-existent-service",
Port: networkingv1.ServiceBackendPort{
Number: 80,
},
},
},
},
Rules: []networkingv1.IngressRule{
{
Host: "example.com",
IngressRuleValue: networkingv1.IngressRuleValue{
HTTP: &networkingv1.HTTPIngressRuleValue{
Paths: []networkingv1.HTTPIngressPath{
{
Path: "/",
Backend: networkingv1.IngressBackend{
Service: &networkingv1.IngressServiceBackend{
Name: "test-service",
Port: networkingv1.ServiceBackendPort{
Number: 80,
},
},
},
},
},
},
TLS: []networkingv1.IngressTLS{
{
Hosts: []string{"example.com"},
SecretName: "non-existent-secret",
},
},
},
},
},
expectedIssues: []string{
"Ingress default/test-ingress-tls does not specify an Ingress class.",
"Ingress uses the service default/test-service which does not exist.",
"Ingress uses the secret default/non-existent-secret as a TLS certificate which does not exist.",
},
},
{
name: "Multiple issues",
ingress: &networkingv1.Ingress{
ObjectMeta: metav1.ObjectMeta{
Name: "test-ingress-multi",
Namespace: "default",
},
Spec: networkingv1.IngressSpec{
TLS: []networkingv1.IngressTLS{
{
Hosts: []string{"example.com"},
SecretName: "non-existent-secret",
},
},
Rules: []networkingv1.IngressRule{
{
Host: "example.com",
IngressRuleValue: networkingv1.IngressRuleValue{
HTTP: &networkingv1.HTTPIngressRuleValue{
Paths: []networkingv1.HTTPIngressPath{
{
Path: "/",
Backend: networkingv1.IngressBackend{
Service: &networkingv1.IngressServiceBackend{
Name: "non-existent-service",
Port: networkingv1.ServiceBackendPort{
Number: 80,
},
},
},
},
},
},
},
},
),
},
},
Context: context.Background(),
Namespace: "default",
},
expectations: []struct {
name string
failuresCount int
}{
{
name: "default/multiple-issues",
failuresCount: 3, // Three failures: ingress class, service, and TLS secret
},
expectedIssues: []string{
"Ingress default/test-ingress-multi does not specify an Ingress class.",
"Ingress uses the service default/non-existent-service which does not exist.",
"Ingress uses the secret default/non-existent-secret as a TLS certificate which does not exist.",
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// Run test cases
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
// Create a new context and clientset for each test case
ctx := context.Background()
clientset := fake.NewSimpleClientset()
// Create the ingress in the fake clientset
_, err := clientset.NetworkingV1().Ingresses(tc.ingress.Namespace).Create(ctx, tc.ingress, metav1.CreateOptions{})
assert.NoError(t, err)
// Create the analyzer configuration
config := common.Analyzer{
Client: &kubernetes.Client{
Client: clientset,
},
Context: ctx,
Namespace: tc.ingress.Namespace,
}
// Create the analyzer and run analysis
analyzer := IngressAnalyzer{}
results, err := analyzer.Analyze(tt.config)
require.NoError(t, err)
require.Len(t, results, len(tt.expectations))
results, err := analyzer.Analyze(config)
assert.NoError(t, err)
// Sort results by name for consistent comparison
sort.Slice(results, func(i, j int) bool {
return results[i].Name < results[j].Name
})
// Check that we got the expected number of issues
assert.Len(t, results, 1, "Expected 1 result")
result := results[0]
assert.Len(t, result.Error, len(tc.expectedIssues), "Expected %d issues, got %d", len(tc.expectedIssues), len(result.Error))
for i, expectation := range tt.expectations {
require.Equal(t, expectation.name, results[i].Name)
require.Len(t, results[i].Error, expectation.failuresCount)
// Check that each expected issue is present
for _, expectedIssue := range tc.expectedIssues {
found := false
for _, failure := range result.Error {
if failure.Text == expectedIssue {
found = true
break
}
}
assert.True(t, found, "Expected to find issue: %s", expectedIssue)
}
})
}

View File

@ -1,12 +1,15 @@
package cache
import (
rpc "buf.build/gen/go/interplex-ai/schemas/grpc/go/protobuf/schema/v1/schemav1grpc"
schemav1 "buf.build/gen/go/interplex-ai/schemas/protocolbuffers/go/protobuf/schema/v1"
"context"
"errors"
"google.golang.org/grpc"
"fmt"
"os"
rpc "buf.build/gen/go/interplex-ai/schemas/grpc/go/protobuf/schema/v1/schemav1grpc"
schemav1 "buf.build/gen/go/interplex-ai/schemas/protocolbuffers/go/protobuf/schema/v1"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
)
var _ ICache = (*InterplexCache)(nil)
@ -59,6 +62,10 @@ func (c *InterplexCache) Store(key string, data string) error {
}
func (c *InterplexCache) Load(key string) (string, error) {
if os.Getenv("INTERPLEX_LOCAL_MODE") != "" {
c.configuration.ConnectionString = "localhost:8084"
}
conn, err := grpc.NewClient(c.configuration.ConnectionString, grpc.WithInsecure(), grpc.WithBlock())
defer conn.Close()
if err != nil {
@ -70,36 +77,52 @@ func (c *InterplexCache) Load(key string) (string, error) {
Key: key,
}
resp, err := c.cacheServiceClient.Get(context.Background(), &req)
// check if response is cache error not found
if err != nil {
return "", err
}
return resp.Value, nil
}
func (InterplexCache) List() ([]CacheObjectDetails, error) {
//TODO implement me
return nil, errors.New("not implemented")
func (c *InterplexCache) List() ([]CacheObjectDetails, error) {
// Not implemented for Interplex cache
return []CacheObjectDetails{}, nil
}
func (InterplexCache) Remove(key string) error {
func (c *InterplexCache) Remove(key string) error {
if os.Getenv("INTERPLEX_LOCAL_MODE") != "" {
c.configuration.ConnectionString = "localhost:8084"
}
return errors.New("not implemented")
conn, err := grpc.NewClient(c.configuration.ConnectionString, grpc.WithTransportCredentials(insecure.NewCredentials()))
if err != nil {
return err
}
defer func() {
if err := conn.Close(); err != nil {
// Log the error but don't return it since this is a deferred function
fmt.Printf("Error closing connection: %v\n", err)
}
}()
serviceClient := rpc.NewCacheServiceClient(conn)
c.cacheServiceClient = serviceClient
req := schemav1.DeleteRequest{
Key: key,
}
_, err = c.cacheServiceClient.Delete(context.Background(), &req)
return err
}
func (c *InterplexCache) Exists(key string) bool {
if _, err := c.Load(key); err != nil {
return false
}
return true
_, err := c.Load(key)
return err == nil
}
func (c *InterplexCache) IsCacheDisabled() bool {
return c.noCache
}
func (InterplexCache) GetName() string {
//TODO implement me
func (c *InterplexCache) GetName() string {
return "interplex"
}

View File

@ -1,30 +1,49 @@
# serve
# K8sGPT MCP Server
The serve commands allow you to run k8sgpt in a grpc server mode.
This would be enabled typically through `k8sgpt serve` and is how the in-cluster k8sgpt deployment functions when managed by the [k8sgpt-operator](https://github.com/k8sgpt-ai/k8sgpt-operator)
This directory contains the implementation of the Model Context Protocol (MCP) server for K8sGPT. The MCP server allows K8sGPT to be integrated with other tools that support the MCP protocol.
The grpc interface that is served is hosted on [buf](https://buf.build/k8sgpt-ai/schemas) and the repository for this is [here](https://github.com/k8sgpt-ai/schemas)
## Components
## grpcurl
- `mcp.go`: The main MCP server implementation
- `server.go`: The HTTP server implementation
- `tools.go`: Tool definitions for the MCP server
A fantastic tool for local debugging and development is `grpcurl`
It allows you to form curl like requests that are http2
e.g.
## Features
```
grpcurl -plaintext -d '{"namespace": "k8sgpt", "explain" : "true"}' localhost:8080 schema.v1.ServiceAnalyzeService/Analyze
```
The MCP server provides the following features:
```
grpcurl -plaintext localhost:8080 schema.v1.ServiceConfigService/ListIntegrations
{
"integrations": [
"prometheus"
]
1. **Analyze Kubernetes Resources**: Analyze Kubernetes resources in a cluster
2. **Get Cluster Information**: Retrieve information about the Kubernetes cluster
## Usage
To use the MCP server, you need to:
1. Initialize the MCP server with an AI provider, a port, and a logger
2. Start the server
3. Connect to the server using an MCP client
Example:
```go
logger, err := zap.NewProduction()
if err != nil {
	log.Fatalf("Failed to create logger: %v", err)
}
aiProvider := &ai.AIProvider{
	Name:     "openai",
	Password: os.Getenv("OPENAI_API_KEY"),
	Model:    "gpt-3.5-turbo",
}
mcpServer, err := server.NewMCPServer("8089", aiProvider, false, logger)
if err != nil {
	log.Fatalf("Failed to create MCP server: %v", err)
}
if err := mcpServer.Start(); err != nil {
	log.Fatalf("Failed to start MCP server: %v", err)
}
```
```
grpcurl -plaintext -d '{"integrations":{"prometheus":{"enabled":"true","namespace":"default","skipInstall":"false"}}}' localhost:8080 schema.v1.ServiceConfigService/AddConfig
```
## Integration
The MCP server can be integrated with other tools that support the MCP protocol, such as:
- Mission Control
- Other MCP-compatible tools
## License
This code is licensed under the Apache License 2.0.

View File

@ -0,0 +1,60 @@
# K8sGPT MCP Client Example
This directory contains an example of how to use the K8sGPT MCP client in a real-world scenario.
## Prerequisites
- Go 1.16 or later
- Access to a Kubernetes cluster
- `kubectl` configured to access your cluster
## Building the Example
To build the example, run:
```bash
go build -o mcp-client-example
```
## Running the Example
To run the example, use the following command:
```bash
./mcp-client-example --port=8089 --namespace=default
```
### Command-line Flags
- `--port`: Port of the MCP server (optional, defaults to `8089`)
- `--namespace`: Kubernetes namespace to analyze (optional)
- `--backend`: AI backend to use (optional)
- `--language`: Language for the analysis (optional, defaults to `english`)
## Example Output
When you run the example, you should see output similar to the following:
```
Starting MCP client...
```
The client will continue running until you press Ctrl+C to stop it.
## Integration with Mission Control
To integrate this example with Mission Control, you need to:
1. Start the MCP client using the example
2. Configure Mission Control to connect to the MCP client
3. Use Mission Control to analyze your Kubernetes cluster
## Troubleshooting
If you encounter any issues, check the following:
1. Ensure that your Kubernetes cluster is accessible
2. Verify that your kubeconfig file is valid
3. Check that the namespace you specified exists
## License
This code is licensed under the Apache License 2.0.

View File

@ -0,0 +1,114 @@
/*
Copyright 2024 The K8sGPT Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"bytes"
"encoding/json"
"flag"
"fmt"
"io"
"log"
"net/http"
"time"
)
// AnalyzeRequest represents the input parameters for the analyze tool
type AnalyzeRequest struct {
Namespace string `json:"namespace,omitempty"`
Backend string `json:"backend,omitempty"`
Language string `json:"language,omitempty"`
Filters []string `json:"filters,omitempty"`
LabelSelector string `json:"labelSelector,omitempty"`
NoCache bool `json:"noCache,omitempty"`
Explain bool `json:"explain,omitempty"`
MaxConcurrency int `json:"maxConcurrency,omitempty"`
WithDoc bool `json:"withDoc,omitempty"`
InteractiveMode bool `json:"interactiveMode,omitempty"`
CustomHeaders []string `json:"customHeaders,omitempty"`
WithStats bool `json:"withStats,omitempty"`
}
// AnalyzeResponse represents the output of the analyze tool
type AnalyzeResponse struct {
Content []struct {
Text string `json:"text"`
Type string `json:"type"`
} `json:"content"`
}
func main() {
// Parse command line flags
serverPort := flag.String("port", "8089", "Port of the MCP server")
namespace := flag.String("namespace", "", "Kubernetes namespace to analyze")
backend := flag.String("backend", "", "AI backend to use")
language := flag.String("language", "english", "Language for analysis")
flag.Parse()
// Create analyze request
req := AnalyzeRequest{
Namespace: *namespace,
Backend: *backend,
Language: *language,
Explain: true,
MaxConcurrency: 10,
}
// Convert request to JSON
reqJSON, err := json.Marshal(req)
if err != nil {
log.Fatalf("Failed to marshal request: %v", err)
}
// Create HTTP client with timeout
client := &http.Client{
Timeout: 5 * time.Minute,
}
// Send request to MCP server
resp, err := client.Post(
fmt.Sprintf("http://localhost:%s/mcp/analyze", *serverPort),
"application/json",
bytes.NewBuffer(reqJSON),
)
if err != nil {
log.Fatalf("Failed to send request: %v", err)
}
defer func() {
if err := resp.Body.Close(); err != nil {
log.Printf("Error closing response body: %v", err)
}
}()
// Read and print raw response for debugging
body, err := io.ReadAll(resp.Body)
if err != nil {
log.Fatalf("Failed to read response body: %v", err)
}
fmt.Printf("Raw response: %s\n", string(body))
// Parse response
var analyzeResp AnalyzeResponse
if err := json.Unmarshal(body, &analyzeResp); err != nil {
log.Fatalf("Failed to decode response: %v", err)
}
// Print results
fmt.Println("Analysis Results:")
if len(analyzeResp.Content) > 0 {
fmt.Println(analyzeResp.Content[0].Text)
} else {
fmt.Println("No results returned")
}
}

View File

@ -1,8 +1,9 @@
package config
import (
schemav1 "buf.build/gen/go/k8sgpt-ai/k8sgpt/protocolbuffers/go/schema/v1"
"context"
schemav1 "buf.build/gen/go/k8sgpt-ai/k8sgpt/protocolbuffers/go/schema/v1"
"github.com/k8sgpt-ai/k8sgpt/pkg/cache"
"github.com/k8sgpt-ai/k8sgpt/pkg/custom"
"github.com/spf13/viper"
@ -20,19 +21,13 @@ const (
notUsedInsecure = false
)
func (h *Handler) AddConfig(ctx context.Context, i *schemav1.AddConfigRequest) (*schemav1.AddConfigResponse, error,
) {
resp, err := h.syncIntegration(ctx, i)
if err != nil {
return resp, err
}
// ApplyConfig applies the configuration changes from the request
func (h *Handler) ApplyConfig(ctx context.Context, i *schemav1.AddConfigRequest) error {
if i.CustomAnalyzers != nil {
// We need to add the custom analyzers to the viper config and save them
var customAnalyzers = make([]custom.CustomAnalyzer, 0)
if err := viper.UnmarshalKey("custom_analyzers", &customAnalyzers); err != nil {
return resp, err
return err
} else {
// If there are analyzers already in the config, we will append the ones with new names
for _, ca := range i.CustomAnalyzers {
@ -56,7 +51,7 @@ func (h *Handler) AddConfig(ctx context.Context, i *schemav1.AddConfigRequest) (
// save the config
viper.Set("custom_analyzers", customAnalyzers)
if err := viper.WriteConfig(); err != nil {
return resp, err
return err
}
}
}
@ -74,18 +69,30 @@ func (h *Handler) AddConfig(ctx context.Context, i *schemav1.AddConfigRequest) (
case *schemav1.Cache_InterplexCache:
remoteCache, err = cache.NewCacheProvider("interplex", notUsedBucket, notUsedRegion, i.Cache.GetInterplexCache().Endpoint, notUsedStorageAcc, notUsedContainerName, notUsedProjectId, notUsedInsecure)
default:
return resp, status.Error(codes.InvalidArgument, "Invalid cache configuration")
return status.Error(codes.InvalidArgument, "Invalid cache configuration")
}
if err != nil {
return resp, err
return err
}
err = cache.AddRemoteCache(remoteCache)
if err != nil {
return resp, err
return err
}
}
return nil
}
func (h *Handler) AddConfig(ctx context.Context, i *schemav1.AddConfigRequest) (*schemav1.AddConfigResponse, error) {
resp, err := h.syncIntegration(ctx, i)
if err != nil {
return resp, err
}
if err := h.ApplyConfig(ctx, i); err != nil {
return resp, err
}
return resp, nil
}

View File

@ -1,18 +1,15 @@
package config
import (
schemav1 "buf.build/gen/go/k8sgpt-ai/k8sgpt/protocolbuffers/go/schema/v1"
"context"
"fmt"
schemav1 "buf.build/gen/go/k8sgpt-ai/k8sgpt/protocolbuffers/go/schema/v1"
"github.com/k8sgpt-ai/k8sgpt/pkg/integration"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
//const (
// trivyName = "trivy"
//)
// syncIntegration is aware of the following events
// A new integration added
// An integration removed from the Integration block

View File

@ -0,0 +1,74 @@
/*
Copyright 2024 The K8sGPT Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"flag"
"log"
"os"
"os/signal"
"syscall"
"github.com/k8sgpt-ai/k8sgpt/pkg/ai"
"github.com/k8sgpt-ai/k8sgpt/pkg/server"
"go.uber.org/zap"
)
func main() {
// Parse command line flags
port := flag.String("port", "8089", "Port to run the MCP server on")
useHTTP := flag.Bool("http", false, "Enable HTTP mode for MCP server")
flag.Parse()
// Initialize zap logger
logger, err := zap.NewProduction()
if err != nil {
log.Fatalf("Error creating logger: %v", err)
}
defer func() {
if err := logger.Sync(); err != nil {
log.Printf("Error syncing logger: %v", err)
}
}()
// Create AI provider
aiProvider := &ai.AIProvider{
Name: "openai",
Password: os.Getenv("OPENAI_API_KEY"),
Model: "gpt-3.5-turbo",
}
// Create and start MCP server
mcpServer, err := server.NewMCPServer(*port, aiProvider, *useHTTP, logger)
if err != nil {
log.Fatalf("Error creating MCP server: %v", err)
}
// Start the server in a goroutine
go func() {
if err := mcpServer.Start(); err != nil {
log.Fatalf("Error starting MCP server: %v", err)
}
}()
// Handle graceful shutdown
sigChan := make(chan os.Signal, 1)
signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
<-sigChan
// Cleanup
if err := mcpServer.Close(); err != nil {
log.Printf("Error closing MCP server: %v", err)
}
}

pkg/server/mcp.go Normal file
View File

@ -0,0 +1,416 @@
/*
Copyright 2024 The K8sGPT Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package server
import (
"context"
"encoding/json"
"fmt"
"net/http"
schemav1 "buf.build/gen/go/k8sgpt-ai/k8sgpt/protocolbuffers/go/schema/v1"
"github.com/k8sgpt-ai/k8sgpt/pkg/ai"
"github.com/k8sgpt-ai/k8sgpt/pkg/analysis"
"github.com/k8sgpt-ai/k8sgpt/pkg/kubernetes"
"github.com/k8sgpt-ai/k8sgpt/pkg/server/config"
mcp_golang "github.com/metoro-io/mcp-golang"
"github.com/metoro-io/mcp-golang/transport/stdio"
"github.com/spf13/viper"
"go.uber.org/zap"
)
// MCPServer represents an MCP server for k8sgpt
type MCPServer struct {
server *mcp_golang.Server
port string
aiProvider *ai.AIProvider
useHTTP bool
logger *zap.Logger
}
// NewMCPServer creates a new MCP server
func NewMCPServer(port string, aiProvider *ai.AIProvider, useHTTP bool, logger *zap.Logger) (*MCPServer, error) {
// Create MCP server with stdio transport
transport := stdio.NewStdioServerTransport()
server := mcp_golang.NewServer(transport)
return &MCPServer{
server: server,
port: port,
aiProvider: aiProvider,
useHTTP: useHTTP,
logger: logger,
}, nil
}
// Start starts the MCP server
func (s *MCPServer) Start() error {
if s.server == nil {
return fmt.Errorf("server not initialized")
}
// Register analyze tool
if err := s.server.RegisterTool("analyze", "Analyze Kubernetes resources", s.handleAnalyze); err != nil {
return fmt.Errorf("failed to register analyze tool: %v", err)
}
// Register cluster info tool
if err := s.server.RegisterTool("cluster-info", "Get Kubernetes cluster information", s.handleClusterInfo); err != nil {
return fmt.Errorf("failed to register cluster-info tool: %v", err)
}
// Register config tool
if err := s.server.RegisterTool("config", "Configure K8sGPT settings", s.handleConfig); err != nil {
return fmt.Errorf("failed to register config tool: %v", err)
}
// Register resources
if err := s.registerResources(); err != nil {
return fmt.Errorf("failed to register resources: %v", err)
}
// Register prompts
if err := s.registerPrompts(); err != nil {
return fmt.Errorf("failed to register prompts: %v", err)
}
if s.useHTTP {
// Start HTTP server
go func() {
http.HandleFunc("/mcp/analyze", s.handleAnalyzeHTTP)
http.HandleFunc("/mcp", s.handleSSE)
s.logger.Info("Starting MCP server on port", zap.String("port", s.port))
if err := http.ListenAndServe(fmt.Sprintf(":%s", s.port), nil); err != nil {
s.logger.Error("Error starting HTTP server", zap.Error(err))
}
}()
}
// Start the server
return s.server.Serve()
}
// AnalyzeRequest represents the input parameters for the analyze tool
type AnalyzeRequest struct {
Namespace string `json:"namespace,omitempty"`
Backend string `json:"backend,omitempty"`
Language string `json:"language,omitempty"`
Filters []string `json:"filters,omitempty"`
LabelSelector string `json:"labelSelector,omitempty"`
NoCache bool `json:"noCache,omitempty"`
Explain bool `json:"explain,omitempty"`
MaxConcurrency int `json:"maxConcurrency,omitempty"`
WithDoc bool `json:"withDoc,omitempty"`
InteractiveMode bool `json:"interactiveMode,omitempty"`
CustomHeaders []string `json:"customHeaders,omitempty"`
WithStats bool `json:"withStats,omitempty"`
}
// AnalyzeResponse represents the output of the analyze tool
type AnalyzeResponse struct {
Results string `json:"results"`
}
// ClusterInfoRequest represents the input parameters for the cluster-info tool
type ClusterInfoRequest struct {
// Empty struct as we don't need any input parameters
}
// ClusterInfoResponse represents the output of the cluster-info tool
type ClusterInfoResponse struct {
Info string `json:"info"`
}
// ConfigRequest represents the input parameters for the config tool
type ConfigRequest struct {
CustomAnalyzers []struct {
Name string `json:"name"`
Connection struct {
Url string `json:"url"`
Port int `json:"port"`
} `json:"connection"`
} `json:"customAnalyzers,omitempty"`
Cache struct {
Type string `json:"type"`
// S3 specific fields
BucketName string `json:"bucketName,omitempty"`
Region string `json:"region,omitempty"`
Endpoint string `json:"endpoint,omitempty"`
Insecure bool `json:"insecure,omitempty"`
// Azure specific fields
StorageAccount string `json:"storageAccount,omitempty"`
ContainerName string `json:"containerName,omitempty"`
// GCS specific fields
ProjectId string `json:"projectId,omitempty"`
} `json:"cache,omitempty"`
}
// ConfigResponse represents the output of the config tool
type ConfigResponse struct {
Status string `json:"status"`
}
// handleAnalyze handles the analyze tool
func (s *MCPServer) handleAnalyze(ctx context.Context, request *AnalyzeRequest) (*mcp_golang.ToolResponse, error) {
// Get stored configuration
var configAI ai.AIConfiguration
if err := viper.UnmarshalKey("ai", &configAI); err != nil {
return mcp_golang.NewToolResponse(mcp_golang.NewTextContent(fmt.Sprintf("Failed to load AI configuration: %v", err))), nil
}
// Use stored configuration if not specified in request
if request.Backend == "" {
if configAI.DefaultProvider != "" {
request.Backend = configAI.DefaultProvider
} else if len(configAI.Providers) > 0 {
request.Backend = configAI.Providers[0].Name
} else {
request.Backend = "openai" // fallback default
}
}
request.Explain = true
// Get stored filters if not specified
if len(request.Filters) == 0 {
request.Filters = viper.GetStringSlice("active_filters")
}
// Validate MaxConcurrency to prevent excessive memory allocation
request.MaxConcurrency = validateMaxConcurrency(request.MaxConcurrency)
// Create a new analysis with the request parameters
analysis, err := analysis.NewAnalysis(
request.Backend,
request.Language,
request.Filters,
request.Namespace,
request.LabelSelector,
request.NoCache,
request.Explain,
request.MaxConcurrency,
request.WithDoc,
request.InteractiveMode,
request.CustomHeaders,
request.WithStats,
)
if err != nil {
return mcp_golang.NewToolResponse(mcp_golang.NewTextContent(fmt.Sprintf("Failed to create analysis: %v", err))), nil
}
defer analysis.Close()
// Run the analysis
analysis.RunAnalysis()
// Get the output
output, err := analysis.PrintOutput("json")
if err != nil {
return mcp_golang.NewToolResponse(mcp_golang.NewTextContent(fmt.Sprintf("Failed to print output: %v", err))), nil
}
return mcp_golang.NewToolResponse(mcp_golang.NewTextContent(string(output))), nil
}
// validateMaxConcurrency validates and bounds the MaxConcurrency parameter
func validateMaxConcurrency(maxConcurrency int) int {
const maxAllowedConcurrency = 100
if maxConcurrency <= 0 {
return 10 // Default value if not set
} else if maxConcurrency > maxAllowedConcurrency {
return maxAllowedConcurrency // Cap at a reasonable maximum
}
return maxConcurrency
}
// handleClusterInfo handles the cluster-info tool
func (s *MCPServer) handleClusterInfo(ctx context.Context, request *ClusterInfoRequest) (*mcp_golang.ToolResponse, error) {
// Create a new Kubernetes client
client, err := kubernetes.NewClient("", "")
if err != nil {
return mcp_golang.NewToolResponse(mcp_golang.NewTextContent(fmt.Sprintf("failed to create Kubernetes client: %v", err))), nil
}
// Get cluster info from the client
version, err := client.Client.Discovery().ServerVersion()
if err != nil {
return mcp_golang.NewToolResponse(mcp_golang.NewTextContent(fmt.Sprintf("failed to get cluster version: %v", err))), nil
}
info := fmt.Sprintf("Kubernetes %s", version.GitVersion)
return mcp_golang.NewToolResponse(mcp_golang.NewTextContent(info)), nil
}
// handleConfig handles the config tool
func (s *MCPServer) handleConfig(ctx context.Context, request *ConfigRequest) (*mcp_golang.ToolResponse, error) {
// Create a new config handler
handler := &config.Handler{}
// Convert request to AddConfigRequest
addConfigReq := &schemav1.AddConfigRequest{
CustomAnalyzers: make([]*schemav1.CustomAnalyzer, 0),
}
// Add custom analyzers if present
if len(request.CustomAnalyzers) > 0 {
for _, ca := range request.CustomAnalyzers {
addConfigReq.CustomAnalyzers = append(addConfigReq.CustomAnalyzers, &schemav1.CustomAnalyzer{
Name: ca.Name,
Connection: &schemav1.Connection{
Url: ca.Connection.Url,
Port: fmt.Sprintf("%d", ca.Connection.Port),
},
})
}
}
// Add cache configuration if present
if request.Cache.Type != "" {
cacheConfig := &schemav1.Cache{}
switch request.Cache.Type {
case "s3":
cacheConfig.CacheType = &schemav1.Cache_S3Cache{
S3Cache: &schemav1.S3Cache{
BucketName: request.Cache.BucketName,
Region: request.Cache.Region,
Endpoint: request.Cache.Endpoint,
Insecure: request.Cache.Insecure,
},
}
case "azure":
cacheConfig.CacheType = &schemav1.Cache_AzureCache{
AzureCache: &schemav1.AzureCache{
StorageAccount: request.Cache.StorageAccount,
ContainerName: request.Cache.ContainerName,
},
}
case "gcs":
cacheConfig.CacheType = &schemav1.Cache_GcsCache{
GcsCache: &schemav1.GCSCache{
BucketName: request.Cache.BucketName,
Region: request.Cache.Region,
ProjectId: request.Cache.ProjectId,
},
}
}
addConfigReq.Cache = cacheConfig
}
// Apply the configuration using the shared function
if err := handler.ApplyConfig(ctx, addConfigReq); err != nil {
return mcp_golang.NewToolResponse(mcp_golang.NewTextContent(fmt.Sprintf("Failed to add config: %v", err))), nil
}
return mcp_golang.NewToolResponse(mcp_golang.NewTextContent("Successfully added configuration")), nil
}
// registerPrompts registers the prompts for the MCP server
func (s *MCPServer) registerPrompts() error {
// Register any prompts needed for the MCP server
return nil
}
// registerResources registers the resources for the MCP server
func (s *MCPServer) registerResources() error {
if err := s.server.RegisterResource("cluster-info", "Get cluster information", "Get information about the Kubernetes cluster", "text", s.getClusterInfo); err != nil {
return fmt.Errorf("failed to register cluster-info resource: %v", err)
}
return nil
}
func (s *MCPServer) getClusterInfo(ctx context.Context) (interface{}, error) {
// Create a new Kubernetes client
client, err := kubernetes.NewClient("", "")
if err != nil {
return nil, fmt.Errorf("failed to create Kubernetes client: %v", err)
}
// Get cluster info from the client
version, err := client.Client.Discovery().ServerVersion()
if err != nil {
return nil, fmt.Errorf("failed to get cluster version: %v", err)
}
return map[string]string{
"version": version.String(),
"platform": version.Platform,
"gitVersion": version.GitVersion,
}, nil
}
// handleSSE handles Server-Sent Events for MCP
func (s *MCPServer) handleSSE(w http.ResponseWriter, r *http.Request) {
// Set headers for SSE
w.Header().Set("Content-Type", "text/event-stream")
w.Header().Set("Cache-Control", "no-cache")
w.Header().Set("Connection", "keep-alive")
w.Header().Set("Access-Control-Allow-Origin", "*")
// Create a channel to receive messages
msgChan := make(chan string)
defer close(msgChan)
// Start a goroutine to handle the stdio transport
go func() {
// TODO: Implement message handling between HTTP and stdio transport
// This would require implementing a custom transport that bridges HTTP and stdio
}()
// Send messages to the client
for msg := range msgChan {
if _, err := fmt.Fprintf(w, "data: %s\n\n", msg); err != nil {
s.logger.Error("Failed to write SSE message", zap.Error(err))
return
}
w.(http.Flusher).Flush()
}
}
// handleAnalyzeHTTP handles HTTP requests for the analyze endpoint
func (s *MCPServer) handleAnalyzeHTTP(w http.ResponseWriter, r *http.Request) {
if r.Method != http.MethodPost {
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
return
}
// Parse the request body
var req AnalyzeRequest
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
http.Error(w, fmt.Sprintf("Failed to decode request: %v", err), http.StatusBadRequest)
return
}
// Validate MaxConcurrency to prevent excessive memory allocation
req.MaxConcurrency = validateMaxConcurrency(req.MaxConcurrency)
// Call the analyze handler
resp, err := s.handleAnalyze(r.Context(), &req)
if err != nil {
http.Error(w, fmt.Sprintf("Failed to analyze: %v", err), http.StatusInternalServerError)
return
}
// Set response headers
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
// Write the response
if err := json.NewEncoder(w).Encode(resp); err != nil {
s.logger.Error("Failed to encode response", zap.Error(err))
}
}
// Close closes the MCP server and releases resources
func (s *MCPServer) Close() error {
return nil
}