feat: update Helm charts with MCP support and fix Google ADK issue (#1568)

* migrated to a more actively maintained MCP Golang library and added AI explain support for MCP mode

Signed-off-by: Umesh Kaul <umeshkaul@gmail.com>

* added a Makefile option to build a local Docker image for testing

Signed-off-by: Umesh Kaul <umeshkaul@gmail.com>

* fixed linter errors and made anonymize an argument

Signed-off-by: Umesh Kaul <umeshkaul@gmail.com>

* added MCP support to the Helm chart and fixed the Google ADK support issue

Signed-off-by: Umesh Kaul <umeshkaul@gmail.com>

---------

Signed-off-by: Umesh Kaul <umeshkaul@gmail.com>
Co-authored-by: Alex Jones <1235925+AlexsJones@users.noreply.github.com>
commit 53345895de (parent 7e332761d8)
Author: Umesh Kaul
Date: 2025-08-18 14:33:12 -04:00 (committed by GitHub)

7 changed files with 79 additions and 2 deletions


@@ -399,6 +399,26 @@ _Serve mode_
 k8sgpt serve
 ```
 
+_Serve mode with MCP (Model Context Protocol)_
+```
+# Enable MCP server on the default port (8089)
+k8sgpt serve --mcp --mcp-http
+
+# Enable MCP server on a custom port
+k8sgpt serve --mcp --mcp-http --mcp-port 8090
+
+# Full serve mode with MCP
+k8sgpt serve --mcp --mcp-http --port 8080 --metrics-port 8081 --mcp-port 8089
+```
+
+The MCP server enables integration with tools like Claude Desktop and other MCP-compatible clients. It runs on port 8089 by default and provides:
+
+- Kubernetes cluster analysis via the MCP protocol
+- Resource information and health status
+- AI-powered issue explanations and recommendations
+
+For Helm chart deployment with MCP support, see the `charts/k8sgpt/values-mcp-example.yaml` file.
+
 _Analysis with serve mode_
 ```
 
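As a quick end-to-end check of the flags above, a client can POST a JSON-RPC `initialize` request to the MCP HTTP endpoint. A minimal Go sketch, assuming the server listens on `localhost:8089` and exposes a streamable-HTTP endpoint at `/mcp` (the path and exact payload shape depend on the mcp-go version in use):

```go
// Smoke test for the MCP HTTP endpoint. Assumptions (not confirmed by the
// commit): the endpoint path is "/mcp" and the server accepts a plain
// JSON-RPC 2.0 "initialize" POST.
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// "initialize" is the first message an MCP client sends.
	body := []byte(`{
		"jsonrpc": "2.0",
		"id": 1,
		"method": "initialize",
		"params": {
			"protocolVersion": "2025-03-26",
			"capabilities": {},
			"clientInfo": {"name": "smoke-test", "version": "0.0.1"}
		}
	}`)

	req, err := http.NewRequest(http.MethodPost, "http://localhost:8089/mcp", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	// Streamable-HTTP servers may answer with plain JSON or an SSE stream.
	req.Header.Set("Accept", "application/json, text/event-stream")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	out, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status)
	fmt.Println(string(out))
}
```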


@@ -1,5 +1,5 @@
 apiVersion: v2
-appVersion: v0.3.0 #x-release-please-version
+appVersion: v0.4.23 #x-release-please-version
 description: A Helm chart for K8SGPT
 name: k8sgpt
 type: application


@@ -32,7 +32,13 @@ spec:
         image: {{ .Values.deployment.image.repository }}:{{ .Values.deployment.image.tag | default .Chart.AppVersion }}
         ports:
         - containerPort: 8080
-        args: ["serve"]
+        {{- if .Values.deployment.mcp.enabled }}
+        - containerPort: {{ .Values.deployment.mcp.port | int }}
+        {{- end }}
+        args: ["serve"
+        {{- if .Values.deployment.mcp.enabled }}, "--mcp", "-v", "--mcp-http", "--mcp-port", {{ .Values.deployment.mcp.port | quote }}
+        {{- end }}
+        ]
         {{- if .Values.deployment.resources }}
         resources:
         {{- toYaml .Values.deployment.resources | nindent 10 }}
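For reference, with `mcp.enabled: true` and the default port, the template above renders to `args: ["serve", "--mcp", "-v", "--mcp-http", "--mcp-port", "8089"]`; with MCP disabled, the conditionals drop out and it collapses back to `args: ["serve"]`.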


@@ -19,4 +19,9 @@ spec:
   - name: metrics
     port: 8081
     targetPort: 8081
+  {{- if .Values.deployment.mcp.enabled }}
+  - name: mcp
+    port: {{ .Values.deployment.mcp.port | int }}
+    targetPort: {{ .Values.deployment.mcp.port | int }}
+  {{- end }}
   type: {{ .Values.service.type }}
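Once the Service exposes the MCP port, the endpoint is reachable in-cluster at `<service>:8089`, or locally via a port-forward such as `kubectl port-forward svc/<release-name>-k8sgpt 8089:8089` (the exact Service name depends on the Helm release name).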


@@ -0,0 +1,39 @@
# Example values file to enable MCP (Model Context Protocol) service
# Copy this file and modify as needed, then use: helm install -f values-mcp-example.yaml
deployment:
# Enable MCP server
mcp:
enabled: true
port: "8089" # Port for MCP server (default: 8089)
http: true # Enable HTTP mode for MCP server
# Other deployment settings remain the same
image:
repository: ghcr.io/k8sgpt-ai/k8sgpt
tag: "" # defaults to Chart.appVersion if unspecified
imagePullPolicy: Always
env:
model: "gpt-3.5-turbo"
backend: "openai"
resources:
limits:
cpu: "1"
memory: "512Mi"
requests:
cpu: "0.2"
memory: "156Mi"
# Service configuration
service:
type: ClusterIP
annotations: {}
# Secret configuration for AI backend
secret:
secretKey: "" # base64 encoded OpenAI token
# ServiceMonitor for Prometheus metrics
serviceMonitor:
enabled: false
additionalLabels: {}
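A typical install using this example file, assuming a checkout of the repo and a hypothetical release name `k8sgpt`, would be `helm install k8sgpt ./charts/k8sgpt -f ./charts/k8sgpt/values-mcp-example.yaml`.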


@@ -7,6 +7,11 @@ deployment:
   env:
     model: "gpt-3.5-turbo"
     backend: "openai" # one of: [ openai | llama ]
+  # MCP (Model Context Protocol) server configuration
+  mcp:
+    enabled: false # Enable MCP server
+    port: "8089"   # Port for MCP server
+    http: true     # Enable HTTP mode for MCP server
   resources:
     limits:
       cpu: "1"


@@ -141,6 +141,8 @@ func (s *K8sGptMCPServer) registerToolsAndResources() error {
 		),
 		mcp.WithArray("filters",
 			mcp.Description("Provide filters to narrow down the analysis (e.g. ['Pods', 'Deployments'])"),
+			// Without the line below, the MCP server fails with the Google Agent Development Kit (ADK); interestingly, it works fine with mcpinspector.
+			mcp.WithStringItems(),
 		),
 	)
 	s.server.AddTool(analyzeTool, s.handleAnalyze)
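To make the ADK fix concrete: `mcp.WithStringItems()` makes the `filters` array declare `"items": {"type": "string"}` in the tool's JSON input schema, and Google ADK rejects array parameters whose item type is undeclared, while more lenient clients like the MCP Inspector accept them. A minimal sketch, assuming `github.com/mark3labs/mcp-go` and that `mcp.Tool` exposes its `InputSchema` for marshalling:

```go
// Prints the generated input schema for an "analyze"-style tool so the
// effect of WithStringItems can be inspected. A sketch, not the project's
// actual registration code.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/mark3labs/mcp-go/mcp"
)

func main() {
	tool := mcp.NewTool("analyze",
		mcp.WithDescription("Analyze Kubernetes resources"),
		mcp.WithArray("filters",
			mcp.Description("Filters to narrow down the analysis"),
			mcp.WithStringItems(), // omit this and the array's items carry no declared type
		),
	)

	// Expect "filters" to contain: "items": {"type": "string"}
	schema, _ := json.MarshalIndent(tool.InputSchema, "", "  ")
	fmt.Println(string(schema))
}
```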