mirror of
https://github.com/kubeshark/kubeshark.git
synced 2026-03-10 14:42:19 +00:00
Compare commits
31 Commits
v52.10.0
...
mcp-unit-t
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
fe2716aed6 | ||
|
|
2ccd716a68 | ||
|
|
a190de8f60 | ||
|
|
0bbbb473ea | ||
|
|
999299006f | ||
|
|
4c23290c29 | ||
|
|
b774d333e9 | ||
|
|
1471cb2365 | ||
|
|
9a6c81620a | ||
|
|
6d46d38701 | ||
|
|
d012ea89b6 | ||
|
|
0f1c9c52ea | ||
|
|
f3a0d35485 | ||
|
|
d6631e8565 | ||
|
|
1669680d10 | ||
|
|
19389fcba7 | ||
|
|
1b027153e3 | ||
|
|
77d16e73e8 | ||
|
|
a73c904a9b | ||
|
|
8f3f136be6 | ||
|
|
897aa44965 | ||
|
|
57093e8a25 | ||
|
|
1fd9dffc60 | ||
|
|
3b315eb89f | ||
|
|
6645e23704 | ||
|
|
b7190162ec | ||
|
|
9570b2e317 | ||
|
|
b98113a2b5 | ||
|
|
9724a0c279 | ||
|
|
47ac96a71b | ||
|
|
4dea643781 |
16
.github/workflows/release.yml
vendored
16
.github/workflows/release.yml
vendored
@@ -50,18 +50,4 @@ jobs:
|
||||
artifacts: "bin/*"
|
||||
tag: ${{ steps.version.outputs.tag }}
|
||||
prerelease: false
|
||||
bodyFile: 'bin/README.md'
|
||||
|
||||
brew:
|
||||
name: Publish a new Homebrew formulae
|
||||
needs: [release]
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Bump core homebrew formula
|
||||
uses: mislav/bump-homebrew-formula-action@v3
|
||||
with:
|
||||
# A PR will be sent to github.com/Homebrew/homebrew-core to update this formula:
|
||||
formula-name: kubeshark
|
||||
push-to: kubeshark/homebrew-core
|
||||
env:
|
||||
COMMITTER_TOKEN: ${{ secrets.COMMITTER_TOKEN }}
|
||||
bodyFile: 'bin/README.md'
|
||||
@@ -7,7 +7,7 @@ Please read and follow the guidelines below.
|
||||
|
||||
## Communication
|
||||
|
||||
* Before starting work on a major feature, please reach out to us via [GitHub](https://github.com/kubeshark/kubeshark), [Discord](https://discord.gg/WkvRGMUcx7), [Slack](https://join.slack.com/t/kubeshark/shared_invite/zt-1k3sybpq9-uAhFkuPJiJftKniqrGHGhg), [email](mailto:info@kubeshark.co), etc. We will make sure no one else is already working on it. A _major feature_ is defined as any change that is > 100 LOC altered (not including tests), or changes any user-facing behavior
|
||||
* Before starting work on a major feature, please reach out to us via [GitHub](https://github.com/kubeshark/kubeshark), [Discord](https://discord.gg/WkvRGMUcx7), [Slack](https://join.slack.com/t/kubeshark/shared_invite/zt-1k3sybpq9-uAhFkuPJiJftKniqrGHGhg), [email](mailto:info@kubeshark.com), etc. We will make sure no one else is already working on it. A _major feature_ is defined as any change that is > 100 LOC altered (not including tests), or changes any user-facing behavior
|
||||
* Small patches and bug fixes don't need prior communication.
|
||||
|
||||
## Contribution Requirements
|
||||
|
||||
78
Makefile
78
Makefile
@@ -74,6 +74,69 @@ clean: ## Clean all build artifacts.
|
||||
test: ## Run cli tests.
|
||||
@go test ./... -coverpkg=./... -race -coverprofile=coverage.out -covermode=atomic
|
||||
|
||||
test-integration: ## Run integration tests (requires Kubernetes cluster).
|
||||
@echo "Running integration tests..."
|
||||
@LOG_FILE=$$(mktemp /tmp/integration-test.XXXXXX.log); \
|
||||
go test -tags=integration -timeout $${INTEGRATION_TIMEOUT:-5m} -v ./integration/... 2>&1 | tee $$LOG_FILE; \
|
||||
status=$$?; \
|
||||
echo ""; \
|
||||
echo "========================================"; \
|
||||
echo " INTEGRATION TEST SUMMARY"; \
|
||||
echo "========================================"; \
|
||||
grep -E "^(--- PASS|--- FAIL|--- SKIP)" $$LOG_FILE || true; \
|
||||
echo "----------------------------------------"; \
|
||||
pass=$$(grep -c "^--- PASS" $$LOG_FILE 2>/dev/null || true); \
|
||||
fail=$$(grep -c "^--- FAIL" $$LOG_FILE 2>/dev/null || true); \
|
||||
skip=$$(grep -c "^--- SKIP" $$LOG_FILE 2>/dev/null || true); \
|
||||
echo "PASSED: $${pass:-0}"; \
|
||||
echo "FAILED: $${fail:-0}"; \
|
||||
echo "SKIPPED: $${skip:-0}"; \
|
||||
echo "========================================"; \
|
||||
rm -f $$LOG_FILE; \
|
||||
exit $$status
|
||||
|
||||
test-integration-mcp: ## Run only MCP integration tests.
|
||||
@echo "Running MCP integration tests..."
|
||||
@LOG_FILE=$$(mktemp /tmp/integration-test.XXXXXX.log); \
|
||||
go test -tags=integration -timeout $${INTEGRATION_TIMEOUT:-5m} -v ./integration/ -run "MCP" 2>&1 | tee $$LOG_FILE; \
|
||||
status=$$?; \
|
||||
echo ""; \
|
||||
echo "========================================"; \
|
||||
echo " INTEGRATION TEST SUMMARY"; \
|
||||
echo "========================================"; \
|
||||
grep -E "^(--- PASS|--- FAIL|--- SKIP)" $$LOG_FILE || true; \
|
||||
echo "----------------------------------------"; \
|
||||
pass=$$(grep -c "^--- PASS" $$LOG_FILE 2>/dev/null || true); \
|
||||
fail=$$(grep -c "^--- FAIL" $$LOG_FILE 2>/dev/null || true); \
|
||||
skip=$$(grep -c "^--- SKIP" $$LOG_FILE 2>/dev/null || true); \
|
||||
echo "PASSED: $${pass:-0}"; \
|
||||
echo "FAILED: $${fail:-0}"; \
|
||||
echo "SKIPPED: $${skip:-0}"; \
|
||||
echo "========================================"; \
|
||||
rm -f $$LOG_FILE; \
|
||||
exit $$status
|
||||
|
||||
test-integration-short: ## Run quick integration tests (skips long-running tests).
|
||||
@echo "Running quick integration tests..."
|
||||
@LOG_FILE=$$(mktemp /tmp/integration-test.XXXXXX.log); \
|
||||
go test -tags=integration -timeout $${INTEGRATION_TIMEOUT:-2m} -short -v ./integration/... 2>&1 | tee $$LOG_FILE; \
|
||||
status=$$?; \
|
||||
echo ""; \
|
||||
echo "========================================"; \
|
||||
echo " INTEGRATION TEST SUMMARY"; \
|
||||
echo "========================================"; \
|
||||
grep -E "^(--- PASS|--- FAIL|--- SKIP)" $$LOG_FILE || true; \
|
||||
echo "----------------------------------------"; \
|
||||
pass=$$(grep -c "^--- PASS" $$LOG_FILE 2>/dev/null || true); \
|
||||
fail=$$(grep -c "^--- FAIL" $$LOG_FILE 2>/dev/null || true); \
|
||||
skip=$$(grep -c "^--- SKIP" $$LOG_FILE 2>/dev/null || true); \
|
||||
echo "PASSED: $${pass:-0}"; \
|
||||
echo "FAILED: $${fail:-0}"; \
|
||||
echo "SKIPPED: $${skip:-0}"; \
|
||||
echo "========================================"; \
|
||||
rm -f $$LOG_FILE; \
|
||||
exit $$status
|
||||
|
||||
lint: ## Lint the source code.
|
||||
golangci-lint run
|
||||
|
||||
@@ -84,8 +147,10 @@ kubectl-view-kubeshark-resources: ## This command outputs all Kubernetes resourc
|
||||
./kubectl.sh view-kubeshark-resources
|
||||
|
||||
generate-helm-values: ## Generate the Helm values from config.yaml
|
||||
mv ~/.kubeshark/config.yaml ~/.kubeshark/config.yaml.old; bin/kubeshark__ config>helm-chart/values.yaml;mv ~/.kubeshark/config.yaml.old ~/.kubeshark/config.yaml
|
||||
sed -i 's/^license:.*/license: ""/' helm-chart/values.yaml && sed -i '1i # find a detailed description here: https://github.com/kubeshark/kubeshark/blob/master/helm-chart/README.md' helm-chart/values.yaml
|
||||
# [ -f ~/.kubeshark/config.yaml ] && mv ~/.kubeshark/config.yaml ~/.kubeshark/config.yaml.old
|
||||
bin/kubeshark__ config>helm-chart/values.yaml
|
||||
# [ -f ~/.kubeshark/config.yaml.old ] && mv ~/.kubeshark/config.yaml.old ~/.kubeshark/config.yaml
|
||||
# sed -i 's/^license:.*/license: ""/' helm-chart/values.yaml && sed -i '1i # find a detailed description here: https://github.com/kubeshark/kubeshark/blob/master/helm-chart/README.md' helm-chart/values.yaml
|
||||
|
||||
generate-manifests: ## Generate the manifests from the Helm chart using default configuration
|
||||
helm template kubeshark -n default ./helm-chart > ./manifests/complete.yaml
|
||||
@@ -189,8 +254,8 @@ release:
|
||||
@make generate-helm-values && make generate-manifests
|
||||
@git add -A . && git commit -m ":bookmark: Bump the Helm chart version to $(VERSION)" && git push
|
||||
@git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags
|
||||
@cd helm-chart && rm -rf ../../kubeshark.github.io/charts/chart && mkdir ../../kubeshark.github.io/charts/chart && cp -r . ../../kubeshark.github.io/charts/chart/
|
||||
@cd ../../kubeshark.github.io/ && git add -A . && git commit -m ":sparkles: Update the Helm chart" && git push
|
||||
@rm -rf ../kubeshark.github.io/charts/chart && mkdir ../kubeshark.github.io/charts/chart && cp -r helm-chart/ ../kubeshark.github.io/charts/chart/
|
||||
@cd ../kubeshark.github.io/ && git add -A . && git commit -m ":sparkles: Update the Helm chart" && git push
|
||||
@cd ../kubeshark
|
||||
|
||||
release-dry-run:
|
||||
@@ -198,11 +263,14 @@ release-dry-run:
|
||||
@cd ../tracer && git checkout master && git pull
|
||||
@cd ../hub && git checkout master && git pull
|
||||
@cd ../front && git checkout master && git pull
|
||||
@cd ../kubeshark && git checkout master && git pull && sed -i "s/^version:.*/version: \"$(shell echo $(VERSION) | sed -E 's/^([0-9]+\.[0-9]+\.[0-9]+)\..*/\1/')\"/" helm-chart/Chart.yaml && make
|
||||
@cd ../kubeshark && sed -i "s/^version:.*/version: \"$(shell echo $(VERSION) | sed -E 's/^([0-9]+\.[0-9]+\.[0-9]+)\..*/\1/')\"/" helm-chart/Chart.yaml && make
|
||||
@if [ "$(shell uname)" = "Darwin" ]; then \
|
||||
codesign --sign - --force --preserve-metadata=entitlements,requirements,flags,runtime ./bin/kubeshark__; \
|
||||
fi
|
||||
@make generate-helm-values && make generate-manifests
|
||||
@rm -rf ../kubeshark.github.io/charts/chart && mkdir ../kubeshark.github.io/charts/chart && cp -r helm-chart/ ../kubeshark.github.io/charts/chart/
|
||||
@cd ../kubeshark.github.io/
|
||||
@cd ../kubeshark
|
||||
|
||||
branch:
|
||||
@cd ../worker && git checkout master && git pull && git checkout -b $(name); git push --set-upstream origin $(name)
|
||||
|
||||
28
README.md
28
README.md
@@ -21,26 +21,40 @@
|
||||
</p>
|
||||
<p align="center">
|
||||
<b>
|
||||
We're currently experiencing issues and our team is working around the clock to resolve them as quickly as possible. We'll keep you updated once everything is back to normal. In the meantime, feel free to reach out to us on <a href="https://join.slack.com/t/kubeshark/shared_invite/zt-3jdcdgxdv-1qNkhBh9c6CFoE7bSPkpBQ">Slack</a> or email us at <a href="mailto:support@kubehq.io">support@kubehq.io</a>.
|
||||
Want to see Kubeshark in action right now? Visit this
|
||||
<a href="https://demo.kubeshark.com/">live demo deployment</a> of Kubeshark.
|
||||
</b>
|
||||
</p>
|
||||
|
||||
**Kubeshark** is a network observability platform for Kubernetes, providing real-time, cluster-wide visibility into Kubernetes’ network. It enables users to inspect all internal and external cluster communications, API calls, and data in transit. Additionally, Kubeshark detects anomalies and emergent behaviors, trigger autonomous remediations, and generate deep network insights.
|
||||
**Kubeshark** is an API traffic analyzer for Kubernetes, providing deep packet inspection with complete API and Kubernetes contexts, retaining cluster-wide L4 traffic (PCAP), and using minimal production compute resources.
|
||||
|
||||

|
||||
|
||||
Think [TCPDump](https://en.wikipedia.org/wiki/Tcpdump) and [Wireshark](https://www.wireshark.org/) reimagined for Kubernetes.
|
||||
|
||||
Access cluster-wide PCAP traffic by pressing a single button, without the need to install `tcpdump` or manually copy files. Understand the traffic context in relation to the API and Kubernetes contexts.
|
||||
|
||||
#### Service-Map w/Kubernetes Context
|
||||
|
||||

|
||||
|
||||
#### Cluster-Wide PCAP Recording
|
||||
#### Export Cluster-Wide L4 Traffic (PCAP)
|
||||
|
||||

|
||||
Imagine having a cluster-wide [TCPDump](https://www.tcpdump.org/)-like capability—exporting a single [PCAP](https://www.ietf.org/archive/id/draft-gharris-opsawg-pcap-01.html) file that consolidates traffic from multiple nodes, all accessible with a single click.
|
||||
|
||||
1. Go to the **Snapshots** tab
|
||||
2. Create a new snapshot
|
||||
3. **Optionally** select the nodes (default: all nodes)
|
||||
4. **Optionally** select the time frame (default: last one hour)
|
||||
5. Press **Create**
|
||||
|
||||
<img width="3342" height="1206" alt="image" src="https://github.com/user-attachments/assets/e8e47996-52b7-4028-9698-f059a13ffdb7" />
|
||||
|
||||
|
||||
Once the snapshot is ready, click the PCAP file to export its contents and open it in Wireshark.
|
||||
|
||||
## Getting Started
|
||||
Download **Kubeshark**'s binary distribution [latest release](https://github.com/kubeshark/kubeshark/releases/latest) or use one of the following methods to deploy **Kubeshark**. The [web-based dashboard](https://docs.kubehq.com/en/ui) should open in your browser, showing a real-time view of your cluster's traffic.
|
||||
Download **Kubeshark**'s binary distribution [latest release](https://github.com/kubeshark/kubeshark/releases/latest) or use one of the following methods to deploy **Kubeshark**. The [web-based dashboard](https://docs.kubeshark.com/en/ui) should open in your browser, showing a real-time view of your cluster's traffic.
|
||||
|
||||
### Homebrew
|
||||
|
||||
@@ -61,7 +75,7 @@ kubeshark clean
|
||||
Add the Helm repository and install the chart:
|
||||
|
||||
```shell
|
||||
helm repo add kubeshark https://helm.kubehq.com
|
||||
helm repo add kubeshark https://helm.kubeshark.com
|
||||
helm install kubeshark kubeshark/kubeshark
|
||||
```
|
||||
Follow the on-screen instructions how to connect to the dashboard.
|
||||
@@ -77,7 +91,7 @@ Clone this repository and run the `make` command to build it. After the build is
|
||||
|
||||
## Documentation
|
||||
|
||||
To learn more, read the [documentation](https://docs.kubehq.com).
|
||||
To learn more, read the [documentation](https://docs.kubeshark.com).
|
||||
|
||||
## Contributing
|
||||
|
||||
|
||||
125
cmd/mcp.go
Normal file
125
cmd/mcp.go
Normal file
@@ -0,0 +1,125 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"github.com/kubeshark/kubeshark/config"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var mcpURL string
|
||||
var mcpKubeconfig string
|
||||
var mcpListTools bool
|
||||
var mcpConfig bool
|
||||
var mcpAllowDestructive bool
|
||||
|
||||
var mcpCmd = &cobra.Command{
|
||||
Use: "mcp",
|
||||
Short: "Run MCP (Model Context Protocol) server for AI assistant integration",
|
||||
Long: `Run an MCP server over stdio that exposes Kubeshark's L7 API visibility
|
||||
to AI assistants like Claude Desktop.
|
||||
|
||||
TOOLS PROVIDED:
|
||||
|
||||
Cluster Management (work without Kubeshark running):
|
||||
- check_kubeshark_status: Check if Kubeshark is running in the cluster
|
||||
- start_kubeshark: Start Kubeshark to capture traffic
|
||||
- stop_kubeshark: Stop Kubeshark and clean up resources
|
||||
|
||||
Traffic Analysis (require Kubeshark running):
|
||||
- list_workloads: Discover pods, services, namespaces, and nodes with L7 traffic
|
||||
- list_api_calls: Query L7 API transactions (HTTP, gRPC, etc.)
|
||||
- get_api_call: Get detailed information about a specific API call
|
||||
- get_api_stats: Get aggregated API statistics
|
||||
|
||||
CONFIGURATION:
|
||||
|
||||
To use with Claude Desktop, add to your claude_desktop_config.json
|
||||
(typically at ~/Library/Application Support/Claude/claude_desktop_config.json):
|
||||
|
||||
{
|
||||
"mcpServers": {
|
||||
"kubeshark": {
|
||||
"command": "/path/to/kubeshark",
|
||||
"args": ["mcp", "--kubeconfig", "/Users/YOUR_USERNAME/.kube/config"]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
DIRECT URL MODE:
|
||||
|
||||
If Kubeshark is already running and accessible via URL (e.g., exposed via ingress),
|
||||
you can connect directly without needing kubectl/kubeconfig:
|
||||
|
||||
{
|
||||
"mcpServers": {
|
||||
"kubeshark": {
|
||||
"command": "/path/to/kubeshark",
|
||||
"args": ["mcp", "--url", "https://kubeshark.example.com"]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
In URL mode, destructive tools (start/stop) are disabled since Kubeshark is
|
||||
managed externally. The check_kubeshark_status tool remains available to confirm connectivity.
|
||||
|
||||
DESTRUCTIVE OPERATIONS:
|
||||
|
||||
By default, destructive operations (start_kubeshark, stop_kubeshark) are disabled
|
||||
to prevent accidental cluster modifications. To enable them, use --allow-destructive:
|
||||
|
||||
{
|
||||
"mcpServers": {
|
||||
"kubeshark": {
|
||||
"command": "/path/to/kubeshark",
|
||||
"args": ["mcp", "--allow-destructive", "--kubeconfig", "/path/to/.kube/config"]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
CUSTOM SETTINGS:
|
||||
|
||||
To use custom settings when starting Kubeshark, use the --set flag:
|
||||
|
||||
{
|
||||
"mcpServers": {
|
||||
"kubeshark": {
|
||||
"command": "/path/to/kubeshark",
|
||||
"args": ["mcp", "--set", "tap.docker.tag=v52.3"],
|
||||
...
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Multiple --set flags can be used for different settings.`,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
// Handle --mcp-config flag
|
||||
if mcpConfig {
|
||||
printMCPConfig(mcpURL, mcpKubeconfig)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Set kubeconfig path if provided
|
||||
if mcpKubeconfig != "" {
|
||||
config.Config.Kube.ConfigPathStr = mcpKubeconfig
|
||||
}
|
||||
|
||||
// Handle --list-tools flag
|
||||
if mcpListTools {
|
||||
listMCPTools(mcpURL)
|
||||
return nil
|
||||
}
|
||||
|
||||
setFlags, _ := cmd.Flags().GetStringSlice(config.SetCommandName)
|
||||
runMCPWithConfig(setFlags, mcpURL, mcpAllowDestructive)
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(mcpCmd)
|
||||
|
||||
mcpCmd.Flags().StringVar(&mcpURL, "url", "", "Direct URL to Kubeshark (e.g., https://kubeshark.example.com). When set, connects directly without kubectl/proxy and disables start/stop tools.")
|
||||
mcpCmd.Flags().StringVar(&mcpKubeconfig, "kubeconfig", "", "Path to kubeconfig file (e.g., /Users/me/.kube/config)")
|
||||
mcpCmd.Flags().BoolVar(&mcpListTools, "list-tools", false, "List available MCP tools and exit")
|
||||
mcpCmd.Flags().BoolVar(&mcpConfig, "mcp-config", false, "Print MCP client configuration JSON and exit")
|
||||
mcpCmd.Flags().BoolVar(&mcpAllowDestructive, "allow-destructive", false, "Enable destructive operations (start_kubeshark, stop_kubeshark). Without this flag, only read-only traffic analysis tools are available.")
|
||||
}
|
||||
1071
cmd/mcpRunner.go
Normal file
1071
cmd/mcpRunner.go
Normal file
File diff suppressed because it is too large
Load Diff
495
cmd/mcp_test.go
Normal file
495
cmd/mcp_test.go
Normal file
@@ -0,0 +1,495 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func newTestMCPServer() *mcpServer {
|
||||
return &mcpServer{httpClient: &http.Client{}, stdin: &bytes.Buffer{}, stdout: &bytes.Buffer{}}
|
||||
}
|
||||
|
||||
func sendRequest(s *mcpServer, method string, id any, params any) string {
|
||||
req := jsonRPCRequest{
|
||||
JSONRPC: "2.0",
|
||||
ID: id,
|
||||
Method: method,
|
||||
}
|
||||
if params != nil {
|
||||
paramsBytes, _ := json.Marshal(params)
|
||||
req.Params = paramsBytes
|
||||
}
|
||||
|
||||
s.handleRequest(&req)
|
||||
|
||||
output := s.stdout.(*bytes.Buffer).String()
|
||||
s.stdout.(*bytes.Buffer).Reset()
|
||||
return output
|
||||
}
|
||||
|
||||
func parseResponse(t *testing.T, output string) jsonRPCResponse {
|
||||
var resp jsonRPCResponse
|
||||
if err := json.Unmarshal([]byte(strings.TrimSpace(output)), &resp); err != nil {
|
||||
t.Fatalf("Failed to parse response: %v\nOutput: %s", err, output)
|
||||
}
|
||||
return resp
|
||||
}
|
||||
|
||||
func TestMCP_Initialize(t *testing.T) {
|
||||
s := newTestMCPServer()
|
||||
resp := parseResponse(t, sendRequest(s, "initialize", 1, nil))
|
||||
|
||||
if resp.ID != float64(1) || resp.Error != nil {
|
||||
t.Fatalf("Expected ID 1 with no error, got ID=%v, error=%v", resp.ID, resp.Error)
|
||||
}
|
||||
|
||||
result := resp.Result.(map[string]any)
|
||||
if result["protocolVersion"] != "2024-11-05" {
|
||||
t.Errorf("Expected protocolVersion 2024-11-05, got %v", result["protocolVersion"])
|
||||
}
|
||||
if result["serverInfo"].(map[string]any)["name"] != "kubeshark-mcp" {
|
||||
t.Error("Expected server name kubeshark-mcp")
|
||||
}
|
||||
if !strings.Contains(result["instructions"].(string), "check_kubeshark_status") {
|
||||
t.Error("Instructions should mention check_kubeshark_status")
|
||||
}
|
||||
if _, ok := result["capabilities"].(map[string]any)["prompts"]; !ok {
|
||||
t.Error("Expected prompts capability")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_Ping(t *testing.T) {
|
||||
resp := parseResponse(t, sendRequest(newTestMCPServer(), "ping", 42, nil))
|
||||
if resp.ID != float64(42) || resp.Error != nil || len(resp.Result.(map[string]any)) != 0 {
|
||||
t.Errorf("Expected ID 42, no error, empty result")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_InitializedNotification(t *testing.T) {
|
||||
s := newTestMCPServer()
|
||||
for _, method := range []string{"initialized", "notifications/initialized"} {
|
||||
if output := sendRequest(s, method, nil, nil); output != "" {
|
||||
t.Errorf("Expected no output for %s, got: %s", method, output)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_UnknownMethod(t *testing.T) {
|
||||
resp := parseResponse(t, sendRequest(newTestMCPServer(), "unknown/method", 1, nil))
|
||||
if resp.Error == nil || resp.Error.Code != -32601 {
|
||||
t.Fatalf("Expected error code -32601, got %v", resp.Error)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_PromptsList(t *testing.T) {
|
||||
resp := parseResponse(t, sendRequest(newTestMCPServer(), "prompts/list", 1, nil))
|
||||
if resp.Error != nil {
|
||||
t.Fatalf("Unexpected error: %v", resp.Error)
|
||||
}
|
||||
prompts := resp.Result.(map[string]any)["prompts"].([]any)
|
||||
if len(prompts) != 1 || prompts[0].(map[string]any)["name"] != "kubeshark_usage" {
|
||||
t.Error("Expected 1 prompt named 'kubeshark_usage'")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_PromptsGet(t *testing.T) {
|
||||
resp := parseResponse(t, sendRequest(newTestMCPServer(), "prompts/get", 1, map[string]any{"name": "kubeshark_usage"}))
|
||||
if resp.Error != nil {
|
||||
t.Fatalf("Unexpected error: %v", resp.Error)
|
||||
}
|
||||
messages := resp.Result.(map[string]any)["messages"].([]any)
|
||||
if len(messages) == 0 {
|
||||
t.Fatal("Expected at least one message")
|
||||
}
|
||||
text := messages[0].(map[string]any)["content"].(map[string]any)["text"].(string)
|
||||
for _, phrase := range []string{"check_kubeshark_status", "start_kubeshark", "stop_kubeshark"} {
|
||||
if !strings.Contains(text, phrase) {
|
||||
t.Errorf("Prompt should contain '%s'", phrase)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_PromptsGet_UnknownPrompt(t *testing.T) {
|
||||
resp := parseResponse(t, sendRequest(newTestMCPServer(), "prompts/get", 1, map[string]any{"name": "unknown"}))
|
||||
if resp.Error == nil || resp.Error.Code != -32602 {
|
||||
t.Fatalf("Expected error code -32602, got %v", resp.Error)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_ToolsList_CLIOnly(t *testing.T) {
|
||||
resp := parseResponse(t, sendRequest(newTestMCPServer(), "tools/list", 1, nil))
|
||||
if resp.Error != nil {
|
||||
t.Fatalf("Unexpected error: %v", resp.Error)
|
||||
}
|
||||
tools := resp.Result.(map[string]any)["tools"].([]any)
|
||||
if len(tools) != 1 || tools[0].(map[string]any)["name"] != "check_kubeshark_status" {
|
||||
t.Error("Expected only check_kubeshark_status tool")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_ToolsList_WithDestructive(t *testing.T) {
|
||||
s := &mcpServer{httpClient: &http.Client{}, stdin: &bytes.Buffer{}, stdout: &bytes.Buffer{}, allowDestructive: true}
|
||||
resp := parseResponse(t, sendRequest(s, "tools/list", 1, nil))
|
||||
if resp.Error != nil {
|
||||
t.Fatalf("Unexpected error: %v", resp.Error)
|
||||
}
|
||||
tools := resp.Result.(map[string]any)["tools"].([]any)
|
||||
toolNames := make(map[string]bool)
|
||||
for _, tool := range tools {
|
||||
toolNames[tool.(map[string]any)["name"].(string)] = true
|
||||
}
|
||||
for _, expected := range []string{"check_kubeshark_status", "start_kubeshark", "stop_kubeshark"} {
|
||||
if !toolNames[expected] {
|
||||
t.Errorf("Missing expected tool: %s", expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_ToolsList_WithHubBackend(t *testing.T) {
|
||||
mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.URL.Path == "/" || r.URL.Path == "" {
|
||||
_, _ = w.Write([]byte(`{"name":"hub","tools":[{"name":"list_workloads","description":"","inputSchema":{}},{"name":"list_api_calls","description":"","inputSchema":{}}]}`))
|
||||
}
|
||||
}))
|
||||
defer mockServer.Close()
|
||||
|
||||
s := &mcpServer{httpClient: &http.Client{}, stdin: &bytes.Buffer{}, stdout: &bytes.Buffer{}, hubBaseURL: mockServer.URL, backendInitialized: true, allowDestructive: true}
|
||||
resp := parseResponse(t, sendRequest(s, "tools/list", 1, nil))
|
||||
if resp.Error != nil {
|
||||
t.Fatalf("Unexpected error: %v", resp.Error)
|
||||
}
|
||||
tools := resp.Result.(map[string]any)["tools"].([]any)
|
||||
// Should have CLI tools (3) + Hub tools (2) = 5 tools
|
||||
if len(tools) < 5 {
|
||||
t.Errorf("Expected at least 5 tools, got %d", len(tools))
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_ToolsCallUnknownTool(t *testing.T) {
|
||||
s, mockServer := newTestMCPServerWithMockBackend(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
})
|
||||
defer mockServer.Close()
|
||||
|
||||
resp := parseResponse(t, sendRequest(s, "tools/call", 1, mcpCallToolParams{Name: "unknown"}))
|
||||
if !resp.Result.(map[string]any)["isError"].(bool) {
|
||||
t.Error("Expected isError=true for unknown tool")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_ToolsCallInvalidParams(t *testing.T) {
|
||||
s := newTestMCPServer()
|
||||
req := jsonRPCRequest{JSONRPC: "2.0", ID: 1, Method: "tools/call", Params: json.RawMessage(`"invalid"`)}
|
||||
s.handleRequest(&req)
|
||||
resp := parseResponse(t, s.stdout.(*bytes.Buffer).String())
|
||||
if resp.Error == nil || resp.Error.Code != -32602 {
|
||||
t.Fatalf("Expected error code -32602")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_CheckKubesharkStatus(t *testing.T) {
|
||||
for _, tc := range []struct {
|
||||
name string
|
||||
args map[string]any
|
||||
}{
|
||||
{"no_config", map[string]any{}},
|
||||
{"with_namespace", map[string]any{"release_namespace": "custom-ns"}},
|
||||
} {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
resp := parseResponse(t, sendRequest(newTestMCPServer(), "tools/call", 1, mcpCallToolParams{Name: "check_kubeshark_status", Arguments: tc.args}))
|
||||
if resp.Error != nil {
|
||||
t.Fatalf("Unexpected error: %v", resp.Error)
|
||||
}
|
||||
content := resp.Result.(map[string]any)["content"].([]any)
|
||||
if len(content) == 0 || content[0].(map[string]any)["text"].(string) == "" {
|
||||
t.Error("Expected non-empty response")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func newTestMCPServerWithMockBackend(handler http.HandlerFunc) (*mcpServer, *httptest.Server) {
|
||||
mockServer := httptest.NewServer(handler)
|
||||
return &mcpServer{httpClient: &http.Client{}, stdin: &bytes.Buffer{}, stdout: &bytes.Buffer{}, hubBaseURL: mockServer.URL, backendInitialized: true}, mockServer
|
||||
}
|
||||
|
||||
type hubToolCallRequest struct {
|
||||
Tool string `json:"tool"`
|
||||
Arguments map[string]any `json:"arguments"`
|
||||
}
|
||||
|
||||
func newMockHubHandler(t *testing.T, handler func(req hubToolCallRequest) (string, int)) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.URL.Path != "/tools/call" || r.Method != http.MethodPost {
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
var req hubToolCallRequest
|
||||
_ = json.NewDecoder(r.Body).Decode(&req)
|
||||
resp, status := handler(req)
|
||||
w.WriteHeader(status)
|
||||
_, _ = w.Write([]byte(resp))
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_ListWorkloads(t *testing.T) {
|
||||
s, mockServer := newTestMCPServerWithMockBackend(newMockHubHandler(t, func(req hubToolCallRequest) (string, int) {
|
||||
if req.Tool != "list_workloads" {
|
||||
t.Errorf("Expected tool 'list_workloads', got %s", req.Tool)
|
||||
}
|
||||
return `{"workloads": [{"name": "test-pod"}]}`, http.StatusOK
|
||||
}))
|
||||
defer mockServer.Close()
|
||||
|
||||
resp := parseResponse(t, sendRequest(s, "tools/call", 1, mcpCallToolParams{Name: "list_workloads", Arguments: map[string]any{"type": "pod"}}))
|
||||
if resp.Error != nil {
|
||||
t.Fatalf("Unexpected error: %v", resp.Error)
|
||||
}
|
||||
text := resp.Result.(map[string]any)["content"].([]any)[0].(map[string]any)["text"].(string)
|
||||
if !strings.Contains(text, "test-pod") {
|
||||
t.Errorf("Expected 'test-pod' in response")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_ListAPICalls(t *testing.T) {
|
||||
s, mockServer := newTestMCPServerWithMockBackend(newMockHubHandler(t, func(req hubToolCallRequest) (string, int) {
|
||||
if req.Tool != "list_api_calls" {
|
||||
t.Errorf("Expected tool 'list_api_calls', got %s", req.Tool)
|
||||
}
|
||||
return `{"calls": [{"id": "123", "path": "/api/users"}]}`, http.StatusOK
|
||||
}))
|
||||
defer mockServer.Close()
|
||||
|
||||
resp := parseResponse(t, sendRequest(s, "tools/call", 1, mcpCallToolParams{Name: "list_api_calls", Arguments: map[string]any{"proto": "http"}}))
|
||||
if resp.Error != nil {
|
||||
t.Fatalf("Unexpected error: %v", resp.Error)
|
||||
}
|
||||
if !strings.Contains(resp.Result.(map[string]any)["content"].([]any)[0].(map[string]any)["text"].(string), "/api/users") {
|
||||
t.Error("Expected '/api/users' in response")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_GetAPICall(t *testing.T) {
|
||||
s, mockServer := newTestMCPServerWithMockBackend(newMockHubHandler(t, func(req hubToolCallRequest) (string, int) {
|
||||
if req.Tool != "get_api_call" || req.Arguments["id"] != "abc123" {
|
||||
t.Errorf("Expected get_api_call with id=abc123")
|
||||
}
|
||||
return `{"id": "abc123", "path": "/api/orders"}`, http.StatusOK
|
||||
}))
|
||||
defer mockServer.Close()
|
||||
|
||||
resp := parseResponse(t, sendRequest(s, "tools/call", 1, mcpCallToolParams{Name: "get_api_call", Arguments: map[string]any{"id": "abc123"}}))
|
||||
if resp.Error != nil || !strings.Contains(resp.Result.(map[string]any)["content"].([]any)[0].(map[string]any)["text"].(string), "abc123") {
|
||||
t.Error("Expected response containing 'abc123'")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_GetAPICall_MissingID(t *testing.T) {
|
||||
s, mockServer := newTestMCPServerWithMockBackend(newMockHubHandler(t, func(req hubToolCallRequest) (string, int) {
|
||||
return `{"error": "id is required"}`, http.StatusBadRequest
|
||||
}))
|
||||
defer mockServer.Close()
|
||||
|
||||
resp := parseResponse(t, sendRequest(s, "tools/call", 1, mcpCallToolParams{Name: "get_api_call", Arguments: map[string]any{}}))
|
||||
if !resp.Result.(map[string]any)["isError"].(bool) {
|
||||
t.Error("Expected isError=true")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_GetAPIStats(t *testing.T) {
|
||||
s, mockServer := newTestMCPServerWithMockBackend(newMockHubHandler(t, func(req hubToolCallRequest) (string, int) {
|
||||
if req.Tool != "get_api_stats" {
|
||||
t.Errorf("Expected get_api_stats")
|
||||
}
|
||||
return `{"stats": {"total_calls": 1000}}`, http.StatusOK
|
||||
}))
|
||||
defer mockServer.Close()
|
||||
|
||||
resp := parseResponse(t, sendRequest(s, "tools/call", 1, mcpCallToolParams{Name: "get_api_stats", Arguments: map[string]any{"ns": "prod"}}))
|
||||
if resp.Error != nil || !strings.Contains(resp.Result.(map[string]any)["content"].([]any)[0].(map[string]any)["text"].(string), "total_calls") {
|
||||
t.Error("Expected 'total_calls' in response")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_APITools_BackendError(t *testing.T) {
|
||||
s, mockServer := newTestMCPServerWithMockBackend(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
})
|
||||
defer mockServer.Close()
|
||||
|
||||
resp := parseResponse(t, sendRequest(s, "tools/call", 1, mcpCallToolParams{Name: "list_workloads"}))
|
||||
if !resp.Result.(map[string]any)["isError"].(bool) {
|
||||
t.Error("Expected isError=true for backend error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_APITools_BackendConnectionError(t *testing.T) {
|
||||
s := &mcpServer{httpClient: &http.Client{}, stdin: &bytes.Buffer{}, stdout: &bytes.Buffer{}, hubBaseURL: "http://localhost:99999", backendInitialized: true}
|
||||
resp := parseResponse(t, sendRequest(s, "tools/call", 1, mcpCallToolParams{Name: "list_workloads"}))
|
||||
if !resp.Result.(map[string]any)["isError"].(bool) {
|
||||
t.Error("Expected isError=true for connection error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_RunLoop_ParseError(t *testing.T) {
|
||||
output := &bytes.Buffer{}
|
||||
s := &mcpServer{httpClient: &http.Client{}, stdin: strings.NewReader("invalid\n"), stdout: output}
|
||||
s.run()
|
||||
if resp := parseResponse(t, output.String()); resp.Error == nil || resp.Error.Code != -32700 {
|
||||
t.Fatalf("Expected error code -32700")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_RunLoop_MultipleRequests(t *testing.T) {
|
||||
output := &bytes.Buffer{}
|
||||
s := &mcpServer{httpClient: &http.Client{}, stdin: strings.NewReader(`{"jsonrpc":"2.0","id":1,"method":"ping"}
|
||||
{"jsonrpc":"2.0","id":2,"method":"ping"}
|
||||
`), stdout: output}
|
||||
s.run()
|
||||
if lines := strings.Split(strings.TrimSpace(output.String()), "\n"); len(lines) != 2 {
|
||||
t.Fatalf("Expected 2 responses, got %d", len(lines))
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_RunLoop_EmptyLines(t *testing.T) {
|
||||
output := &bytes.Buffer{}
|
||||
s := &mcpServer{httpClient: &http.Client{}, stdin: strings.NewReader("\n\n{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"ping\"}\n"), stdout: output}
|
||||
s.run()
|
||||
if lines := strings.Split(strings.TrimSpace(output.String()), "\n"); len(lines) != 1 {
|
||||
t.Fatalf("Expected 1 response, got %d", len(lines))
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_ResponseFormat(t *testing.T) {
|
||||
s := newTestMCPServer()
|
||||
// Numeric ID
|
||||
if resp := parseResponse(t, sendRequest(s, "ping", 123, nil)); resp.ID != float64(123) || resp.JSONRPC != "2.0" {
|
||||
t.Errorf("Expected ID 123 and jsonrpc 2.0")
|
||||
}
|
||||
// String ID
|
||||
if resp := parseResponse(t, sendRequest(s, "ping", "str", nil)); resp.ID != "str" {
|
||||
t.Errorf("Expected ID 'str'")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_ToolCallResult_ContentFormat(t *testing.T) {
|
||||
s, mockServer := newTestMCPServerWithMockBackend(func(w http.ResponseWriter, r *http.Request) {
|
||||
_, _ = w.Write([]byte(`{"data": "test"}`))
|
||||
})
|
||||
defer mockServer.Close()
|
||||
|
||||
resp := parseResponse(t, sendRequest(s, "tools/call", 1, mcpCallToolParams{Name: "list_workloads"}))
|
||||
content := resp.Result.(map[string]any)["content"].([]any)
|
||||
if len(content) == 0 || content[0].(map[string]any)["type"] != "text" {
|
||||
t.Error("Expected content with type=text")
|
||||
}
|
||||
}
|
||||
|
||||
// TestMCP_CommandArgs exercises the argument assembly for the start command
// across the supported optional inputs (pod regex, namespaces, release
// namespace) and checks the final command line against expectations.
func TestMCP_CommandArgs(t *testing.T) {
	// buildArgs mirrors the start-command argument construction under test.
	buildArgs := func(args map[string]any) []string {
		out := []string{"tap"}
		if regex, _ := args["pod_regex"].(string); regex != "" {
			out = append(out, regex)
		}
		if namespaces, _ := args["namespaces"].(string); namespaces != "" {
			// Comma-separated namespaces each become a "-n <ns>" pair.
			for _, ns := range strings.Split(namespaces, ",") {
				out = append(out, "-n", strings.TrimSpace(ns))
			}
		}
		if release, _ := args["release_namespace"].(string); release != "" {
			out = append(out, "-s", release)
		}
		return append(out, "--set", "headless=true")
	}

	cases := []struct {
		args     map[string]any
		expected string
	}{
		{map[string]any{}, "tap --set headless=true"},
		{map[string]any{"pod_regex": "nginx.*"}, "tap nginx.* --set headless=true"},
		{map[string]any{"namespaces": "default"}, "tap -n default --set headless=true"},
		{map[string]any{"release_namespace": "ks"}, "tap -s ks --set headless=true"},
	}
	for _, tc := range cases {
		if got := strings.Join(buildArgs(tc.args), " "); got != tc.expected {
			t.Errorf("Expected %q, got %q", tc.expected, got)
		}
	}
}
|
||||
|
||||
func TestMCP_PrettyPrintJSON(t *testing.T) {
|
||||
s, mockServer := newTestMCPServerWithMockBackend(func(w http.ResponseWriter, r *http.Request) {
|
||||
_, _ = w.Write([]byte(`{"key":"value"}`))
|
||||
})
|
||||
defer mockServer.Close()
|
||||
|
||||
resp := parseResponse(t, sendRequest(s, "tools/call", 1, mcpCallToolParams{Name: "list_workloads"}))
|
||||
text := resp.Result.(map[string]any)["content"].([]any)[0].(map[string]any)["text"].(string)
|
||||
if !strings.Contains(text, "\n") {
|
||||
t.Error("Expected pretty-printed JSON")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_SpecialCharsAndEdgeCases(t *testing.T) {
|
||||
s, mockServer := newTestMCPServerWithMockBackend(func(w http.ResponseWriter, r *http.Request) {
|
||||
_, _ = w.Write([]byte(`{}`))
|
||||
})
|
||||
defer mockServer.Close()
|
||||
|
||||
// Test special chars, empty args, nil args
|
||||
for _, args := range []map[string]any{
|
||||
{"path": "/api?id=123"},
|
||||
{"id": "abc/123"},
|
||||
{},
|
||||
nil,
|
||||
} {
|
||||
resp := parseResponse(t, sendRequest(s, "tools/call", 1, mcpCallToolParams{Name: "list_workloads", Arguments: args}))
|
||||
if resp.Error != nil {
|
||||
t.Errorf("Unexpected error with args %v: %v", args, resp.Error)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_BackendInitialization_Concurrent(t *testing.T) {
|
||||
s := newTestMCPServer()
|
||||
done := make(chan bool, 10)
|
||||
for i := 0; i < 10; i++ {
|
||||
go func() { s.ensureBackendConnection(); done <- true }()
|
||||
}
|
||||
for i := 0; i < 10; i++ {
|
||||
<-done
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_FullConversation(t *testing.T) {
|
||||
mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.URL.Path == "/" {
|
||||
_, _ = w.Write([]byte(`{"name":"hub","tools":[{"name":"list_workloads","description":"","inputSchema":{}}]}`))
|
||||
} else if r.URL.Path == "/tools/call" {
|
||||
_, _ = w.Write([]byte(`{"data":"ok"}`))
|
||||
}
|
||||
}))
|
||||
defer mockServer.Close()
|
||||
|
||||
input := `{"jsonrpc":"2.0","id":1,"method":"initialize"}
|
||||
{"jsonrpc":"2.0","method":"notifications/initialized"}
|
||||
{"jsonrpc":"2.0","id":2,"method":"tools/list"}
|
||||
{"jsonrpc":"2.0","id":3,"method":"tools/call","params":{"name":"list_workloads","arguments":{}}}
|
||||
`
|
||||
output := &bytes.Buffer{}
|
||||
s := &mcpServer{httpClient: &http.Client{}, stdin: strings.NewReader(input), stdout: output, hubBaseURL: mockServer.URL, backendInitialized: true}
|
||||
s.run()
|
||||
|
||||
lines := strings.Split(strings.TrimSpace(output.String()), "\n")
|
||||
if len(lines) != 3 { // 3 responses (notification has no response)
|
||||
t.Errorf("Expected 3 responses, got %d", len(lines))
|
||||
}
|
||||
for i, line := range lines {
|
||||
var resp jsonRPCResponse
|
||||
if err := json.Unmarshal([]byte(line), &resp); err != nil || resp.Error != nil {
|
||||
t.Errorf("Response %d: parse error or unexpected error", i)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -24,7 +24,7 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
label = "app.kubehq.com/app=worker"
|
||||
label = "app.kubeshark.com/app=worker"
|
||||
srcDir = "pcapdump"
|
||||
maxSnaplen uint32 = 262144
|
||||
maxTimePerFile = time.Minute * 5
|
||||
|
||||
@@ -58,6 +58,7 @@ func InitConfig(cmd *cobra.Command) error {
|
||||
"pro",
|
||||
"manifests",
|
||||
"license",
|
||||
"mcp",
|
||||
}, cmd.Use) {
|
||||
go version.CheckNewerVersion()
|
||||
}
|
||||
|
||||
@@ -137,6 +137,10 @@ func CreateDefaultConfig() ConfigStruct {
|
||||
"ldap",
|
||||
"radius",
|
||||
"diameter",
|
||||
"udp-flow",
|
||||
"tcp-flow",
|
||||
"udp-flow-full",
|
||||
"tcp-flow-full",
|
||||
},
|
||||
PortMapping: configStructs.PortMapping{
|
||||
HTTP: []uint16{80, 443, 8080},
|
||||
@@ -175,6 +179,7 @@ type ConfigStruct struct {
|
||||
DumpLogs bool `yaml:"dumpLogs" json:"dumpLogs" default:"false"`
|
||||
HeadlessMode bool `yaml:"headless" json:"headless" default:"false"`
|
||||
License string `yaml:"license" json:"license" default:""`
|
||||
CloudApiUrl string `yaml:"cloudApiUrl" json:"cloudApiUrl" default:"https://api.kubeshark.com"`
|
||||
CloudLicenseEnabled bool `yaml:"cloudLicenseEnabled" json:"cloudLicenseEnabled" default:"true"`
|
||||
AiAssistantEnabled bool `yaml:"aiAssistantEnabled" json:"aiAssistantEnabled" default:"true"`
|
||||
DemoModeEnabled bool `yaml:"demoModeEnabled" json:"demoModeEnabled" default:"false"`
|
||||
|
||||
@@ -198,7 +198,7 @@ type RoutingConfig struct {
|
||||
}
|
||||
|
||||
type DashboardConfig struct {
|
||||
StreamingType string `yaml:"streamingType" json:"streamingType" default:""`
|
||||
StreamingType string `yaml:"streamingType" json:"streamingType" default:"connect-rpc"`
|
||||
CompleteStreamingEnabled bool `yaml:"completeStreamingEnabled" json:"completeStreamingEnabled" default:"true"`
|
||||
}
|
||||
|
||||
@@ -207,7 +207,7 @@ type FrontRoutingConfig struct {
|
||||
}
|
||||
|
||||
type ReleaseConfig struct {
|
||||
Repo string `yaml:"repo" json:"repo" default:"https://helm.kubehq.com"`
|
||||
Repo string `yaml:"repo" json:"repo" default:"https://helm.kubeshark.com"`
|
||||
Name string `yaml:"name" json:"name" default:"kubeshark"`
|
||||
Namespace string `yaml:"namespace" json:"namespace" default:"default"`
|
||||
}
|
||||
@@ -251,8 +251,8 @@ type PprofConfig struct {
|
||||
|
||||
type MiscConfig struct {
|
||||
JsonTTL string `yaml:"jsonTTL" json:"jsonTTL" default:"5m"`
|
||||
PcapTTL string `yaml:"pcapTTL" json:"pcapTTL" default:"10s"`
|
||||
PcapErrorTTL string `yaml:"pcapErrorTTL" json:"pcapErrorTTL" default:"60s"`
|
||||
PcapTTL string `yaml:"pcapTTL" json:"pcapTTL" default:"0"`
|
||||
PcapErrorTTL string `yaml:"pcapErrorTTL" json:"pcapErrorTTL" default:"0"`
|
||||
TrafficSampleRate int `yaml:"trafficSampleRate" json:"trafficSampleRate" default:"100"`
|
||||
TcpStreamChannelTimeoutMs int `yaml:"tcpStreamChannelTimeoutMs" json:"tcpStreamChannelTimeoutMs" default:"10000"`
|
||||
TcpStreamChannelTimeoutShow bool `yaml:"tcpStreamChannelTimeoutShow" json:"tcpStreamChannelTimeoutShow" default:"false"`
|
||||
@@ -263,7 +263,7 @@ type MiscConfig struct {
|
||||
}
|
||||
|
||||
type PcapDumpConfig struct {
|
||||
PcapDumpEnabled bool `yaml:"enabled" json:"enabled" default:"true"`
|
||||
PcapDumpEnabled bool `yaml:"enabled" json:"enabled" default:"false"`
|
||||
PcapTimeInterval string `yaml:"timeInterval" json:"timeInterval" default:"1m"`
|
||||
PcapMaxTime string `yaml:"maxTime" json:"maxTime" default:"1h"`
|
||||
PcapMaxSize string `yaml:"maxSize" json:"maxSize" default:"500MB"`
|
||||
@@ -301,20 +301,27 @@ type SeLinuxOptionsConfig struct {
|
||||
}
|
||||
|
||||
type RawCaptureConfig struct {
|
||||
Enabled bool `yaml:"enabled" json:"enabled" default:"false"`
|
||||
Enabled bool `yaml:"enabled" json:"enabled" default:"true"`
|
||||
StorageSize string `yaml:"storageSize" json:"storageSize" default:"1Gi"`
|
||||
}
|
||||
|
||||
type SnapshotsConfig struct {
|
||||
StorageClass string `yaml:"storageClass" json:"storageClass" default:""`
|
||||
StorageSize string `yaml:"storageSize" json:"storageSize" default:"1Gi"`
|
||||
StorageSize string `yaml:"storageSize" json:"storageSize" default:"20Gi"`
|
||||
}
|
||||
|
||||
type DelayedDissectionConfig struct {
|
||||
Image string `yaml:"image" json:"image" default:"kubeshark/worker:master"`
|
||||
CPU string `yaml:"cpu" json:"cpu" default:"1"`
|
||||
Memory string `yaml:"memory" json:"memory" default:"4Gi"`
|
||||
}
|
||||
|
||||
type CaptureConfig struct {
|
||||
Stopped bool `yaml:"stopped" json:"stopped" default:"false"`
|
||||
StopAfter string `yaml:"stopAfter" json:"stopAfter" default:"5m"`
|
||||
Raw RawCaptureConfig `yaml:"raw" json:"raw"`
|
||||
DbMaxSize string `yaml:"dbMaxSize" json:"dbMaxSize" default:"500Mi"`
|
||||
Stopped bool `yaml:"stopped" json:"stopped" default:"false"`
|
||||
StopAfter string `yaml:"stopAfter" json:"stopAfter" default:"5m"`
|
||||
CaptureSelf bool `yaml:"captureSelf" json:"captureSelf" default:"false"`
|
||||
Raw RawCaptureConfig `yaml:"raw" json:"raw"`
|
||||
DbMaxSize string `yaml:"dbMaxSize" json:"dbMaxSize" default:"500Mi"`
|
||||
}
|
||||
|
||||
type TapConfig struct {
|
||||
@@ -325,6 +332,7 @@ type TapConfig struct {
|
||||
ExcludedNamespaces []string `yaml:"excludedNamespaces" json:"excludedNamespaces" default:"[]"`
|
||||
BpfOverride string `yaml:"bpfOverride" json:"bpfOverride" default:""`
|
||||
Capture CaptureConfig `yaml:"capture" json:"capture"`
|
||||
DelayedDissection DelayedDissectionConfig `yaml:"delayedDissection" json:"delayedDissection"`
|
||||
Snapshots SnapshotsConfig `yaml:"snapshots" json:"snapshots"`
|
||||
Release ReleaseConfig `yaml:"release" json:"release"`
|
||||
PersistentStorage bool `yaml:"persistentStorage" json:"persistentStorage" default:"false"`
|
||||
@@ -332,7 +340,7 @@ type TapConfig struct {
|
||||
PersistentStoragePvcVolumeMode string `yaml:"persistentStoragePvcVolumeMode" json:"persistentStoragePvcVolumeMode" default:"FileSystem"`
|
||||
EfsFileSytemIdAndPath string `yaml:"efsFileSytemIdAndPath" json:"efsFileSytemIdAndPath" default:""`
|
||||
Secrets []string `yaml:"secrets" json:"secrets" default:"[]"`
|
||||
StorageLimit string `yaml:"storageLimit" json:"storageLimit" default:"5Gi"`
|
||||
StorageLimit string `yaml:"storageLimit" json:"storageLimit" default:"10Gi"`
|
||||
StorageClass string `yaml:"storageClass" json:"storageClass" default:"standard"`
|
||||
DryRun bool `yaml:"dryRun" json:"dryRun" default:"false"`
|
||||
DnsConfig DnsConfig `yaml:"dns" json:"dns"`
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
apiVersion: v2
|
||||
name: kubeshark
|
||||
version: "52.10.0"
|
||||
version: "52.12.0"
|
||||
description: The API Traffic Analyzer for Kubernetes
|
||||
home: https://kubehq.com
|
||||
home: https://kubeshark.com
|
||||
keywords:
|
||||
- kubeshark
|
||||
- packet capture
|
||||
@@ -16,9 +16,9 @@ keywords:
|
||||
- api
|
||||
kubeVersion: '>= 1.16.0-0'
|
||||
maintainers:
|
||||
- email: info@kubehq.com
|
||||
- email: support@kubeshark.com
|
||||
name: Kubeshark
|
||||
url: https://kubehq.com
|
||||
url: https://kubeshark.com
|
||||
sources:
|
||||
- https://github.com/kubeshark/kubeshark/tree/master/helm-chart
|
||||
type: application
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
Add the Helm repo for Kubeshark:
|
||||
|
||||
```shell
|
||||
helm repo add kubeshark https://helm.kubehq.com
|
||||
helm repo add kubeshark https://helm.kubeshark.com
|
||||
```
|
||||
|
||||
then install Kubeshark:
|
||||
@@ -69,7 +69,7 @@ When it's necessary, you can use:
|
||||
--set license=YOUR_LICENSE_GOES_HERE
|
||||
```
|
||||
|
||||
Get your license from Kubeshark's [Admin Console](https://console.kubehq.com/).
|
||||
Get your license from Kubeshark's [Admin Console](https://console.kubeshark.com/).
|
||||
|
||||
## Installing with Ingress (EKS) enabled
|
||||
|
||||
@@ -140,12 +140,12 @@ Example for overriding image names:
|
||||
| `tap.bpfOverride` | When using AF_PACKET as a traffic capture backend, override any existing pod targeting rules and set explicit BPF expression (e.g. `net 0.0.0.0/0`). | `[]` |
|
||||
| `tap.capture.stopped` | Set to `false` to have traffic processing start automatically. When set to `true`, traffic processing is stopped by default, resulting in almost no resource consumption (e.g. Kubeshark is dormant). This property can be dynamically control via the dashboard. | `false` |
|
||||
| `tap.capture.stopAfter` | Set to a duration (e.g. `30s`) to have traffic processing stop after no websocket activity between worker and hub. | `30s` |
|
||||
| `tap.capture.raw.enabled` | Enable raw capture of packets and syscalls to disk for offline analysis | `false` |
|
||||
| `tap.capture.raw.enabled` | Enable raw capture of packets and syscalls to disk for offline analysis | `true` |
|
||||
| `tap.capture.raw.storageSize` | Maximum storage size for raw capture files (supports K8s quantity format: `1Gi`, `500Mi`, etc.) | `1Gi` |
|
||||
| `tap.capture.dbMaxSize` | Maximum size for capture database (e.g., `4Gi`, `2000Mi`). When empty, automatically uses 80% of allocated storage (`tap.storageLimit`). | `""` |
|
||||
| `tap.snapshots.storageClass` | Storage class for snapshots volume. When empty, uses `emptyDir`. When set, creates a PVC with this storage class | `""` |
|
||||
| `tap.snapshots.storageSize` | Storage size for snapshots volume (supports K8s quantity format: `1Gi`, `500Mi`, etc.) | `1Gi` |
|
||||
| `tap.release.repo` | URL of the Helm chart repository | `https://helm.kubehq.com` |
|
||||
| `tap.snapshots.storageSize` | Storage size for snapshots volume (supports K8s quantity format: `1Gi`, `500Mi`, etc.) | `10Gi` |
|
||||
| `tap.release.repo` | URL of the Helm chart repository | `https://helm.kubeshark.com` |
|
||||
| `tap.release.name` | Helm release name | `kubeshark` |
|
||||
| `tap.release.namespace` | Helm release namespace | `default` |
|
||||
| `tap.persistentStorage` | Use `persistentVolumeClaim` instead of `emptyDir` | `false` |
|
||||
@@ -221,7 +221,7 @@ Example for overriding image names:
|
||||
| `tap.hostNetwork` | Enable host network mode for worker DaemonSet pods. When enabled, worker pods use the host's network namespace for direct network access. | `true` |
|
||||
| `tap.gitops.enabled` | Enable GitOps functionality. This will allow you to use GitOps to manage your Kubeshark configuration. | `false` |
|
||||
| `logs.file` | Logs dump path | `""` |
|
||||
| `pcapdump.enabled` | Enable recording of all traffic captured according to other parameters. Whatever Kubeshark captures, considering pod targeting rules, will be stored in pcap files ready to be viewed by tools | `true` |
|
||||
| `pcapdump.enabled` | Enable recording of all traffic captured according to other parameters. Whatever Kubeshark captures, considering pod targeting rules, will be stored in pcap files ready to be viewed by tools | `false` |
|
||||
| `pcapdump.maxTime` | The time window into the past that will be stored. Older traffic will be discarded. | `2h` |
|
||||
| `pcapdump.maxSize` | The maximum storage size the PCAP files will consume. Old files that cause to surpass storage consumption will get discarded. | `500MB` |
|
||||
| `kube.configPath` | Path to the `kubeconfig` file (`$HOME/.kube/config`) | `""` |
|
||||
@@ -308,7 +308,7 @@ tap:
|
||||
|
||||
# Installing with Dex OIDC authentication
|
||||
|
||||
[**Click here to see full docs**](https://docs.kubehq.com/en/saml#installing-with-oidc-enabled-dex-idp).
|
||||
[**Click here to see full docs**](https://docs.kubeshark.com/en/saml#installing-with-oidc-enabled-dex-idp).
|
||||
|
||||
Choose this option, if **you already have a running instance** of Dex in your cluster &
|
||||
you want to set up Dex OIDC authentication for Kubeshark users.
|
||||
|
||||
@@ -86,3 +86,9 @@ rules:
|
||||
verbs:
|
||||
- create
|
||||
- get
|
||||
- apiGroups:
|
||||
- batch
|
||||
resources:
|
||||
- jobs
|
||||
verbs:
|
||||
- "*"
|
||||
@@ -3,7 +3,7 @@ apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app.kubehq.com/app: hub
|
||||
app.kubeshark.com/app: hub
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
{{- if .Values.tap.annotations }}
|
||||
annotations:
|
||||
@@ -15,12 +15,12 @@ spec:
|
||||
replicas: 1 # Set the desired number of replicas
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubehq.com/app: hub
|
||||
app.kubeshark.com/app: hub
|
||||
{{- include "kubeshark.selectorLabels" . | nindent 6 }}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubehq.com/app: hub
|
||||
app.kubeshark.com/app: hub
|
||||
{{- include "kubeshark.labels" . | nindent 8 }}
|
||||
spec:
|
||||
dnsPolicy: ClusterFirstWithHostNet
|
||||
@@ -40,9 +40,23 @@ spec:
|
||||
- "{{ if hasKey .Values.tap.capture "stopAfter" }}{{ .Values.tap.capture.stopAfter }}{{ else }}5m{{ end }}"
|
||||
- -snapshot-size-limit
|
||||
- '{{ .Values.tap.snapshots.storageSize }}'
|
||||
{{- if .Values.tap.delayedDissection.image }}
|
||||
- -dissector-image
|
||||
- '{{ .Values.tap.delayedDissection.image }}'
|
||||
{{- end }}
|
||||
{{- if .Values.tap.delayedDissection.cpu }}
|
||||
- -dissector-cpu
|
||||
- '{{ .Values.tap.delayedDissection.cpu }}'
|
||||
{{- end }}
|
||||
{{- if .Values.tap.delayedDissection.memory }}
|
||||
- -dissector-memory
|
||||
- '{{ .Values.tap.delayedDissection.memory }}'
|
||||
{{- end }}
|
||||
{{- if .Values.tap.gitops.enabled }}
|
||||
- -gitops
|
||||
{{- end }}
|
||||
- -cloud-api-url
|
||||
- '{{ .Values.cloudApiUrl }}'
|
||||
{{- if .Values.tap.secrets }}
|
||||
envFrom:
|
||||
{{- range .Values.tap.secrets }}
|
||||
@@ -63,8 +77,6 @@ spec:
|
||||
value: '{{ (include "sentry.enabled" .) }}'
|
||||
- name: SENTRY_ENVIRONMENT
|
||||
value: '{{ .Values.tap.sentry.environment }}'
|
||||
- name: KUBESHARK_CLOUD_API_URL
|
||||
value: 'https://api.kubehq.com'
|
||||
- name: PROFILING_ENABLED
|
||||
value: '{{ .Values.tap.pprof.enabled }}'
|
||||
{{- if .Values.tap.docker.overrideImage.hub }}
|
||||
|
||||
@@ -3,7 +3,7 @@ apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
app.kubehq.com/app: hub
|
||||
app.kubeshark.com/app: hub
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
{{- if .Values.tap.annotations }}
|
||||
annotations:
|
||||
@@ -17,5 +17,5 @@ spec:
|
||||
port: 80
|
||||
targetPort: 8080
|
||||
selector:
|
||||
app.kubehq.com/app: hub
|
||||
app.kubeshark.com/app: hub
|
||||
type: ClusterIP
|
||||
|
||||
@@ -2,7 +2,7 @@ apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app.kubehq.com/app: front
|
||||
app.kubeshark.com/app: front
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
{{- if .Values.tap.annotations }}
|
||||
annotations:
|
||||
@@ -14,12 +14,12 @@ spec:
|
||||
replicas: 1 # Set the desired number of replicas
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubehq.com/app: front
|
||||
app.kubeshark.com/app: front
|
||||
{{- include "kubeshark.selectorLabels" . | nindent 6 }}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubehq.com/app: front
|
||||
app.kubeshark.com/app: front
|
||||
{{- include "kubeshark.labels" . | nindent 8 }}
|
||||
spec:
|
||||
containers:
|
||||
@@ -86,6 +86,8 @@ spec:
|
||||
value: '{{ default false .Values.betaEnabled | ternary "true" "false" }}'
|
||||
- name: REACT_APP_DISSECTORS_UPDATING_ENABLED
|
||||
value: '{{ .Values.tap.liveConfigMapChangesDisabled | ternary "false" "true" }}'
|
||||
- name: REACT_APP_RAW_CAPTURE_ENABLED
|
||||
value: '{{ .Values.tap.capture.raw.enabled | ternary "true" "false" }}'
|
||||
- name: REACT_APP_SENTRY_ENABLED
|
||||
value: '{{ (include "sentry.enabled" .) }}'
|
||||
- name: REACT_APP_SENTRY_ENVIRONMENT
|
||||
|
||||
@@ -16,5 +16,5 @@ spec:
|
||||
port: 80
|
||||
targetPort: 8080
|
||||
selector:
|
||||
app.kubehq.com/app: front
|
||||
app.kubeshark.com/app: front
|
||||
type: ClusterIP
|
||||
|
||||
@@ -3,7 +3,7 @@ apiVersion: apps/v1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
labels:
|
||||
app.kubehq.com/app: worker
|
||||
app.kubeshark.com/app: worker
|
||||
sidecar.istio.io/inject: "false"
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
{{- if .Values.tap.annotations }}
|
||||
@@ -15,12 +15,12 @@ metadata:
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubehq.com/app: worker
|
||||
app.kubeshark.com/app: worker
|
||||
{{- include "kubeshark.selectorLabels" . | nindent 6 }}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubehq.com/app: worker
|
||||
app.kubeshark.com/app: worker
|
||||
{{- include "kubeshark.labels" . | nindent 8 }}
|
||||
name: kubeshark-worker-daemon-set
|
||||
namespace: kubeshark
|
||||
@@ -103,6 +103,8 @@ spec:
|
||||
- '{{ .Values.tap.storageLimit }}'
|
||||
- -capture-db-max-size
|
||||
- '{{ .Values.tap.capture.dbMaxSize }}'
|
||||
- -cloud-api-url
|
||||
- '{{ .Values.cloudApiUrl }}'
|
||||
{{- if .Values.tap.docker.overrideImage.worker }}
|
||||
image: '{{ .Values.tap.docker.overrideImage.worker }}'
|
||||
{{- else if .Values.tap.docker.overrideTag.worker }}
|
||||
@@ -129,8 +131,6 @@ spec:
|
||||
value: '{{ .Values.tap.misc.tcpStreamChannelTimeoutMs }}'
|
||||
- name: TCP_STREAM_CHANNEL_TIMEOUT_SHOW
|
||||
value: '{{ .Values.tap.misc.tcpStreamChannelTimeoutShow }}'
|
||||
- name: KUBESHARK_CLOUD_API_URL
|
||||
value: 'https://api.kubehq.com'
|
||||
- name: PROFILING_ENABLED
|
||||
value: '{{ .Values.tap.pprof.enabled }}'
|
||||
- name: SENTRY_ENABLED
|
||||
|
||||
@@ -4,7 +4,7 @@ metadata:
|
||||
name: {{ include "kubeshark.configmapName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
app.kubehq.com/app: hub
|
||||
app.kubeshark.com/app: hub
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
data:
|
||||
POD_REGEX: '{{ .Values.tap.regex }}'
|
||||
@@ -12,6 +12,7 @@ data:
|
||||
EXCLUDED_NAMESPACES: '{{ gt (len .Values.tap.excludedNamespaces) 0 | ternary (join "," .Values.tap.excludedNamespaces) "" }}'
|
||||
BPF_OVERRIDE: '{{ .Values.tap.bpfOverride }}'
|
||||
STOPPED: '{{ .Values.tap.capture.stopped | ternary "true" "false" }}'
|
||||
CAPTURE_SELF: '{{ .Values.tap.capture.captureSelf | ternary "true" "false" }}'
|
||||
SCRIPTING_SCRIPTS: '{}'
|
||||
SCRIPTING_ACTIVE_SCRIPTS: '{{ gt (len .Values.scripting.active) 0 | ternary (join "," .Values.scripting.active) "" }}'
|
||||
INGRESS_ENABLED: '{{ .Values.tap.ingress.enabled }}'
|
||||
|
||||
@@ -4,7 +4,7 @@ metadata:
|
||||
name: {{ include "kubeshark.secretName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
app.kubehq.com/app: hub
|
||||
app.kubeshark.com/app: hub
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
stringData:
|
||||
LICENSE: '{{ .Values.license }}'
|
||||
@@ -20,7 +20,7 @@ metadata:
|
||||
name: kubeshark-saml-x509-crt-secret
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
app.kubehq.com/app: hub
|
||||
app.kubeshark.com/app: hub
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
stringData:
|
||||
AUTH_SAML_X509_CRT: |
|
||||
@@ -34,7 +34,7 @@ metadata:
|
||||
name: kubeshark-saml-x509-key-secret
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
app.kubehq.com/app: hub
|
||||
app.kubeshark.com/app: hub
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
stringData:
|
||||
AUTH_SAML_X509_KEY: |
|
||||
|
||||
@@ -14,7 +14,7 @@ metadata:
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
selector:
|
||||
app.kubehq.com/app: worker
|
||||
app.kubeshark.com/app: worker
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
ports:
|
||||
- name: metrics
|
||||
|
||||
@@ -14,7 +14,7 @@ metadata:
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
selector:
|
||||
app.kubehq.com/app: hub
|
||||
app.kubeshark.com/app: hub
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
ports:
|
||||
- name: metrics
|
||||
|
||||
@@ -12,7 +12,7 @@ metadata:
|
||||
spec:
|
||||
podSelector:
|
||||
matchLabels:
|
||||
app.kubehq.com/app: hub
|
||||
app.kubeshark.com/app: hub
|
||||
policyTypes:
|
||||
- Ingress
|
||||
- Egress
|
||||
@@ -40,7 +40,7 @@ metadata:
|
||||
spec:
|
||||
podSelector:
|
||||
matchLabels:
|
||||
app.kubehq.com/app: front
|
||||
app.kubeshark.com/app: front
|
||||
policyTypes:
|
||||
- Ingress
|
||||
- Egress
|
||||
@@ -65,7 +65,7 @@ metadata:
|
||||
spec:
|
||||
podSelector:
|
||||
matchLabels:
|
||||
app.kubehq.com/app: dex
|
||||
app.kubeshark.com/app: dex
|
||||
policyTypes:
|
||||
- Ingress
|
||||
- Egress
|
||||
@@ -90,7 +90,7 @@ metadata:
|
||||
spec:
|
||||
podSelector:
|
||||
matchLabels:
|
||||
app.kubehq.com/app: worker
|
||||
app.kubeshark.com/app: worker
|
||||
policyTypes:
|
||||
- Ingress
|
||||
- Egress
|
||||
|
||||
@@ -5,7 +5,7 @@ apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app.kubehq.com/app: dex
|
||||
app.kubeshark.com/app: dex
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
{{- if .Values.tap.annotations }}
|
||||
annotations:
|
||||
@@ -17,12 +17,12 @@ spec:
|
||||
replicas: 1 # Set the desired number of replicas
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubehq.com/app: dex
|
||||
app.kubeshark.com/app: dex
|
||||
{{- include "kubeshark.selectorLabels" . | nindent 6 }}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubehq.com/app: dex
|
||||
app.kubeshark.com/app: dex
|
||||
{{- include "kubeshark.labels" . | nindent 8 }}
|
||||
spec:
|
||||
containers:
|
||||
|
||||
@@ -5,7 +5,7 @@ apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
app.kubehq.com/app: dex
|
||||
app.kubeshark.com/app: dex
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
{{- if .Values.tap.annotations }}
|
||||
annotations:
|
||||
@@ -19,7 +19,7 @@ spec:
|
||||
port: 80
|
||||
targetPort: 5556
|
||||
selector:
|
||||
app.kubehq.com/app: dex
|
||||
app.kubeshark.com/app: dex
|
||||
type: ClusterIP
|
||||
|
||||
{{- end }}
|
||||
|
||||
@@ -6,7 +6,7 @@ metadata:
|
||||
name: kubeshark-dex-conf-secret
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
app.kubehq.com/app: hub
|
||||
app.kubeshark.com/app: hub
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
data:
|
||||
dex-config.yaml: {{ .Values.tap.auth.dexConfig | toYaml | b64enc | quote }}
|
||||
|
||||
@@ -28,9 +28,12 @@ Notices:
|
||||
- Support chat using Intercom is enabled. It can be disabled using `--set supportChatEnabled=false`
|
||||
{{- end }}
|
||||
{{- if eq .Values.license ""}}
|
||||
- No license key was detected. You can either log-in/sign-up through the dashboard, or download the license key from https://console.kubehq.com/ and add it as 'license: <license>' in helm values or as `--set license=<license>` or as `LICENSE` via mounted secret (`tap.secrets`).
|
||||
- No license key was detected.
|
||||
- Authenticate through the dashboard to activate a complementary COMMUNITY license.
|
||||
- If you have an Enterprise license, download the license key from https://console.kubeshark.com/
|
||||
- An Enterprise license-key can be added as 'license: <license>' in helm values or as `--set license=<license>` or as `LICENSE` via mounted secret (`tap.secrets`).
|
||||
- Contact us to get an Enterprise license: https://kubeshark.com/contact-us.
|
||||
{{- end }}
|
||||
|
||||
{{ if .Values.tap.ingress.enabled }}
|
||||
|
||||
You can now access the application through the following URL:
|
||||
@@ -42,8 +45,9 @@ To access the application, follow these steps:
|
||||
1. Perform port forwarding with the following commands:
|
||||
|
||||
kubectl port-forward -n {{ .Release.Namespace }} service/kubeshark-front 8899:80
|
||||
you could also run: `kubeshark proxy` (which simply manages the port-forward connection)
|
||||
|
||||
2. Once port forwarding is done, you can access the application by visiting the following URL in your web browser:
|
||||
http://0.0.0.0:8899{{ default "" (((.Values.tap).routing).front).basePath }}/
|
||||
http://127.0.0.1:8899{{ default "" (((.Values.tap).routing).front).basePath }}/
|
||||
|
||||
{{- end }}
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
# find a detailed description here: https://github.com/kubeshark/kubeshark/blob/master/helm-chart/README.md
|
||||
tap:
|
||||
docker:
|
||||
registry: docker.io/kubeshark
|
||||
@@ -29,15 +28,20 @@ tap:
|
||||
capture:
|
||||
stopped: false
|
||||
stopAfter: 5m
|
||||
captureSelf: false
|
||||
raw:
|
||||
enabled: false
|
||||
enabled: true
|
||||
storageSize: 1Gi
|
||||
dbMaxSize: 500Mi
|
||||
delayedDissection:
|
||||
image: kubeshark/worker:master
|
||||
cpu: "1"
|
||||
memory: 4Gi
|
||||
snapshots:
|
||||
storageClass: ""
|
||||
storageSize: 1Gi
|
||||
storageSize: 20Gi
|
||||
release:
|
||||
repo: https://helm.kubehq.com
|
||||
repo: https://helm.kubeshark.com
|
||||
name: kubeshark
|
||||
namespace: default
|
||||
persistentStorage: false
|
||||
@@ -45,7 +49,7 @@ tap:
|
||||
persistentStoragePvcVolumeMode: FileSystem
|
||||
efsFileSytemIdAndPath: ""
|
||||
secrets: []
|
||||
storageLimit: 5Gi
|
||||
storageLimit: 10Gi
|
||||
storageClass: standard
|
||||
dryRun: false
|
||||
dns:
|
||||
@@ -156,7 +160,7 @@ tap:
|
||||
ipv6: true
|
||||
debug: false
|
||||
dashboard:
|
||||
streamingType: ""
|
||||
streamingType: connect-rpc
|
||||
completeStreamingEnabled: true
|
||||
telemetry:
|
||||
enabled: true
|
||||
@@ -183,6 +187,10 @@ tap:
|
||||
- ldap
|
||||
- radius
|
||||
- diameter
|
||||
- udp-flow
|
||||
- tcp-flow
|
||||
- tcp-flow-full
|
||||
- udp-flow-full
|
||||
portMapping:
|
||||
http:
|
||||
- 80
|
||||
@@ -209,8 +217,8 @@ tap:
|
||||
view: flamegraph
|
||||
misc:
|
||||
jsonTTL: 5m
|
||||
pcapTTL: 10s
|
||||
pcapErrorTTL: 60s
|
||||
pcapTTL: "0"
|
||||
pcapErrorTTL: "0"
|
||||
trafficSampleRate: 100
|
||||
tcpStreamChannelTimeoutMs: 10000
|
||||
tcpStreamChannelTimeoutShow: false
|
||||
@@ -247,7 +255,7 @@ logs:
|
||||
file: ""
|
||||
grep: ""
|
||||
pcapdump:
|
||||
enabled: true
|
||||
enabled: false
|
||||
timeInterval: 1m
|
||||
maxTime: 1h
|
||||
maxSize: 500MB
|
||||
@@ -260,6 +268,7 @@ kube:
|
||||
dumpLogs: false
|
||||
headless: false
|
||||
license: ""
|
||||
cloudApiUrl: "https://api.kubeshark.com"
|
||||
cloudLicenseEnabled: true
|
||||
aiAssistantEnabled: true
|
||||
demoModeEnabled: false
|
||||
|
||||
57
integration/README.md
Normal file
57
integration/README.md
Normal file
@@ -0,0 +1,57 @@
|
||||
# Integration Tests
|
||||
|
||||
This directory contains integration tests that run against a real Kubernetes cluster.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
1. **Kubernetes cluster** - A running cluster accessible via `kubectl`
|
||||
2. **kubectl** - Configured with appropriate context
|
||||
3. **Go 1.21+** - For running tests
|
||||
|
||||
## Running Tests
|
||||
|
||||
```bash
|
||||
# Run all integration tests
|
||||
make test-integration
|
||||
|
||||
# Run specific command tests
|
||||
make test-integration-mcp
|
||||
|
||||
# Run with verbose output
|
||||
make test-integration-verbose
|
||||
|
||||
# Run with custom timeout (default: 5m)
|
||||
INTEGRATION_TIMEOUT=10m make test-integration
|
||||
```
|
||||
|
||||
## Environment Variables
|
||||
|
||||
| Variable | Default | Description |
|
||||
|----------|---------|-------------|
|
||||
| `KUBESHARK_BINARY` | Auto-built | Path to pre-built kubeshark binary |
|
||||
| `INTEGRATION_TIMEOUT` | `5m` | Test timeout duration |
|
||||
| `KUBECONFIG` | `~/.kube/config` | Kubernetes config file |
|
||||
| `INTEGRATION_SKIP_CLEANUP` | `false` | Skip cleanup after tests (for debugging) |
|
||||
|
||||
## Test Structure
|
||||
|
||||
```
|
||||
integration/
|
||||
├── README.md # This file
|
||||
├── common_test.go # Shared test helpers
|
||||
├── mcp_test.go # MCP command integration tests
|
||||
├── tap_test.go # Tap command tests (future)
|
||||
└── ... # Additional command tests
|
||||
```
|
||||
|
||||
## Writing New Tests
|
||||
|
||||
1. Create `<command>_test.go` with build tag `//go:build integration`
|
||||
2. Use helpers from `common_test.go`: `requireKubernetesCluster(t)`, `getKubesharkBinary(t)`, `cleanupKubeshark(t, binary)`
|
||||
|
||||
## CI/CD Integration
|
||||
|
||||
```bash
|
||||
# JSON output for CI parsing
|
||||
go test -tags=integration -json ./integration/...
|
||||
```
|
||||
217
integration/common_test.go
Normal file
217
integration/common_test.go
Normal file
@@ -0,0 +1,217 @@
|
||||
//go:build integration
|
||||
|
||||
// Package integration contains integration tests that run against a real Kubernetes cluster.
|
||||
// Run with: go test -tags=integration ./integration/...
|
||||
package integration
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
binaryName = "kubeshark"
|
||||
defaultTimeout = 2 * time.Minute
|
||||
startupTimeout = 3 * time.Minute
|
||||
)
|
||||
|
||||
var (
|
||||
// binaryPath caches the built binary path
|
||||
binaryPath string
|
||||
buildOnce sync.Once
|
||||
buildErr error
|
||||
)
|
||||
|
||||
// requireKubernetesCluster skips the test if no Kubernetes cluster is available.
|
||||
func requireKubernetesCluster(t *testing.T) {
|
||||
t.Helper()
|
||||
if !hasKubernetesCluster() {
|
||||
t.Skip("Skipping: no Kubernetes cluster available")
|
||||
}
|
||||
}
|
||||
|
||||
// hasKubernetesCluster returns true if a Kubernetes cluster is accessible.
|
||||
func hasKubernetesCluster() bool {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
return exec.CommandContext(ctx, "kubectl", "cluster-info").Run() == nil
|
||||
}
|
||||
|
||||
// getKubesharkBinary returns the path to the kubeshark binary, building it if necessary.
|
||||
func getKubesharkBinary(t *testing.T) string {
|
||||
t.Helper()
|
||||
|
||||
// Check if binary path is provided via environment
|
||||
if envBinary := os.Getenv("KUBESHARK_BINARY"); envBinary != "" {
|
||||
if _, err := os.Stat(envBinary); err == nil {
|
||||
return envBinary
|
||||
}
|
||||
t.Fatalf("KUBESHARK_BINARY set but file not found: %s", envBinary)
|
||||
}
|
||||
|
||||
// Build once per test run
|
||||
buildOnce.Do(func() {
|
||||
binaryPath, buildErr = buildBinary(t)
|
||||
})
|
||||
|
||||
if buildErr != nil {
|
||||
t.Fatalf("Failed to build binary: %v", buildErr)
|
||||
}
|
||||
|
||||
return binaryPath
|
||||
}
|
||||
|
||||
// buildBinary compiles the binary and returns its path.
|
||||
func buildBinary(t *testing.T) (string, error) {
|
||||
t.Helper()
|
||||
|
||||
// Find project root (directory containing go.mod)
|
||||
projectRoot, err := findProjectRoot()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("finding project root: %w", err)
|
||||
}
|
||||
|
||||
outputPath := filepath.Join(projectRoot, "bin", binaryName+"_integration_test")
|
||||
|
||||
t.Logf("Building %s binary at %s", binaryName, outputPath)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
|
||||
defer cancel()
|
||||
|
||||
cmd := exec.CommandContext(ctx, "go", "build",
|
||||
"-o", outputPath,
|
||||
filepath.Join(projectRoot, binaryName+".go"),
|
||||
)
|
||||
cmd.Dir = projectRoot
|
||||
|
||||
output, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("build failed: %w\nOutput: %s", err, output)
|
||||
}
|
||||
|
||||
return outputPath, nil
|
||||
}
|
||||
|
||||
// findProjectRoot locates the project root by finding go.mod
|
||||
func findProjectRoot() (string, error) {
|
||||
dir, err := os.Getwd()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
for {
|
||||
if _, err := os.Stat(filepath.Join(dir, "go.mod")); err == nil {
|
||||
return dir, nil
|
||||
}
|
||||
|
||||
parent := filepath.Dir(dir)
|
||||
if parent == dir {
|
||||
return "", fmt.Errorf("could not find go.mod in any parent directory")
|
||||
}
|
||||
dir = parent
|
||||
}
|
||||
}
|
||||
|
||||
// runKubeshark executes the kubeshark binary with the given arguments.
|
||||
// Returns combined stdout/stderr and any error.
|
||||
func runKubeshark(t *testing.T, binary string, args ...string) (string, error) {
|
||||
t.Helper()
|
||||
return runKubesharkWithTimeout(t, binary, defaultTimeout, args...)
|
||||
}
|
||||
|
||||
// runKubesharkWithTimeout executes the kubeshark binary with a custom timeout.
|
||||
func runKubesharkWithTimeout(t *testing.T, binary string, timeout time.Duration, args ...string) (string, error) {
|
||||
t.Helper()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), timeout)
|
||||
defer cancel()
|
||||
|
||||
t.Logf("Running: %s %s", binary, strings.Join(args, " "))
|
||||
|
||||
cmd := exec.CommandContext(ctx, binary, args...)
|
||||
|
||||
var stdout, stderr bytes.Buffer
|
||||
cmd.Stdout = &stdout
|
||||
cmd.Stderr = &stderr
|
||||
|
||||
err := cmd.Run()
|
||||
|
||||
output := stdout.String()
|
||||
if stderr.Len() > 0 {
|
||||
output += "\n[stderr]\n" + stderr.String()
|
||||
}
|
||||
|
||||
if ctx.Err() == context.DeadlineExceeded {
|
||||
return output, fmt.Errorf("command timed out after %v", timeout)
|
||||
}
|
||||
|
||||
return output, err
|
||||
}
|
||||
|
||||
// cleanupKubeshark ensures Kubeshark is not running in the cluster.
|
||||
func cleanupKubeshark(t *testing.T, binary string) {
|
||||
t.Helper()
|
||||
|
||||
if os.Getenv("INTEGRATION_SKIP_CLEANUP") == "true" {
|
||||
t.Log("Skipping cleanup (INTEGRATION_SKIP_CLEANUP=true)")
|
||||
return
|
||||
}
|
||||
|
||||
t.Log("Cleaning up any existing Kubeshark installation...")
|
||||
|
||||
// Run clean command, ignore errors (might not be installed)
|
||||
_, _ = runKubeshark(t, binary, "clean")
|
||||
|
||||
// Wait a moment for resources to be deleted
|
||||
time.Sleep(2 * time.Second)
|
||||
}
|
||||
|
||||
// waitForKubesharkReady waits for Kubeshark to be ready after starting.
|
||||
func waitForKubesharkReady(t *testing.T, binary string, timeout time.Duration) error {
|
||||
t.Helper()
|
||||
|
||||
t.Log("Waiting for Kubeshark to be ready...")
|
||||
|
||||
deadline := time.Now().Add(timeout)
|
||||
|
||||
for time.Now().Before(deadline) {
|
||||
// Check if pods are running
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
cmd := exec.CommandContext(ctx, "kubectl", "get", "pods", "-l", "app.kubernetes.io/name=kubeshark", "-o", "jsonpath={.items[*].status.phase}")
|
||||
output, err := cmd.Output()
|
||||
cancel()
|
||||
|
||||
if err == nil && strings.Contains(string(output), "Running") {
|
||||
t.Log("Kubeshark is ready")
|
||||
return nil
|
||||
}
|
||||
|
||||
time.Sleep(5 * time.Second)
|
||||
}
|
||||
|
||||
return fmt.Errorf("timeout waiting for Kubeshark to be ready")
|
||||
}
|
||||
|
||||
// isKubesharkRunning checks if Kubeshark is currently running in the cluster.
|
||||
func isKubesharkRunning(t *testing.T) bool {
|
||||
t.Helper()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
cmd := exec.CommandContext(ctx, "kubectl", "get", "pods", "-l", "app.kubernetes.io/name=kubeshark", "-o", "name")
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return strings.TrimSpace(string(output)) != ""
|
||||
}
|
||||
529
integration/mcp_test.go
Normal file
529
integration/mcp_test.go
Normal file
@@ -0,0 +1,529 @@
|
||||
//go:build integration
|
||||
|
||||
package integration
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// MCPRequest represents a JSON-RPC request
|
||||
type MCPRequest struct {
|
||||
JSONRPC string `json:"jsonrpc"`
|
||||
ID int `json:"id"`
|
||||
Method string `json:"method"`
|
||||
Params interface{} `json:"params,omitempty"`
|
||||
}
|
||||
|
||||
// MCPResponse represents a JSON-RPC response
|
||||
type MCPResponse struct {
|
||||
JSONRPC string `json:"jsonrpc"`
|
||||
ID int `json:"id"`
|
||||
Result json.RawMessage `json:"result,omitempty"`
|
||||
Error *MCPError `json:"error,omitempty"`
|
||||
}
|
||||
|
||||
// MCPError represents a JSON-RPC error
|
||||
type MCPError struct {
|
||||
Code int `json:"code"`
|
||||
Message string `json:"message"`
|
||||
}
|
||||
|
||||
// mcpSession represents a running MCP server session
|
||||
type mcpSession struct {
|
||||
cmd *exec.Cmd
|
||||
stdin io.WriteCloser
|
||||
stdout *bufio.Reader
|
||||
stderr *bytes.Buffer // Captured stderr for debugging
|
||||
cancel context.CancelFunc
|
||||
}
|
||||
|
||||
// startMCPSession starts an MCP server and returns a session for sending requests.
|
||||
// By default, starts in read-only mode (no --allow-destructive).
|
||||
func startMCPSession(t *testing.T, binary string, args ...string) *mcpSession {
|
||||
t.Helper()
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
cmdArgs := append([]string{"mcp"}, args...)
|
||||
cmd := exec.CommandContext(ctx, binary, cmdArgs...)
|
||||
|
||||
stdin, err := cmd.StdinPipe()
|
||||
if err != nil {
|
||||
cancel()
|
||||
t.Fatalf("Failed to create stdin pipe: %v", err)
|
||||
}
|
||||
|
||||
stdout, err := cmd.StdoutPipe()
|
||||
if err != nil {
|
||||
cancel()
|
||||
t.Fatalf("Failed to create stdout pipe: %v", err)
|
||||
}
|
||||
|
||||
// Capture stderr for debugging
|
||||
var stderrBuf bytes.Buffer
|
||||
cmd.Stderr = &stderrBuf
|
||||
|
||||
if err := cmd.Start(); err != nil {
|
||||
cancel()
|
||||
t.Fatalf("Failed to start MCP server: %v", err)
|
||||
}
|
||||
|
||||
return &mcpSession{
|
||||
cmd: cmd,
|
||||
stdin: stdin,
|
||||
stdout: bufio.NewReader(stdout),
|
||||
stderr: &stderrBuf,
|
||||
cancel: cancel,
|
||||
}
|
||||
}
|
||||
|
||||
// startMCPSessionWithDestructive starts an MCP server with --allow-destructive flag.
|
||||
func startMCPSessionWithDestructive(t *testing.T, binary string, args ...string) *mcpSession {
|
||||
t.Helper()
|
||||
allArgs := append([]string{"--allow-destructive"}, args...)
|
||||
return startMCPSession(t, binary, allArgs...)
|
||||
}
|
||||
|
||||
// sendRequest sends a JSON-RPC request and returns the response (30s timeout).
|
||||
func (s *mcpSession) sendRequest(t *testing.T, req MCPRequest) MCPResponse {
|
||||
t.Helper()
|
||||
return s.sendRequestWithTimeout(t, req, 30*time.Second)
|
||||
}
|
||||
|
||||
// sendRequestWithTimeout sends a JSON-RPC request with a custom timeout.
|
||||
func (s *mcpSession) sendRequestWithTimeout(t *testing.T, req MCPRequest, timeout time.Duration) MCPResponse {
|
||||
t.Helper()
|
||||
|
||||
reqBytes, err := json.Marshal(req)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to marshal request: %v", err)
|
||||
}
|
||||
|
||||
t.Logf("Sending: %s", string(reqBytes))
|
||||
|
||||
if _, err := s.stdin.Write(append(reqBytes, '\n')); err != nil {
|
||||
t.Fatalf("Failed to write request: %v", err)
|
||||
}
|
||||
|
||||
// Read response with timeout
|
||||
responseChan := make(chan string, 1)
|
||||
errChan := make(chan error, 1)
|
||||
|
||||
go func() {
|
||||
line, err := s.stdout.ReadString('\n')
|
||||
if err != nil {
|
||||
errChan <- err
|
||||
return
|
||||
}
|
||||
responseChan <- line
|
||||
}()
|
||||
|
||||
select {
|
||||
case line := <-responseChan:
|
||||
t.Logf("Received: %s", strings.TrimSpace(line))
|
||||
var resp MCPResponse
|
||||
if err := json.Unmarshal([]byte(line), &resp); err != nil {
|
||||
t.Fatalf("Failed to unmarshal response: %v\nResponse: %s", err, line)
|
||||
}
|
||||
return resp
|
||||
case err := <-errChan:
|
||||
t.Fatalf("Failed to read response: %v", err)
|
||||
return MCPResponse{}
|
||||
case <-time.After(timeout):
|
||||
t.Fatalf("Timeout waiting for MCP response after %v", timeout)
|
||||
return MCPResponse{}
|
||||
}
|
||||
}
|
||||
|
||||
// callTool invokes an MCP tool and returns the response (30s timeout).
|
||||
func (s *mcpSession) callTool(t *testing.T, id int, toolName string, args map[string]interface{}) MCPResponse {
|
||||
t.Helper()
|
||||
return s.callToolWithTimeout(t, id, toolName, args, 30*time.Second)
|
||||
}
|
||||
|
||||
// callToolWithTimeout invokes an MCP tool with a custom timeout.
|
||||
func (s *mcpSession) callToolWithTimeout(t *testing.T, id int, toolName string, args map[string]interface{}, timeout time.Duration) MCPResponse {
|
||||
t.Helper()
|
||||
|
||||
return s.sendRequestWithTimeout(t, MCPRequest{
|
||||
JSONRPC: "2.0",
|
||||
ID: id,
|
||||
Method: "tools/call",
|
||||
Params: map[string]interface{}{
|
||||
"name": toolName,
|
||||
"arguments": args,
|
||||
},
|
||||
}, timeout)
|
||||
}
|
||||
|
||||
// close terminates the MCP session.
|
||||
func (s *mcpSession) close() {
|
||||
s.cancel()
|
||||
_ = s.cmd.Wait()
|
||||
}
|
||||
|
||||
// getStderr returns any captured stderr output (useful for debugging failures).
|
||||
func (s *mcpSession) getStderr() string {
|
||||
if s.stderr == nil {
|
||||
return ""
|
||||
}
|
||||
return s.stderr.String()
|
||||
}
|
||||
|
||||
// initialize sends the MCP initialize request and returns the response.
|
||||
func (s *mcpSession) initialize(t *testing.T, id int) MCPResponse {
|
||||
t.Helper()
|
||||
return s.sendRequest(t, MCPRequest{
|
||||
JSONRPC: "2.0",
|
||||
ID: id,
|
||||
Method: "initialize",
|
||||
Params: map[string]interface{}{
|
||||
"protocolVersion": "2024-11-05",
|
||||
"capabilities": map[string]interface{}{},
|
||||
"clientInfo": map[string]interface{}{"name": "test", "version": "1.0"},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// TestMCP_Initialize tests the MCP initialization handshake.
|
||||
func TestMCP_Initialize(t *testing.T) {
|
||||
requireKubernetesCluster(t)
|
||||
session := startMCPSession(t, getKubesharkBinary(t))
|
||||
defer session.close()
|
||||
|
||||
resp := session.initialize(t, 1)
|
||||
if resp.Error != nil {
|
||||
t.Fatalf("Initialize failed: %s", resp.Error.Message)
|
||||
}
|
||||
|
||||
var result map[string]interface{}
|
||||
if err := json.Unmarshal(resp.Result, &result); err != nil {
|
||||
t.Fatalf("Failed to parse result: %v", err)
|
||||
}
|
||||
|
||||
if _, ok := result["capabilities"]; !ok {
|
||||
t.Error("Response missing capabilities")
|
||||
}
|
||||
if _, ok := result["serverInfo"]; !ok {
|
||||
t.Error("Response missing serverInfo")
|
||||
}
|
||||
}
|
||||
|
||||
// TestMCP_ToolsList_ReadOnly tests that tools/list returns only safe tools in read-only mode.
|
||||
func TestMCP_ToolsList_ReadOnly(t *testing.T) {
|
||||
requireKubernetesCluster(t)
|
||||
session := startMCPSession(t, getKubesharkBinary(t))
|
||||
defer session.close()
|
||||
|
||||
session.initialize(t, 1)
|
||||
resp := session.sendRequest(t, MCPRequest{JSONRPC: "2.0", ID: 2, Method: "tools/list"})
|
||||
if resp.Error != nil {
|
||||
t.Fatalf("tools/list failed: %s", resp.Error.Message)
|
||||
}
|
||||
|
||||
var result struct {
|
||||
Tools []struct{ Name string `json:"name"` } `json:"tools"`
|
||||
}
|
||||
if err := json.Unmarshal(resp.Result, &result); err != nil {
|
||||
t.Fatalf("Failed to parse result: %v", err)
|
||||
}
|
||||
|
||||
toolNames := make(map[string]bool)
|
||||
for _, tool := range result.Tools {
|
||||
toolNames[tool.Name] = true
|
||||
}
|
||||
|
||||
if !toolNames["check_kubeshark_status"] {
|
||||
t.Error("Missing expected tool: check_kubeshark_status")
|
||||
}
|
||||
if toolNames["start_kubeshark"] || toolNames["stop_kubeshark"] {
|
||||
t.Error("Destructive tools should not be available in read-only mode")
|
||||
}
|
||||
}
|
||||
|
||||
// TestMCP_ToolsList_WithDestructive tests that tools/list includes destructive tools when flag is set.
|
||||
func TestMCP_ToolsList_WithDestructive(t *testing.T) {
|
||||
requireKubernetesCluster(t)
|
||||
session := startMCPSessionWithDestructive(t, getKubesharkBinary(t))
|
||||
defer session.close()
|
||||
|
||||
session.initialize(t, 1)
|
||||
resp := session.sendRequest(t, MCPRequest{JSONRPC: "2.0", ID: 2, Method: "tools/list"})
|
||||
if resp.Error != nil {
|
||||
t.Fatalf("tools/list failed: %s", resp.Error.Message)
|
||||
}
|
||||
|
||||
var result struct {
|
||||
Tools []struct{ Name string `json:"name"` } `json:"tools"`
|
||||
}
|
||||
if err := json.Unmarshal(resp.Result, &result); err != nil {
|
||||
t.Fatalf("Failed to parse result: %v", err)
|
||||
}
|
||||
|
||||
toolNames := make(map[string]bool)
|
||||
for _, tool := range result.Tools {
|
||||
toolNames[tool.Name] = true
|
||||
}
|
||||
|
||||
for _, expected := range []string{"check_kubeshark_status", "start_kubeshark", "stop_kubeshark"} {
|
||||
if !toolNames[expected] {
|
||||
t.Errorf("Missing expected tool: %s", expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestMCP_CheckKubesharkStatus_NotRunning tests check_kubeshark_status when Kubeshark is not running.
|
||||
func TestMCP_CheckKubesharkStatus_NotRunning(t *testing.T) {
|
||||
requireKubernetesCluster(t)
|
||||
binary := getKubesharkBinary(t)
|
||||
cleanupKubeshark(t, binary)
|
||||
|
||||
session := startMCPSession(t, binary)
|
||||
defer session.close()
|
||||
|
||||
session.initialize(t, 1)
|
||||
resp := session.callTool(t, 2, "check_kubeshark_status", nil)
|
||||
if resp.Error != nil {
|
||||
t.Fatalf("check_kubeshark_status failed: %s", resp.Error.Message)
|
||||
}
|
||||
|
||||
var result struct {
|
||||
Content []struct{ Text string `json:"text"` } `json:"content"`
|
||||
}
|
||||
if err := json.Unmarshal(resp.Result, &result); err != nil {
|
||||
t.Fatalf("Failed to parse result: %v", err)
|
||||
}
|
||||
|
||||
if len(result.Content) == 0 || (!strings.Contains(result.Content[0].Text, "not running") && !strings.Contains(result.Content[0].Text, "NOT")) {
|
||||
t.Errorf("Expected 'not running' status")
|
||||
}
|
||||
}
|
||||
|
||||
// TestMCP_StartKubeshark tests the start_kubeshark tool.
|
||||
func TestMCP_StartKubeshark(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping in short mode")
|
||||
}
|
||||
requireKubernetesCluster(t)
|
||||
binary := getKubesharkBinary(t)
|
||||
cleanupKubeshark(t, binary)
|
||||
t.Cleanup(func() { cleanupKubeshark(t, binary) })
|
||||
|
||||
session := startMCPSessionWithDestructive(t, binary)
|
||||
defer session.close()
|
||||
|
||||
session.initialize(t, 1)
|
||||
resp := session.callToolWithTimeout(t, 2, "start_kubeshark", nil, 3*time.Minute)
|
||||
if resp.Error != nil {
|
||||
t.Fatalf("start_kubeshark failed: %s", resp.Error.Message)
|
||||
}
|
||||
|
||||
if !isKubesharkRunning(t) {
|
||||
t.Error("Kubeshark should be running after start_kubeshark")
|
||||
}
|
||||
}
|
||||
|
||||
// TestMCP_StartKubeshark_WithoutFlag tests that start_kubeshark fails without --allow-destructive.
|
||||
func TestMCP_StartKubeshark_WithoutFlag(t *testing.T) {
|
||||
requireKubernetesCluster(t)
|
||||
session := startMCPSession(t, getKubesharkBinary(t))
|
||||
defer session.close()
|
||||
|
||||
session.initialize(t, 1)
|
||||
resp := session.callTool(t, 2, "start_kubeshark", nil)
|
||||
|
||||
var result struct {
|
||||
Content []struct{ Text string `json:"text"` } `json:"content"`
|
||||
IsError bool `json:"isError"`
|
||||
}
|
||||
if err := json.Unmarshal(resp.Result, &result); err != nil {
|
||||
t.Fatalf("Failed to parse result: %v", err)
|
||||
}
|
||||
|
||||
if !result.IsError {
|
||||
t.Error("Expected isError=true without --allow-destructive")
|
||||
}
|
||||
}
|
||||
|
||||
// TestMCP_StopKubeshark tests the stop_kubeshark tool.
|
||||
func TestMCP_StopKubeshark(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping in short mode")
|
||||
}
|
||||
requireKubernetesCluster(t)
|
||||
binary := getKubesharkBinary(t)
|
||||
|
||||
session := startMCPSessionWithDestructive(t, binary)
|
||||
defer session.close()
|
||||
|
||||
session.initialize(t, 0)
|
||||
|
||||
// Start Kubeshark if not running
|
||||
if !isKubesharkRunning(t) {
|
||||
resp := session.callToolWithTimeout(t, 1, "start_kubeshark", nil, 2*time.Minute)
|
||||
if resp.Error != nil {
|
||||
t.Skipf("Could not start Kubeshark: %v", resp.Error.Message)
|
||||
}
|
||||
}
|
||||
|
||||
resp := session.callToolWithTimeout(t, 2, "stop_kubeshark", nil, 2*time.Minute)
|
||||
if resp.Error != nil {
|
||||
t.Fatalf("stop_kubeshark failed: %s", resp.Error.Message)
|
||||
}
|
||||
|
||||
time.Sleep(5 * time.Second)
|
||||
if isKubesharkRunning(t) {
|
||||
t.Error("Kubeshark should not be running after stop_kubeshark")
|
||||
}
|
||||
}
|
||||
|
||||
// TestMCP_StopKubeshark_WithoutFlag tests that stop_kubeshark fails without --allow-destructive.
|
||||
func TestMCP_StopKubeshark_WithoutFlag(t *testing.T) {
|
||||
requireKubernetesCluster(t)
|
||||
session := startMCPSession(t, getKubesharkBinary(t))
|
||||
defer session.close()
|
||||
|
||||
session.initialize(t, 1)
|
||||
resp := session.callTool(t, 2, "stop_kubeshark", nil)
|
||||
|
||||
var result struct {
|
||||
IsError bool `json:"isError"`
|
||||
}
|
||||
if err := json.Unmarshal(resp.Result, &result); err != nil {
|
||||
t.Fatalf("Failed to parse result: %v", err)
|
||||
}
|
||||
|
||||
if !result.IsError {
|
||||
t.Error("Expected isError=true without --allow-destructive")
|
||||
}
|
||||
}
|
||||
|
||||
// TestMCP_FullLifecycle tests the complete lifecycle: check -> start -> check -> stop -> check
|
||||
func TestMCP_FullLifecycle(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping in short mode")
|
||||
}
|
||||
requireKubernetesCluster(t)
|
||||
binary := getKubesharkBinary(t)
|
||||
cleanupKubeshark(t, binary)
|
||||
|
||||
session := startMCPSessionWithDestructive(t, binary)
|
||||
defer session.close()
|
||||
|
||||
session.initialize(t, 1)
|
||||
|
||||
// Check -> Start -> Check -> Stop -> Check
|
||||
if resp := session.callTool(t, 2, "check_kubeshark_status", nil); resp.Error != nil {
|
||||
t.Fatalf("Initial status check failed: %s", resp.Error.Message)
|
||||
}
|
||||
|
||||
if resp := session.callToolWithTimeout(t, 3, "start_kubeshark", nil, 3*time.Minute); resp.Error != nil {
|
||||
t.Fatalf("Start failed: %s", resp.Error.Message)
|
||||
}
|
||||
if err := waitForKubesharkReady(t, binary, startupTimeout); err != nil {
|
||||
t.Fatalf("Kubeshark did not become ready: %v", err)
|
||||
}
|
||||
|
||||
if resp := session.callTool(t, 4, "check_kubeshark_status", nil); resp.Error != nil {
|
||||
t.Fatalf("Status check after start failed: %s", resp.Error.Message)
|
||||
}
|
||||
|
||||
if resp := session.callToolWithTimeout(t, 5, "stop_kubeshark", nil, 2*time.Minute); resp.Error != nil {
|
||||
t.Fatalf("Stop failed: %s", resp.Error.Message)
|
||||
}
|
||||
time.Sleep(5 * time.Second)
|
||||
|
||||
if resp := session.callTool(t, 6, "check_kubeshark_status", nil); resp.Error != nil {
|
||||
t.Fatalf("Final status check failed: %s", resp.Error.Message)
|
||||
}
|
||||
}
|
||||
|
||||
// TestMCP_APIToolsRequireKubeshark tests that API tools return helpful errors when Kubeshark isn't running.
|
||||
func TestMCP_APIToolsRequireKubeshark(t *testing.T) {
|
||||
requireKubernetesCluster(t)
|
||||
binary := getKubesharkBinary(t)
|
||||
cleanupKubeshark(t, binary)
|
||||
|
||||
session := startMCPSession(t, binary)
|
||||
defer session.close()
|
||||
|
||||
session.initialize(t, 1)
|
||||
|
||||
for i, tool := range []string{"list_workloads", "list_api_calls", "get_api_stats"} {
|
||||
resp := session.callTool(t, i+2, tool, nil)
|
||||
// Either error or helpful message is acceptable
|
||||
if resp.Error != nil {
|
||||
t.Logf("%s returned error (expected): %s", tool, resp.Error.Message)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestMCP_SetFlags tests that --set flags are passed correctly.
|
||||
func TestMCP_SetFlags(t *testing.T) {
|
||||
requireKubernetesCluster(t)
|
||||
session := startMCPSession(t, getKubesharkBinary(t), "--set", "tap.namespaces={default}")
|
||||
defer session.close()
|
||||
|
||||
session.initialize(t, 1)
|
||||
resp := session.sendRequest(t, MCPRequest{JSONRPC: "2.0", ID: 2, Method: "tools/list"})
|
||||
if resp.Error != nil {
|
||||
t.Fatalf("tools/list failed with --set flags: %s", resp.Error.Message)
|
||||
}
|
||||
}
|
||||
|
||||
// BenchmarkMCP_CheckStatus benchmarks the check_kubeshark_status tool.
|
||||
func BenchmarkMCP_CheckStatus(b *testing.B) {
|
||||
if testing.Short() {
|
||||
b.Skip("Skipping benchmark in short mode")
|
||||
}
|
||||
if !hasKubernetesCluster() {
|
||||
b.Skip("Skipping: no Kubernetes cluster available")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
cmd := exec.CommandContext(ctx, getKubesharkBinary(b), "mcp")
|
||||
stdin, _ := cmd.StdinPipe()
|
||||
stdout, _ := cmd.StdoutPipe()
|
||||
reader := bufio.NewReader(stdout)
|
||||
|
||||
if err := cmd.Start(); err != nil {
|
||||
b.Fatalf("Failed to start MCP: %v", err)
|
||||
}
|
||||
defer func() { cancel(); _ = cmd.Wait() }()
|
||||
|
||||
// Initialize
|
||||
initReq, _ := json.Marshal(MCPRequest{
|
||||
JSONRPC: "2.0", ID: 0, Method: "initialize",
|
||||
Params: map[string]interface{}{
|
||||
"protocolVersion": "2024-11-05",
|
||||
"capabilities": map[string]interface{}{},
|
||||
"clientInfo": map[string]interface{}{"name": "bench", "version": "1.0"},
|
||||
},
|
||||
})
|
||||
_, _ = stdin.Write(append(initReq, '\n'))
|
||||
_, _ = reader.ReadString('\n')
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
req, _ := json.Marshal(MCPRequest{
|
||||
JSONRPC: "2.0", ID: i + 1, Method: "tools/call",
|
||||
Params: map[string]interface{}{"name": "check_kubeshark_status", "arguments": map[string]interface{}{}},
|
||||
})
|
||||
if _, err := stdin.Write(append(req, '\n')); err != nil {
|
||||
b.Fatalf("Write failed: %v", err)
|
||||
}
|
||||
if _, err := reader.ReadString('\n'); err != nil {
|
||||
b.Fatalf("Read failed: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -8,5 +8,5 @@ const (
|
||||
HubServiceName = HubPodName
|
||||
K8sAllNamespaces = ""
|
||||
MinKubernetesServerVersion = "1.16.0"
|
||||
AppLabelKey = "app.kubehq.com/app"
|
||||
AppLabelKey = "app.kubeshark.com/app"
|
||||
)
|
||||
|
||||
@@ -4,17 +4,17 @@ apiVersion: networking.k8s.io/v1
|
||||
kind: NetworkPolicy
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: kubeshark-52.10.0
|
||||
helm.sh/chart: kubeshark-52.12.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.10.0"
|
||||
app.kubernetes.io/version: "52.12.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
name: kubeshark-hub-network-policy
|
||||
namespace: default
|
||||
spec:
|
||||
podSelector:
|
||||
matchLabels:
|
||||
app.kubehq.com/app: hub
|
||||
app.kubeshark.com/app: hub
|
||||
policyTypes:
|
||||
- Ingress
|
||||
- Egress
|
||||
@@ -33,10 +33,10 @@ apiVersion: networking.k8s.io/v1
|
||||
kind: NetworkPolicy
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: kubeshark-52.10.0
|
||||
helm.sh/chart: kubeshark-52.12.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.10.0"
|
||||
app.kubernetes.io/version: "52.12.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
annotations:
|
||||
name: kubeshark-front-network-policy
|
||||
@@ -44,7 +44,7 @@ metadata:
|
||||
spec:
|
||||
podSelector:
|
||||
matchLabels:
|
||||
app.kubehq.com/app: front
|
||||
app.kubeshark.com/app: front
|
||||
policyTypes:
|
||||
- Ingress
|
||||
- Egress
|
||||
@@ -60,10 +60,10 @@ apiVersion: networking.k8s.io/v1
|
||||
kind: NetworkPolicy
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: kubeshark-52.10.0
|
||||
helm.sh/chart: kubeshark-52.12.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.10.0"
|
||||
app.kubernetes.io/version: "52.12.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
annotations:
|
||||
name: kubeshark-dex-network-policy
|
||||
@@ -71,7 +71,7 @@ metadata:
|
||||
spec:
|
||||
podSelector:
|
||||
matchLabels:
|
||||
app.kubehq.com/app: dex
|
||||
app.kubeshark.com/app: dex
|
||||
policyTypes:
|
||||
- Ingress
|
||||
- Egress
|
||||
@@ -87,10 +87,10 @@ apiVersion: networking.k8s.io/v1
|
||||
kind: NetworkPolicy
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: kubeshark-52.10.0
|
||||
helm.sh/chart: kubeshark-52.12.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.10.0"
|
||||
app.kubernetes.io/version: "52.12.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
annotations:
|
||||
name: kubeshark-worker-network-policy
|
||||
@@ -98,7 +98,7 @@ metadata:
|
||||
spec:
|
||||
podSelector:
|
||||
matchLabels:
|
||||
app.kubehq.com/app: worker
|
||||
app.kubeshark.com/app: worker
|
||||
policyTypes:
|
||||
- Ingress
|
||||
- Egress
|
||||
@@ -116,10 +116,10 @@ apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: kubeshark-52.10.0
|
||||
helm.sh/chart: kubeshark-52.12.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.10.0"
|
||||
app.kubernetes.io/version: "52.12.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
name: kubeshark-service-account
|
||||
namespace: default
|
||||
@@ -131,11 +131,11 @@ metadata:
|
||||
name: kubeshark-secret
|
||||
namespace: default
|
||||
labels:
|
||||
app.kubehq.com/app: hub
|
||||
helm.sh/chart: kubeshark-52.10.0
|
||||
app.kubeshark.com/app: hub
|
||||
helm.sh/chart: kubeshark-52.12.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.10.0"
|
||||
app.kubernetes.io/version: "52.12.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
stringData:
|
||||
LICENSE: ''
|
||||
@@ -150,11 +150,11 @@ metadata:
|
||||
name: kubeshark-saml-x509-crt-secret
|
||||
namespace: default
|
||||
labels:
|
||||
app.kubehq.com/app: hub
|
||||
helm.sh/chart: kubeshark-52.10.0
|
||||
app.kubeshark.com/app: hub
|
||||
helm.sh/chart: kubeshark-52.12.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.10.0"
|
||||
app.kubernetes.io/version: "52.12.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
stringData:
|
||||
AUTH_SAML_X509_CRT: |
|
||||
@@ -166,11 +166,11 @@ metadata:
|
||||
name: kubeshark-saml-x509-key-secret
|
||||
namespace: default
|
||||
labels:
|
||||
app.kubehq.com/app: hub
|
||||
helm.sh/chart: kubeshark-52.10.0
|
||||
app.kubeshark.com/app: hub
|
||||
helm.sh/chart: kubeshark-52.12.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.10.0"
|
||||
app.kubernetes.io/version: "52.12.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
stringData:
|
||||
AUTH_SAML_X509_KEY: |
|
||||
@@ -182,10 +182,10 @@ metadata:
|
||||
name: kubeshark-nginx-config-map
|
||||
namespace: default
|
||||
labels:
|
||||
helm.sh/chart: kubeshark-52.10.0
|
||||
helm.sh/chart: kubeshark-52.12.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.10.0"
|
||||
app.kubernetes.io/version: "52.12.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
data:
|
||||
default.conf: |
|
||||
@@ -245,11 +245,11 @@ metadata:
|
||||
name: kubeshark-config-map
|
||||
namespace: default
|
||||
labels:
|
||||
app.kubehq.com/app: hub
|
||||
helm.sh/chart: kubeshark-52.10.0
|
||||
app.kubeshark.com/app: hub
|
||||
helm.sh/chart: kubeshark-52.12.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.10.0"
|
||||
app.kubernetes.io/version: "52.12.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
data:
|
||||
POD_REGEX: '.*'
|
||||
@@ -281,22 +281,22 @@ data:
|
||||
DEFAULT_FILTER: ""
|
||||
TRAFFIC_SAMPLE_RATE: '100'
|
||||
JSON_TTL: '5m'
|
||||
PCAP_TTL: '10s'
|
||||
PCAP_ERROR_TTL: '60s'
|
||||
PCAP_TTL: '0'
|
||||
PCAP_ERROR_TTL: '0'
|
||||
TIMEZONE: ' '
|
||||
CLOUD_LICENSE_ENABLED: 'true'
|
||||
AI_ASSISTANT_ENABLED: 'true'
|
||||
DUPLICATE_TIMEFRAME: '200ms'
|
||||
ENABLED_DISSECTORS: 'amqp,dns,http,icmp,kafka,redis,ws,ldap,radius,diameter'
|
||||
ENABLED_DISSECTORS: 'amqp,dns,http,icmp,kafka,redis,ws,ldap,radius,diameter,udp-flow,tcp-flow'
|
||||
CUSTOM_MACROS: '{"https":"tls and (http or http2)"}'
|
||||
DISSECTORS_UPDATING_ENABLED: 'true'
|
||||
DETECT_DUPLICATES: 'false'
|
||||
PCAP_DUMP_ENABLE: 'true'
|
||||
PCAP_DUMP_ENABLE: 'false'
|
||||
PCAP_TIME_INTERVAL: '1m'
|
||||
PCAP_MAX_TIME: '1h'
|
||||
PCAP_MAX_SIZE: '500MB'
|
||||
PORT_MAPPING: '{"amqp":[5671,5672],"diameter":[3868],"http":[80,443,8080],"kafka":[9092],"ldap":[389],"redis":[6379]}'
|
||||
RAW_CAPTURE: 'false'
|
||||
RAW_CAPTURE: 'true'
|
||||
RAW_CAPTURE_STORAGE_SIZE: '1Gi'
|
||||
---
|
||||
# Source: kubeshark/templates/02-cluster-role.yaml
|
||||
@@ -304,10 +304,10 @@ apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: kubeshark-52.10.0
|
||||
helm.sh/chart: kubeshark-52.12.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.10.0"
|
||||
app.kubernetes.io/version: "52.12.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
name: kubeshark-cluster-role-default
|
||||
namespace: default
|
||||
@@ -351,10 +351,10 @@ apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: kubeshark-52.10.0
|
||||
helm.sh/chart: kubeshark-52.12.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.10.0"
|
||||
app.kubernetes.io/version: "52.12.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
name: kubeshark-cluster-role-binding-default
|
||||
namespace: default
|
||||
@@ -372,10 +372,10 @@ apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: kubeshark-52.10.0
|
||||
helm.sh/chart: kubeshark-52.12.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.10.0"
|
||||
app.kubernetes.io/version: "52.12.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
annotations:
|
||||
name: kubeshark-self-config-role
|
||||
@@ -410,16 +410,22 @@ rules:
|
||||
verbs:
|
||||
- create
|
||||
- get
|
||||
- apiGroups:
|
||||
- batch
|
||||
resources:
|
||||
- jobs
|
||||
verbs:
|
||||
- "*"
|
||||
---
|
||||
# Source: kubeshark/templates/03-cluster-role-binding.yaml
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: kubeshark-52.10.0
|
||||
helm.sh/chart: kubeshark-52.12.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.10.0"
|
||||
app.kubernetes.io/version: "52.12.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
annotations:
|
||||
name: kubeshark-self-config-role-binding
|
||||
@@ -438,11 +444,11 @@ apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
app.kubehq.com/app: hub
|
||||
helm.sh/chart: kubeshark-52.10.0
|
||||
app.kubeshark.com/app: hub
|
||||
helm.sh/chart: kubeshark-52.12.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.10.0"
|
||||
app.kubernetes.io/version: "52.12.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
name: kubeshark-hub
|
||||
namespace: default
|
||||
@@ -452,7 +458,7 @@ spec:
|
||||
port: 80
|
||||
targetPort: 8080
|
||||
selector:
|
||||
app.kubehq.com/app: hub
|
||||
app.kubeshark.com/app: hub
|
||||
type: ClusterIP
|
||||
---
|
||||
# Source: kubeshark/templates/07-front-service.yaml
|
||||
@@ -460,10 +466,10 @@ apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: kubeshark-52.10.0
|
||||
helm.sh/chart: kubeshark-52.12.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.10.0"
|
||||
app.kubernetes.io/version: "52.12.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
name: kubeshark-front
|
||||
namespace: default
|
||||
@@ -473,7 +479,7 @@ spec:
|
||||
port: 80
|
||||
targetPort: 8080
|
||||
selector:
|
||||
app.kubehq.com/app: front
|
||||
app.kubeshark.com/app: front
|
||||
type: ClusterIP
|
||||
---
|
||||
# Source: kubeshark/templates/15-worker-service-metrics.yaml
|
||||
@@ -481,10 +487,10 @@ kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: kubeshark-52.10.0
|
||||
helm.sh/chart: kubeshark-52.12.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.10.0"
|
||||
app.kubernetes.io/version: "52.12.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
annotations:
|
||||
prometheus.io/scrape: 'true'
|
||||
@@ -493,11 +499,11 @@ metadata:
|
||||
namespace: default
|
||||
spec:
|
||||
selector:
|
||||
app.kubehq.com/app: worker
|
||||
helm.sh/chart: kubeshark-52.10.0
|
||||
app.kubeshark.com/app: worker
|
||||
helm.sh/chart: kubeshark-52.12.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.10.0"
|
||||
app.kubernetes.io/version: "52.12.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
ports:
|
||||
- name: metrics
|
||||
@@ -510,10 +516,10 @@ kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: kubeshark-52.10.0
|
||||
helm.sh/chart: kubeshark-52.12.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.10.0"
|
||||
app.kubernetes.io/version: "52.12.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
annotations:
|
||||
prometheus.io/scrape: 'true'
|
||||
@@ -522,11 +528,11 @@ metadata:
|
||||
namespace: default
|
||||
spec:
|
||||
selector:
|
||||
app.kubehq.com/app: hub
|
||||
helm.sh/chart: kubeshark-52.10.0
|
||||
app.kubeshark.com/app: hub
|
||||
helm.sh/chart: kubeshark-52.12.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.10.0"
|
||||
app.kubernetes.io/version: "52.12.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
ports:
|
||||
- name: metrics
|
||||
@@ -539,29 +545,29 @@ apiVersion: apps/v1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
labels:
|
||||
app.kubehq.com/app: worker
|
||||
app.kubeshark.com/app: worker
|
||||
sidecar.istio.io/inject: "false"
|
||||
helm.sh/chart: kubeshark-52.10.0
|
||||
helm.sh/chart: kubeshark-52.12.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.10.0"
|
||||
app.kubernetes.io/version: "52.12.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
name: kubeshark-worker-daemon-set
|
||||
namespace: default
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubehq.com/app: worker
|
||||
app.kubeshark.com/app: worker
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubehq.com/app: worker
|
||||
helm.sh/chart: kubeshark-52.10.0
|
||||
app.kubeshark.com/app: worker
|
||||
helm.sh/chart: kubeshark-52.12.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.10.0"
|
||||
app.kubernetes.io/version: "52.12.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
name: kubeshark-worker-daemon-set
|
||||
namespace: kubeshark
|
||||
@@ -571,7 +577,7 @@ spec:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- mkdir -p /sys/fs/bpf && mount | grep -q '/sys/fs/bpf' || mount -t bpf bpf /sys/fs/bpf
|
||||
image: 'docker.io/kubeshark/worker:v52.10'
|
||||
image: 'docker.io/kubeshark/worker:v52.12'
|
||||
imagePullPolicy: Always
|
||||
name: mount-bpf
|
||||
securityContext:
|
||||
@@ -601,10 +607,10 @@ spec:
|
||||
- -staletimeout
|
||||
- '30'
|
||||
- -storage-size
|
||||
- '5Gi'
|
||||
- '10Gi'
|
||||
- -capture-db-max-size
|
||||
- '500Mi'
|
||||
image: 'docker.io/kubeshark/worker:v52.10'
|
||||
image: 'docker.io/kubeshark/worker:v52.12'
|
||||
imagePullPolicy: Always
|
||||
name: sniffer
|
||||
ports:
|
||||
@@ -625,7 +631,7 @@ spec:
|
||||
- name: TCP_STREAM_CHANNEL_TIMEOUT_SHOW
|
||||
value: 'false'
|
||||
- name: KUBESHARK_CLOUD_API_URL
|
||||
value: 'https://api.kubehq.com'
|
||||
value: 'https://api.kubeshark.com'
|
||||
- name: PROFILING_ENABLED
|
||||
value: 'false'
|
||||
- name: SENTRY_ENABLED
|
||||
@@ -678,7 +684,7 @@ spec:
|
||||
- -disable-tls-log
|
||||
- -loglevel
|
||||
- 'warning'
|
||||
image: 'docker.io/kubeshark/worker:v52.10'
|
||||
image: 'docker.io/kubeshark/worker:v52.12'
|
||||
imagePullPolicy: Always
|
||||
name: tracer
|
||||
env:
|
||||
@@ -762,18 +768,18 @@ spec:
|
||||
name: root
|
||||
- name: data
|
||||
emptyDir:
|
||||
sizeLimit: 5Gi
|
||||
sizeLimit: 10Gi
|
||||
---
|
||||
# Source: kubeshark/templates/04-hub-deployment.yaml
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app.kubehq.com/app: hub
|
||||
helm.sh/chart: kubeshark-52.10.0
|
||||
app.kubeshark.com/app: hub
|
||||
helm.sh/chart: kubeshark-52.12.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.10.0"
|
||||
app.kubernetes.io/version: "52.12.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
name: kubeshark-hub
|
||||
namespace: default
|
||||
@@ -781,17 +787,17 @@ spec:
|
||||
replicas: 1 # Set the desired number of replicas
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubehq.com/app: hub
|
||||
app.kubeshark.com/app: hub
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubehq.com/app: hub
|
||||
helm.sh/chart: kubeshark-52.10.0
|
||||
app.kubeshark.com/app: hub
|
||||
helm.sh/chart: kubeshark-52.12.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.10.0"
|
||||
app.kubernetes.io/version: "52.12.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
spec:
|
||||
dnsPolicy: ClusterFirstWithHostNet
|
||||
@@ -807,7 +813,13 @@ spec:
|
||||
- -capture-stop-after
|
||||
- "5m"
|
||||
- -snapshot-size-limit
|
||||
- '1Gi'
|
||||
- '20Gi'
|
||||
- -dissector-image
|
||||
- 'kubeshark/worker:master'
|
||||
- -dissector-cpu
|
||||
- '1'
|
||||
- -dissector-memory
|
||||
- '4Gi'
|
||||
env:
|
||||
- name: POD_NAME
|
||||
valueFrom:
|
||||
@@ -822,10 +834,10 @@ spec:
|
||||
- name: SENTRY_ENVIRONMENT
|
||||
value: 'production'
|
||||
- name: KUBESHARK_CLOUD_API_URL
|
||||
value: 'https://api.kubehq.com'
|
||||
value: 'https://api.kubeshark.com'
|
||||
- name: PROFILING_ENABLED
|
||||
value: 'false'
|
||||
image: 'docker.io/kubeshark/hub:v52.10'
|
||||
image: 'docker.io/kubeshark/hub:v52.12'
|
||||
imagePullPolicy: Always
|
||||
readinessProbe:
|
||||
periodSeconds: 5
|
||||
@@ -885,18 +897,18 @@ spec:
|
||||
path: kubeshark.key
|
||||
- name: snapshots-volume
|
||||
emptyDir:
|
||||
sizeLimit: 1Gi
|
||||
sizeLimit: 20Gi
|
||||
---
|
||||
# Source: kubeshark/templates/06-front-deployment.yaml
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app.kubehq.com/app: front
|
||||
helm.sh/chart: kubeshark-52.10.0
|
||||
app.kubeshark.com/app: front
|
||||
helm.sh/chart: kubeshark-52.12.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.10.0"
|
||||
app.kubernetes.io/version: "52.12.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
name: kubeshark-front
|
||||
namespace: default
|
||||
@@ -904,17 +916,17 @@ spec:
|
||||
replicas: 1 # Set the desired number of replicas
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubehq.com/app: front
|
||||
app.kubeshark.com/app: front
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubehq.com/app: front
|
||||
helm.sh/chart: kubeshark-52.10.0
|
||||
app.kubeshark.com/app: front
|
||||
helm.sh/chart: kubeshark-52.12.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.10.0"
|
||||
app.kubernetes.io/version: "52.12.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
spec:
|
||||
containers:
|
||||
@@ -926,7 +938,7 @@ spec:
|
||||
- name: REACT_APP_COMPLETE_STREAMING_ENABLED
|
||||
value: 'true'
|
||||
- name: REACT_APP_STREAMING_TYPE
|
||||
value: ''
|
||||
value: 'connect-rpc'
|
||||
- name: REACT_APP_AUTH_SAML_IDP_METADATA_URL
|
||||
value: ' '
|
||||
- name: REACT_APP_TIMEZONE
|
||||
@@ -953,11 +965,13 @@ spec:
|
||||
value: 'false'
|
||||
- name: REACT_APP_DISSECTORS_UPDATING_ENABLED
|
||||
value: 'true'
|
||||
- name: REACT_APP_RAW_CAPTURE_ENABLED
|
||||
value: 'true'
|
||||
- name: REACT_APP_SENTRY_ENABLED
|
||||
value: 'false'
|
||||
- name: REACT_APP_SENTRY_ENVIRONMENT
|
||||
value: 'production'
|
||||
image: 'docker.io/kubeshark/front:v52.10'
|
||||
image: 'docker.io/kubeshark/front:v52.12'
|
||||
imagePullPolicy: Always
|
||||
name: kubeshark-front
|
||||
livenessProbe:
|
||||
|
||||
@@ -5,7 +5,7 @@ metadata:
|
||||
spec:
|
||||
acme:
|
||||
server: https://acme-v02.api.letsencrypt.org/directory
|
||||
email: info@kubehq.com
|
||||
email: info@kubeshark.com
|
||||
privateKeySecretRef:
|
||||
name: letsencrypt-prod-key
|
||||
solvers:
|
||||
|
||||
185
mcp/README.md
Normal file
185
mcp/README.md
Normal file
@@ -0,0 +1,185 @@
|
||||
# Kubeshark MCP Server
|
||||
|
||||
[Kubeshark](https://kubeshark.com) MCP (Model Context Protocol) server enables AI assistants like Claude Desktop, Cursor, and other MCP-compatible clients to query real-time Kubernetes network traffic.
|
||||
|
||||
## Features
|
||||
|
||||
- **L7 API Traffic Analysis**: Query HTTP, gRPC, Redis, Kafka, DNS transactions
|
||||
- **L4 Network Flows**: View TCP/UDP flows with traffic statistics
|
||||
- **Cluster Management**: Start/stop Kubeshark deployments (with safety controls)
|
||||
- **PCAP Snapshots**: Create and export network captures
|
||||
- **Built-in Prompts**: Pre-configured prompts for common analysis tasks
|
||||
|
||||
## Installation
|
||||
|
||||
### 1. Install Kubeshark CLI
|
||||
|
||||
```bash
|
||||
# macOS
|
||||
brew install kubeshark
|
||||
|
||||
# Linux
|
||||
sh <(curl -Ls https://kubeshark.com/install)
|
||||
|
||||
# Windows (PowerShell)
|
||||
choco install kubeshark
|
||||
```
|
||||
|
||||
Or download from [GitHub Releases](https://github.com/kubeshark/kubeshark/releases).
|
||||
|
||||
### 2. Configure Claude Desktop
|
||||
|
||||
Add to your Claude Desktop configuration:
|
||||
|
||||
**macOS**: `~/Library/Application Support/Claude/claude_desktop_config.json`
|
||||
**Windows**: `%APPDATA%\Claude\claude_desktop_config.json`
|
||||
|
||||
#### URL Mode (Recommended for existing deployments)
|
||||
|
||||
```json
|
||||
{
|
||||
"mcpServers": {
|
||||
"kubeshark": {
|
||||
"command": "kubeshark",
|
||||
"args": ["mcp", "--url", "https://kubeshark.example.com"]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### Proxy Mode (Requires kubectl access)
|
||||
|
||||
```json
|
||||
{
|
||||
"mcpServers": {
|
||||
"kubeshark": {
|
||||
"command": "kubeshark",
|
||||
"args": ["mcp", "--kubeconfig", "/path/to/.kube/config"]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### With Destructive Operations
|
||||
|
||||
```json
|
||||
{
|
||||
"mcpServers": {
|
||||
"kubeshark": {
|
||||
"command": "kubeshark",
|
||||
"args": ["mcp", "--allow-destructive", "--kubeconfig", "/path/to/.kube/config"]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 3. Generate Configuration
|
||||
|
||||
Use the CLI to generate configuration:
|
||||
|
||||
```bash
|
||||
kubeshark mcp --mcp-config --url https://kubeshark.example.com
|
||||
```
|
||||
|
||||
## Available Tools
|
||||
|
||||
### Traffic Analysis (All Modes)
|
||||
|
||||
| Tool | Description |
|
||||
|------|-------------|
|
||||
| `list_workloads` | List pods, services, namespaces with observed traffic |
|
||||
| `list_api_calls` | Query L7 API transactions with KFL filtering |
|
||||
| `get_api_call` | Get detailed info about a specific API call |
|
||||
| `get_api_stats` | Get aggregated API statistics |
|
||||
| `list_l4_flows` | List L4 (TCP/UDP) network flows |
|
||||
| `get_l4_flow_summary` | Get L4 connectivity summary |
|
||||
| `list_snapshots` | List all PCAP snapshots |
|
||||
| `create_snapshot` | Create a new PCAP snapshot |
|
||||
| `get_dissection_status` | Check L7 protocol parsing status |
|
||||
| `enable_dissection` | Enable L7 protocol dissection |
|
||||
| `disable_dissection` | Disable L7 protocol dissection |
|
||||
|
||||
### Cluster Management (Proxy Mode Only)
|
||||
|
||||
| Tool | Description | Requires |
|
||||
|------|-------------|----------|
|
||||
| `check_kubeshark_status` | Check if Kubeshark is running | - |
|
||||
| `start_kubeshark` | Deploy Kubeshark to cluster | `--allow-destructive` |
|
||||
| `stop_kubeshark` | Remove Kubeshark from cluster | `--allow-destructive` |
|
||||
|
||||
## Available Prompts
|
||||
|
||||
| Prompt | Description |
|
||||
|--------|-------------|
|
||||
| `analyze_traffic` | Analyze API traffic patterns and identify issues |
|
||||
| `find_errors` | Find and summarize API errors and failures |
|
||||
| `trace_request` | Trace a request path through microservices |
|
||||
| `show_topology` | Show service communication topology |
|
||||
| `latency_analysis` | Analyze latency patterns and identify slow endpoints |
|
||||
| `security_audit` | Audit traffic for security concerns |
|
||||
| `compare_traffic` | Compare traffic patterns between time periods |
|
||||
| `debug_connection` | Debug connectivity issues between services |
|
||||
|
||||
## Example Conversations
|
||||
|
||||
```
|
||||
User: Show me all HTTP 500 errors in the last hour
|
||||
|
||||
Claude: I'll query the API traffic for 500 errors.
|
||||
[Calling list_api_calls with kfl="http and response.status == 500"]
|
||||
|
||||
Found 12 HTTP 500 errors:
|
||||
1. POST /api/checkout -> payment-service (500)
|
||||
Time: 10:23:45 | Latency: 2340ms
|
||||
...
|
||||
```
|
||||
|
||||
```
|
||||
User: What services are communicating with the database?
|
||||
|
||||
Claude: Let me check the L4 flows to the database.
|
||||
[Calling list_l4_flows with dst_filter="postgres"]
|
||||
|
||||
Found 5 services connecting to postgres:5432:
|
||||
- orders-service: 456KB transferred
|
||||
- users-service: 123KB transferred
|
||||
...
|
||||
```
|
||||
|
||||
## CLI Options
|
||||
|
||||
| Option | Description |
|
||||
|--------|-------------|
|
||||
| `--url` | Direct URL to Kubeshark Hub |
|
||||
| `--kubeconfig` | Path to kubeconfig file |
|
||||
| `--allow-destructive` | Enable start/stop operations |
|
||||
| `--list-tools` | List available tools and exit |
|
||||
| `--mcp-config` | Print Claude Desktop config JSON |
|
||||
|
||||
## KFL (Kubeshark Filter Language)
|
||||
|
||||
Query traffic using KFL syntax:
|
||||
|
||||
```
|
||||
# HTTP requests to a specific path
|
||||
http and request.path == "/api/users"
|
||||
|
||||
# Errors only
|
||||
response.status >= 400
|
||||
|
||||
# Specific source pod
|
||||
src.pod.name == "frontend-.*"
|
||||
|
||||
# Multiple conditions
|
||||
http and src.namespace == "default" and response.status == 500
|
||||
```
|
||||
|
||||
## Links
|
||||
|
||||
- [Documentation](https://docs.kubeshark.com/en/mcp)
|
||||
- [GitHub](https://github.com/kubeshark/kubeshark)
|
||||
- [Website](https://kubeshark.com)
|
||||
|
||||
## License
|
||||
|
||||
Apache-2.0
|
||||
205
mcp/server.json
Normal file
205
mcp/server.json
Normal file
@@ -0,0 +1,205 @@
|
||||
{
|
||||
"$schema": "https://registry.modelcontextprotocol.io/schemas/server.schema.json",
|
||||
"name": "com.kubeshark/mcp",
|
||||
"displayName": "Kubeshark",
|
||||
"description": "Real-time Kubernetes network traffic visibility and API analysis. Query L7 API transactions (HTTP, gRPC, Redis, Kafka, DNS), L4 network flows, and manage Kubeshark deployments directly from AI assistants.",
|
||||
"icon": "https://kubeshark.com/favicon.ico",
|
||||
"repository": {
|
||||
"url": "https://github.com/kubeshark/kubeshark",
|
||||
"source": "github"
|
||||
},
|
||||
"homepage": "https://kubeshark.com",
|
||||
"license": "Apache-2.0",
|
||||
"version": "52.12.0",
|
||||
"authors": [
|
||||
{
|
||||
"name": "Kubeshark",
|
||||
"url": "https://kubeshark.com"
|
||||
}
|
||||
],
|
||||
"categories": [
|
||||
"kubernetes",
|
||||
"networking",
|
||||
"observability",
|
||||
"debugging",
|
||||
"security"
|
||||
],
|
||||
"tags": [
|
||||
"kubernetes",
|
||||
"network",
|
||||
"traffic",
|
||||
"api",
|
||||
"http",
|
||||
"grpc",
|
||||
"kafka",
|
||||
"redis",
|
||||
"dns",
|
||||
"pcap",
|
||||
"wireshark",
|
||||
"tcpdump",
|
||||
"observability",
|
||||
"debugging",
|
||||
"microservices"
|
||||
],
|
||||
"packages": [
|
||||
{
|
||||
"registryType": "github-releases",
|
||||
"name": "kubeshark/kubeshark",
|
||||
"version": "52.12.0",
|
||||
"runtime": "binary",
|
||||
"platforms": [
|
||||
"darwin-arm64",
|
||||
"darwin-amd64",
|
||||
"linux-arm64",
|
||||
"linux-amd64",
|
||||
"windows-amd64"
|
||||
],
|
||||
"transport": {
|
||||
"type": "stdio",
|
||||
"command": "kubeshark",
|
||||
"args": ["mcp"]
|
||||
}
|
||||
}
|
||||
],
|
||||
"tools": [
|
||||
{
|
||||
"name": "check_kubeshark_status",
|
||||
"description": "Check if Kubeshark is currently running in the cluster. Read-only operation.",
|
||||
"mode": "proxy"
|
||||
},
|
||||
{
|
||||
"name": "start_kubeshark",
|
||||
"description": "Deploy Kubeshark to the Kubernetes cluster. Requires --allow-destructive flag.",
|
||||
"mode": "proxy",
|
||||
"destructive": true
|
||||
},
|
||||
{
|
||||
"name": "stop_kubeshark",
|
||||
"description": "Remove Kubeshark from the Kubernetes cluster. Requires --allow-destructive flag.",
|
||||
"mode": "proxy",
|
||||
"destructive": true
|
||||
},
|
||||
{
|
||||
"name": "list_workloads",
|
||||
"description": "List pods, services, namespaces, and nodes with observed L7 traffic.",
|
||||
"mode": "all"
|
||||
},
|
||||
{
|
||||
"name": "list_api_calls",
|
||||
"description": "Query L7 API transactions (HTTP, gRPC, Redis, Kafka, DNS) with KFL filtering.",
|
||||
"mode": "all"
|
||||
},
|
||||
{
|
||||
"name": "get_api_call",
|
||||
"description": "Get detailed information about a specific API call including headers and body.",
|
||||
"mode": "all"
|
||||
},
|
||||
{
|
||||
"name": "get_api_stats",
|
||||
"description": "Get aggregated API statistics and metrics.",
|
||||
"mode": "all"
|
||||
},
|
||||
{
|
||||
"name": "list_l4_flows",
|
||||
"description": "List L4 (TCP/UDP) network flows with traffic statistics.",
|
||||
"mode": "all"
|
||||
},
|
||||
{
|
||||
"name": "get_l4_flow_summary",
|
||||
"description": "Get L4 connectivity summary including top talkers and cross-namespace traffic.",
|
||||
"mode": "all"
|
||||
},
|
||||
{
|
||||
"name": "list_snapshots",
|
||||
"description": "List all PCAP snapshots.",
|
||||
"mode": "all"
|
||||
},
|
||||
{
|
||||
"name": "create_snapshot",
|
||||
"description": "Create a new PCAP snapshot of captured traffic.",
|
||||
"mode": "all"
|
||||
},
|
||||
{
|
||||
"name": "get_dissection_status",
|
||||
"description": "Check L7 protocol parsing status.",
|
||||
"mode": "all"
|
||||
},
|
||||
{
|
||||
"name": "enable_dissection",
|
||||
"description": "Enable L7 protocol dissection.",
|
||||
"mode": "all"
|
||||
},
|
||||
{
|
||||
"name": "disable_dissection",
|
||||
"description": "Disable L7 protocol dissection.",
|
||||
"mode": "all"
|
||||
}
|
||||
],
|
||||
"prompts": [
|
||||
{
|
||||
"name": "analyze_traffic",
|
||||
"description": "Analyze API traffic patterns and identify issues"
|
||||
},
|
||||
{
|
||||
"name": "find_errors",
|
||||
"description": "Find and summarize API errors and failures"
|
||||
},
|
||||
{
|
||||
"name": "trace_request",
|
||||
"description": "Trace a request path through microservices"
|
||||
},
|
||||
{
|
||||
"name": "show_topology",
|
||||
"description": "Show service communication topology"
|
||||
},
|
||||
{
|
||||
"name": "latency_analysis",
|
||||
"description": "Analyze latency patterns and identify slow endpoints"
|
||||
},
|
||||
{
|
||||
"name": "security_audit",
|
||||
"description": "Audit traffic for security concerns"
|
||||
},
|
||||
{
|
||||
"name": "compare_traffic",
|
||||
"description": "Compare traffic patterns between time periods"
|
||||
},
|
||||
{
|
||||
"name": "debug_connection",
|
||||
"description": "Debug connectivity issues between services"
|
||||
}
|
||||
],
|
||||
"configuration": {
|
||||
"properties": {
|
||||
"url": {
|
||||
"type": "string",
|
||||
"description": "Direct URL to Kubeshark Hub (e.g., https://kubeshark.example.com). When set, connects directly without kubectl/proxy.",
|
||||
"examples": ["https://kubeshark.example.com", "http://localhost:8899"]
|
||||
},
|
||||
"kubeconfig": {
|
||||
"type": "string",
|
||||
"description": "Path to kubeconfig file for proxy mode.",
|
||||
"examples": ["~/.kube/config", "/path/to/.kube/config"]
|
||||
},
|
||||
"allow-destructive": {
|
||||
"type": "boolean",
|
||||
"description": "Enable destructive operations (start_kubeshark, stop_kubeshark). Default: false for safety.",
|
||||
"default": false
|
||||
}
|
||||
}
|
||||
},
|
||||
"modes": {
|
||||
"url": {
|
||||
"description": "Connect directly to an existing Kubeshark deployment via URL. Cluster management tools are disabled.",
|
||||
"args": ["mcp", "--url", "${url}"]
|
||||
},
|
||||
"proxy": {
|
||||
"description": "Connect via kubectl port-forward. Requires kubeconfig access to the cluster.",
|
||||
"args": ["mcp", "--kubeconfig", "${kubeconfig}"]
|
||||
},
|
||||
"proxy-destructive": {
|
||||
"description": "Proxy mode with destructive operations enabled.",
|
||||
"args": ["mcp", "--kubeconfig", "${kubeconfig}", "--allow-destructive"]
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -10,8 +10,8 @@ var (
|
||||
Software = "Kubeshark"
|
||||
Program = "kubeshark"
|
||||
Description = "The API Traffic Analyzer for Kubernetes"
|
||||
Website = "https://kubehq.com"
|
||||
Email = "support@kubehq.io"
|
||||
Website = "https://kubeshark.com"
|
||||
Email = "support@kubeshark.com"
|
||||
Ver = "0.0.0"
|
||||
Branch = "master"
|
||||
GitCommitHash = "" // this var is overridden using ldflags in makefile when building
|
||||
|
||||
Reference in New Issue
Block a user