mirror of
https://github.com/kubeshark/kubeshark.git
synced 2026-03-18 18:42:44 +00:00
Compare commits
90 Commits
v52.7.6
...
add-kubesh
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
710345c628 | ||
|
|
963b3e4ac2 | ||
|
|
b2813e02bd | ||
|
|
707d7351b6 | ||
|
|
23c86be773 | ||
|
|
3f8a067f9b | ||
|
|
33f5310e8e | ||
|
|
5f2f34e826 | ||
|
|
f9a5fbbb78 | ||
|
|
73f8e3585d | ||
|
|
a6daefc567 | ||
|
|
e6a67cc3b7 | ||
|
|
eb7dc42b6e | ||
|
|
d266408377 | ||
|
|
40ae6c626b | ||
|
|
e3283327f9 | ||
|
|
a46f05c4aa | ||
|
|
dbfd17d901 | ||
|
|
95c18b57a4 | ||
|
|
6fd2e4b1b2 | ||
|
|
686c7eba54 | ||
|
|
1ad61798f6 | ||
|
|
318b35e785 | ||
|
|
fecf290a25 | ||
|
|
a01f7bed74 | ||
|
|
633a17a0e0 | ||
|
|
8fac9a5ad5 | ||
|
|
76c5eb6b59 | ||
|
|
482082ba49 | ||
|
|
6ae379cbff | ||
|
|
3f6c62a7e3 | ||
|
|
717433badb | ||
|
|
a973d6916d | ||
|
|
2ccd716a68 | ||
|
|
0bbbb473ea | ||
|
|
d012ea89b6 | ||
|
|
0f1c9c52ea | ||
|
|
f3a0d35485 | ||
|
|
d6631e8565 | ||
|
|
1669680d10 | ||
|
|
19389fcba7 | ||
|
|
1b027153e3 | ||
|
|
77d16e73e8 | ||
|
|
a73c904a9b | ||
|
|
8f3f136be6 | ||
|
|
897aa44965 | ||
|
|
57093e8a25 | ||
|
|
1fd9dffc60 | ||
|
|
3b315eb89f | ||
|
|
6645e23704 | ||
|
|
b7190162ec | ||
|
|
9570b2e317 | ||
|
|
b98113a2b5 | ||
|
|
9724a0c279 | ||
|
|
47ac96a71b | ||
|
|
4dea643781 | ||
|
|
03a53ad6d5 | ||
|
|
a12a5aec19 | ||
|
|
4931116881 | ||
|
|
eb9a82962f | ||
|
|
bd10e035ff | ||
|
|
25832ce596 | ||
|
|
38a13d19e1 | ||
|
|
a7b9e09f2b | ||
|
|
dcb84e0520 | ||
|
|
773fefae21 | ||
|
|
d640128e85 | ||
|
|
7dcacf14f2 | ||
|
|
fabf30c039 | ||
|
|
e55b62491a | ||
|
|
f5167cbb2a | ||
|
|
349d8b07df | ||
|
|
88f43b94d9 | ||
|
|
cf867fe701 | ||
|
|
635fcabecd | ||
|
|
099b79f3ce | ||
|
|
56b936b8b8 | ||
|
|
352484b5f6 | ||
|
|
eee3030410 | ||
|
|
5231546210 | ||
|
|
d845bb18a3 | ||
|
|
abee96a863 | ||
|
|
efe6b0e7b7 | ||
|
|
bedecdb080 | ||
|
|
c2d10f8cfa | ||
|
|
161a525b67 | ||
|
|
33353ef21e | ||
|
|
c751a8a6ad | ||
|
|
8c9473626e | ||
|
|
1d8fa774d3 |
201
.github/workflows/mcp-publish.yml
vendored
Normal file
201
.github/workflows/mcp-publish.yml
vendored
Normal file
@@ -0,0 +1,201 @@
|
||||
name: MCP Registry Publish
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
release_tag:
|
||||
description: 'Release tag to publish (e.g., v52.13.0)'
|
||||
type: string
|
||||
required: true
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
dry_run:
|
||||
description: 'Dry run - generate server.json but skip actual publishing'
|
||||
type: boolean
|
||||
default: true
|
||||
release_tag:
|
||||
description: 'Release tag to publish (e.g., v52.13.0)'
|
||||
type: string
|
||||
required: true
|
||||
|
||||
jobs:
|
||||
mcp-publish:
|
||||
name: Publish to MCP Registry
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
id-token: write # Required for OIDC authentication with MCP Registry
|
||||
contents: read # Required for checkout
|
||||
steps:
|
||||
- name: Check out the repo
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Determine version
|
||||
id: version
|
||||
shell: bash
|
||||
run: |
|
||||
# inputs.release_tag works for both workflow_call and workflow_dispatch
|
||||
VERSION="${{ inputs.release_tag }}"
|
||||
echo "tag=${VERSION}" >> "$GITHUB_OUTPUT"
|
||||
echo "Publishing MCP server for version: ${VERSION}"
|
||||
|
||||
- name: Download SHA256 files from release
|
||||
shell: bash
|
||||
run: |
|
||||
VERSION="${{ steps.version.outputs.tag }}"
|
||||
mkdir -p bin
|
||||
echo "Downloading SHA256 checksums from release ${VERSION}..."
|
||||
for platform in darwin_arm64 darwin_amd64 linux_arm64 linux_amd64 windows_amd64; do
|
||||
url="https://github.com/kubeshark/kubeshark/releases/download/${VERSION}/kubeshark-mcp_${platform}.mcpb.sha256"
|
||||
echo " Fetching ${platform}..."
|
||||
if ! curl -sfL "${url}" -o "bin/kubeshark-mcp_${platform}.mcpb.sha256"; then
|
||||
echo "::warning::Failed to download SHA256 for ${platform}"
|
||||
fi
|
||||
done
|
||||
echo "Downloaded checksums:"
|
||||
ls -la bin/*.sha256 2>/dev/null || echo "No SHA256 files found"
|
||||
|
||||
- name: Generate server.json
|
||||
shell: bash
|
||||
run: |
|
||||
VERSION="${{ steps.version.outputs.tag }}"
|
||||
CLEAN_VERSION="${VERSION#v}"
|
||||
|
||||
# Read SHA256 hashes
|
||||
get_sha256() {
|
||||
local file="bin/kubeshark-mcp_$1.mcpb.sha256"
|
||||
if [ -f "$file" ]; then
|
||||
awk '{print $1}' "$file"
|
||||
else
|
||||
echo "HASH_NOT_FOUND"
|
||||
fi
|
||||
}
|
||||
|
||||
DARWIN_ARM64_SHA256=$(get_sha256 "darwin_arm64")
|
||||
DARWIN_AMD64_SHA256=$(get_sha256 "darwin_amd64")
|
||||
LINUX_ARM64_SHA256=$(get_sha256 "linux_arm64")
|
||||
LINUX_AMD64_SHA256=$(get_sha256 "linux_amd64")
|
||||
WINDOWS_AMD64_SHA256=$(get_sha256 "windows_amd64")
|
||||
|
||||
echo "SHA256 hashes:"
|
||||
echo " darwin_arm64: ${DARWIN_ARM64_SHA256}"
|
||||
echo " darwin_amd64: ${DARWIN_AMD64_SHA256}"
|
||||
echo " linux_arm64: ${LINUX_ARM64_SHA256}"
|
||||
echo " linux_amd64: ${LINUX_AMD64_SHA256}"
|
||||
echo " windows_amd64: ${WINDOWS_AMD64_SHA256}"
|
||||
|
||||
# Generate server.json using jq for proper formatting
|
||||
jq -n \
|
||||
--arg version "$CLEAN_VERSION" \
|
||||
--arg full_version "$VERSION" \
|
||||
--arg darwin_arm64_sha "$DARWIN_ARM64_SHA256" \
|
||||
--arg darwin_amd64_sha "$DARWIN_AMD64_SHA256" \
|
||||
--arg linux_arm64_sha "$LINUX_ARM64_SHA256" \
|
||||
--arg linux_amd64_sha "$LINUX_AMD64_SHA256" \
|
||||
--arg windows_amd64_sha "$WINDOWS_AMD64_SHA256" \
|
||||
'{
|
||||
"$schema": "https://static.modelcontextprotocol.io/schemas/2025-12-11/server.schema.json",
|
||||
"name": "io.github.kubeshark/mcp",
|
||||
"displayName": "Kubeshark",
|
||||
"description": "Real-time Kubernetes network traffic visibility and API analysis for HTTP, gRPC, Redis, Kafka, DNS.",
|
||||
"icon": "https://raw.githubusercontent.com/kubeshark/assets/refs/heads/master/logo/ico/icon.ico",
|
||||
"repository": { "url": "https://github.com/kubeshark/kubeshark", "source": "github" },
|
||||
"homepage": "https://kubeshark.com",
|
||||
"license": "Apache-2.0",
|
||||
"version": $version,
|
||||
"authors": [{ "name": "Kubeshark", "url": "https://kubeshark.com" }],
|
||||
"categories": ["kubernetes", "networking", "observability", "debugging", "security"],
|
||||
"tags": ["kubernetes", "network", "traffic", "api", "http", "grpc", "kafka", "redis", "dns", "pcap", "wireshark", "tcpdump", "observability", "debugging", "microservices"],
|
||||
"packages": [
|
||||
{ "registryType": "mcpb", "identifier": ("https://github.com/kubeshark/kubeshark/releases/download/" + $full_version + "/kubeshark-mcp_darwin_arm64.mcpb"), "fileSha256": $darwin_arm64_sha, "transport": { "type": "stdio" } },
|
||||
{ "registryType": "mcpb", "identifier": ("https://github.com/kubeshark/kubeshark/releases/download/" + $full_version + "/kubeshark-mcp_darwin_amd64.mcpb"), "fileSha256": $darwin_amd64_sha, "transport": { "type": "stdio" } },
|
||||
{ "registryType": "mcpb", "identifier": ("https://github.com/kubeshark/kubeshark/releases/download/" + $full_version + "/kubeshark-mcp_linux_arm64.mcpb"), "fileSha256": $linux_arm64_sha, "transport": { "type": "stdio" } },
|
||||
{ "registryType": "mcpb", "identifier": ("https://github.com/kubeshark/kubeshark/releases/download/" + $full_version + "/kubeshark-mcp_linux_amd64.mcpb"), "fileSha256": $linux_amd64_sha, "transport": { "type": "stdio" } },
|
||||
{ "registryType": "mcpb", "identifier": ("https://github.com/kubeshark/kubeshark/releases/download/" + $full_version + "/kubeshark-mcp_windows_amd64.mcpb"), "fileSha256": $windows_amd64_sha, "transport": { "type": "stdio" } }
|
||||
],
|
||||
"tools": [
|
||||
{ "name": "check_kubeshark_status", "description": "Check if Kubeshark is currently running in the cluster.", "mode": "proxy" },
|
||||
{ "name": "start_kubeshark", "description": "Deploy Kubeshark to the Kubernetes cluster. Requires --allow-destructive flag.", "mode": "proxy", "destructive": true },
|
||||
{ "name": "stop_kubeshark", "description": "Remove Kubeshark from the Kubernetes cluster. Requires --allow-destructive flag.", "mode": "proxy", "destructive": true },
|
||||
{ "name": "list_workloads", "description": "List pods, services, namespaces, and nodes with observed L7 traffic.", "mode": "all" },
|
||||
{ "name": "list_api_calls", "description": "Query L7 API transactions (HTTP, gRPC, Redis, Kafka, DNS) with KFL filtering.", "mode": "all" },
|
||||
{ "name": "get_api_call", "description": "Get detailed information about a specific API call including headers and body.", "mode": "all" },
|
||||
{ "name": "get_api_stats", "description": "Get aggregated API statistics and metrics.", "mode": "all" },
|
||||
{ "name": "list_l4_flows", "description": "List L4 (TCP/UDP) network flows with traffic statistics.", "mode": "all" },
|
||||
{ "name": "get_l4_flow_summary", "description": "Get L4 connectivity summary including top talkers and cross-namespace traffic.", "mode": "all" },
|
||||
{ "name": "list_snapshots", "description": "List all PCAP snapshots.", "mode": "all" },
|
||||
{ "name": "create_snapshot", "description": "Create a new PCAP snapshot of captured traffic.", "mode": "all" },
|
||||
{ "name": "get_dissection_status", "description": "Check L7 protocol parsing status.", "mode": "all" },
|
||||
{ "name": "enable_dissection", "description": "Enable L7 protocol dissection.", "mode": "all" },
|
||||
{ "name": "disable_dissection", "description": "Disable L7 protocol dissection.", "mode": "all" }
|
||||
],
|
||||
"prompts": [
|
||||
{ "name": "analyze_traffic", "description": "Analyze API traffic patterns and identify issues" },
|
||||
{ "name": "find_errors", "description": "Find and summarize API errors and failures" },
|
||||
{ "name": "trace_request", "description": "Trace a request path through microservices" },
|
||||
{ "name": "show_topology", "description": "Show service communication topology" },
|
||||
{ "name": "latency_analysis", "description": "Analyze latency patterns and identify slow endpoints" },
|
||||
{ "name": "security_audit", "description": "Audit traffic for security concerns" },
|
||||
{ "name": "compare_traffic", "description": "Compare traffic patterns between time periods" },
|
||||
{ "name": "debug_connection", "description": "Debug connectivity issues between services" }
|
||||
],
|
||||
"configuration": {
|
||||
"properties": {
|
||||
"url": { "type": "string", "description": "Direct URL to Kubeshark Hub (e.g., https://kubeshark.example.com). When set, connects directly without kubectl/proxy.", "examples": ["https://kubeshark.example.com", "http://localhost:8899"] },
|
||||
"kubeconfig": { "type": "string", "description": "Path to kubeconfig file for proxy mode.", "examples": ["~/.kube/config", "/path/to/.kube/config"] },
|
||||
"allow-destructive": { "type": "boolean", "description": "Enable destructive operations (start_kubeshark, stop_kubeshark). Default: false for safety.", "default": false }
|
||||
}
|
||||
},
|
||||
"modes": {
|
||||
"url": { "description": "Connect directly to an existing Kubeshark deployment via URL. Cluster management tools are disabled.", "args": ["mcp", "--url", "${url}"] },
|
||||
"proxy": { "description": "Connect via kubectl port-forward. Requires kubeconfig access to the cluster.", "args": ["mcp", "--kubeconfig", "${kubeconfig}"] },
|
||||
"proxy-destructive": { "description": "Proxy mode with destructive operations enabled.", "args": ["mcp", "--kubeconfig", "${kubeconfig}", "--allow-destructive"] }
|
||||
}
|
||||
}' > mcp/server.json
|
||||
|
||||
echo ""
|
||||
echo "Generated server.json:"
|
||||
cat mcp/server.json
|
||||
|
||||
- name: Install mcp-publisher
|
||||
shell: bash
|
||||
run: |
|
||||
echo "Installing mcp-publisher..."
|
||||
curl -sfL "https://github.com/modelcontextprotocol/registry/releases/latest/download/mcp-publisher_linux_amd64.tar.gz" | tar xz
|
||||
chmod +x mcp-publisher
|
||||
sudo mv mcp-publisher /usr/local/bin/
|
||||
echo "mcp-publisher installed successfully"
|
||||
|
||||
- name: Login to MCP Registry
|
||||
if: github.event_name != 'workflow_dispatch' || github.event.inputs.dry_run != 'true'
|
||||
shell: bash
|
||||
run: mcp-publisher login github-oidc
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Publish to MCP Registry
|
||||
if: github.event_name != 'workflow_dispatch' || github.event.inputs.dry_run != 'true'
|
||||
shell: bash
|
||||
run: |
|
||||
cd mcp
|
||||
echo "Publishing to MCP Registry..."
|
||||
if ! mcp-publisher publish; then
|
||||
echo "::error::Failed to publish to MCP Registry"
|
||||
exit 1
|
||||
fi
|
||||
echo "Successfully published to MCP Registry"
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Dry-run summary
|
||||
if: github.event_name == 'workflow_dispatch' && github.event.inputs.dry_run == 'true'
|
||||
shell: bash
|
||||
run: |
|
||||
echo "=============================================="
|
||||
echo "DRY RUN - Would publish the following server.json"
|
||||
echo "=============================================="
|
||||
cat mcp/server.json
|
||||
echo ""
|
||||
echo "=============================================="
|
||||
echo "SHA256 checksums downloaded:"
|
||||
echo "=============================================="
|
||||
cat bin/*.sha256 2>/dev/null || echo "No SHA256 files found"
|
||||
24
.github/workflows/release-tag.yml
vendored
Normal file
24
.github/workflows/release-tag.yml
vendored
Normal file
@@ -0,0 +1,24 @@
|
||||
name: Auto-tag release
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
types: [closed]
|
||||
branches: [master]
|
||||
|
||||
jobs:
|
||||
tag:
|
||||
if: github.event.pull_request.merged == true && startsWith(github.event.pull_request.head.ref, 'release/v')
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: write
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Create and push tag
|
||||
run: |
|
||||
VERSION="${GITHUB_HEAD_REF#release/}"
|
||||
echo "Creating tag $VERSION on master"
|
||||
git tag "$VERSION"
|
||||
git push origin "$VERSION"
|
||||
34
.github/workflows/release.yml
vendored
34
.github/workflows/release.yml
vendored
@@ -43,6 +43,23 @@ jobs:
|
||||
run: |
|
||||
echo '${{ steps.version.outputs.tag }}' >> bin/version.txt
|
||||
|
||||
- name: Create MCP Registry artifacts
|
||||
shell: bash
|
||||
run: |
|
||||
cd bin
|
||||
# Create .mcpb copies for MCP Registry (URL must contain "mcp")
|
||||
for f in kubeshark_linux_amd64 kubeshark_linux_arm64 kubeshark_darwin_amd64 kubeshark_darwin_arm64; do
|
||||
if [ -f "$f" ]; then
|
||||
cp "$f" "${f/kubeshark_/kubeshark-mcp_}.mcpb"
|
||||
shasum -a 256 "${f/kubeshark_/kubeshark-mcp_}.mcpb" > "${f/kubeshark_/kubeshark-mcp_}.mcpb.sha256"
|
||||
fi
|
||||
done
|
||||
# Handle Windows executable
|
||||
if [ -f "kubeshark.exe" ]; then
|
||||
cp kubeshark.exe kubeshark-mcp_windows_amd64.mcpb
|
||||
shasum -a 256 kubeshark-mcp_windows_amd64.mcpb > kubeshark-mcp_windows_amd64.mcpb.sha256
|
||||
fi
|
||||
|
||||
- name: Release
|
||||
uses: ncipollo/release-action@v1
|
||||
with:
|
||||
@@ -52,16 +69,9 @@ jobs:
|
||||
prerelease: false
|
||||
bodyFile: 'bin/README.md'
|
||||
|
||||
brew:
|
||||
name: Publish a new Homebrew formulae
|
||||
mcp-publish:
|
||||
name: Publish to MCP Registry
|
||||
needs: [release]
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Bump core homebrew formula
|
||||
uses: mislav/bump-homebrew-formula-action@v3
|
||||
with:
|
||||
# A PR will be sent to github.com/Homebrew/homebrew-core to update this formula:
|
||||
formula-name: kubeshark
|
||||
push-to: kubeshark/homebrew-core
|
||||
env:
|
||||
COMMITTER_TOKEN: ${{ secrets.COMMITTER_TOKEN }}
|
||||
uses: ./.github/workflows/mcp-publish.yml
|
||||
with:
|
||||
release_tag: ${{ needs.release.outputs.version }}
|
||||
|
||||
51
.github/workflows/test.yml
vendored
51
.github/workflows/test.yml
vendored
@@ -15,7 +15,7 @@ jobs:
|
||||
timeout-minutes: 20
|
||||
steps:
|
||||
- name: Check out code into the Go module directory
|
||||
uses: actions/checkout@v3
|
||||
uses: actions/checkout@v5
|
||||
with:
|
||||
fetch-depth: 2
|
||||
|
||||
@@ -29,3 +29,52 @@ jobs:
|
||||
|
||||
- name: Upload coverage to Codecov
|
||||
uses: codecov/codecov-action@v3
|
||||
|
||||
helm-tests:
|
||||
name: Helm Chart Tests
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 10
|
||||
steps:
|
||||
- name: Check out code
|
||||
uses: actions/checkout@v5
|
||||
|
||||
- name: Set up Helm
|
||||
uses: azure/setup-helm@v4
|
||||
|
||||
- name: Helm lint (default values)
|
||||
run: helm lint ./helm-chart
|
||||
|
||||
- name: Helm lint (S3 values)
|
||||
run: helm lint ./helm-chart -f ./helm-chart/tests/fixtures/values-s3.yaml
|
||||
|
||||
- name: Helm lint (Azure Blob values)
|
||||
run: helm lint ./helm-chart -f ./helm-chart/tests/fixtures/values-azblob.yaml
|
||||
|
||||
- name: Helm lint (GCS values)
|
||||
run: helm lint ./helm-chart -f ./helm-chart/tests/fixtures/values-gcs.yaml
|
||||
|
||||
- name: Helm lint (cloud refs values)
|
||||
run: helm lint ./helm-chart -f ./helm-chart/tests/fixtures/values-cloud-refs.yaml
|
||||
|
||||
- name: Install helm-unittest plugin
|
||||
run: helm plugin install https://github.com/helm-unittest/helm-unittest --verify=false
|
||||
|
||||
- name: Run helm unit tests
|
||||
run: helm unittest ./helm-chart
|
||||
|
||||
- name: Install kubeconform
|
||||
run: |
|
||||
curl -sL https://github.com/yannh/kubeconform/releases/latest/download/kubeconform-linux-amd64.tar.gz | tar xz
|
||||
sudo mv kubeconform /usr/local/bin/
|
||||
|
||||
- name: Validate default template
|
||||
run: helm template kubeshark ./helm-chart | kubeconform -strict -kubernetes-version 1.35.0 -summary
|
||||
|
||||
- name: Validate S3 template
|
||||
run: helm template kubeshark ./helm-chart -f ./helm-chart/tests/fixtures/values-s3.yaml | kubeconform -strict -kubernetes-version 1.35.0 -summary
|
||||
|
||||
- name: Validate Azure Blob template
|
||||
run: helm template kubeshark ./helm-chart -f ./helm-chart/tests/fixtures/values-azblob.yaml | kubeconform -strict -kubernetes-version 1.35.0 -summary
|
||||
|
||||
- name: Validate GCS template
|
||||
run: helm template kubeshark ./helm-chart -f ./helm-chart/tests/fixtures/values-gcs.yaml | kubeconform -strict -kubernetes-version 1.35.0 -summary
|
||||
|
||||
5
.gitignore
vendored
5
.gitignore
vendored
@@ -63,4 +63,7 @@ bin
|
||||
scripts/
|
||||
|
||||
# CWD config YAML
|
||||
kubeshark.yaml
|
||||
kubeshark.yaml
|
||||
|
||||
# Claude Code
|
||||
CLAUDE.md
|
||||
102
CLAUDE.md
Normal file
102
CLAUDE.md
Normal file
@@ -0,0 +1,102 @@
|
||||
Do not include any Claude/AI attribution (Co-Authored-By lines, badges, etc.) in commit messages or pull request descriptions.
|
||||
|
||||
## Skills
|
||||
|
||||
Kubeshark is building an ecosystem of open-source AI skills that work with the Kubeshark MCP.
|
||||
Skills live in the `skills/` directory at the root of this repo.
|
||||
|
||||
### What is a skill?
|
||||
|
||||
A skill is a SKILL.md file (with optional reference docs) that teaches an AI agent a domain-specific
|
||||
methodology. The Kubeshark MCP provides the tools (snapshot creation, API call queries, PCAP export,
|
||||
etc.) — a skill tells the agent *how* to use those tools for a specific job.
|
||||
|
||||
### Skill structure
|
||||
|
||||
```
|
||||
skills/
|
||||
└── <skill-name>/
|
||||
├── SKILL.md # Required. YAML frontmatter + markdown instructions.
|
||||
└── references/ # Optional. Reference docs loaded on demand.
|
||||
└── *.md
|
||||
```
|
||||
|
||||
### SKILL.md format
|
||||
|
||||
Every SKILL.md starts with YAML frontmatter:
|
||||
|
||||
```yaml
|
||||
---
|
||||
name: skill-name
|
||||
description: >
|
||||
When to trigger this skill. Be specific about user intents, keywords, and contexts.
|
||||
The description is the primary mechanism for AI agents to decide whether to load the skill.
|
||||
---
|
||||
```
|
||||
|
||||
The body is markdown instructions that define the methodology: prerequisites, workflows,
|
||||
tool usage patterns, output guidelines, and reference pointers.
|
||||
|
||||
### Guidelines for writing skills
|
||||
|
||||
- Keep SKILL.md under 500 lines. Put detailed references in `references/` with clear pointers.
|
||||
- Use imperative tone ("Check data boundaries", "Create a snapshot").
|
||||
- Reference Kubeshark MCP tools by exact name (e.g., `create_snapshot`, `list_api_calls`).
|
||||
- Include realistic example tool responses so the agent knows what to expect.
|
||||
- Explain *why* things matter, not just *what* to do — the agent is smart and benefits from context.
|
||||
- Include a Setup Reference section with MCP configuration for Claude Code and Claude Desktop.
|
||||
- The description frontmatter should be "pushy" — include trigger keywords generously so the skill
|
||||
activates when needed. Better to over-trigger than under-trigger.
|
||||
|
||||
### Kubeshark MCP tools available to skills
|
||||
|
||||
**Cluster management**: `check_kubeshark_status`, `start_kubeshark`, `stop_kubeshark`
|
||||
**Inventory**: `list_workloads`
|
||||
**L7 API**: `list_api_calls`, `get_api_call`, `get_api_stats`
|
||||
**L4 flows**: `list_l4_flows`, `get_l4_flow_summary`
|
||||
**Snapshots**: `get_data_boundaries`, `create_snapshot`, `get_snapshot`, `list_snapshots`, `start_snapshot_dissection`
|
||||
**PCAP**: `export_snapshot_pcap`, `resolve_workload`
|
||||
**Cloud storage**: `get_cloud_storage_status`, `upload_snapshot_to_cloud`, `download_snapshot_from_cloud`
|
||||
**Dissection**: `get_dissection_status`, `enable_dissection`, `disable_dissection`
|
||||
|
||||
### KFL (Kubeshark Filter Language)
|
||||
|
||||
KFL2 is built on CEL (Common Expression Language). Skills that involve traffic filtering should
|
||||
reference KFL. Key concepts:
|
||||
|
||||
- Display filter (post-capture), not capture filter
|
||||
- Fields: `src.ip`, `dst.ip`, `src.pod.name`, `dst.pod.namespace`, `src.service.name`, etc.
|
||||
- Protocol booleans: `http`, `dns`, `redis`, `kafka`, `tls`, `grpc`, `amqp`, `ws`
|
||||
- HTTP fields: `url`, `method`, `status_code`, `path`, `request.headers`, `response.headers`,
|
||||
`request_body_size`, `response_body_size`, `elapsed_time` (microseconds)
|
||||
- DNS fields: `dns_questions`, `dns_answers`, `dns_question_types`
|
||||
- Operators: `==`, `!=`, `<`, `>`, `&&`, `||`, `in`
|
||||
- String functions: `.contains()`, `.startsWith()`, `.endsWith()`, `.matches()` (regex)
|
||||
- Collection: `size()`, `[index]`, `[key]`
|
||||
- Full reference: https://docs.kubeshark.com/en/v2/kfl2
|
||||
|
||||
### Key Kubeshark concepts for skill authors
|
||||
|
||||
- **eBPF capture**: Kernel-level, no sidecars/proxies. Decrypts TLS without private keys.
|
||||
- **Protocols**: HTTP, gRPC, GraphQL, WebSocket, Kafka, Redis, AMQP, DNS, and more.
|
||||
- **Raw capture**: FIFO buffer per node. Must be enabled for retrospective analysis.
|
||||
- **Snapshots**: Immutable freeze of traffic in a time window. Includes raw capture files,
|
||||
K8s pod events, and eBPF cgroup events.
|
||||
- **Dissection**: The "indexing" step. Reconstructs raw packets into structured L7 API calls.
|
||||
Think of it like a search engine indexing web pages — without dissection you have PCAPs,
|
||||
with dissection you have a queryable database. Kubeshark is the search engine for network traffic.
|
||||
- **Cloud storage**: Snapshots can be uploaded to S3/GCS/Azure and downloaded to any cluster.
|
||||
A production snapshot can be analyzed on a local KinD cluster.
|
||||
|
||||
### Current skills
|
||||
|
||||
- `skills/network-rca/` — Network Root Cause Analysis. Retrospective traffic analysis via
|
||||
snapshots, dissection, KFL queries, PCAP extraction, trend comparison.
|
||||
- `skills/kfl/` — KFL2 (Kubeshark Filter Language) expert. Complete reference for writing,
|
||||
debugging, and optimizing CEL-based traffic filters across all supported protocols.
|
||||
|
||||
### Planned skills (not yet created)
|
||||
|
||||
- `skills/api-security/` — OWASP API Top 10 assessment against live or snapshot traffic.
|
||||
- `skills/incident-response/` — 7-phase forensic incident investigation methodology.
|
||||
- `skills/network-engineering/` — Real-time traffic analysis, latency debugging, dependency mapping.
|
||||
@@ -7,7 +7,7 @@ Please read and follow the guidelines below.
|
||||
|
||||
## Communication
|
||||
|
||||
* Before starting work on a major feature, please reach out to us via [GitHub](https://github.com/kubeshark/kubeshark), [Discord](https://discord.gg/WkvRGMUcx7), [Slack](https://join.slack.com/t/kubeshark/shared_invite/zt-1k3sybpq9-uAhFkuPJiJftKniqrGHGhg), [email](mailto:info@kubeshark.co), etc. We will make sure no one else is already working on it. A _major feature_ is defined as any change that is > 100 LOC altered (not including tests), or changes any user-facing behavior
|
||||
* Before starting work on a major feature, please reach out to us via [GitHub](https://github.com/kubeshark/kubeshark), [Discord](https://discord.gg/WkvRGMUcx7), [Slack](https://join.slack.com/t/kubeshark/shared_invite/zt-1k3sybpq9-uAhFkuPJiJftKniqrGHGhg), [email](mailto:info@kubeshark.com), etc. We will make sure no one else is already working on it. A _major feature_ is defined as any change that is > 100 LOC altered (not including tests), or changes any user-facing behavior
|
||||
* Small patches and bug fixes don't need prior communication.
|
||||
|
||||
## Contribution Requirements
|
||||
|
||||
153
Makefile
153
Makefile
@@ -74,6 +74,79 @@ clean: ## Clean all build artifacts.
|
||||
test: ## Run cli tests.
|
||||
@go test ./... -coverpkg=./... -race -coverprofile=coverage.out -covermode=atomic
|
||||
|
||||
test-integration: ## Run integration tests (requires Kubernetes cluster).
|
||||
@echo "Running integration tests..."
|
||||
@LOG_FILE=$$(mktemp /tmp/integration-test.XXXXXX.log); \
|
||||
go test -tags=integration -timeout $${INTEGRATION_TIMEOUT:-5m} -v ./integration/... 2>&1 | tee $$LOG_FILE; \
|
||||
status=$$?; \
|
||||
echo ""; \
|
||||
echo "========================================"; \
|
||||
echo " INTEGRATION TEST SUMMARY"; \
|
||||
echo "========================================"; \
|
||||
grep -E "^(--- PASS|--- FAIL|--- SKIP)" $$LOG_FILE || true; \
|
||||
echo "----------------------------------------"; \
|
||||
pass=$$(grep -c "^--- PASS" $$LOG_FILE 2>/dev/null || true); \
|
||||
fail=$$(grep -c "^--- FAIL" $$LOG_FILE 2>/dev/null || true); \
|
||||
skip=$$(grep -c "^--- SKIP" $$LOG_FILE 2>/dev/null || true); \
|
||||
echo "PASSED: $${pass:-0}"; \
|
||||
echo "FAILED: $${fail:-0}"; \
|
||||
echo "SKIPPED: $${skip:-0}"; \
|
||||
echo "========================================"; \
|
||||
rm -f $$LOG_FILE; \
|
||||
exit $$status
|
||||
|
||||
test-integration-mcp: ## Run only MCP integration tests.
|
||||
@echo "Running MCP integration tests..."
|
||||
@LOG_FILE=$$(mktemp /tmp/integration-test.XXXXXX.log); \
|
||||
go test -tags=integration -timeout $${INTEGRATION_TIMEOUT:-5m} -v ./integration/ -run "MCP" 2>&1 | tee $$LOG_FILE; \
|
||||
status=$$?; \
|
||||
echo ""; \
|
||||
echo "========================================"; \
|
||||
echo " INTEGRATION TEST SUMMARY"; \
|
||||
echo "========================================"; \
|
||||
grep -E "^(--- PASS|--- FAIL|--- SKIP)" $$LOG_FILE || true; \
|
||||
echo "----------------------------------------"; \
|
||||
pass=$$(grep -c "^--- PASS" $$LOG_FILE 2>/dev/null || true); \
|
||||
fail=$$(grep -c "^--- FAIL" $$LOG_FILE 2>/dev/null || true); \
|
||||
skip=$$(grep -c "^--- SKIP" $$LOG_FILE 2>/dev/null || true); \
|
||||
echo "PASSED: $${pass:-0}"; \
|
||||
echo "FAILED: $${fail:-0}"; \
|
||||
echo "SKIPPED: $${skip:-0}"; \
|
||||
echo "========================================"; \
|
||||
rm -f $$LOG_FILE; \
|
||||
exit $$status
|
||||
|
||||
test-integration-short: ## Run quick integration tests (skips long-running tests).
|
||||
@echo "Running quick integration tests..."
|
||||
@LOG_FILE=$$(mktemp /tmp/integration-test.XXXXXX.log); \
|
||||
go test -tags=integration -timeout $${INTEGRATION_TIMEOUT:-2m} -short -v ./integration/... 2>&1 | tee $$LOG_FILE; \
|
||||
status=$$?; \
|
||||
echo ""; \
|
||||
echo "========================================"; \
|
||||
echo " INTEGRATION TEST SUMMARY"; \
|
||||
echo "========================================"; \
|
||||
grep -E "^(--- PASS|--- FAIL|--- SKIP)" $$LOG_FILE || true; \
|
||||
echo "----------------------------------------"; \
|
||||
pass=$$(grep -c "^--- PASS" $$LOG_FILE 2>/dev/null || true); \
|
||||
fail=$$(grep -c "^--- FAIL" $$LOG_FILE 2>/dev/null || true); \
|
||||
skip=$$(grep -c "^--- SKIP" $$LOG_FILE 2>/dev/null || true); \
|
||||
echo "PASSED: $${pass:-0}"; \
|
||||
echo "FAILED: $${fail:-0}"; \
|
||||
echo "SKIPPED: $${skip:-0}"; \
|
||||
echo "========================================"; \
|
||||
rm -f $$LOG_FILE; \
|
||||
exit $$status
|
||||
|
||||
helm-test: ## Run Helm lint and unit tests.
|
||||
helm lint ./helm-chart
|
||||
helm unittest ./helm-chart
|
||||
|
||||
helm-test-full: helm-test ## Run Helm tests with kubeconform schema validation.
|
||||
helm template kubeshark ./helm-chart | kubeconform -strict -kubernetes-version 1.35.0 -summary
|
||||
helm template kubeshark ./helm-chart -f ./helm-chart/tests/fixtures/values-s3.yaml | kubeconform -strict -kubernetes-version 1.35.0 -summary
|
||||
helm template kubeshark ./helm-chart -f ./helm-chart/tests/fixtures/values-azblob.yaml | kubeconform -strict -kubernetes-version 1.35.0 -summary
|
||||
helm template kubeshark ./helm-chart -f ./helm-chart/tests/fixtures/values-gcs.yaml | kubeconform -strict -kubernetes-version 1.35.0 -summary
|
||||
|
||||
lint: ## Lint the source code.
|
||||
golangci-lint run
|
||||
|
||||
@@ -84,9 +157,10 @@ kubectl-view-kubeshark-resources: ## This command outputs all Kubernetes resourc
|
||||
./kubectl.sh view-kubeshark-resources
|
||||
|
||||
generate-helm-values: ## Generate the Helm values from config.yaml
|
||||
mv ~/.kubeshark/config.yaml ~/.kubeshark/config.yaml.old; bin/kubeshark__ config>helm-chart/values.yaml;mv ~/.kubeshark/config.yaml.old ~/.kubeshark/config.yaml
|
||||
sed -i 's/^license:.*/license: ""/' helm-chart/values.yaml && sed -i '1i # find a detailed description here: https://github.com/kubeshark/kubeshark/blob/master/helm-chart/README.md' helm-chart/values.yaml
|
||||
sed -i "s/^ tag:.*/ tag: \"$(shell echo $(VERSION) | sed -E 's/^v?([0-9]+\.[0-9]+)\..*/v\1/')\"/" helm-chart/values.yaml
|
||||
# [ -f ~/.kubeshark/config.yaml ] && mv ~/.kubeshark/config.yaml ~/.kubeshark/config.yaml.old
|
||||
bin/kubeshark__ config>helm-chart/values.yaml
|
||||
# [ -f ~/.kubeshark/config.yaml.old ] && mv ~/.kubeshark/config.yaml.old ~/.kubeshark/config.yaml
|
||||
# sed -i 's/^license:.*/license: ""/' helm-chart/values.yaml && sed -i '1i # find a detailed description here: https://github.com/kubeshark/kubeshark/blob/master/helm-chart/README.md' helm-chart/values.yaml
|
||||
|
||||
generate-manifests: ## Generate the manifests from the Helm chart using default configuration
|
||||
helm template kubeshark -n default ./helm-chart > ./manifests/complete.yaml
|
||||
@@ -178,25 +252,78 @@ proxy:
|
||||
port-forward:
|
||||
kubectl port-forward $$(kubectl get pods | awk '$$1 ~ /^$(POD_PREFIX)/' | awk 'END {print $$1}') $(SRC_PORT):$(DST_PORT)
|
||||
|
||||
release:
|
||||
release: ## Print release workflow instructions.
|
||||
@echo "Release workflow (2 steps):"
|
||||
@echo ""
|
||||
@echo " 1. make release-pr VERSION=x.y.z"
|
||||
@echo " Tags sibling repos, bumps version, creates PRs"
|
||||
@echo " (kubeshark + kubeshark.github.io helm chart)."
|
||||
@echo " Review and merge both PRs manually."
|
||||
@echo ""
|
||||
@echo " 2. (automatic) Tag is created when release PR merges."
|
||||
@echo " Fallback: make release-tag VERSION=x.y.z"
|
||||
|
||||
release-pr: ## Step 1: Tag sibling repos, bump version, create release PR.
|
||||
@cd ../worker && git checkout master && git pull && git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags
|
||||
@cd ../tracer && git checkout master && git pull && git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags
|
||||
@cd ../hub && git checkout master && git pull && git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags
|
||||
@cd ../front && git checkout master && git pull && git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags
|
||||
@cd ../kubeshark && git checkout master && git pull && sed -i "s/^version:.*/version: \"$(shell echo $(VERSION) | sed -E 's/^([0-9]+\.[0-9]+\.[0-9]+)\..*/\1/')\"/" helm-chart/Chart.yaml && make
|
||||
@cd ../kubeshark && git checkout master && git pull
|
||||
@sed -i "s/^version:.*/version: \"$(shell echo $(VERSION) | sed -E 's/^([0-9]+\.[0-9]+\.[0-9]+)\..*/\1/')\"/" helm-chart/Chart.yaml
|
||||
@$(MAKE) build VER=$(VERSION)
|
||||
@if [ "$(shell uname)" = "Darwin" ]; then \
|
||||
codesign --sign - --force --preserve-metadata=entitlements,requirements,flags,runtime ./bin/kubeshark__; \
|
||||
fi
|
||||
@make generate-helm-values && make generate-manifests
|
||||
@git add -A . && git commit -m ":bookmark: Bump the Helm chart version to $(VERSION)" && git push
|
||||
@git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags
|
||||
@cd helm-chart && rm -r ../../kubeshark.github.io/charts/chart/* && cp -r . ../../kubeshark.github.io/charts/chart
|
||||
@cd ../../kubeshark.github.io/ && git add -A . && git commit -m ":sparkles: Update the Helm chart" && git push
|
||||
@$(MAKE) generate-helm-values && $(MAKE) generate-manifests
|
||||
@git checkout -b release/v$(VERSION)
|
||||
@git add -A .
|
||||
@git commit -m ":bookmark: Bump the Helm chart version to $(VERSION)"
|
||||
@git push -u origin release/v$(VERSION)
|
||||
@gh pr create --title ":bookmark: Release v$(VERSION)" \
|
||||
--body "Automated release PR for v$(VERSION)." \
|
||||
--base master \
|
||||
--reviewer corest
|
||||
@rm -rf ../kubeshark.github.io/charts/chart
|
||||
@mkdir ../kubeshark.github.io/charts/chart
|
||||
@cp -r helm-chart/ ../kubeshark.github.io/charts/chart/
|
||||
@cd ../kubeshark.github.io && git checkout master && git pull \
|
||||
&& git checkout -b helm-v$(VERSION) \
|
||||
&& git add -A . \
|
||||
&& git commit -m ":sparkles: Update the Helm chart to v$(VERSION)" \
|
||||
&& git push -u origin helm-v$(VERSION) \
|
||||
&& gh pr create --title ":sparkles: Helm chart v$(VERSION)" \
|
||||
--body "Update Helm chart for release v$(VERSION)." \
|
||||
--base master \
|
||||
--reviewer corest
|
||||
@cd ../kubeshark
|
||||
@echo ""
|
||||
@echo "Release PRs created:"
|
||||
@echo " - kubeshark: Review and merge the release PR."
|
||||
@echo " - kubeshark.github.io: Review and merge the helm chart PR."
|
||||
@echo "Tag will be created automatically, or run: make release-tag VERSION=$(VERSION)"
|
||||
|
||||
release-tag: ## Step 2 (fallback): Tag master after release PR is merged.
|
||||
@echo "Verifying release PR was merged..."
|
||||
@if ! gh pr list --state merged --head release/v$(VERSION) --json number --jq '.[0].number' | grep -q .; then \
|
||||
echo "Error: No merged PR found for release/v$(VERSION). Merge the PR first."; \
|
||||
exit 1; \
|
||||
fi
|
||||
@git checkout master && git pull
|
||||
@git tag -d v$(VERSION) 2>/dev/null; git tag v$(VERSION) && git push origin --tags
|
||||
@echo ""
|
||||
@echo "Tagged v$(VERSION) on master. GitHub Actions will build the release."
|
||||
|
||||
release-dry-run:
|
||||
@cd ../kubeshark && sed -i "s/^version:.*/version: \"$(shell echo $(VERSION) | sed -E 's/^([0-9]+\.[0-9]+\.[0-9]+)\..*/\1/')\"/" helm-chart/Chart.yaml && make && make generate-helm-values && make generate-manifests
|
||||
@cd helm-chart && rm -r ../../kubeshark.github.io/charts/chart/* && cp -r . ../../kubeshark.github.io/charts/chart
|
||||
@cd ../worker && git checkout master && git pull
|
||||
# @cd ../tracer && git checkout master && git pull
|
||||
@cd ../hub && git checkout master && git pull
|
||||
@cd ../front && git checkout master && git pull
|
||||
@cd ../kubeshark && sed -i "s/^version:.*/version: \"$(shell echo $(VERSION) | sed -E 's/^([0-9]+\.[0-9]+\.[0-9]+)\..*/\1/')\"/" helm-chart/Chart.yaml && make
|
||||
# @if [ "$(shell uname)" = "Darwin" ]; then \
|
||||
# codesign --sign - --force --preserve-metadata=entitlements,requirements,flags,runtime ./bin/kubeshark__; \
|
||||
# fi
|
||||
@make generate-helm-values && make generate-manifests
|
||||
@rm -rf ../kubeshark.github.io/charts/chart && mkdir ../kubeshark.github.io/charts/chart && cp -r helm-chart/ ../kubeshark.github.io/charts/chart/
|
||||
@cd ../kubeshark.github.io/
|
||||
@cd ../kubeshark
|
||||
|
||||
branch:
|
||||
|
||||
162
README.md
162
README.md
@@ -1,96 +1,132 @@
|
||||
<p align="center">
|
||||
<img src="https://raw.githubusercontent.com/kubeshark/assets/master/svg/kubeshark-logo.svg" alt="Kubeshark: Traffic analyzer for Kubernetes." height="128px"/>
|
||||
<img src="https://raw.githubusercontent.com/kubeshark/assets/master/svg/kubeshark-logo.svg" alt="Kubeshark" height="120px"/>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://github.com/kubeshark/kubeshark/releases/latest">
|
||||
<img alt="GitHub Latest Release" src="https://img.shields.io/github/v/release/kubeshark/kubeshark?logo=GitHub&style=flat-square">
|
||||
</a>
|
||||
<a href="https://hub.docker.com/r/kubeshark/worker">
|
||||
<img alt="Docker pulls" src="https://img.shields.io/docker/pulls/kubeshark/worker?color=%23099cec&logo=Docker&style=flat-square">
|
||||
</a>
|
||||
<a href="https://hub.docker.com/r/kubeshark/worker">
|
||||
<img alt="Image size" src="https://img.shields.io/docker/image-size/kubeshark/kubeshark/latest?logo=Docker&style=flat-square">
|
||||
</a>
|
||||
<a href="https://discord.gg/WkvRGMUcx7">
|
||||
<img alt="Discord" src="https://img.shields.io/discord/1042559155224973352?logo=Discord&style=flat-square&label=discord">
|
||||
</a>
|
||||
<a href="https://join.slack.com/t/kubeshark/shared_invite/zt-1m90td3n7-VHxN_~V5kVp80SfQW3SfpA">
|
||||
<img alt="Slack" src="https://img.shields.io/badge/slack-join_chat-green?logo=Slack&style=flat-square&label=slack">
|
||||
</a>
|
||||
<a href="https://github.com/kubeshark/kubeshark/releases/latest"><img alt="Release" src="https://img.shields.io/github/v/release/kubeshark/kubeshark?logo=GitHub&style=flat-square"></a>
|
||||
<a href="https://hub.docker.com/r/kubeshark/worker"><img alt="Docker pulls" src="https://img.shields.io/docker/pulls/kubeshark/worker?color=%23099cec&logo=Docker&style=flat-square"></a>
|
||||
<a href="https://discord.gg/WkvRGMUcx7"><img alt="Discord" src="https://img.shields.io/discord/1042559155224973352?logo=Discord&style=flat-square&label=discord"></a>
|
||||
<a href="https://join.slack.com/t/kubeshark/shared_invite/zt-3jdcdgxdv-1qNkhBh9c6CFoE7bSPkpBQ"><img alt="Slack" src="https://img.shields.io/badge/slack-join_chat-green?logo=Slack&style=flat-square"></a>
|
||||
</p>
|
||||
|
||||
<p align="center"><b>Network Observability for SREs & AI Agents</b></p>
|
||||
|
||||
<p align="center">
|
||||
<b>
|
||||
Want to see Kubeshark in action right now? Visit this
|
||||
<a href="https://demo.kubeshark.co/">live demo deployment</a> of Kubeshark.
|
||||
</b>
|
||||
<a href="https://demo.kubeshark.com/">Live Demo</a> · <a href="https://docs.kubeshark.com">Docs</a>
|
||||
</p>
|
||||
|
||||
**Kubeshark** is a network observability platform for [**Kubernetes**](https://kubernetes.io/), providing real-time, protocol-level visibility into Kubernetes’ network. It enables users to inspect all internal and external cluster connections, API calls, and data in transit. Additionally, Kubeshark detects suspicious network behaviors, triggers automated actions, and provides deep insights into the network.
|
||||
---
|
||||
|
||||

|
||||
Kubeshark captures cluster-wide network traffic at the speed and scale of Kubernetes, continuously, at the kernel level using eBPF. It consolidates a highly fragmented picture — dozens of nodes, thousands of workloads, millions of connections — into a single, queryable view with full Kubernetes and API context.
|
||||
|
||||
Think [TCPDump](https://en.wikipedia.org/wiki/Tcpdump) and [Wireshark](https://www.wireshark.org/) reimagined for Kubernetes.
|
||||
Network data is available to **AI agents via [MCP](https://docs.kubeshark.com/en/mcp)** and to **human operators via a [dashboard](https://docs.kubeshark.com/en/v2)**.
|
||||
|
||||
## Getting Started
|
||||
Download **Kubeshark**'s binary distribution [latest release](https://github.com/kubeshark/kubeshark/releases/latest) or use one of the following methods to deploy **Kubeshark**. The [web-based dashboard](https://docs.kubeshark.co/en/ui) should open in your browser, showing a real-time view of your cluster's traffic.
|
||||
**What's captured, cluster-wide:**
|
||||
|
||||
### Homebrew
|
||||
- **L4 Packets & TCP Metrics** — retransmissions, RTT, window saturation, connection lifecycle, packet loss across every node-to-node path ([TCP insights →](https://docs.kubeshark.com/en/mcp/tcp_insights))
|
||||
- **L7 API Calls** — real-time request/response matching with full payload parsing: HTTP, gRPC, GraphQL, Redis, Kafka, DNS ([API dissection →](https://docs.kubeshark.com/en/v2/l7_api_dissection))
|
||||
- **Decrypted TLS** — eBPF-based TLS decryption without key management
|
||||
- **Kubernetes Context** — every packet and API call resolved to pod, service, namespace, and node
|
||||
- **PCAP Retention** — point-in-time raw packet snapshots, exportable for Wireshark ([Snapshots →](https://docs.kubeshark.com/en/v2/traffic_snapshots))
|
||||
|
||||
[Homebrew](https://brew.sh/) :beer: users can install the Kubeshark CLI with:
|
||||

|
||||
|
||||
```shell
|
||||
brew install kubeshark
|
||||
kubeshark tap
|
||||
```
|
||||
---
|
||||
|
||||
To clean up:
|
||||
```shell
|
||||
kubeshark clean
|
||||
```
|
||||
## Get Started
|
||||
|
||||
### Helm
|
||||
|
||||
Add the Helm repository and install the chart:
|
||||
|
||||
```shell
|
||||
helm repo add kubeshark https://helm.kubeshark.co
|
||||
```bash
|
||||
helm repo add kubeshark https://helm.kubeshark.com
|
||||
helm install kubeshark kubeshark/kubeshark
|
||||
```
|
||||
Follow the on-screen instructions how to connect to the dashboard.
|
||||
|
||||
To clean up:
|
||||
```shell
|
||||
helm uninstall kubeshark
|
||||
Dashboard opens automatically. You're capturing traffic.
|
||||
|
||||
**Connect an AI agent** via MCP:
|
||||
|
||||
```bash
|
||||
brew install kubeshark
|
||||
claude mcp add kubeshark -- kubeshark mcp
|
||||
```
|
||||
|
||||
## Building From Source
|
||||
[MCP setup guide →](https://docs.kubeshark.com/en/mcp)
|
||||
|
||||
Clone this repository and run the `make` command to build it. After the build is complete, the executable can be found at `./bin/kubeshark`.
|
||||
---
|
||||
|
||||
## Documentation
|
||||
### AI-Powered Network Analysis
|
||||
|
||||
To learn more, read the [documentation](https://docs.kubeshark.co).
|
||||
Kubeshark exposes all cluster-wide network data via MCP (Model Context Protocol). AI agents can query L4 metrics, investigate L7 API calls, analyze traffic patterns, and run root cause analysis — through natural language. Use cases include incident response, root cause analysis, troubleshooting, debugging, and reliability workflows.
|
||||
|
||||
## Additional Use Cases
|
||||
> *"Why did checkout fail at 2:15 PM?"*
|
||||
> *"Which services have error rates above 1%?"*
|
||||
> *"Show TCP retransmission rates across all node-to-node paths"*
|
||||
> *"Trace request abc123 through all services"*
|
||||
|
||||
### Dump All Cluster-wide Traffic into a Single PCAP File
|
||||
Works with Claude Code, Cursor, and any MCP-compatible AI.
|
||||
|
||||
Record **all** cluster traffic and consolidate it into a single PCAP file (tcpdump-style).
|
||||

|
||||
|
||||
Run Kubeshark to start capturing traffic:
|
||||
```shell
|
||||
kubeshark tap --set headless=true
|
||||
```
|
||||
> You can press `^C` to stop the command. Kubeshark will continue running in the background.
|
||||
[MCP setup guide →](https://docs.kubeshark.com/en/mcp)
|
||||
|
||||
Take a snapshot of traffic (e.g., from the past 5 minutes):
|
||||
```shell
|
||||
kubeshark pcapdump --time 5m
|
||||
```
|
||||
> Read more [here](https://docs.kubeshark.co/en/pcapdump).
|
||||
---
|
||||
|
||||
### L7 API Dissection
|
||||
|
||||
Cluster-wide request/response matching with full payloads, parsed according to protocol specifications. HTTP, gRPC, Redis, Kafka, DNS, and more. Every API call resolved to source and destination pod, service, namespace, and node. No code instrumentation required.
|
||||
|
||||

|
||||
|
||||
[Learn more →](https://docs.kubeshark.com/en/v2/l7_api_dissection)
|
||||
|
||||
### L4/L7 Workload Map
|
||||
|
||||
Cluster-wide view of service communication: dependencies, traffic flow, and anomalies across all nodes and namespaces.
|
||||
|
||||

|
||||
|
||||
[Learn more →](https://docs.kubeshark.com/en/v2/service_map)
|
||||
|
||||
### Traffic Retention
|
||||
|
||||
Continuous raw packet capture with point-in-time snapshots. Export PCAP files for offline analysis with Wireshark or other tools.
|
||||
|
||||

|
||||
|
||||
[Snapshots guide →](https://docs.kubeshark.com/en/v2/traffic_snapshots)
|
||||
|
||||
---
|
||||
|
||||
## Features
|
||||
|
||||
| Feature | Description |
|
||||
|---------|-------------|
|
||||
| [**Raw Capture**](https://docs.kubeshark.com/en/v2/raw_capture) | Continuous cluster-wide packet capture with minimal overhead |
|
||||
| [**Traffic Snapshots**](https://docs.kubeshark.com/en/v2/traffic_snapshots) | Point-in-time snapshots, export as PCAP for Wireshark |
|
||||
| [**L7 API Dissection**](https://docs.kubeshark.com/en/v2/l7_api_dissection) | Request/response matching with full payloads and protocol parsing |
|
||||
| [**Protocol Support**](https://docs.kubeshark.com/en/protocols) | HTTP, gRPC, GraphQL, Redis, Kafka, DNS, and more |
|
||||
| [**TLS Decryption**](https://docs.kubeshark.com/en/encrypted_traffic) | eBPF-based decryption without key management |
|
||||
| [**AI-Powered Analysis**](https://docs.kubeshark.com/en/v2/ai_powered_analysis) | Query cluster-wide network data with Claude, Cursor, or any MCP-compatible AI |
|
||||
| [**Display Filters**](https://docs.kubeshark.com/en/v2/kfl2) | Wireshark-inspired display filters for precise traffic analysis |
|
||||
| [**100% On-Premises**](https://docs.kubeshark.com/en/air_gapped) | Air-gapped support, no external dependencies |
|
||||
|
||||
---
|
||||
|
||||
## Install
|
||||
|
||||
| Method | Command |
|
||||
|--------|---------|
|
||||
| Helm | `helm repo add kubeshark https://helm.kubeshark.com && helm install kubeshark kubeshark/kubeshark` |
|
||||
| Homebrew | `brew install kubeshark && kubeshark tap` |
|
||||
| Binary | [Download](https://github.com/kubeshark/kubeshark/releases/latest) |
|
||||
|
||||
[Installation guide →](https://docs.kubeshark.com/en/install)
|
||||
|
||||
---
|
||||
|
||||
## Contributing
|
||||
|
||||
We :heart: pull requests! See [CONTRIBUTING.md](CONTRIBUTING.md) for the contribution guide.
|
||||
We welcome contributions. See [CONTRIBUTING.md](CONTRIBUTING.md).
|
||||
|
||||
## License
|
||||
|
||||
[Apache-2.0](LICENSE)
|
||||
|
||||
125
cmd/mcp.go
Normal file
125
cmd/mcp.go
Normal file
@@ -0,0 +1,125 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"github.com/kubeshark/kubeshark/config"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
var mcpURL string
|
||||
var mcpKubeconfig string
|
||||
var mcpListTools bool
|
||||
var mcpConfig bool
|
||||
var mcpAllowDestructive bool
|
||||
|
||||
var mcpCmd = &cobra.Command{
|
||||
Use: "mcp",
|
||||
Short: "Run MCP (Model Context Protocol) server for AI assistant integration",
|
||||
Long: `Run an MCP server over stdio that exposes Kubeshark's L7 API visibility
|
||||
to AI assistants like Claude Desktop.
|
||||
|
||||
TOOLS PROVIDED:
|
||||
|
||||
Cluster Management (work without Kubeshark running):
|
||||
- check_kubeshark_status: Check if Kubeshark is running in the cluster
|
||||
- start_kubeshark: Start Kubeshark to capture traffic
|
||||
- stop_kubeshark: Stop Kubeshark and clean up resources
|
||||
|
||||
Traffic Analysis (require Kubeshark running):
|
||||
- list_workloads: Discover pods, services, namespaces, and nodes with L7 traffic
|
||||
- list_api_calls: Query L7 API transactions (HTTP, gRPC, etc.)
|
||||
- get_api_call: Get detailed information about a specific API call
|
||||
- get_api_stats: Get aggregated API statistics
|
||||
|
||||
CONFIGURATION:
|
||||
|
||||
To use with Claude Desktop, add to your claude_desktop_config.json
|
||||
(typically at ~/Library/Application Support/Claude/claude_desktop_config.json):
|
||||
|
||||
{
|
||||
"mcpServers": {
|
||||
"kubeshark": {
|
||||
"command": "/path/to/kubeshark",
|
||||
"args": ["mcp", "--kubeconfig", "/Users/YOUR_USERNAME/.kube/config"]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
DIRECT URL MODE:
|
||||
|
||||
If Kubeshark is already running and accessible via URL (e.g., exposed via ingress),
|
||||
you can connect directly without needing kubectl/kubeconfig:
|
||||
|
||||
{
|
||||
"mcpServers": {
|
||||
"kubeshark": {
|
||||
"command": "/path/to/kubeshark",
|
||||
"args": ["mcp", "--url", "https://kubeshark.example.com"]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
In URL mode, destructive tools (start/stop) are disabled since Kubeshark is
|
||||
managed externally. The check_kubeshark_status tool remains available to confirm connectivity.
|
||||
|
||||
DESTRUCTIVE OPERATIONS:
|
||||
|
||||
By default, destructive operations (start_kubeshark, stop_kubeshark) are disabled
|
||||
to prevent accidental cluster modifications. To enable them, use --allow-destructive:
|
||||
|
||||
{
|
||||
"mcpServers": {
|
||||
"kubeshark": {
|
||||
"command": "/path/to/kubeshark",
|
||||
"args": ["mcp", "--allow-destructive", "--kubeconfig", "/path/to/.kube/config"]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
CUSTOM SETTINGS:
|
||||
|
||||
To use custom settings when starting Kubeshark, use the --set flag:
|
||||
|
||||
{
|
||||
"mcpServers": {
|
||||
"kubeshark": {
|
||||
"command": "/path/to/kubeshark",
|
||||
"args": ["mcp", "--set", "tap.docker.tag=v52.3"],
|
||||
...
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Multiple --set flags can be used for different settings.`,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
// Handle --mcp-config flag
|
||||
if mcpConfig {
|
||||
printMCPConfig(mcpURL, mcpKubeconfig)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Set kubeconfig path if provided
|
||||
if mcpKubeconfig != "" {
|
||||
config.Config.Kube.ConfigPathStr = mcpKubeconfig
|
||||
}
|
||||
|
||||
// Handle --list-tools flag
|
||||
if mcpListTools {
|
||||
listMCPTools(mcpURL)
|
||||
return nil
|
||||
}
|
||||
|
||||
setFlags, _ := cmd.Flags().GetStringSlice(config.SetCommandName)
|
||||
runMCPWithConfig(setFlags, mcpURL, mcpAllowDestructive)
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(mcpCmd)
|
||||
|
||||
mcpCmd.Flags().StringVar(&mcpURL, "url", "", "Direct URL to Kubeshark (e.g., https://kubeshark.example.com). When set, connects directly without kubectl/proxy and disables start/stop tools.")
|
||||
mcpCmd.Flags().StringVar(&mcpKubeconfig, "kubeconfig", "", "Path to kubeconfig file (e.g., /Users/me/.kube/config)")
|
||||
mcpCmd.Flags().BoolVar(&mcpListTools, "list-tools", false, "List available MCP tools and exit")
|
||||
mcpCmd.Flags().BoolVar(&mcpConfig, "mcp-config", false, "Print MCP client configuration JSON and exit")
|
||||
mcpCmd.Flags().BoolVar(&mcpAllowDestructive, "allow-destructive", false, "Enable destructive operations (start_kubeshark, stop_kubeshark). Without this flag, only read-only traffic analysis tools are available.")
|
||||
}
|
||||
1232
cmd/mcpRunner.go
Normal file
1232
cmd/mcpRunner.go
Normal file
File diff suppressed because it is too large
Load Diff
688
cmd/mcp_test.go
Normal file
688
cmd/mcp_test.go
Normal file
@@ -0,0 +1,688 @@
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func newTestMCPServer() *mcpServer {
|
||||
return &mcpServer{httpClient: &http.Client{}, stdin: &bytes.Buffer{}, stdout: &bytes.Buffer{}}
|
||||
}
|
||||
|
||||
func sendRequest(s *mcpServer, method string, id any, params any) string {
|
||||
req := jsonRPCRequest{
|
||||
JSONRPC: "2.0",
|
||||
ID: id,
|
||||
Method: method,
|
||||
}
|
||||
if params != nil {
|
||||
paramsBytes, _ := json.Marshal(params)
|
||||
req.Params = paramsBytes
|
||||
}
|
||||
|
||||
s.handleRequest(&req)
|
||||
|
||||
output := s.stdout.(*bytes.Buffer).String()
|
||||
s.stdout.(*bytes.Buffer).Reset()
|
||||
return output
|
||||
}
|
||||
|
||||
func parseResponse(t *testing.T, output string) jsonRPCResponse {
|
||||
var resp jsonRPCResponse
|
||||
if err := json.Unmarshal([]byte(strings.TrimSpace(output)), &resp); err != nil {
|
||||
t.Fatalf("Failed to parse response: %v\nOutput: %s", err, output)
|
||||
}
|
||||
return resp
|
||||
}
|
||||
|
||||
func TestMCP_Initialize(t *testing.T) {
|
||||
s := newTestMCPServer()
|
||||
resp := parseResponse(t, sendRequest(s, "initialize", 1, nil))
|
||||
|
||||
if resp.ID != float64(1) || resp.Error != nil {
|
||||
t.Fatalf("Expected ID 1 with no error, got ID=%v, error=%v", resp.ID, resp.Error)
|
||||
}
|
||||
|
||||
result := resp.Result.(map[string]any)
|
||||
if result["protocolVersion"] != "2024-11-05" {
|
||||
t.Errorf("Expected protocolVersion 2024-11-05, got %v", result["protocolVersion"])
|
||||
}
|
||||
if result["serverInfo"].(map[string]any)["name"] != "kubeshark-mcp" {
|
||||
t.Error("Expected server name kubeshark-mcp")
|
||||
}
|
||||
if !strings.Contains(result["instructions"].(string), "check_kubeshark_status") {
|
||||
t.Error("Instructions should mention check_kubeshark_status")
|
||||
}
|
||||
if _, ok := result["capabilities"].(map[string]any)["prompts"]; !ok {
|
||||
t.Error("Expected prompts capability")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_Ping(t *testing.T) {
|
||||
resp := parseResponse(t, sendRequest(newTestMCPServer(), "ping", 42, nil))
|
||||
if resp.ID != float64(42) || resp.Error != nil || len(resp.Result.(map[string]any)) != 0 {
|
||||
t.Errorf("Expected ID 42, no error, empty result")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_InitializedNotification(t *testing.T) {
|
||||
s := newTestMCPServer()
|
||||
for _, method := range []string{"initialized", "notifications/initialized"} {
|
||||
if output := sendRequest(s, method, nil, nil); output != "" {
|
||||
t.Errorf("Expected no output for %s, got: %s", method, output)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_UnknownMethod(t *testing.T) {
|
||||
resp := parseResponse(t, sendRequest(newTestMCPServer(), "unknown/method", 1, nil))
|
||||
if resp.Error == nil || resp.Error.Code != -32601 {
|
||||
t.Fatalf("Expected error code -32601, got %v", resp.Error)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_PromptsList(t *testing.T) {
|
||||
resp := parseResponse(t, sendRequest(newTestMCPServer(), "prompts/list", 1, nil))
|
||||
if resp.Error != nil {
|
||||
t.Fatalf("Unexpected error: %v", resp.Error)
|
||||
}
|
||||
prompts := resp.Result.(map[string]any)["prompts"].([]any)
|
||||
if len(prompts) != 1 || prompts[0].(map[string]any)["name"] != "kubeshark_usage" {
|
||||
t.Error("Expected 1 prompt named 'kubeshark_usage'")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_PromptsGet(t *testing.T) {
|
||||
resp := parseResponse(t, sendRequest(newTestMCPServer(), "prompts/get", 1, map[string]any{"name": "kubeshark_usage"}))
|
||||
if resp.Error != nil {
|
||||
t.Fatalf("Unexpected error: %v", resp.Error)
|
||||
}
|
||||
messages := resp.Result.(map[string]any)["messages"].([]any)
|
||||
if len(messages) == 0 {
|
||||
t.Fatal("Expected at least one message")
|
||||
}
|
||||
text := messages[0].(map[string]any)["content"].(map[string]any)["text"].(string)
|
||||
for _, phrase := range []string{"check_kubeshark_status", "start_kubeshark", "stop_kubeshark"} {
|
||||
if !strings.Contains(text, phrase) {
|
||||
t.Errorf("Prompt should contain '%s'", phrase)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_PromptsGet_UnknownPrompt(t *testing.T) {
|
||||
resp := parseResponse(t, sendRequest(newTestMCPServer(), "prompts/get", 1, map[string]any{"name": "unknown"}))
|
||||
if resp.Error == nil || resp.Error.Code != -32602 {
|
||||
t.Fatalf("Expected error code -32602, got %v", resp.Error)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_ToolsList_CLIOnly(t *testing.T) {
|
||||
resp := parseResponse(t, sendRequest(newTestMCPServer(), "tools/list", 1, nil))
|
||||
if resp.Error != nil {
|
||||
t.Fatalf("Unexpected error: %v", resp.Error)
|
||||
}
|
||||
tools := resp.Result.(map[string]any)["tools"].([]any)
|
||||
// Should have check_kubeshark_status + get_file_url + download_file = 3 tools
|
||||
if len(tools) != 3 {
|
||||
t.Errorf("Expected 3 tools, got %d", len(tools))
|
||||
}
|
||||
toolNames := make(map[string]bool)
|
||||
for _, tool := range tools {
|
||||
toolNames[tool.(map[string]any)["name"].(string)] = true
|
||||
}
|
||||
for _, expected := range []string{"check_kubeshark_status", "get_file_url", "download_file"} {
|
||||
if !toolNames[expected] {
|
||||
t.Errorf("Missing expected tool: %s", expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_ToolsList_WithDestructive(t *testing.T) {
|
||||
s := &mcpServer{httpClient: &http.Client{}, stdin: &bytes.Buffer{}, stdout: &bytes.Buffer{}, allowDestructive: true}
|
||||
resp := parseResponse(t, sendRequest(s, "tools/list", 1, nil))
|
||||
if resp.Error != nil {
|
||||
t.Fatalf("Unexpected error: %v", resp.Error)
|
||||
}
|
||||
tools := resp.Result.(map[string]any)["tools"].([]any)
|
||||
toolNames := make(map[string]bool)
|
||||
for _, tool := range tools {
|
||||
toolNames[tool.(map[string]any)["name"].(string)] = true
|
||||
}
|
||||
for _, expected := range []string{"check_kubeshark_status", "start_kubeshark", "stop_kubeshark"} {
|
||||
if !toolNames[expected] {
|
||||
t.Errorf("Missing expected tool: %s", expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_ToolsList_WithHubBackend(t *testing.T) {
|
||||
mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.URL.Path == "/" || r.URL.Path == "" {
|
||||
_, _ = w.Write([]byte(`{"name":"hub","tools":[{"name":"list_workloads","description":"","inputSchema":{}},{"name":"list_api_calls","description":"","inputSchema":{}}]}`))
|
||||
}
|
||||
}))
|
||||
defer mockServer.Close()
|
||||
|
||||
s := &mcpServer{httpClient: &http.Client{}, stdin: &bytes.Buffer{}, stdout: &bytes.Buffer{}, hubBaseURL: mockServer.URL, backendInitialized: true, allowDestructive: true}
|
||||
resp := parseResponse(t, sendRequest(s, "tools/list", 1, nil))
|
||||
if resp.Error != nil {
|
||||
t.Fatalf("Unexpected error: %v", resp.Error)
|
||||
}
|
||||
tools := resp.Result.(map[string]any)["tools"].([]any)
|
||||
// Should have CLI tools (3) + file tools (2) + Hub tools (2) = 7 tools
|
||||
if len(tools) < 7 {
|
||||
t.Errorf("Expected at least 7 tools, got %d", len(tools))
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_ToolsCallUnknownTool(t *testing.T) {
|
||||
s, mockServer := newTestMCPServerWithMockBackend(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
})
|
||||
defer mockServer.Close()
|
||||
|
||||
resp := parseResponse(t, sendRequest(s, "tools/call", 1, mcpCallToolParams{Name: "unknown"}))
|
||||
if !resp.Result.(map[string]any)["isError"].(bool) {
|
||||
t.Error("Expected isError=true for unknown tool")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_ToolsCallInvalidParams(t *testing.T) {
|
||||
s := newTestMCPServer()
|
||||
req := jsonRPCRequest{JSONRPC: "2.0", ID: 1, Method: "tools/call", Params: json.RawMessage(`"invalid"`)}
|
||||
s.handleRequest(&req)
|
||||
resp := parseResponse(t, s.stdout.(*bytes.Buffer).String())
|
||||
if resp.Error == nil || resp.Error.Code != -32602 {
|
||||
t.Fatalf("Expected error code -32602")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_CheckKubesharkStatus(t *testing.T) {
|
||||
for _, tc := range []struct {
|
||||
name string
|
||||
args map[string]any
|
||||
}{
|
||||
{"no_config", map[string]any{}},
|
||||
{"with_namespace", map[string]any{"release_namespace": "custom-ns"}},
|
||||
} {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
resp := parseResponse(t, sendRequest(newTestMCPServer(), "tools/call", 1, mcpCallToolParams{Name: "check_kubeshark_status", Arguments: tc.args}))
|
||||
if resp.Error != nil {
|
||||
t.Fatalf("Unexpected error: %v", resp.Error)
|
||||
}
|
||||
content := resp.Result.(map[string]any)["content"].([]any)
|
||||
if len(content) == 0 || content[0].(map[string]any)["text"].(string) == "" {
|
||||
t.Error("Expected non-empty response")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func newTestMCPServerWithMockBackend(handler http.HandlerFunc) (*mcpServer, *httptest.Server) {
|
||||
mockServer := httptest.NewServer(handler)
|
||||
return &mcpServer{httpClient: &http.Client{}, stdin: &bytes.Buffer{}, stdout: &bytes.Buffer{}, hubBaseURL: mockServer.URL, backendInitialized: true}, mockServer
|
||||
}
|
||||
|
||||
type hubToolCallRequest struct {
|
||||
Tool string `json:"name"`
|
||||
Arguments map[string]any `json:"arguments"`
|
||||
}
|
||||
|
||||
func newMockHubHandler(t *testing.T, handler func(req hubToolCallRequest) (string, int)) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.URL.Path != "/tools/call" || r.Method != http.MethodPost {
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
var req hubToolCallRequest
|
||||
_ = json.NewDecoder(r.Body).Decode(&req)
|
||||
resp, status := handler(req)
|
||||
w.WriteHeader(status)
|
||||
_, _ = w.Write([]byte(resp))
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_ListWorkloads(t *testing.T) {
|
||||
s, mockServer := newTestMCPServerWithMockBackend(newMockHubHandler(t, func(req hubToolCallRequest) (string, int) {
|
||||
if req.Tool != "list_workloads" {
|
||||
t.Errorf("Expected tool 'list_workloads', got %s", req.Tool)
|
||||
}
|
||||
return `{"workloads": [{"name": "test-pod"}]}`, http.StatusOK
|
||||
}))
|
||||
defer mockServer.Close()
|
||||
|
||||
resp := parseResponse(t, sendRequest(s, "tools/call", 1, mcpCallToolParams{Name: "list_workloads", Arguments: map[string]any{"type": "pod"}}))
|
||||
if resp.Error != nil {
|
||||
t.Fatalf("Unexpected error: %v", resp.Error)
|
||||
}
|
||||
text := resp.Result.(map[string]any)["content"].([]any)[0].(map[string]any)["text"].(string)
|
||||
if !strings.Contains(text, "test-pod") {
|
||||
t.Errorf("Expected 'test-pod' in response")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_ListAPICalls(t *testing.T) {
|
||||
s, mockServer := newTestMCPServerWithMockBackend(newMockHubHandler(t, func(req hubToolCallRequest) (string, int) {
|
||||
if req.Tool != "list_api_calls" {
|
||||
t.Errorf("Expected tool 'list_api_calls', got %s", req.Tool)
|
||||
}
|
||||
return `{"calls": [{"id": "123", "path": "/api/users"}]}`, http.StatusOK
|
||||
}))
|
||||
defer mockServer.Close()
|
||||
|
||||
resp := parseResponse(t, sendRequest(s, "tools/call", 1, mcpCallToolParams{Name: "list_api_calls", Arguments: map[string]any{"proto": "http"}}))
|
||||
if resp.Error != nil {
|
||||
t.Fatalf("Unexpected error: %v", resp.Error)
|
||||
}
|
||||
if !strings.Contains(resp.Result.(map[string]any)["content"].([]any)[0].(map[string]any)["text"].(string), "/api/users") {
|
||||
t.Error("Expected '/api/users' in response")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_GetAPICall(t *testing.T) {
|
||||
s, mockServer := newTestMCPServerWithMockBackend(newMockHubHandler(t, func(req hubToolCallRequest) (string, int) {
|
||||
if req.Tool != "get_api_call" || req.Arguments["id"] != "abc123" {
|
||||
t.Errorf("Expected get_api_call with id=abc123")
|
||||
}
|
||||
return `{"id": "abc123", "path": "/api/orders"}`, http.StatusOK
|
||||
}))
|
||||
defer mockServer.Close()
|
||||
|
||||
resp := parseResponse(t, sendRequest(s, "tools/call", 1, mcpCallToolParams{Name: "get_api_call", Arguments: map[string]any{"id": "abc123"}}))
|
||||
if resp.Error != nil || !strings.Contains(resp.Result.(map[string]any)["content"].([]any)[0].(map[string]any)["text"].(string), "abc123") {
|
||||
t.Error("Expected response containing 'abc123'")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_GetAPICall_MissingID(t *testing.T) {
|
||||
s, mockServer := newTestMCPServerWithMockBackend(newMockHubHandler(t, func(req hubToolCallRequest) (string, int) {
|
||||
return `{"error": "id is required"}`, http.StatusBadRequest
|
||||
}))
|
||||
defer mockServer.Close()
|
||||
|
||||
resp := parseResponse(t, sendRequest(s, "tools/call", 1, mcpCallToolParams{Name: "get_api_call", Arguments: map[string]any{}}))
|
||||
if !resp.Result.(map[string]any)["isError"].(bool) {
|
||||
t.Error("Expected isError=true")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_GetAPIStats(t *testing.T) {
|
||||
s, mockServer := newTestMCPServerWithMockBackend(newMockHubHandler(t, func(req hubToolCallRequest) (string, int) {
|
||||
if req.Tool != "get_api_stats" {
|
||||
t.Errorf("Expected get_api_stats")
|
||||
}
|
||||
return `{"stats": {"total_calls": 1000}}`, http.StatusOK
|
||||
}))
|
||||
defer mockServer.Close()
|
||||
|
||||
resp := parseResponse(t, sendRequest(s, "tools/call", 1, mcpCallToolParams{Name: "get_api_stats", Arguments: map[string]any{"ns": "prod"}}))
|
||||
if resp.Error != nil || !strings.Contains(resp.Result.(map[string]any)["content"].([]any)[0].(map[string]any)["text"].(string), "total_calls") {
|
||||
t.Error("Expected 'total_calls' in response")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_APITools_BackendError(t *testing.T) {
|
||||
s, mockServer := newTestMCPServerWithMockBackend(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
})
|
||||
defer mockServer.Close()
|
||||
|
||||
resp := parseResponse(t, sendRequest(s, "tools/call", 1, mcpCallToolParams{Name: "list_workloads"}))
|
||||
if !resp.Result.(map[string]any)["isError"].(bool) {
|
||||
t.Error("Expected isError=true for backend error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_APITools_BackendConnectionError(t *testing.T) {
|
||||
s := &mcpServer{httpClient: &http.Client{}, stdin: &bytes.Buffer{}, stdout: &bytes.Buffer{}, hubBaseURL: "http://localhost:99999", backendInitialized: true}
|
||||
resp := parseResponse(t, sendRequest(s, "tools/call", 1, mcpCallToolParams{Name: "list_workloads"}))
|
||||
if !resp.Result.(map[string]any)["isError"].(bool) {
|
||||
t.Error("Expected isError=true for connection error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_RunLoop_ParseError(t *testing.T) {
|
||||
output := &bytes.Buffer{}
|
||||
s := &mcpServer{httpClient: &http.Client{}, stdin: strings.NewReader("invalid\n"), stdout: output}
|
||||
s.run()
|
||||
if resp := parseResponse(t, output.String()); resp.Error == nil || resp.Error.Code != -32700 {
|
||||
t.Fatalf("Expected error code -32700")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_RunLoop_MultipleRequests(t *testing.T) {
|
||||
output := &bytes.Buffer{}
|
||||
s := &mcpServer{httpClient: &http.Client{}, stdin: strings.NewReader(`{"jsonrpc":"2.0","id":1,"method":"ping"}
|
||||
{"jsonrpc":"2.0","id":2,"method":"ping"}
|
||||
`), stdout: output}
|
||||
s.run()
|
||||
if lines := strings.Split(strings.TrimSpace(output.String()), "\n"); len(lines) != 2 {
|
||||
t.Fatalf("Expected 2 responses, got %d", len(lines))
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_RunLoop_EmptyLines(t *testing.T) {
|
||||
output := &bytes.Buffer{}
|
||||
s := &mcpServer{httpClient: &http.Client{}, stdin: strings.NewReader("\n\n{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"ping\"}\n"), stdout: output}
|
||||
s.run()
|
||||
if lines := strings.Split(strings.TrimSpace(output.String()), "\n"); len(lines) != 1 {
|
||||
t.Fatalf("Expected 1 response, got %d", len(lines))
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_ResponseFormat(t *testing.T) {
|
||||
s := newTestMCPServer()
|
||||
// Numeric ID
|
||||
if resp := parseResponse(t, sendRequest(s, "ping", 123, nil)); resp.ID != float64(123) || resp.JSONRPC != "2.0" {
|
||||
t.Errorf("Expected ID 123 and jsonrpc 2.0")
|
||||
}
|
||||
// String ID
|
||||
if resp := parseResponse(t, sendRequest(s, "ping", "str", nil)); resp.ID != "str" {
|
||||
t.Errorf("Expected ID 'str'")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_ToolCallResult_ContentFormat(t *testing.T) {
|
||||
s, mockServer := newTestMCPServerWithMockBackend(func(w http.ResponseWriter, r *http.Request) {
|
||||
_, _ = w.Write([]byte(`{"data": "test"}`))
|
||||
})
|
||||
defer mockServer.Close()
|
||||
|
||||
resp := parseResponse(t, sendRequest(s, "tools/call", 1, mcpCallToolParams{Name: "list_workloads"}))
|
||||
content := resp.Result.(map[string]any)["content"].([]any)
|
||||
if len(content) == 0 || content[0].(map[string]any)["type"] != "text" {
|
||||
t.Error("Expected content with type=text")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_CommandArgs(t *testing.T) {
|
||||
// Test start command args building
|
||||
for _, tc := range []struct {
|
||||
args map[string]any
|
||||
expected string
|
||||
}{
|
||||
{map[string]any{}, "tap --set headless=true"},
|
||||
{map[string]any{"pod_regex": "nginx.*"}, "tap nginx.* --set headless=true"},
|
||||
{map[string]any{"namespaces": "default"}, "tap -n default --set headless=true"},
|
||||
{map[string]any{"release_namespace": "ks"}, "tap -s ks --set headless=true"},
|
||||
} {
|
||||
cmdArgs := []string{"tap"}
|
||||
if v, _ := tc.args["pod_regex"].(string); v != "" {
|
||||
cmdArgs = append(cmdArgs, v)
|
||||
}
|
||||
if v, _ := tc.args["namespaces"].(string); v != "" {
|
||||
for _, ns := range strings.Split(v, ",") {
|
||||
cmdArgs = append(cmdArgs, "-n", strings.TrimSpace(ns))
|
||||
}
|
||||
}
|
||||
if v, _ := tc.args["release_namespace"].(string); v != "" {
|
||||
cmdArgs = append(cmdArgs, "-s", v)
|
||||
}
|
||||
cmdArgs = append(cmdArgs, "--set", "headless=true")
|
||||
if got := strings.Join(cmdArgs, " "); got != tc.expected {
|
||||
t.Errorf("Expected %q, got %q", tc.expected, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_PrettyPrintJSON(t *testing.T) {
|
||||
s, mockServer := newTestMCPServerWithMockBackend(func(w http.ResponseWriter, r *http.Request) {
|
||||
_, _ = w.Write([]byte(`{"key":"value"}`))
|
||||
})
|
||||
defer mockServer.Close()
|
||||
|
||||
resp := parseResponse(t, sendRequest(s, "tools/call", 1, mcpCallToolParams{Name: "list_workloads"}))
|
||||
text := resp.Result.(map[string]any)["content"].([]any)[0].(map[string]any)["text"].(string)
|
||||
if !strings.Contains(text, "\n") {
|
||||
t.Error("Expected pretty-printed JSON")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_SpecialCharsAndEdgeCases(t *testing.T) {
|
||||
s, mockServer := newTestMCPServerWithMockBackend(func(w http.ResponseWriter, r *http.Request) {
|
||||
_, _ = w.Write([]byte(`{}`))
|
||||
})
|
||||
defer mockServer.Close()
|
||||
|
||||
// Test special chars, empty args, nil args
|
||||
for _, args := range []map[string]any{
|
||||
{"path": "/api?id=123"},
|
||||
{"id": "abc/123"},
|
||||
{},
|
||||
nil,
|
||||
} {
|
||||
resp := parseResponse(t, sendRequest(s, "tools/call", 1, mcpCallToolParams{Name: "list_workloads", Arguments: args}))
|
||||
if resp.Error != nil {
|
||||
t.Errorf("Unexpected error with args %v: %v", args, resp.Error)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_BackendInitialization_Concurrent(t *testing.T) {
|
||||
s := newTestMCPServer()
|
||||
done := make(chan bool, 10)
|
||||
for i := 0; i < 10; i++ {
|
||||
go func() { s.ensureBackendConnection(); done <- true }()
|
||||
}
|
||||
for i := 0; i < 10; i++ {
|
||||
<-done
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_GetFileURL_ProxyMode(t *testing.T) {
|
||||
s := &mcpServer{
|
||||
httpClient: &http.Client{},
|
||||
stdin: &bytes.Buffer{},
|
||||
stdout: &bytes.Buffer{},
|
||||
hubBaseURL: "http://127.0.0.1:8899/api/mcp",
|
||||
backendInitialized: true,
|
||||
}
|
||||
resp := parseResponse(t, sendRequest(s, "tools/call", 1, mcpCallToolParams{
|
||||
Name: "get_file_url",
|
||||
Arguments: map[string]any{"path": "/snapshots/abc/data.pcap"},
|
||||
}))
|
||||
if resp.Error != nil {
|
||||
t.Fatalf("Unexpected error: %v", resp.Error)
|
||||
}
|
||||
text := resp.Result.(map[string]any)["content"].([]any)[0].(map[string]any)["text"].(string)
|
||||
expected := "http://127.0.0.1:8899/api/snapshots/abc/data.pcap"
|
||||
if text != expected {
|
||||
t.Errorf("Expected %q, got %q", expected, text)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_GetFileURL_URLMode(t *testing.T) {
|
||||
s := &mcpServer{
|
||||
httpClient: &http.Client{},
|
||||
stdin: &bytes.Buffer{},
|
||||
stdout: &bytes.Buffer{},
|
||||
hubBaseURL: "https://kubeshark.example.com/api/mcp",
|
||||
backendInitialized: true,
|
||||
urlMode: true,
|
||||
directURL: "https://kubeshark.example.com",
|
||||
}
|
||||
resp := parseResponse(t, sendRequest(s, "tools/call", 1, mcpCallToolParams{
|
||||
Name: "get_file_url",
|
||||
Arguments: map[string]any{"path": "/snapshots/xyz/export.pcap"},
|
||||
}))
|
||||
if resp.Error != nil {
|
||||
t.Fatalf("Unexpected error: %v", resp.Error)
|
||||
}
|
||||
text := resp.Result.(map[string]any)["content"].([]any)[0].(map[string]any)["text"].(string)
|
||||
expected := "https://kubeshark.example.com/api/snapshots/xyz/export.pcap"
|
||||
if text != expected {
|
||||
t.Errorf("Expected %q, got %q", expected, text)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_GetFileURL_MissingPath(t *testing.T) {
|
||||
s := &mcpServer{
|
||||
httpClient: &http.Client{},
|
||||
stdin: &bytes.Buffer{},
|
||||
stdout: &bytes.Buffer{},
|
||||
hubBaseURL: "http://127.0.0.1:8899/api/mcp",
|
||||
backendInitialized: true,
|
||||
}
|
||||
resp := parseResponse(t, sendRequest(s, "tools/call", 1, mcpCallToolParams{
|
||||
Name: "get_file_url",
|
||||
Arguments: map[string]any{},
|
||||
}))
|
||||
result := resp.Result.(map[string]any)
|
||||
if !result["isError"].(bool) {
|
||||
t.Error("Expected isError=true when path is missing")
|
||||
}
|
||||
text := result["content"].([]any)[0].(map[string]any)["text"].(string)
|
||||
if !strings.Contains(text, "path") {
|
||||
t.Error("Error message should mention 'path'")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_DownloadFile(t *testing.T) {
|
||||
fileContent := "test pcap data content"
|
||||
mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.URL.Path == "/api/snapshots/abc/data.pcap" {
|
||||
_, _ = w.Write([]byte(fileContent))
|
||||
} else {
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
}
|
||||
}))
|
||||
defer mockServer.Close()
|
||||
|
||||
// Use temp dir for download destination
|
||||
tmpDir := t.TempDir()
|
||||
dest := filepath.Join(tmpDir, "downloaded.pcap")
|
||||
|
||||
s := &mcpServer{
|
||||
httpClient: &http.Client{},
|
||||
stdin: &bytes.Buffer{},
|
||||
stdout: &bytes.Buffer{},
|
||||
hubBaseURL: mockServer.URL + "/api/mcp",
|
||||
backendInitialized: true,
|
||||
}
|
||||
resp := parseResponse(t, sendRequest(s, "tools/call", 1, mcpCallToolParams{
|
||||
Name: "download_file",
|
||||
Arguments: map[string]any{"path": "/snapshots/abc/data.pcap", "dest": dest},
|
||||
}))
|
||||
if resp.Error != nil {
|
||||
t.Fatalf("Unexpected error: %v", resp.Error)
|
||||
}
|
||||
result := resp.Result.(map[string]any)
|
||||
if result["isError"] != nil && result["isError"].(bool) {
|
||||
t.Fatalf("Expected no error, got: %v", result["content"])
|
||||
}
|
||||
|
||||
text := result["content"].([]any)[0].(map[string]any)["text"].(string)
|
||||
var downloadResult map[string]any
|
||||
if err := json.Unmarshal([]byte(text), &downloadResult); err != nil {
|
||||
t.Fatalf("Failed to parse download result JSON: %v", err)
|
||||
}
|
||||
if downloadResult["path"] != dest {
|
||||
t.Errorf("Expected path %q, got %q", dest, downloadResult["path"])
|
||||
}
|
||||
if downloadResult["size"].(float64) != float64(len(fileContent)) {
|
||||
t.Errorf("Expected size %d, got %v", len(fileContent), downloadResult["size"])
|
||||
}
|
||||
|
||||
// Verify the file was actually written
|
||||
content, err := os.ReadFile(dest)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read downloaded file: %v", err)
|
||||
}
|
||||
if string(content) != fileContent {
|
||||
t.Errorf("Expected file content %q, got %q", fileContent, string(content))
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_DownloadFile_CustomDest(t *testing.T) {
|
||||
mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
_, _ = w.Write([]byte("data"))
|
||||
}))
|
||||
defer mockServer.Close()
|
||||
|
||||
tmpDir := t.TempDir()
|
||||
customDest := filepath.Join(tmpDir, "custom-name.pcap")
|
||||
|
||||
s := &mcpServer{
|
||||
httpClient: &http.Client{},
|
||||
stdin: &bytes.Buffer{},
|
||||
stdout: &bytes.Buffer{},
|
||||
hubBaseURL: mockServer.URL + "/api/mcp",
|
||||
backendInitialized: true,
|
||||
}
|
||||
resp := parseResponse(t, sendRequest(s, "tools/call", 1, mcpCallToolParams{
|
||||
Name: "download_file",
|
||||
Arguments: map[string]any{"path": "/snapshots/abc/export.pcap", "dest": customDest},
|
||||
}))
|
||||
result := resp.Result.(map[string]any)
|
||||
if result["isError"] != nil && result["isError"].(bool) {
|
||||
t.Fatalf("Expected no error, got: %v", result["content"])
|
||||
}
|
||||
|
||||
text := result["content"].([]any)[0].(map[string]any)["text"].(string)
|
||||
var downloadResult map[string]any
|
||||
if err := json.Unmarshal([]byte(text), &downloadResult); err != nil {
|
||||
t.Fatalf("Failed to parse download result JSON: %v", err)
|
||||
}
|
||||
if downloadResult["path"] != customDest {
|
||||
t.Errorf("Expected path %q, got %q", customDest, downloadResult["path"])
|
||||
}
|
||||
|
||||
if _, err := os.Stat(customDest); os.IsNotExist(err) {
|
||||
t.Error("Expected file to exist at custom destination")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_ToolsList_IncludesFileTools(t *testing.T) {
|
||||
s := newTestMCPServer()
|
||||
resp := parseResponse(t, sendRequest(s, "tools/list", 1, nil))
|
||||
if resp.Error != nil {
|
||||
t.Fatalf("Unexpected error: %v", resp.Error)
|
||||
}
|
||||
tools := resp.Result.(map[string]any)["tools"].([]any)
|
||||
toolNames := make(map[string]bool)
|
||||
for _, tool := range tools {
|
||||
toolNames[tool.(map[string]any)["name"].(string)] = true
|
||||
}
|
||||
for _, expected := range []string{"get_file_url", "download_file"} {
|
||||
if !toolNames[expected] {
|
||||
t.Errorf("Missing expected tool: %s", expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_FullConversation(t *testing.T) {
|
||||
mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.URL.Path == "/" {
|
||||
_, _ = w.Write([]byte(`{"name":"hub","tools":[{"name":"list_workloads","description":"","inputSchema":{}}]}`))
|
||||
} else if r.URL.Path == "/tools/call" {
|
||||
_, _ = w.Write([]byte(`{"data":"ok"}`))
|
||||
}
|
||||
}))
|
||||
defer mockServer.Close()
|
||||
|
||||
input := `{"jsonrpc":"2.0","id":1,"method":"initialize"}
|
||||
{"jsonrpc":"2.0","method":"notifications/initialized"}
|
||||
{"jsonrpc":"2.0","id":2,"method":"tools/list"}
|
||||
{"jsonrpc":"2.0","id":3,"method":"tools/call","params":{"name":"list_workloads","arguments":{}}}
|
||||
`
|
||||
output := &bytes.Buffer{}
|
||||
s := &mcpServer{httpClient: &http.Client{}, stdin: strings.NewReader(input), stdout: output, hubBaseURL: mockServer.URL, backendInitialized: true}
|
||||
s.run()
|
||||
|
||||
lines := strings.Split(strings.TrimSpace(output.String()), "\n")
|
||||
if len(lines) != 3 { // 3 responses (notification has no response)
|
||||
t.Errorf("Expected 3 responses, got %d", len(lines))
|
||||
}
|
||||
for i, line := range lines {
|
||||
var resp jsonRPCResponse
|
||||
if err := json.Unmarshal([]byte(line), &resp); err != nil || resp.Error != nil {
|
||||
t.Errorf("Response %d: parse error or unexpected error", i)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -18,13 +18,12 @@ import (
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
clientk8s "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/remotecommand"
|
||||
)
|
||||
|
||||
const (
|
||||
label = "app.kubeshark.co/app=worker"
|
||||
label = "app.kubeshark.com/app=worker"
|
||||
srcDir = "pcapdump"
|
||||
maxSnaplen uint32 = 262144
|
||||
maxTimePerFile = time.Minute * 5
|
||||
@@ -39,7 +38,7 @@ type PodFileInfo struct {
|
||||
}
|
||||
|
||||
// listWorkerPods fetches all worker pods from multiple namespaces
|
||||
func listWorkerPods(ctx context.Context, clientset *clientk8s.Clientset, namespaces []string) ([]*PodFileInfo, error) {
|
||||
func listWorkerPods(ctx context.Context, clientset *kubernetes.Clientset, namespaces []string) ([]*PodFileInfo, error) {
|
||||
var podFileInfos []*PodFileInfo
|
||||
var errs []error
|
||||
labelSelector := label
|
||||
@@ -65,7 +64,7 @@ func listWorkerPods(ctx context.Context, clientset *clientk8s.Clientset, namespa
|
||||
}
|
||||
|
||||
// listFilesInPodDir lists all files in the specified directory inside the pod across multiple namespaces
|
||||
func listFilesInPodDir(ctx context.Context, clientset *clientk8s.Clientset, config *rest.Config, pod *PodFileInfo, cutoffTime *time.Time) error {
|
||||
func listFilesInPodDir(ctx context.Context, clientset *kubernetes.Clientset, config *rest.Config, pod *PodFileInfo, cutoffTime *time.Time) error {
|
||||
nodeName := pod.Pod.Spec.NodeName
|
||||
srcFilePath := filepath.Join("data", nodeName, srcDir)
|
||||
|
||||
|
||||
@@ -62,4 +62,5 @@ func init() {
|
||||
tapCmd.Flags().Bool(configStructs.TelemetryEnabledLabel, defaultTapConfig.Telemetry.Enabled, "Enable/disable Telemetry")
|
||||
tapCmd.Flags().Bool(configStructs.ResourceGuardEnabledLabel, defaultTapConfig.ResourceGuard.Enabled, "Enable/disable resource guard")
|
||||
tapCmd.Flags().Bool(configStructs.WatchdogEnabled, defaultTapConfig.Watchdog.Enabled, "Enable/disable watchdog")
|
||||
tapCmd.Flags().String(configStructs.HelmChartPathLabel, defaultTapConfig.Release.HelmChartPath, "Path to a local Helm chart folder (overrides the remote Helm repo)")
|
||||
}
|
||||
|
||||
@@ -58,6 +58,7 @@ func InitConfig(cmd *cobra.Command) error {
|
||||
"pro",
|
||||
"manifests",
|
||||
"license",
|
||||
"mcp",
|
||||
}, cmd.Use) {
|
||||
go version.CheckNewerVersion()
|
||||
}
|
||||
|
||||
@@ -116,6 +116,7 @@ func CreateDefaultConfig() ConfigStruct {
|
||||
},
|
||||
CanUpdateTargetedPods: true,
|
||||
CanStopTrafficCapturing: true,
|
||||
CanControlDissection: true,
|
||||
ShowAdminConsoleLink: true,
|
||||
},
|
||||
},
|
||||
@@ -137,6 +138,10 @@ func CreateDefaultConfig() ConfigStruct {
|
||||
"ldap",
|
||||
"radius",
|
||||
"diameter",
|
||||
"udp-flow",
|
||||
"tcp-flow",
|
||||
"udp-conn",
|
||||
"tcp-conn",
|
||||
},
|
||||
PortMapping: configStructs.PortMapping{
|
||||
HTTP: []uint16{80, 443, 8080},
|
||||
@@ -148,6 +153,13 @@ func CreateDefaultConfig() ConfigStruct {
|
||||
},
|
||||
Dashboard: configStructs.DashboardConfig{
|
||||
CompleteStreamingEnabled: true,
|
||||
ClusterWideMapEnabled: false,
|
||||
},
|
||||
Capture: configStructs.CaptureConfig{
|
||||
Dissection: configStructs.DissectionConfig{
|
||||
Enabled: true,
|
||||
StopAfter: "5m",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -171,10 +183,11 @@ type ConfigStruct struct {
|
||||
DumpLogs bool `yaml:"dumpLogs" json:"dumpLogs" default:"false"`
|
||||
HeadlessMode bool `yaml:"headless" json:"headless" default:"false"`
|
||||
License string `yaml:"license" json:"license" default:""`
|
||||
CloudApiUrl string `yaml:"cloudApiUrl" json:"cloudApiUrl" default:"https://api.kubeshark.com"`
|
||||
CloudLicenseEnabled bool `yaml:"cloudLicenseEnabled" json:"cloudLicenseEnabled" default:"true"`
|
||||
AiAssistantEnabled bool `yaml:"aiAssistantEnabled" json:"aiAssistantEnabled" default:"true"`
|
||||
DemoModeEnabled bool `yaml:"demoModeEnabled" json:"demoModeEnabled" default:"false"`
|
||||
SupportChatEnabled bool `yaml:"supportChatEnabled" json:"supportChatEnabled" default:"false"`
|
||||
BetaEnabled bool `yaml:"betaEnabled" json:"betaEnabled" default:"false"`
|
||||
InternetConnectivity bool `yaml:"internetConnectivity" json:"internetConnectivity" default:"true"`
|
||||
Scripting configStructs.ScriptingConfig `yaml:"scripting" json:"scripting"`
|
||||
Manifests ManifestsConfig `yaml:"manifests,omitempty" json:"manifests,omitempty"`
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
)
|
||||
|
||||
type ScriptingConfig struct {
|
||||
Enabled bool `yaml:"enabled" json:"enabled" default:"false"`
|
||||
Env map[string]interface{} `yaml:"env" json:"env" default:"{}"`
|
||||
Source string `yaml:"source" json:"source" default:""`
|
||||
Sources []string `yaml:"sources" json:"sources" default:"[]"`
|
||||
|
||||
@@ -45,6 +45,7 @@ const (
|
||||
PcapDumpEnabled = "enabled"
|
||||
PcapTime = "time"
|
||||
WatchdogEnabled = "watchdogEnabled"
|
||||
HelmChartPathLabel = "release-helmChartPath"
|
||||
)
|
||||
|
||||
type ResourceLimitsHub struct {
|
||||
@@ -167,6 +168,7 @@ type Role struct {
|
||||
ScriptingPermissions ScriptingPermissions `yaml:"scriptingPermissions" json:"scriptingPermissions"`
|
||||
CanUpdateTargetedPods bool `yaml:"canUpdateTargetedPods" json:"canUpdateTargetedPods" default:"false"`
|
||||
CanStopTrafficCapturing bool `yaml:"canStopTrafficCapturing" json:"canStopTrafficCapturing" default:"false"`
|
||||
CanControlDissection bool `yaml:"canControlDissection" json:"canControlDissection" default:"false"`
|
||||
ShowAdminConsoleLink bool `yaml:"showAdminConsoleLink" json:"showAdminConsoleLink" default:"false"`
|
||||
}
|
||||
|
||||
@@ -188,6 +190,7 @@ type IngressConfig struct {
|
||||
Enabled bool `yaml:"enabled" json:"enabled" default:"false"`
|
||||
ClassName string `yaml:"className" json:"className" default:""`
|
||||
Host string `yaml:"host" json:"host" default:"ks.svc.cluster.local"`
|
||||
Path string `yaml:"path" json:"path" default:"/"`
|
||||
TLS []networking.IngressTLS `yaml:"tls" json:"tls" default:"[]"`
|
||||
Annotations map[string]string `yaml:"annotations" json:"annotations" default:"{}"`
|
||||
}
|
||||
@@ -197,7 +200,9 @@ type RoutingConfig struct {
|
||||
}
|
||||
|
||||
type DashboardConfig struct {
|
||||
CompleteStreamingEnabled bool `yaml:"completeStreamingEnabled" json:"completeStreamingEnabled" default:"true"`
|
||||
StreamingType string `yaml:"streamingType" json:"streamingType" default:"connect-rpc"`
|
||||
CompleteStreamingEnabled bool `yaml:"completeStreamingEnabled" json:"completeStreamingEnabled" default:"true"`
|
||||
ClusterWideMapEnabled bool `yaml:"clusterWideMapEnabled" json:"clusterWideMapEnabled" default:"false"`
|
||||
}
|
||||
|
||||
type FrontRoutingConfig struct {
|
||||
@@ -205,9 +210,10 @@ type FrontRoutingConfig struct {
|
||||
}
|
||||
|
||||
type ReleaseConfig struct {
|
||||
Repo string `yaml:"repo" json:"repo" default:"https://helm.kubeshark.co"`
|
||||
Name string `yaml:"name" json:"name" default:"kubeshark"`
|
||||
Namespace string `yaml:"namespace" json:"namespace" default:"default"`
|
||||
Repo string `yaml:"repo" json:"repo" default:"https://helm.kubeshark.com"`
|
||||
Name string `yaml:"name" json:"name" default:"kubeshark"`
|
||||
Namespace string `yaml:"namespace" json:"namespace" default:"default"`
|
||||
HelmChartPath string `yaml:"helmChartPath" json:"helmChartPath" default:""`
|
||||
}
|
||||
|
||||
type TelemetryConfig struct {
|
||||
@@ -249,8 +255,8 @@ type PprofConfig struct {
|
||||
|
||||
type MiscConfig struct {
|
||||
JsonTTL string `yaml:"jsonTTL" json:"jsonTTL" default:"5m"`
|
||||
PcapTTL string `yaml:"pcapTTL" json:"pcapTTL" default:"10s"`
|
||||
PcapErrorTTL string `yaml:"pcapErrorTTL" json:"pcapErrorTTL" default:"60s"`
|
||||
PcapTTL string `yaml:"pcapTTL" json:"pcapTTL" default:"0"`
|
||||
PcapErrorTTL string `yaml:"pcapErrorTTL" json:"pcapErrorTTL" default:"0"`
|
||||
TrafficSampleRate int `yaml:"trafficSampleRate" json:"trafficSampleRate" default:"100"`
|
||||
TcpStreamChannelTimeoutMs int `yaml:"tcpStreamChannelTimeoutMs" json:"tcpStreamChannelTimeoutMs" default:"10000"`
|
||||
TcpStreamChannelTimeoutShow bool `yaml:"tcpStreamChannelTimeoutShow" json:"tcpStreamChannelTimeoutShow" default:"false"`
|
||||
@@ -258,10 +264,12 @@ type MiscConfig struct {
|
||||
DuplicateTimeframe string `yaml:"duplicateTimeframe" json:"duplicateTimeframe" default:"200ms"`
|
||||
DetectDuplicates bool `yaml:"detectDuplicates" json:"detectDuplicates" default:"false"`
|
||||
StaleTimeoutSeconds int `yaml:"staleTimeoutSeconds" json:"staleTimeoutSeconds" default:"30"`
|
||||
TcpFlowTimeout int `yaml:"tcpFlowTimeout" json:"tcpFlowTimeout" default:"1200"`
|
||||
UdpFlowTimeout int `yaml:"udpFlowTimeout" json:"udpFlowTimeout" default:"1200"`
|
||||
}
|
||||
|
||||
type PcapDumpConfig struct {
|
||||
PcapDumpEnabled bool `yaml:"enabled" json:"enabled" default:"true"`
|
||||
PcapDumpEnabled bool `yaml:"enabled" json:"enabled" default:"false"`
|
||||
PcapTimeInterval string `yaml:"timeInterval" json:"timeInterval" default:"1m"`
|
||||
PcapMaxTime string `yaml:"maxTime" json:"maxTime" default:"1h"`
|
||||
PcapMaxSize string `yaml:"maxSize" json:"maxSize" default:"500MB"`
|
||||
@@ -298,6 +306,69 @@ type SeLinuxOptionsConfig struct {
|
||||
User string `yaml:"user" json:"user"`
|
||||
}
|
||||
|
||||
type RawCaptureConfig struct {
|
||||
Enabled bool `yaml:"enabled" json:"enabled" default:"true"`
|
||||
StorageSize string `yaml:"storageSize" json:"storageSize" default:"1Gi"`
|
||||
}
|
||||
|
||||
type SnapshotsLocalConfig struct {
|
||||
StorageClass string `yaml:"storageClass" json:"storageClass" default:""`
|
||||
StorageSize string `yaml:"storageSize" json:"storageSize" default:"20Gi"`
|
||||
}
|
||||
|
||||
type SnapshotsCloudS3Config struct {
|
||||
Bucket string `yaml:"bucket" json:"bucket" default:""`
|
||||
Region string `yaml:"region" json:"region" default:""`
|
||||
AccessKey string `yaml:"accessKey" json:"accessKey" default:""`
|
||||
SecretKey string `yaml:"secretKey" json:"secretKey" default:""`
|
||||
RoleArn string `yaml:"roleArn" json:"roleArn" default:""`
|
||||
ExternalId string `yaml:"externalId" json:"externalId" default:""`
|
||||
}
|
||||
|
||||
type SnapshotsCloudAzblobConfig struct {
|
||||
StorageAccount string `yaml:"storageAccount" json:"storageAccount" default:""`
|
||||
Container string `yaml:"container" json:"container" default:""`
|
||||
StorageKey string `yaml:"storageKey" json:"storageKey" default:""`
|
||||
}
|
||||
|
||||
type SnapshotsCloudGCSConfig struct {
|
||||
Bucket string `yaml:"bucket" json:"bucket" default:""`
|
||||
Project string `yaml:"project" json:"project" default:""`
|
||||
CredentialsJson string `yaml:"credentialsJson" json:"credentialsJson" default:""`
|
||||
}
|
||||
|
||||
type SnapshotsCloudConfig struct {
|
||||
Provider string `yaml:"provider" json:"provider" default:""`
|
||||
Prefix string `yaml:"prefix" json:"prefix" default:""`
|
||||
ConfigMaps []string `yaml:"configMaps" json:"configMaps" default:"[]"`
|
||||
Secrets []string `yaml:"secrets" json:"secrets" default:"[]"`
|
||||
S3 SnapshotsCloudS3Config `yaml:"s3" json:"s3"`
|
||||
Azblob SnapshotsCloudAzblobConfig `yaml:"azblob" json:"azblob"`
|
||||
GCS SnapshotsCloudGCSConfig `yaml:"gcs" json:"gcs"`
|
||||
}
|
||||
|
||||
type SnapshotsConfig struct {
|
||||
Local SnapshotsLocalConfig `yaml:"local" json:"local"`
|
||||
Cloud SnapshotsCloudConfig `yaml:"cloud" json:"cloud"`
|
||||
}
|
||||
|
||||
type DelayedDissectionConfig struct {
|
||||
CPU string `yaml:"cpu" json:"cpu" default:"1"`
|
||||
Memory string `yaml:"memory" json:"memory" default:"4Gi"`
|
||||
}
|
||||
|
||||
type DissectionConfig struct {
|
||||
Enabled bool `yaml:"enabled" json:"enabled" default:"true"`
|
||||
StopAfter string `yaml:"stopAfter" json:"stopAfter" default:"5m"`
|
||||
}
|
||||
|
||||
type CaptureConfig struct {
|
||||
Dissection DissectionConfig `yaml:"dissection" json:"dissection"`
|
||||
CaptureSelf bool `yaml:"captureSelf" json:"captureSelf" default:"false"`
|
||||
Raw RawCaptureConfig `yaml:"raw" json:"raw"`
|
||||
DbMaxSize string `yaml:"dbMaxSize" json:"dbMaxSize" default:"500Mi"`
|
||||
}
|
||||
|
||||
type TapConfig struct {
|
||||
Docker DockerConfig `yaml:"docker" json:"docker"`
|
||||
Proxy ProxyConfig `yaml:"proxy" json:"proxy"`
|
||||
@@ -305,14 +376,16 @@ type TapConfig struct {
|
||||
Namespaces []string `yaml:"namespaces" json:"namespaces" default:"[]"`
|
||||
ExcludedNamespaces []string `yaml:"excludedNamespaces" json:"excludedNamespaces" default:"[]"`
|
||||
BpfOverride string `yaml:"bpfOverride" json:"bpfOverride" default:""`
|
||||
Stopped bool `yaml:"stopped" json:"stopped" default:"false"`
|
||||
Capture CaptureConfig `yaml:"capture" json:"capture"`
|
||||
DelayedDissection DelayedDissectionConfig `yaml:"delayedDissection" json:"delayedDissection"`
|
||||
Snapshots SnapshotsConfig `yaml:"snapshots" json:"snapshots"`
|
||||
Release ReleaseConfig `yaml:"release" json:"release"`
|
||||
PersistentStorage bool `yaml:"persistentStorage" json:"persistentStorage" default:"false"`
|
||||
PersistentStorageStatic bool `yaml:"persistentStorageStatic" json:"persistentStorageStatic" default:"false"`
|
||||
PersistentStoragePvcVolumeMode string `yaml:"persistentStoragePvcVolumeMode" json:"persistentStoragePvcVolumeMode" default:"FileSystem"`
|
||||
EfsFileSytemIdAndPath string `yaml:"efsFileSytemIdAndPath" json:"efsFileSytemIdAndPath" default:""`
|
||||
Secrets []string `yaml:"secrets" json:"secrets" default:"[]"`
|
||||
StorageLimit string `yaml:"storageLimit" json:"storageLimit" default:"5Gi"`
|
||||
StorageLimit string `yaml:"storageLimit" json:"storageLimit" default:"10Gi"`
|
||||
StorageClass string `yaml:"storageClass" json:"storageClass" default:"standard"`
|
||||
DryRun bool `yaml:"dryRun" json:"dryRun" default:"false"`
|
||||
DnsConfig DnsConfig `yaml:"dns" json:"dns"`
|
||||
@@ -328,6 +401,7 @@ type TapConfig struct {
|
||||
Tolerations TolerationsConfig `yaml:"tolerations" json:"tolerations" default:"{}"`
|
||||
Auth AuthConfig `yaml:"auth" json:"auth"`
|
||||
Ingress IngressConfig `yaml:"ingress" json:"ingress"`
|
||||
PriorityClass string `yaml:"priorityClass" json:"priorityClass" default:""`
|
||||
Routing RoutingConfig `yaml:"routing" json:"routing"`
|
||||
IPv6 bool `yaml:"ipv6" json:"ipv6" default:"true"`
|
||||
Debug bool `yaml:"debug" json:"debug" default:"false"`
|
||||
@@ -337,8 +411,7 @@ type TapConfig struct {
|
||||
Watchdog WatchdogConfig `yaml:"watchdog" json:"watchdog"`
|
||||
Gitops GitopsConfig `yaml:"gitops" json:"gitops"`
|
||||
Sentry SentryConfig `yaml:"sentry" json:"sentry"`
|
||||
DefaultFilter string `yaml:"defaultFilter" json:"defaultFilter" default:"!dns and !error"`
|
||||
LiveConfigMapChangesDisabled bool `yaml:"liveConfigMapChangesDisabled" json:"liveConfigMapChangesDisabled" default:"false"`
|
||||
DefaultFilter string `yaml:"defaultFilter" json:"defaultFilter" default:""`
|
||||
GlobalFilter string `yaml:"globalFilter" json:"globalFilter" default:""`
|
||||
EnabledDissectors []string `yaml:"enabledDissectors" json:"enabledDissectors"`
|
||||
PortMapping PortMapping `yaml:"portMapping" json:"portMapping"`
|
||||
@@ -348,6 +421,7 @@ type TapConfig struct {
|
||||
Misc MiscConfig `yaml:"misc" json:"misc"`
|
||||
SecurityContext SecurityContextConfig `yaml:"securityContext" json:"securityContext"`
|
||||
MountBpf bool `yaml:"mountBpf" json:"mountBpf" default:"true"`
|
||||
HostNetwork bool `yaml:"hostNetwork" json:"hostNetwork" default:"true"`
|
||||
}
|
||||
|
||||
func (config *TapConfig) PodRegex() *regexp.Regexp {
|
||||
|
||||
@@ -12,7 +12,7 @@ import (
|
||||
k8serrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
)
|
||||
|
||||
// formatError wraps error with a detailed message that is meant for the user.
|
||||
// FormatError wraps error with a detailed message that is meant for the user.
|
||||
// While the errors are meant to be displayed, they are not meant to be exported as classes outsite of CLI.
|
||||
func FormatError(err error) error {
|
||||
var errorNew error
|
||||
|
||||
168
go.mod
168
go.mod
@@ -1,161 +1,143 @@
|
||||
module github.com/kubeshark/kubeshark
|
||||
|
||||
go 1.21.1
|
||||
go 1.24.0
|
||||
|
||||
toolchain go1.24.5
|
||||
|
||||
require (
|
||||
github.com/creasty/defaults v1.5.2
|
||||
github.com/fsnotify/fsnotify v1.6.0
|
||||
github.com/fsnotify/fsnotify v1.7.0
|
||||
github.com/go-cmd/cmd v1.4.3
|
||||
github.com/goccy/go-yaml v1.11.2
|
||||
github.com/google/go-github/v37 v37.0.0
|
||||
github.com/gorilla/websocket v1.4.2
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674
|
||||
github.com/kubeshark/gopacket v1.1.39
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/rivo/tview v0.0.0-20240818110301-fd649dbf1223
|
||||
github.com/robertkrimen/otto v0.2.1
|
||||
github.com/rs/zerolog v1.28.0
|
||||
github.com/spf13/cobra v1.7.0
|
||||
github.com/spf13/pflag v1.0.5
|
||||
github.com/spf13/cobra v1.9.1
|
||||
github.com/spf13/pflag v1.0.6
|
||||
github.com/tanqiangyes/grep-go v0.0.0-20220515134556-b36bff9c3d8e
|
||||
helm.sh/helm/v3 v3.12.0
|
||||
k8s.io/api v0.28.3
|
||||
k8s.io/apimachinery v0.28.3
|
||||
k8s.io/client-go v0.28.3
|
||||
k8s.io/kubectl v0.28.3
|
||||
helm.sh/helm/v3 v3.18.4
|
||||
k8s.io/api v0.33.2
|
||||
k8s.io/apimachinery v0.33.2
|
||||
k8s.io/client-go v0.33.2
|
||||
k8s.io/kubectl v0.33.2
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
|
||||
github.com/BurntSushi/toml v1.2.1 // indirect
|
||||
dario.cat/mergo v1.0.1 // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
|
||||
github.com/BurntSushi/toml v1.5.0 // indirect
|
||||
github.com/MakeNowJust/heredoc v1.0.0 // indirect
|
||||
github.com/Masterminds/goutils v1.1.1 // indirect
|
||||
github.com/Masterminds/semver/v3 v3.2.0 // indirect
|
||||
github.com/Masterminds/sprig/v3 v3.2.3 // indirect
|
||||
github.com/Masterminds/squirrel v1.5.3 // indirect
|
||||
github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||
github.com/Masterminds/semver/v3 v3.3.0 // indirect
|
||||
github.com/Masterminds/sprig/v3 v3.3.0 // indirect
|
||||
github.com/Masterminds/squirrel v1.5.4 // indirect
|
||||
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
|
||||
github.com/blang/semver/v4 v4.0.0 // indirect
|
||||
github.com/chai2010/gettext-go v1.0.2 // indirect
|
||||
github.com/containerd/containerd v1.7.0 // indirect
|
||||
github.com/cyphar/filepath-securejoin v0.2.3 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/docker/cli v20.10.21+incompatible // indirect
|
||||
github.com/docker/distribution v2.8.2+incompatible // indirect
|
||||
github.com/docker/docker v20.10.24+incompatible // indirect
|
||||
github.com/docker/docker-credential-helpers v0.7.0 // indirect
|
||||
github.com/docker/go-connections v0.4.0 // indirect
|
||||
github.com/docker/go-metrics v0.0.1 // indirect
|
||||
github.com/docker/go-units v0.5.0 // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.10.1 // indirect
|
||||
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
|
||||
github.com/containerd/containerd v1.7.27 // indirect
|
||||
github.com/containerd/errdefs v0.3.0 // indirect
|
||||
github.com/containerd/log v0.1.0 // indirect
|
||||
github.com/containerd/platforms v0.2.1 // indirect
|
||||
github.com/cyphar/filepath-securejoin v0.4.1 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
|
||||
github.com/evanphx/json-patch v5.9.11+incompatible // indirect
|
||||
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
|
||||
github.com/fatih/color v1.13.0 // indirect
|
||||
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
|
||||
github.com/gdamore/encoding v1.0.0 // indirect
|
||||
github.com/gdamore/tcell/v2 v2.7.1 // indirect
|
||||
github.com/go-errors/errors v1.4.2 // indirect
|
||||
github.com/go-gorp/gorp/v3 v3.0.5 // indirect
|
||||
github.com/go-logr/logr v1.2.4 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.19.6 // indirect
|
||||
github.com/go-gorp/gorp/v3 v3.1.0 // indirect
|
||||
github.com/go-logr/logr v1.4.2 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.21.0 // indirect
|
||||
github.com/go-openapi/jsonreference v0.20.2 // indirect
|
||||
github.com/go-openapi/swag v0.22.3 // indirect
|
||||
github.com/go-openapi/swag v0.23.0 // indirect
|
||||
github.com/go-playground/validator/v10 v10.14.0 // indirect
|
||||
github.com/gobwas/glob v0.2.3 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/protobuf v1.5.3 // indirect
|
||||
github.com/google/btree v1.0.1 // indirect
|
||||
github.com/google/gnostic-models v0.6.8 // indirect
|
||||
github.com/google/go-cmp v0.5.9 // indirect
|
||||
github.com/google/btree v1.1.3 // indirect
|
||||
github.com/google/gnostic-models v0.6.9 // indirect
|
||||
github.com/google/go-cmp v0.7.0 // indirect
|
||||
github.com/google/go-querystring v1.1.0 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
|
||||
github.com/google/uuid v1.3.0 // indirect
|
||||
github.com/gorilla/mux v1.8.0 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/gosuri/uitable v0.0.4 // indirect
|
||||
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
|
||||
github.com/hashicorp/errwrap v1.1.0 // indirect
|
||||
github.com/hashicorp/go-multierror v1.1.1 // indirect
|
||||
github.com/huandu/xstrings v1.4.0 // indirect
|
||||
github.com/imdario/mergo v0.3.13 // indirect
|
||||
github.com/huandu/xstrings v1.5.0 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/jmoiron/sqlx v1.3.5 // indirect
|
||||
github.com/jmoiron/sqlx v1.4.0 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/klauspost/compress v1.16.0 // indirect
|
||||
github.com/klauspost/compress v1.18.0 // indirect
|
||||
github.com/kubeshark/tracerproto v1.0.0 // indirect
|
||||
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
|
||||
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
|
||||
github.com/lib/pq v1.10.7 // indirect
|
||||
github.com/lib/pq v1.10.9 // indirect
|
||||
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
|
||||
github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||
github.com/mattn/go-isatty v0.0.19 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.15 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
|
||||
github.com/mitchellh/copystructure v1.2.0 // indirect
|
||||
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
|
||||
github.com/mitchellh/reflectwalk v1.0.2 // indirect
|
||||
github.com/moby/locker v1.0.1 // indirect
|
||||
github.com/moby/spdystream v0.2.0 // indirect
|
||||
github.com/moby/term v0.0.0-20221205130635-1aeaba878587 // indirect
|
||||
github.com/moby/spdystream v0.5.0 // indirect
|
||||
github.com/moby/term v0.5.2 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
|
||||
github.com/morikuni/aec v1.0.0 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b // indirect
|
||||
github.com/opencontainers/image-spec v1.1.1 // indirect
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
|
||||
github.com/prometheus/client_golang v1.16.0 // indirect
|
||||
github.com/prometheus/client_model v0.4.0 // indirect
|
||||
github.com/prometheus/common v0.44.0 // indirect
|
||||
github.com/prometheus/procfs v0.10.1 // indirect
|
||||
github.com/rivo/uniseg v0.4.7 // indirect
|
||||
github.com/rubenv/sql-migrate v1.3.1 // indirect
|
||||
github.com/rubenv/sql-migrate v1.8.0 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/shopspring/decimal v1.3.1 // indirect
|
||||
github.com/sirupsen/logrus v1.9.0 // indirect
|
||||
github.com/spf13/cast v1.5.0 // indirect
|
||||
github.com/stretchr/testify v1.8.3 // indirect
|
||||
github.com/shopspring/decimal v1.4.0 // indirect
|
||||
github.com/sirupsen/logrus v1.9.3 // indirect
|
||||
github.com/spf13/cast v1.7.0 // indirect
|
||||
github.com/x448/float16 v0.8.4 // indirect
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
|
||||
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
|
||||
github.com/xlab/treeprint v1.2.0 // indirect
|
||||
go.opentelemetry.io/otel v1.14.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.14.0 // indirect
|
||||
go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect
|
||||
golang.org/x/crypto v0.14.0 // indirect
|
||||
golang.org/x/mod v0.10.0 // indirect
|
||||
golang.org/x/net v0.17.0 // indirect
|
||||
golang.org/x/oauth2 v0.8.0 // indirect
|
||||
golang.org/x/sync v0.2.0 // indirect
|
||||
golang.org/x/sys v0.17.0 // indirect
|
||||
golang.org/x/term v0.17.0 // indirect
|
||||
golang.org/x/text v0.14.0 // indirect
|
||||
golang.org/x/time v0.3.0 // indirect
|
||||
golang.org/x/crypto v0.39.0 // indirect
|
||||
golang.org/x/net v0.40.0 // indirect
|
||||
golang.org/x/oauth2 v0.28.0 // indirect
|
||||
golang.org/x/sync v0.15.0 // indirect
|
||||
golang.org/x/sys v0.33.0 // indirect
|
||||
golang.org/x/term v0.32.0 // indirect
|
||||
golang.org/x/text v0.26.0 // indirect
|
||||
golang.org/x/time v0.9.0 // indirect
|
||||
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect
|
||||
google.golang.org/grpc v1.54.0 // indirect
|
||||
google.golang.org/protobuf v1.30.0 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 // indirect
|
||||
google.golang.org/grpc v1.68.1 // indirect
|
||||
google.golang.org/protobuf v1.36.5 // indirect
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/sourcemap.v1 v1.0.5 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
k8s.io/apiextensions-apiserver v0.27.1 // indirect
|
||||
k8s.io/apiserver v0.27.1 // indirect
|
||||
k8s.io/cli-runtime v0.28.3 // indirect
|
||||
k8s.io/component-base v0.28.3 // indirect
|
||||
k8s.io/klog/v2 v2.100.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect
|
||||
k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 // indirect
|
||||
oras.land/oras-go v1.2.2 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
|
||||
sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 // indirect
|
||||
sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
|
||||
sigs.k8s.io/yaml v1.3.0 // indirect
|
||||
k8s.io/apiextensions-apiserver v0.33.2 // indirect
|
||||
k8s.io/apiserver v0.33.2 // indirect
|
||||
k8s.io/cli-runtime v0.33.2 // indirect
|
||||
k8s.io/component-base v0.33.2 // indirect
|
||||
k8s.io/klog/v2 v2.130.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect
|
||||
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect
|
||||
oras.land/oras-go/v2 v2.6.0 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
|
||||
sigs.k8s.io/kustomize/api v0.19.0 // indirect
|
||||
sigs.k8s.io/kustomize/kyaml v0.19.0 // indirect
|
||||
sigs.k8s.io/randfill v1.0.0 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect
|
||||
sigs.k8s.io/yaml v1.4.0 // indirect
|
||||
)
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
apiVersion: v2
|
||||
name: kubeshark
|
||||
version: "52.7.5"
|
||||
version: "53.1.0"
|
||||
description: The API Traffic Analyzer for Kubernetes
|
||||
home: https://kubeshark.co
|
||||
home: https://kubeshark.com
|
||||
keywords:
|
||||
- kubeshark
|
||||
- packet capture
|
||||
@@ -16,9 +16,9 @@ keywords:
|
||||
- api
|
||||
kubeVersion: '>= 1.16.0-0'
|
||||
maintainers:
|
||||
- email: info@kubeshark.co
|
||||
- email: support@kubeshark.com
|
||||
name: Kubeshark
|
||||
url: https://kubeshark.co
|
||||
url: https://kubeshark.com
|
||||
sources:
|
||||
- https://github.com/kubeshark/kubeshark/tree/master/helm-chart
|
||||
type: application
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
Add the Helm repo for Kubeshark:
|
||||
|
||||
```shell
|
||||
helm repo add kubeshark https://helm.kubeshark.co
|
||||
helm repo add kubeshark https://helm.kubeshark.com
|
||||
```
|
||||
|
||||
then install Kubeshark:
|
||||
@@ -69,7 +69,7 @@ When it's necessary, you can use:
|
||||
--set license=YOUR_LICENSE_GOES_HERE
|
||||
```
|
||||
|
||||
Get your license from Kubeshark's [Admin Console](https://console.kubeshark.co/).
|
||||
Get your license from Kubeshark's [Admin Console](https://console.kubeshark.com/).
|
||||
|
||||
## Installing with Ingress (EKS) enabled
|
||||
|
||||
@@ -112,7 +112,7 @@ Example for overriding image names:
|
||||
|
||||
```yaml
|
||||
docker:
|
||||
overrideImage:
|
||||
overrideImage:
|
||||
worker: docker.io/kubeshark/worker:v52.3.87
|
||||
front: docker.io/kubeshark/front:v52.3.87
|
||||
hub: docker.io/kubeshark/hub:v52.3.87
|
||||
@@ -120,117 +120,145 @@ Example for overriding image names:
|
||||
|
||||
## Configuration
|
||||
|
||||
| Parameter | Description | Default |
|
||||
|-------------------------------------------|-----------------------------------------------|---------------------------------------------------------|
|
||||
| `tap.docker.registry` | Docker registry to pull from | `docker.io/kubeshark` |
|
||||
| `tap.docker.tag` | Tag of the Docker images | `latest` |
|
||||
| `tap.docker.tagLocked` | Lock the Docker image tags to prevent automatic upgrades to the latest branch image version. | `true` |
|
||||
| `tap.docker.tagLocked` | If `false` - use latest minor tag | `true` |
|
||||
| `tap.docker.imagePullPolicy` | Kubernetes image pull policy | `Always` |
|
||||
| `tap.docker.imagePullSecrets` | Kubernetes secrets to pull the images | `[]` |
|
||||
| `tap.docker.overrideImage` | Can be used to directly override image names | `""` |
|
||||
| `tap.docker.overrideTag` | Can be used to override image tags | `""` |
|
||||
| `tap.proxy.hub.srvPort` | Hub server port. Change if already occupied. | `8898` |
|
||||
| `tap.proxy.worker.srvPort` | Worker server port. Change if already occupied.| `48999` |
|
||||
| `tap.proxy.front.port` | Front service port. Change if already occupied.| `8899` |
|
||||
| `tap.proxy.host` | Change to 0.0.0.0 top open up to the world. | `127.0.0.1` |
|
||||
| `tap.regex` | Target (process traffic from) pods that match regex | `.*` |
|
||||
| `tap.namespaces` | Target pods in namespaces | `[]` |
|
||||
| `tap.excludedNamespaces` | Exclude pods in namespaces | `[]` |
|
||||
| `tap.bpfOverride` | When using AF_PACKET as a traffic capture backend, override any existing pod targeting rules and set explicit BPF expression (e.g. `net 0.0.0.0/0`). | `[]` |
|
||||
| `tap.stopped` | Set to `false` to have traffic processing start automatically. When set to `true`, traffic processing is stopped by default, resulting in almost no resource consumption (e.g. Kubeshark is dormant). This property can be dynamically control via the dashboard. | `false` |
|
||||
| `tap.release.repo` | URL of the Helm chart repository | `https://helm.kubeshark.co` |
|
||||
| `tap.release.name` | Helm release name | `kubeshark` |
|
||||
| `tap.release.namespace` | Helm release namespace | `default` |
|
||||
| `tap.persistentStorage` | Use `persistentVolumeClaim` instead of `emptyDir` | `false` |
|
||||
| `tap.persistentStorageStatic` | Use static persistent volume provisioning (explicitly defined `PersistentVolume` ) | `false` |
|
||||
| `tap.persistentStoragePvcVolumeMode` | Set the pvc volume mode (Filesystem\|Block) | `Filesystem` |
|
||||
| `tap.efsFileSytemIdAndPath` | [EFS file system ID and, optionally, subpath and/or access point](https://github.com/kubernetes-sigs/aws-efs-csi-driver/blob/master/examples/kubernetes/access_points/README.md) `<FileSystemId>:<Path>:<AccessPointId>` | "" |
|
||||
| `tap.storageLimit` | Limit of either the `emptyDir` or `persistentVolumeClaim` | `5Gi` |
|
||||
| `tap.storageClass` | Storage class of the `PersistentVolumeClaim` | `standard` |
|
||||
| `tap.dryRun` | Preview of all pods matching the regex, without tapping them | `false` |
|
||||
| `tap.dnsConfig.nameservers` | Nameservers to use for DNS resolution | `[]` |
|
||||
| `tap.dnsConfig.searches` | Search domains to use for DNS resolution | `[]` |
|
||||
| `tap.dnsConfig.options` | DNS options to use for DNS resolution | `[]` |
|
||||
| `tap.resources.hub.limits.cpu` | CPU limit for hub | `""` (no limit) |
|
||||
| `tap.resources.hub.limits.memory` | Memory limit for hub | `5Gi` |
|
||||
| `tap.resources.hub.requests.cpu` | CPU request for hub | `50m` |
|
||||
| `tap.resources.hub.requests.memory` | Memory request for hub | `50Mi` |
|
||||
| `tap.resources.sniffer.limits.cpu` | CPU limit for sniffer | `""` (no limit) |
|
||||
| `tap.resources.sniffer.limits.memory` | Memory limit for sniffer | `3Gi` |
|
||||
| `tap.resources.sniffer.requests.cpu` | CPU request for sniffer | `50m` |
|
||||
| `tap.resources.sniffer.requests.memory` | Memory request for sniffer | `50Mi` |
|
||||
| `tap.resources.tracer.limits.cpu` | CPU limit for tracer | `""` (no limit) |
|
||||
| `tap.resources.tracer.limits.memory` | Memory limit for tracer | `3Gi` |
|
||||
| `tap.resources.tracer.requests.cpu` | CPU request for tracer | `50m` |
|
||||
| `tap.resources.tracer.requests.memory` | Memory request for tracer | `50Mi` |
|
||||
| `tap.probes.hub.initialDelaySeconds` | Initial delay before probing the hub | `15` |
|
||||
| `tap.probes.hub.periodSeconds` | Period between probes for the hub | `10` |
|
||||
| `tap.probes.hub.successThreshold` | Number of successful probes before considering the hub healthy | `1` |
|
||||
| `tap.probes.hub.failureThreshold` | Number of failed probes before considering the hub unhealthy | `3` |
|
||||
| `tap.probes.sniffer.initialDelaySeconds` | Initial delay before probing the sniffer | `15` |
|
||||
| `tap.probes.sniffer.periodSeconds` | Period between probes for the sniffer | `10` |
|
||||
| `tap.probes.sniffer.successThreshold` | Number of successful probes before considering the sniffer healthy | `1` |
|
||||
| `tap.probes.sniffer.failureThreshold` | Number of failed probes before considering the sniffer unhealthy | `3` |
|
||||
| `tap.serviceMesh` | Capture traffic from service meshes like Istio, Linkerd, Consul, etc. | `true` |
|
||||
| `tap.tls` | Capture the encrypted/TLS traffic from cryptography libraries like OpenSSL | `true` |
|
||||
| `tap.disableTlsLog` | Suppress logging for TLS/eBPF | `true` |
|
||||
| `tap.labels` | Kubernetes labels to apply to all Kubeshark resources | `{}` |
|
||||
| `tap.annotations` | Kubernetes annotations to apply to all Kubeshark resources | `{}` |
|
||||
| `tap.nodeSelectorTerms.workers` | Node selector terms for workers components | `[{"matchExpressions":[{"key":"kubernetes.io/os","operator":"In","values":["linux"]}]}]` |
|
||||
| `tap.nodeSelectorTerms.hub` | Node selector terms for hub component | `[{"matchExpressions":[{"key":"kubernetes.io/os","operator":"In","values":["linux"]}]}]` |
|
||||
| `tap.nodeSelectorTerms.front` | Node selector terms for front-end component | `[{"matchExpressions":[{"key":"kubernetes.io/os","operator":"In","values":["linux"]}]}]` |
|
||||
| `tap.tolerations.workers` | Tolerations for workers components | `[ {"operator": "Exists", "effect": "NoExecute"}` |
|
||||
| `tap.tolerations.hub` | Tolerations for hub component | `[]` |
|
||||
| `tap.tolerations.front` | Tolerations for front-end component | `[]` |
|
||||
| `tap.auth.enabled` | Enable authentication | `false` |
|
||||
| `tap.auth.type` | Authentication type (1 option available: `saml`) | `saml` |
|
||||
| `tap.auth.approvedEmails` | List of approved email addresses for authentication | `[]` |
|
||||
| `tap.auth.approvedDomains` | List of approved email domains for authentication | `[]` |
|
||||
| `tap.auth.saml.idpMetadataUrl` | SAML IDP metadata URL <br/>(effective, if `tap.auth.type = saml`) | `` |
|
||||
| `tap.auth.saml.x509crt` | A self-signed X.509 `.cert` contents <br/>(effective, if `tap.auth.type = saml`) | `` |
|
||||
| `tap.auth.saml.x509key` | A self-signed X.509 `.key` contents <br/>(effective, if `tap.auth.type = saml`) | `` |
|
||||
| `tap.auth.saml.roleAttribute` | A SAML attribute name corresponding to user's authorization role <br/>(effective, if `tap.auth.type = saml`) | `role` |
|
||||
| `tap.auth.saml.roles` | A list of SAML authorization roles and their permissions <br/>(effective, if `tap.auth.type = saml`) | `{"admin":{"canDownloadPCAP":true,"canUpdateTargetedPods":true,"canUseScripting":true, "scriptingPermissions":{"canSave":true, "canActivate":true, "canDelete":true}, "canStopTrafficCapturing":true, "filter":"","showAdminConsoleLink":true}}` |
|
||||
| `tap.ingress.enabled` | Enable `Ingress` | `false` |
|
||||
| `tap.ingress.className` | Ingress class name | `""` |
|
||||
| `tap.ingress.host` | Host of the `Ingress` | `ks.svc.cluster.local` |
|
||||
| `tap.ingress.tls` | `Ingress` TLS configuration | `[]` |
|
||||
| `tap.ingress.annotations` | `Ingress` annotations | `{}` |
|
||||
| `tap.routing.front.basePath` | Set this value to serve `front` under specific base path. Example: `/custompath` (forward slash must be present) | `""` |
|
||||
| `tap.ipv6` | Enable IPv6 support for the front-end | `true` |
|
||||
| `tap.debug` | Enable debug mode | `false` |
|
||||
| `tap.telemetry.enabled` | Enable anonymous usage statistics collection | `true` |
|
||||
| `tap.resourceGuard.enabled` | Enable resource guard worker process, which watches RAM/disk usage and enables/disables traffic capture based on available resources | `false` |
|
||||
| `tap.secrets` | List of secrets to be used as source for environment variables (e.g. `kubeshark-license`) | `[]` |
|
||||
| `tap.sentry.enabled` | Enable sending of error logs to Sentry | `true` (only for qualified users) |
|
||||
| `tap.sentry.environment` | Sentry environment to label error logs with | `production` |
|
||||
| `tap.defaultFilter` | Sets the default dashboard KFL filter (e.g. `http`). By default, this value is set to filter out noisy protocols such as DNS, UDP, ICMP and TCP. The user can easily change this, **temporarily**, in the Dashboard. For a permanent change, you should change this value in the `values.yaml` or `config.yaml` file. | `"!dns and !error"` |
|
||||
| `tap.liveConfigMapChangesDisabled` | If set to `true`, all user functionality (scripting, targeting settings, global & default KFL modification, traffic recording, traffic capturing on/off, protocol dissectors) involving dynamic ConfigMap changes from UI will be disabled | `false` |
|
||||
| `tap.globalFilter` | Prepends to any KFL filter and can be used to limit what is visible in the dashboard. For example, `redact("request.headers.Authorization")` will redact the appropriate field. Another example `!dns` will not show any DNS traffic. | `""` |
|
||||
| `tap.metrics.port` | Pod port used to expose Prometheus metrics | `49100` |
|
||||
| `tap.enabledDissectors` | This is an array of strings representing the list of supported protocols. Remove or comment out redundant protocols (e.g., dns).| The default list excludes: `udp` and `tcp` |
|
||||
| Parameter | Description | Default |
|
||||
|-------------------------------------------|-----------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
|
||||
| `tap.docker.registry` | Docker registry to pull from | `docker.io/kubeshark` |
|
||||
| `tap.docker.tag` | Tag of the Docker images | `latest` |
|
||||
| `tap.docker.tagLocked` | Lock the Docker image tags to prevent automatic upgrades to the latest branch image version. | `true` |
|
||||
| `tap.docker.tagLocked` | If `false` - use latest minor tag | `true` |
|
||||
| `tap.docker.imagePullPolicy` | Kubernetes image pull policy | `Always` |
|
||||
| `tap.docker.imagePullSecrets` | Kubernetes secrets to pull the images | `[]` |
|
||||
| `tap.docker.overrideImage` | Can be used to directly override image names | `""` |
|
||||
| `tap.docker.overrideTag` | Can be used to override image tags | `""` |
|
||||
| `tap.proxy.hub.srvPort` | Hub server port. Change if already occupied. | `8898` |
|
||||
| `tap.proxy.worker.srvPort` | Worker server port. Change if already occupied.| `48999` |
|
||||
| `tap.proxy.front.port` | Front service port. Change if already occupied.| `8899` |
|
||||
| `tap.proxy.host` | Change to 0.0.0.0 top open up to the world. | `127.0.0.1` |
|
||||
| `tap.regex` | Target (process traffic from) pods that match regex | `.*` |
|
||||
| `tap.namespaces` | Target pods in namespaces | `[]` |
|
||||
| `tap.excludedNamespaces` | Exclude pods in namespaces | `[]` |
|
||||
| `tap.bpfOverride` | When using AF_PACKET as a traffic capture backend, override any existing pod targeting rules and set explicit BPF expression (e.g. `net 0.0.0.0/0`). | `[]` |
|
||||
| `tap.capture.dissection.enabled` | Set to `true` to have L7 protocol dissection start automatically. When set to `false`, dissection is disabled by default. This property can be dynamically controlled via the dashboard. | `true` |
|
||||
| `tap.capture.dissection.stopAfter` | Set to a duration (e.g. `30s`) to have L7 dissection stop after no activity. | `5m` |
|
||||
| `tap.capture.raw.enabled` | Enable raw capture of packets and syscalls to disk for offline analysis | `true` |
|
||||
| `tap.capture.raw.storageSize` | Maximum storage size for raw capture files (supports K8s quantity format: `1Gi`, `500Mi`, etc.) | `1Gi` |
|
||||
| `tap.capture.captureSelf` | Include Kubeshark's own traffic in capture | `false` |
|
||||
| `tap.capture.dbMaxSize` | Maximum size for capture database (e.g., `4Gi`, `2000Mi`). | `500Mi` |
|
||||
| `tap.snapshots.local.storageClass` | Storage class for local snapshots volume. When empty, uses `emptyDir`. When set, creates a PVC with this storage class | `""` |
|
||||
| `tap.snapshots.local.storageSize` | Storage size for local snapshots volume (supports K8s quantity format: `1Gi`, `500Mi`, etc.) | `20Gi` |
|
||||
| `tap.snapshots.cloud.provider` | Cloud storage provider for snapshots: `s3`, `azblob`, or `gcs`. Empty string disables cloud storage. See [Cloud Storage docs](docs/snapshots_cloud_storage.md). | `""` |
|
||||
| `tap.snapshots.cloud.prefix` | Key prefix in the bucket/container (e.g. `snapshots/`). See [Cloud Storage docs](docs/snapshots_cloud_storage.md). | `""` |
|
||||
| `tap.snapshots.cloud.configMaps` | Names of pre-existing ConfigMaps with cloud storage env vars. Alternative to inline `s3`/`azblob`/`gcs` values below. See [Cloud Storage docs](docs/snapshots_cloud_storage.md). | `[]` |
|
||||
| `tap.snapshots.cloud.secrets` | Names of pre-existing Secrets with cloud storage credentials. Alternative to inline `s3`/`azblob`/`gcs` values below. See [Cloud Storage docs](docs/snapshots_cloud_storage.md). | `[]` |
|
||||
| `tap.snapshots.cloud.s3.bucket` | S3 bucket name. When set, the chart auto-creates a ConfigMap with `SNAPSHOT_AWS_BUCKET`. | `""` |
|
||||
| `tap.snapshots.cloud.s3.region` | AWS region for the S3 bucket. | `""` |
|
||||
| `tap.snapshots.cloud.s3.accessKey` | AWS access key ID. When set, the chart auto-creates a Secret with `SNAPSHOT_AWS_ACCESS_KEY`. | `""` |
|
||||
| `tap.snapshots.cloud.s3.secretKey` | AWS secret access key. When set, the chart auto-creates a Secret with `SNAPSHOT_AWS_SECRET_KEY`. | `""` |
|
||||
| `tap.snapshots.cloud.s3.roleArn` | IAM role ARN to assume via STS for cross-account S3 access. | `""` |
|
||||
| `tap.snapshots.cloud.s3.externalId` | External ID for the STS AssumeRole call. | `""` |
|
||||
| `tap.snapshots.cloud.azblob.storageAccount` | Azure storage account name. When set, the chart auto-creates a ConfigMap with `SNAPSHOT_AZBLOB_STORAGE_ACCOUNT`. | `""` |
|
||||
| `tap.snapshots.cloud.azblob.container` | Azure blob container name. | `""` |
|
||||
| `tap.snapshots.cloud.azblob.storageKey` | Azure storage account access key. When set, the chart auto-creates a Secret with `SNAPSHOT_AZBLOB_STORAGE_KEY`. | `""` |
|
||||
| `tap.snapshots.cloud.gcs.bucket` | GCS bucket name. When set, the chart auto-creates a ConfigMap with `SNAPSHOT_GCS_BUCKET`. | `""` |
|
||||
| `tap.snapshots.cloud.gcs.project` | GCP project ID. | `""` |
|
||||
| `tap.snapshots.cloud.gcs.credentialsJson` | Service account JSON key. When set, the chart auto-creates a Secret with `SNAPSHOT_GCS_CREDENTIALS_JSON`. | `""` |
|
||||
| `tap.delayedDissection.cpu` | CPU allocation for delayed dissection jobs | `1` |
|
||||
| `tap.delayedDissection.memory` | Memory allocation for delayed dissection jobs | `4Gi` |
|
||||
| `tap.release.repo` | URL of the Helm chart repository | `https://helm.kubeshark.com` |
|
||||
| `tap.release.name` | Helm release name | `kubeshark` |
|
||||
| `tap.release.namespace` | Helm release namespace | `default` |
|
||||
| `tap.persistentStorage` | Use `persistentVolumeClaim` instead of `emptyDir` | `false` |
|
||||
| `tap.persistentStorageStatic` | Use static persistent volume provisioning (explicitly defined `PersistentVolume` ) | `false` |
|
||||
| `tap.persistentStoragePvcVolumeMode` | Set the pvc volume mode (Filesystem\|Block) | `Filesystem` |
|
||||
| `tap.efsFileSytemIdAndPath` | [EFS file system ID and, optionally, subpath and/or access point](https://github.com/kubernetes-sigs/aws-efs-csi-driver/blob/master/examples/kubernetes/access_points/README.md) `<FileSystemId>:<Path>:<AccessPointId>` | "" |
|
||||
| `tap.storageLimit` | Limit of either the `emptyDir` or `persistentVolumeClaim` | `10Gi` |
|
||||
| `tap.storageClass` | Storage class of the `PersistentVolumeClaim` | `standard` |
|
||||
| `tap.dryRun` | Preview of all pods matching the regex, without tapping them | `false` |
|
||||
| `tap.dns.nameservers` | Nameservers to use for DNS resolution | `[]` |
|
||||
| `tap.dns.searches` | Search domains to use for DNS resolution | `[]` |
|
||||
| `tap.dns.options` | DNS options to use for DNS resolution | `[]` |
|
||||
| `tap.resources.hub.limits.cpu` | CPU limit for hub | `""` (no limit) |
|
||||
| `tap.resources.hub.limits.memory` | Memory limit for hub | `5Gi` |
|
||||
| `tap.resources.hub.requests.cpu` | CPU request for hub | `50m` |
|
||||
| `tap.resources.hub.requests.memory` | Memory request for hub | `50Mi` |
|
||||
| `tap.resources.sniffer.limits.cpu` | CPU limit for sniffer | `""` (no limit) |
|
||||
| `tap.resources.sniffer.limits.memory` | Memory limit for sniffer | `5Gi` |
|
||||
| `tap.resources.sniffer.requests.cpu` | CPU request for sniffer | `50m` |
|
||||
| `tap.resources.sniffer.requests.memory` | Memory request for sniffer | `50Mi` |
|
||||
| `tap.resources.tracer.limits.cpu` | CPU limit for tracer | `""` (no limit) |
|
||||
| `tap.resources.tracer.limits.memory` | Memory limit for tracer | `5Gi` |
|
||||
| `tap.resources.tracer.requests.cpu` | CPU request for tracer | `50m` |
|
||||
| `tap.resources.tracer.requests.memory` | Memory request for tracer | `50Mi` |
|
||||
| `tap.probes.hub.initialDelaySeconds` | Initial delay before probing the hub | `5` |
|
||||
| `tap.probes.hub.periodSeconds` | Period between probes for the hub | `5` |
|
||||
| `tap.probes.hub.successThreshold` | Number of successful probes before considering the hub healthy | `1` |
|
||||
| `tap.probes.hub.failureThreshold` | Number of failed probes before considering the hub unhealthy | `3` |
|
||||
| `tap.probes.sniffer.initialDelaySeconds` | Initial delay before probing the sniffer | `5` |
|
||||
| `tap.probes.sniffer.periodSeconds` | Period between probes for the sniffer | `5` |
|
||||
| `tap.probes.sniffer.successThreshold` | Number of successful probes before considering the sniffer healthy | `1` |
|
||||
| `tap.probes.sniffer.failureThreshold` | Number of failed probes before considering the sniffer unhealthy | `3` |
|
||||
| `tap.serviceMesh` | Capture traffic from service meshes like Istio, Linkerd, Consul, etc. | `true` |
|
||||
| `tap.tls` | Capture the encrypted/TLS traffic from cryptography libraries like OpenSSL | `true` |
|
||||
| `tap.disableTlsLog` | Suppress logging for TLS/eBPF | `true` |
|
||||
| `tap.labels` | Kubernetes labels to apply to all Kubeshark resources | `{}` |
|
||||
| `tap.annotations` | Kubernetes annotations to apply to all Kubeshark resources | `{}` |
|
||||
| `tap.nodeSelectorTerms.workers` | Node selector terms for workers components | `[{"matchExpressions":[{"key":"kubernetes.io/os","operator":"In","values":["linux"]}]}]` |
|
||||
| `tap.nodeSelectorTerms.hub` | Node selector terms for hub component | `[{"matchExpressions":[{"key":"kubernetes.io/os","operator":"In","values":["linux"]}]}]` |
|
||||
| `tap.nodeSelectorTerms.front` | Node selector terms for front-end component | `[{"matchExpressions":[{"key":"kubernetes.io/os","operator":"In","values":["linux"]}]}]` |
|
||||
| `tap.priorityClass` | Priority class name for Kubeshark components | `""` |
|
||||
| `tap.tolerations.workers` | Tolerations for workers components | `[{"operator": "Exists", "effect": "NoExecute"}]` |
|
||||
| `tap.tolerations.hub` | Tolerations for hub component | `[]` |
|
||||
| `tap.tolerations.front` | Tolerations for front-end component | `[]` |
|
||||
| `tap.auth.enabled` | Enable authentication | `false` |
|
||||
| `tap.auth.type` | Authentication type (1 option available: `saml`) | `saml` |
|
||||
| `tap.auth.approvedEmails` | List of approved email addresses for authentication | `[]` |
|
||||
| `tap.auth.approvedDomains` | List of approved email domains for authentication | `[]` |
|
||||
| `tap.auth.saml.idpMetadataUrl` | SAML IDP metadata URL <br/>(effective, if `tap.auth.type = saml`) | `` |
|
||||
| `tap.auth.saml.x509crt` | A self-signed X.509 `.cert` contents <br/>(effective, if `tap.auth.type = saml`) | `` |
|
||||
| `tap.auth.saml.x509key` | A self-signed X.509 `.key` contents <br/>(effective, if `tap.auth.type = saml`) | `` |
|
||||
| `tap.auth.saml.roleAttribute` | A SAML attribute name corresponding to user's authorization role <br/>(effective, if `tap.auth.type = saml`) | `role` |
|
||||
| `tap.auth.saml.roles` | A list of SAML authorization roles and their permissions <br/>(effective, if `tap.auth.type = saml`) | `{"admin":{"canDownloadPCAP":true,"canUpdateTargetedPods":true,"canUseScripting":true, "scriptingPermissions":{"canSave":true, "canActivate":true, "canDelete":true}, "canStopTrafficCapturing":true, "canControlDissection":true, "filter":"","showAdminConsoleLink":true}}` |
|
||||
| `tap.ingress.enabled` | Enable `Ingress` | `false` |
|
||||
| `tap.ingress.className` | Ingress class name | `""` |
|
||||
| `tap.ingress.host` | Host of the `Ingress` | `ks.svc.cluster.local` |
|
||||
| `tap.ingress.tls` | `Ingress` TLS configuration | `[]` |
|
||||
| `tap.ingress.annotations` | `Ingress` annotations | `{}` |
|
||||
| `tap.routing.front.basePath` | Set this value to serve `front` under specific base path. Example: `/custompath` (forward slash must be present) | `""` |
|
||||
| `tap.ipv6` | Enable IPv6 support for the front-end | `true` |
|
||||
| `tap.debug` | Enable debug mode | `false` |
|
||||
| `tap.telemetry.enabled` | Enable anonymous usage statistics collection | `true` |
|
||||
| `tap.resourceGuard.enabled` | Enable resource guard worker process, which watches RAM/disk usage and enables/disables traffic capture based on available resources | `false` |
|
||||
| `tap.secrets` | List of secrets to be used as source for environment variables (e.g. `kubeshark-license`) | `[]` |
|
||||
| `tap.sentry.enabled` | Enable sending of error logs to Sentry | `false` |
|
||||
| `tap.sentry.environment` | Sentry environment to label error logs with | `production` |
|
||||
| `tap.defaultFilter` | Sets the default dashboard KFL filter (e.g. `http`). By default, this value is set to filter out noisy protocols such as DNS, UDP, ICMP and TCP. The user can easily change this, **temporarily**, in the Dashboard. For a permanent change, you should change this value in the `values.yaml` or `config.yaml` file. | `""` |
|
||||
| `tap.globalFilter` | Prepends to any KFL filter and can be used to limit what is visible in the dashboard. For example, `redact("request.headers.Authorization")` will redact the appropriate field. Another example `!dns` will not show any DNS traffic. | `""` |
|
||||
| `tap.metrics.port` | Pod port used to expose Prometheus metrics | `49100` |
|
||||
| `tap.enabledDissectors` | This is an array of strings representing the list of supported protocols. Remove or comment out redundant protocols (e.g., dns). | The default list excludes: `udp` and `tcp` |
|
||||
| `tap.mountBpf` | BPF filesystem needs to be mounted for eBPF to work properly. This helm value determines whether Kubeshark will attempt to mount the filesystem. This option is not required if the filesystem is already mounted. | `true` |
|
||||
| `tap.gitops.enabled` | Enable GitOps functionality. This will allow you to use GitOps to manage your Kubeshark configuration. | `false` |
|
||||
| `logs.file` | Logs dump path | `""` |
|
||||
| `pcapdump.enabled` | Enable recording of all traffic captured according to other parameters. Whatever Kubeshark captures, considering pod targeting rules, will be stored in pcap files ready to be viewed by tools | `true` |
|
||||
| `pcapdump.maxTime` | The time window into the past that will be stored. Older traffic will be discarded. | `2h` |
|
||||
| `pcapdump.maxSize` | The maximum storage size the PCAP files will consume. Old files that cause to surpass storage consumption will get discarded. | `500MB` |
|
||||
| `kube.configPath` | Path to the `kubeconfig` file (`$HOME/.kube/config`) | `""` |
|
||||
| `kube.context` | Kubernetes context to use for the deployment | `""` |
|
||||
| `dumpLogs` | Enable dumping of logs | `false` |
|
||||
| `headless` | Enable running in headless mode | `false` |
|
||||
| `license` | License key for the Pro/Enterprise edition | `""` |
|
||||
| `scripting.env` | Environment variables for the scripting | `{}` |
|
||||
| `scripting.source` | Source directory of the scripts | `""` |
|
||||
| `scripting.watchScripts` | Enable watch mode for the scripts in source directory | `true` |
|
||||
| `timezone` | IANA time zone applied to time shown in the front-end | `""` (local time zone applies) |
|
||||
| `supportChatEnabled` | Enable real-time support chat channel based on Intercom | `false` |
|
||||
| `internetConnectivity` | Turns off API requests that are dependent on Internet connectivity such as `telemetry` and `online-support`. | `true` |
|
||||
|
||||
KernelMapping pairs kernel versions with a
|
||||
DriverContainer image. Kernel versions can be matched
|
||||
literally or using a regular expression
|
||||
| `tap.hostNetwork` | Enable host network mode for worker DaemonSet pods. When enabled, worker pods use the host's network namespace for direct network access. | `true` |
|
||||
| `tap.packetCapture` | Packet capture backend: `best`, `af_packet`, or `pf_ring` | `best` |
|
||||
| `tap.misc.trafficSampleRate` | Percentage of traffic to process (0-100) | `100` |
|
||||
| `tap.misc.tcpStreamChannelTimeoutMs` | Timeout in milliseconds for TCP stream channel | `10000` |
|
||||
| `tap.gitops.enabled` | Enable GitOps functionality. This will allow you to use GitOps to manage your Kubeshark configuration. | `false` |
|
||||
| `tap.misc.tcpFlowTimeout` | TCP flow aggregation timeout in seconds. Controls how long the worker waits before finalizing a TCP flow. | `1200` |
|
||||
| `tap.misc.udpFlowTimeout` | UDP flow aggregation timeout in seconds. Controls how long the worker waits before finalizing a UDP flow. | `1200` |
|
||||
| `logs.file` | Logs dump path | `""` |
|
||||
| `pcapdump.enabled` | Enable recording of all traffic captured according to other parameters. Whatever Kubeshark captures, considering pod targeting rules, will be stored in pcap files ready to be viewed by tools | `false` |
|
||||
| `pcapdump.maxTime` | The time window into the past that will be stored. Older traffic will be discarded. | `2h` |
|
||||
| `pcapdump.maxSize` | The maximum storage size the PCAP files will consume. Old files that cause to surpass storage consumption will get discarded. | `500MB` |
|
||||
| `kube.configPath` | Path to the `kubeconfig` file (`$HOME/.kube/config`) | `""` |
|
||||
| `kube.context` | Kubernetes context to use for the deployment | `""` |
|
||||
| `dumpLogs` | Enable dumping of logs | `false` |
|
||||
| `headless` | Enable running in headless mode | `false` |
|
||||
| `license` | License key for the Pro/Enterprise edition | `""` |
|
||||
| `scripting.enabled` | Enables scripting | `false` |
|
||||
| `scripting.env` | Environment variables for the scripting | `{}` |
|
||||
| `scripting.source` | Source directory of the scripts | `""` |
|
||||
| `scripting.watchScripts` | Enable watch mode for the scripts in source directory | `true` |
|
||||
| `timezone` | IANA time zone applied to time shown in the front-end | `""` (local time zone applies) |
|
||||
| `supportChatEnabled` | Enable real-time support chat channel based on Intercom | `false` |
|
||||
| `internetConnectivity` | Turns off API requests that are dependent on Internet connectivity such as `telemetry` and `online-support`. | `true` |
|
||||
|
||||
# Installing with SAML enabled
|
||||
|
||||
@@ -300,9 +328,9 @@ tap:
|
||||
|
||||
# Installing with Dex OIDC authentication
|
||||
|
||||
[**Click here to see full docs**](https://docs.kubeshark.co/en/saml#installing-with-oidc-enabled-dex-idp).
|
||||
[**Click here to see full docs**](https://docs.kubeshark.com/en/saml#installing-with-oidc-enabled-dex-idp).
|
||||
|
||||
Choose this option, if **you already have a running instance** of Dex in your cluster &
|
||||
Choose this option, if **you already have a running instance** of Dex in your cluster &
|
||||
you want to set up Dex OIDC authentication for Kubeshark users.
|
||||
|
||||
Kubeshark supports authentication using [Dex - A Federated OpenID Connect Provider](https://dexidp.io/).
|
||||
@@ -344,7 +372,7 @@ Add these helm values to set up OIDC authentication powered by your Dex IdP:
|
||||
```yaml
|
||||
# values.yaml
|
||||
|
||||
tap:
|
||||
tap:
|
||||
auth:
|
||||
enabled: true
|
||||
type: dex
|
||||
@@ -374,7 +402,7 @@ Once you run `helm install kubeshark kubeshark/kubeshark -f ./values.yaml`, Kube
|
||||
|
||||
# Installing your own Dex IdP along with Kubeshark
|
||||
|
||||
Choose this option, if **you need to deploy an instance of Dex IdP** along with Kubeshark &
|
||||
Choose this option, if **you need to deploy an instance of Dex IdP** along with Kubeshark &
|
||||
set up Dex OIDC authentication for Kubeshark users.
|
||||
|
||||
Depending on Ingress enabled/disabled, your Dex configuration might differ.
|
||||
@@ -410,10 +438,10 @@ The following Dex settings will have these values:
|
||||
|
||||
Please, make sure to prepare the following things first.
|
||||
|
||||
1. Choose **[Connectors](https://dexidp.io/docs/connectors/)** to enable in Dex IdP.
|
||||
1. Choose **[Connectors](https://dexidp.io/docs/connectors/)** to enable in Dex IdP.
|
||||
- i.e. how many kind of "Log in with ..." options you'd like to offer your users
|
||||
- You will need to specify connectors in `tap.auth.dexConfig.connectors`
|
||||
2. Choose type of **[Storage](https://dexidp.io/docs/configuration/storage/)** to use in Dex IdP.
|
||||
2. Choose type of **[Storage](https://dexidp.io/docs/configuration/storage/)** to use in Dex IdP.
|
||||
- You will need to specify storage settings in `tap.auth.dexConfig.storage`
|
||||
- default: `memory`
|
||||
3. Decide on the OAuth2 `?state=` param expiration time:
|
||||
@@ -445,28 +473,28 @@ Make sure to:
|
||||
|
||||
Helm `values.yaml`:
|
||||
```yaml
|
||||
tap:
|
||||
tap:
|
||||
auth:
|
||||
enabled: true
|
||||
type: dex
|
||||
dexOidc:
|
||||
issuer: https://<your-ingress-hostname>/dex
|
||||
|
||||
|
||||
# Client ID/secret must be taken from `tap.auth.dexConfig.staticClients -> id/secret`
|
||||
clientId: kubeshark
|
||||
clientSecret: create your own client password
|
||||
|
||||
|
||||
refreshTokenLifetime: "3960h" # 165 days
|
||||
oauth2StateParamExpiry: "10m"
|
||||
bypassSslCaCheck: false
|
||||
dexConfig:
|
||||
# This field is REQUIRED!
|
||||
#
|
||||
#
|
||||
# The base path of Dex and the external name of the OpenID Connect service.
|
||||
# This is the canonical URL that all clients MUST use to refer to Dex. If a
|
||||
# path is provided, Dex's HTTP service will listen at a non-root URL.
|
||||
issuer: https://<your-ingress-hostname>/dex
|
||||
|
||||
|
||||
# Expiration configuration for tokens, signing keys, etc.
|
||||
expiry:
|
||||
refreshTokens:
|
||||
@@ -474,15 +502,15 @@ tap:
|
||||
absoluteLifetime: "3960h" # 165 days
|
||||
|
||||
# This field is REQUIRED!
|
||||
#
|
||||
#
|
||||
# The storage configuration determines where Dex stores its state.
|
||||
# See the documentation (https://dexidp.io/docs/storage/) for further information.
|
||||
storage:
|
||||
type: memory
|
||||
|
||||
# This field is REQUIRED!
|
||||
#
|
||||
# Attention:
|
||||
#
|
||||
# Attention:
|
||||
# Do not change this field and its values.
|
||||
# This field is required for internal Kubeshark-to-Dex communication.
|
||||
#
|
||||
@@ -492,7 +520,7 @@ tap:
|
||||
|
||||
# This field is REQUIRED!
|
||||
#
|
||||
# Attention:
|
||||
# Attention:
|
||||
# Do not change this field and its values.
|
||||
# This field is required for internal Kubeshark-to-Dex communication.
|
||||
#
|
||||
@@ -518,10 +546,10 @@ tap:
|
||||
# Connectors are used to authenticate users against upstream identity providers.
|
||||
# See the documentation (https://dexidp.io/docs/connectors/) for further information.
|
||||
#
|
||||
# Attention:
|
||||
# When you define a new connector, `config.redirectURI` must be:
|
||||
# Attention:
|
||||
# When you define a new connector, `config.redirectURI` must be:
|
||||
# https://<your-ingress-hostname>/dex/callback
|
||||
#
|
||||
#
|
||||
# Example with Google connector:
|
||||
# connectors:
|
||||
# - type: google
|
||||
|
||||
583
helm-chart/docs/snapshots_cloud_storage.md
Normal file
583
helm-chart/docs/snapshots_cloud_storage.md
Normal file
@@ -0,0 +1,583 @@
|
||||
# Cloud Storage for Snapshots
|
||||
|
||||
Kubeshark can upload and download snapshots to cloud object storage, enabling cross-cluster sharing, backup/restore, and long-term retention.
|
||||
|
||||
Supported providers: **Amazon S3** (`s3`), **Azure Blob Storage** (`azblob`), and **Google Cloud Storage** (`gcs`).
|
||||
|
||||
## Helm Values
|
||||
|
||||
```yaml
|
||||
tap:
|
||||
snapshots:
|
||||
cloud:
|
||||
provider: "" # "s3", "azblob", or "gcs" (empty = disabled)
|
||||
prefix: "" # key prefix in the bucket/container (e.g. "snapshots/")
|
||||
configMaps: [] # names of pre-existing ConfigMaps with cloud config env vars
|
||||
secrets: [] # names of pre-existing Secrets with cloud credentials
|
||||
s3:
|
||||
bucket: ""
|
||||
region: ""
|
||||
accessKey: ""
|
||||
secretKey: ""
|
||||
roleArn: ""
|
||||
externalId: ""
|
||||
azblob:
|
||||
storageAccount: ""
|
||||
container: ""
|
||||
storageKey: ""
|
||||
gcs:
|
||||
bucket: ""
|
||||
project: ""
|
||||
credentialsJson: ""
|
||||
```
|
||||
|
||||
- `provider` selects which cloud backend to use. Leave empty to disable cloud storage.
|
||||
- `configMaps` and `secrets` are lists of names of existing ConfigMap/Secret resources. They are mounted as `envFrom` on the hub pod, injecting all their keys as environment variables.
|
||||
|
||||
### Inline Values (Alternative to External ConfigMaps/Secrets)
|
||||
|
||||
Instead of creating ConfigMap and Secret resources manually, you can set cloud storage configuration directly in `values.yaml` or via `--set` flags. The Helm chart will automatically create the necessary ConfigMap and Secret resources.
|
||||
|
||||
Both approaches can be used together — inline values are additive to external `configMaps`/`secrets` references.
|
||||
|
||||
---
|
||||
|
||||
## Amazon S3
|
||||
|
||||
### Environment Variables
|
||||
|
||||
| Variable | Required | Description |
|
||||
|----------|----------|-------------|
|
||||
| `SNAPSHOT_AWS_BUCKET` | Yes | S3 bucket name |
|
||||
| `SNAPSHOT_AWS_REGION` | No | AWS region (uses SDK default if empty) |
|
||||
| `SNAPSHOT_AWS_ACCESS_KEY` | No | Static access key ID (empty = use default credential chain) |
|
||||
| `SNAPSHOT_AWS_SECRET_KEY` | No | Static secret access key |
|
||||
| `SNAPSHOT_AWS_ROLE_ARN` | No | IAM role ARN to assume via STS (for cross-account access) |
|
||||
| `SNAPSHOT_AWS_EXTERNAL_ID` | No | External ID for the STS AssumeRole call |
|
||||
| `SNAPSHOT_CLOUD_PREFIX` | No | Key prefix in the bucket (e.g. `snapshots/`) |
|
||||
|
||||
### Authentication Methods
|
||||
|
||||
Credentials are resolved in this order:
|
||||
|
||||
1. **Static credentials** -- If `SNAPSHOT_AWS_ACCESS_KEY` is set, static credentials are used directly.
|
||||
2. **STS AssumeRole** -- If `SNAPSHOT_AWS_ROLE_ARN` is also set, the static (or default) credentials are used to assume the given IAM role. This is useful for cross-account S3 access.
|
||||
3. **AWS default credential chain** -- When no static credentials are provided, the SDK default chain is used:
|
||||
- **IRSA** (EKS service account token) -- recommended for production on EKS
|
||||
- EC2 instance profile
|
||||
- Standard AWS environment variables (`AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, etc.)
|
||||
- Shared credentials file (`~/.aws/credentials`)
|
||||
|
||||
The provider validates bucket access on startup via `HeadBucket`. If the bucket is inaccessible, the hub will fail to start.
|
||||
|
||||
### Example: Inline Values (simplest approach)
|
||||
|
||||
```yaml
|
||||
tap:
|
||||
snapshots:
|
||||
cloud:
|
||||
provider: "s3"
|
||||
s3:
|
||||
bucket: my-kubeshark-snapshots
|
||||
region: us-east-1
|
||||
```
|
||||
|
||||
Or with static credentials via `--set`:
|
||||
|
||||
```bash
|
||||
helm install kubeshark kubeshark/kubeshark \
|
||||
--set tap.snapshots.cloud.provider=s3 \
|
||||
--set tap.snapshots.cloud.s3.bucket=my-kubeshark-snapshots \
|
||||
--set tap.snapshots.cloud.s3.region=us-east-1 \
|
||||
--set tap.snapshots.cloud.s3.accessKey=AKIA... \
|
||||
--set tap.snapshots.cloud.s3.secretKey=wJal...
|
||||
```
|
||||
|
||||
### Example: IRSA (recommended for EKS)
|
||||
|
||||
[IAM Roles for Service Accounts (IRSA)](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html) lets EKS pods assume an IAM role without static credentials. EKS injects a short-lived token into the pod automatically.
|
||||
|
||||
**Prerequisites:**
|
||||
|
||||
1. Your EKS cluster must have an [OIDC provider](https://docs.aws.amazon.com/eks/latest/userguide/enable-iam-roles-for-service-accounts.html) associated with it.
|
||||
2. An IAM role with a trust policy that allows the Kubeshark service account to assume it.
|
||||
|
||||
**Step 1 — Create an IAM policy scoped to your bucket (save it as `bucket-policy.json`; it is attached in Step 2):**
|
||||
|
||||
```json
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": [
|
||||
"s3:GetObject",
|
||||
"s3:PutObject",
|
||||
"s3:DeleteObject",
|
||||
"s3:GetObjectVersion",
|
||||
"s3:DeleteObjectVersion",
|
||||
"s3:ListBucket",
|
||||
"s3:ListBucketVersions",
|
||||
"s3:GetBucketLocation",
|
||||
"s3:GetBucketVersioning"
|
||||
],
|
||||
"Resource": [
|
||||
"arn:aws:s3:::my-kubeshark-snapshots",
|
||||
"arn:aws:s3:::my-kubeshark-snapshots/*"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
> For read-only access, remove `s3:PutObject`, `s3:DeleteObject`, and `s3:DeleteObjectVersion`.
|
||||
|
||||
**Step 2 — Create an IAM role with IRSA trust policy:**
|
||||
|
||||
```bash
|
||||
# Get your cluster's OIDC provider URL
|
||||
OIDC_PROVIDER=$(aws eks describe-cluster --name CLUSTER_NAME \
|
||||
--query "cluster.identity.oidc.issuer" --output text | sed 's|https://||')
|
||||
|
||||
# Create a trust policy
|
||||
# The default K8s SA name is "<release-name>-service-account" (e.g. "kubeshark-service-account")
|
||||
cat > trust-policy.json <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Principal": {
|
||||
"Federated": "arn:aws:iam::ACCOUNT_ID:oidc-provider/${OIDC_PROVIDER}"
|
||||
},
|
||||
"Action": "sts:AssumeRoleWithWebIdentity",
|
||||
"Condition": {
|
||||
"StringEquals": {
|
||||
"${OIDC_PROVIDER}:sub": "system:serviceaccount:NAMESPACE:kubeshark-service-account",
|
||||
"${OIDC_PROVIDER}:aud": "sts.amazonaws.com"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
|
||||
# Create the role and attach your policy
|
||||
aws iam create-role \
|
||||
--role-name KubesharkS3Role \
|
||||
--assume-role-policy-document file://trust-policy.json
|
||||
|
||||
aws iam put-role-policy \
|
||||
--role-name KubesharkS3Role \
|
||||
--policy-name KubesharkSnapshotsBucketAccess \
|
||||
--policy-document file://bucket-policy.json
|
||||
```
|
||||
|
||||
**Step 3 — Create a ConfigMap with bucket configuration:**
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: kubeshark-s3-config
|
||||
data:
|
||||
SNAPSHOT_AWS_BUCKET: my-kubeshark-snapshots
|
||||
SNAPSHOT_AWS_REGION: us-east-1
|
||||
```
|
||||
|
||||
**Step 4 — Set Helm values with `tap.annotations` to annotate the service account:**
|
||||
|
||||
```yaml
|
||||
tap:
|
||||
annotations:
|
||||
eks.amazonaws.com/role-arn: arn:aws:iam::ACCOUNT_ID:role/KubesharkS3Role
|
||||
snapshots:
|
||||
cloud:
|
||||
provider: "s3"
|
||||
configMaps:
|
||||
- kubeshark-s3-config
|
||||
```
|
||||
|
||||
Or via `--set`:
|
||||
|
||||
```bash
|
||||
helm install kubeshark kubeshark/kubeshark \
|
||||
--set tap.snapshots.cloud.provider=s3 \
|
||||
--set tap.snapshots.cloud.s3.bucket=my-kubeshark-snapshots \
|
||||
--set tap.snapshots.cloud.s3.region=us-east-1 \
|
||||
--set tap.annotations."eks\.amazonaws\.com/role-arn"=arn:aws:iam::ACCOUNT_ID:role/KubesharkS3Role
|
||||
```
|
||||
|
||||
No `accessKey`/`secretKey` is needed — EKS injects credentials automatically via the IRSA token.
|
||||
|
||||
### Example: Static Credentials
|
||||
|
||||
Create a Secret with credentials:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: kubeshark-s3-creds
|
||||
type: Opaque
|
||||
stringData:
|
||||
SNAPSHOT_AWS_ACCESS_KEY: AKIA...
|
||||
SNAPSHOT_AWS_SECRET_KEY: wJal...
|
||||
```
|
||||
|
||||
Create a ConfigMap with bucket configuration:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: kubeshark-s3-config
|
||||
data:
|
||||
SNAPSHOT_AWS_BUCKET: my-kubeshark-snapshots
|
||||
SNAPSHOT_AWS_REGION: us-east-1
|
||||
```
|
||||
|
||||
Set Helm values:
|
||||
|
||||
```yaml
|
||||
tap:
|
||||
snapshots:
|
||||
cloud:
|
||||
provider: "s3"
|
||||
configMaps:
|
||||
- kubeshark-s3-config
|
||||
secrets:
|
||||
- kubeshark-s3-creds
|
||||
```
|
||||
|
||||
### Example: Cross-Account Access via AssumeRole
|
||||
|
||||
Add the role ARN to your ConfigMap:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: kubeshark-s3-config
|
||||
data:
|
||||
SNAPSHOT_AWS_BUCKET: other-account-bucket
|
||||
SNAPSHOT_AWS_REGION: eu-west-1
|
||||
SNAPSHOT_AWS_ROLE_ARN: arn:aws:iam::123456789012:role/KubesharkCrossAccountRole
|
||||
SNAPSHOT_AWS_EXTERNAL_ID: my-external-id # optional, if required by the trust policy
|
||||
```
|
||||
|
||||
The hub will first authenticate using its own credentials (IRSA, static, or default chain), then assume the specified role to access the bucket.
|
||||
|
||||
---
|
||||
|
||||
## Azure Blob Storage
|
||||
|
||||
### Environment Variables
|
||||
|
||||
| Variable | Required | Description |
|
||||
|----------|----------|-------------|
|
||||
| `SNAPSHOT_AZBLOB_STORAGE_ACCOUNT` | Yes | Azure storage account name |
|
||||
| `SNAPSHOT_AZBLOB_CONTAINER` | Yes | Blob container name |
|
||||
| `SNAPSHOT_AZBLOB_STORAGE_KEY` | No | Storage account access key (empty = use DefaultAzureCredential) |
|
||||
| `SNAPSHOT_CLOUD_PREFIX` | No | Key prefix in the container (e.g. `snapshots/`) |
|
||||
|
||||
### Authentication Methods
|
||||
|
||||
Credentials are resolved in this order:
|
||||
|
||||
1. **Shared Key** -- If `SNAPSHOT_AZBLOB_STORAGE_KEY` is set, the storage account key is used directly.
|
||||
2. **DefaultAzureCredential** -- When no storage key is provided, the Azure SDK default credential chain is used:
|
||||
- **Workload Identity** (AKS pod identity) -- recommended for production on AKS
|
||||
- Managed Identity (system or user-assigned)
|
||||
- Azure CLI credentials
|
||||
- Environment variables (`AZURE_CLIENT_ID`, `AZURE_TENANT_ID`, `AZURE_CLIENT_SECRET`)
|
||||
|
||||
The provider validates container access on startup via `GetProperties`. If the container is inaccessible, the hub will fail to start.
|
||||
|
||||
### Example: Inline Values
|
||||
|
||||
```yaml
|
||||
tap:
|
||||
snapshots:
|
||||
cloud:
|
||||
provider: "azblob"
|
||||
azblob:
|
||||
storageAccount: mykubesharksa
|
||||
container: snapshots
|
||||
storageKey: "base64-encoded-storage-key..." # optional, omit for DefaultAzureCredential
|
||||
```
|
||||
|
||||
### Example: Workload Identity (recommended for AKS)
|
||||
|
||||
Create a ConfigMap with storage configuration:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: kubeshark-azblob-config
|
||||
data:
|
||||
SNAPSHOT_AZBLOB_STORAGE_ACCOUNT: mykubesharksa
|
||||
SNAPSHOT_AZBLOB_CONTAINER: snapshots
|
||||
```
|
||||
|
||||
Set Helm values:
|
||||
|
||||
```yaml
|
||||
tap:
|
||||
snapshots:
|
||||
cloud:
|
||||
provider: "azblob"
|
||||
configMaps:
|
||||
- kubeshark-azblob-config
|
||||
```
|
||||
|
||||
The hub pod's service account must be configured for AKS Workload Identity with a managed identity that has the **Storage Blob Data Contributor** role on the container.
|
||||
|
||||
### Example: Storage Account Key
|
||||
|
||||
Create a Secret with the storage key:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: kubeshark-azblob-creds
|
||||
type: Opaque
|
||||
stringData:
|
||||
SNAPSHOT_AZBLOB_STORAGE_KEY: "base64-encoded-storage-key..."
|
||||
```
|
||||
|
||||
Create a ConfigMap with storage configuration:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: kubeshark-azblob-config
|
||||
data:
|
||||
SNAPSHOT_AZBLOB_STORAGE_ACCOUNT: mykubesharksa
|
||||
SNAPSHOT_AZBLOB_CONTAINER: snapshots
|
||||
```
|
||||
|
||||
Set Helm values:
|
||||
|
||||
```yaml
|
||||
tap:
|
||||
snapshots:
|
||||
cloud:
|
||||
provider: "azblob"
|
||||
configMaps:
|
||||
- kubeshark-azblob-config
|
||||
secrets:
|
||||
- kubeshark-azblob-creds
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Google Cloud Storage
|
||||
|
||||
### Environment Variables
|
||||
|
||||
| Variable | Required | Description |
|
||||
|----------|----------|-------------|
|
||||
| `SNAPSHOT_GCS_BUCKET` | Yes | GCS bucket name |
|
||||
| `SNAPSHOT_GCS_PROJECT` | No | GCP project ID |
|
||||
| `SNAPSHOT_GCS_CREDENTIALS_JSON` | No | Service account JSON key (empty = use Application Default Credentials) |
|
||||
| `SNAPSHOT_CLOUD_PREFIX` | No | Key prefix in the bucket (e.g. `snapshots/`) |
|
||||
|
||||
### Authentication Methods
|
||||
|
||||
Credentials are resolved in this order:
|
||||
|
||||
1. **Service Account JSON Key** -- If `SNAPSHOT_GCS_CREDENTIALS_JSON` is set, the provided JSON key is used directly.
|
||||
2. **Application Default Credentials** -- When no JSON key is provided, the GCP SDK default credential chain is used:
|
||||
- **Workload Identity** (GKE pod identity) -- recommended for production on GKE
|
||||
- GCE instance metadata (Compute Engine default service account)
|
||||
- Standard GCP environment variables (`GOOGLE_APPLICATION_CREDENTIALS`)
|
||||
- `gcloud` CLI credentials
|
||||
|
||||
The provider validates bucket access on startup via `Bucket.Attrs()`. If the bucket is inaccessible, the hub will fail to start.
|
||||
|
||||
### Required IAM Permissions
|
||||
|
||||
The service account needs different IAM roles depending on the access level:
|
||||
|
||||
**Read-only** (download, list, and sync snapshots from cloud):
|
||||
|
||||
| Role | Permissions provided | Purpose |
|
||||
|------|---------------------|---------|
|
||||
| `roles/storage.legacyBucketReader` | `storage.buckets.get`, `storage.objects.list` | Hub startup (bucket validation) + listing snapshots |
|
||||
| `roles/storage.objectViewer` | `storage.objects.get`, `storage.objects.list` | Downloading snapshots, checking existence, reading metadata |
|
||||
|
||||
```bash
|
||||
gcloud storage buckets add-iam-policy-binding gs://BUCKET_NAME \
|
||||
--member="serviceAccount:SA_EMAIL" \
|
||||
--role="roles/storage.legacyBucketReader"
|
||||
gcloud storage buckets add-iam-policy-binding gs://BUCKET_NAME \
|
||||
--member="serviceAccount:SA_EMAIL" \
|
||||
--role="roles/storage.objectViewer"
|
||||
```
|
||||
|
||||
**Read-write** (upload and delete snapshots in addition to read):
|
||||
|
||||
Add `roles/storage.objectAdmin` instead of `roles/storage.objectViewer` to also grant `storage.objects.create` and `storage.objects.delete`:
|
||||
|
||||
| Role | Permissions provided | Purpose |
|
||||
|------|---------------------|---------|
|
||||
| `roles/storage.legacyBucketReader` | `storage.buckets.get`, `storage.objects.list` | Hub startup (bucket validation) + listing snapshots |
|
||||
| `roles/storage.objectAdmin` | `storage.objects.*` | Full object CRUD (upload, download, delete, list, metadata) |
|
||||
|
||||
```bash
|
||||
gcloud storage buckets add-iam-policy-binding gs://BUCKET_NAME \
|
||||
--member="serviceAccount:SA_EMAIL" \
|
||||
--role="roles/storage.legacyBucketReader"
|
||||
gcloud storage buckets add-iam-policy-binding gs://BUCKET_NAME \
|
||||
--member="serviceAccount:SA_EMAIL" \
|
||||
--role="roles/storage.objectAdmin"
|
||||
```
|
||||
|
||||
### Example: Inline Values (simplest approach)
|
||||
|
||||
```yaml
|
||||
tap:
|
||||
snapshots:
|
||||
cloud:
|
||||
provider: "gcs"
|
||||
gcs:
|
||||
bucket: my-kubeshark-snapshots
|
||||
project: my-gcp-project
|
||||
```
|
||||
|
||||
Or with a service account key via `--set`:
|
||||
|
||||
```bash
|
||||
helm install kubeshark kubeshark/kubeshark \
|
||||
--set tap.snapshots.cloud.provider=gcs \
|
||||
--set tap.snapshots.cloud.gcs.bucket=my-kubeshark-snapshots \
|
||||
--set tap.snapshots.cloud.gcs.project=my-gcp-project \
|
||||
--set-file tap.snapshots.cloud.gcs.credentialsJson=service-account.json
|
||||
```
|
||||
|
||||
### Example: Workload Identity (recommended for GKE)
|
||||
|
||||
Create a ConfigMap with bucket configuration:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: kubeshark-gcs-config
|
||||
data:
|
||||
SNAPSHOT_GCS_BUCKET: my-kubeshark-snapshots
|
||||
SNAPSHOT_GCS_PROJECT: my-gcp-project
|
||||
```
|
||||
|
||||
Set Helm values:
|
||||
|
||||
```yaml
|
||||
tap:
|
||||
snapshots:
|
||||
cloud:
|
||||
provider: "gcs"
|
||||
configMaps:
|
||||
- kubeshark-gcs-config
|
||||
```
|
||||
|
||||
Configure GKE Workload Identity to allow the Kubernetes service account to impersonate the GCP service account:
|
||||
|
||||
```bash
|
||||
# Ensure the GKE cluster has Workload Identity enabled
|
||||
# (--workload-pool=PROJECT_ID.svc.id.goog at cluster creation)
|
||||
|
||||
# Create a GCP service account (if not already created)
|
||||
gcloud iam service-accounts create kubeshark-gcs \
|
||||
--display-name="Kubeshark GCS Snapshots"
|
||||
|
||||
# Grant bucket access (read-write — see Required IAM Permissions above)
|
||||
gcloud storage buckets add-iam-policy-binding gs://BUCKET_NAME \
|
||||
--member="serviceAccount:kubeshark-gcs@PROJECT_ID.iam.gserviceaccount.com" \
|
||||
--role="roles/storage.legacyBucketReader"
|
||||
gcloud storage buckets add-iam-policy-binding gs://BUCKET_NAME \
|
||||
--member="serviceAccount:kubeshark-gcs@PROJECT_ID.iam.gserviceaccount.com" \
|
||||
--role="roles/storage.objectAdmin"
|
||||
|
||||
# Allow the K8s service account to impersonate the GCP service account
|
||||
# Note: the K8s SA name is "<release-name>-service-account" (default: "kubeshark-service-account")
|
||||
gcloud iam service-accounts add-iam-policy-binding \
|
||||
kubeshark-gcs@PROJECT_ID.iam.gserviceaccount.com \
|
||||
--role="roles/iam.workloadIdentityUser" \
|
||||
--member="serviceAccount:PROJECT_ID.svc.id.goog[NAMESPACE/kubeshark-service-account]"
|
||||
```
|
||||
|
||||
Set Helm values — the `tap.annotations` field adds the Workload Identity annotation to the service account:
|
||||
|
||||
```yaml
|
||||
tap:
|
||||
annotations:
|
||||
iam.gke.io/gcp-service-account: kubeshark-gcs@PROJECT_ID.iam.gserviceaccount.com
|
||||
snapshots:
|
||||
cloud:
|
||||
provider: "gcs"
|
||||
configMaps:
|
||||
- kubeshark-gcs-config
|
||||
```
|
||||
|
||||
Or via `--set`:
|
||||
|
||||
```bash
|
||||
helm install kubeshark kubeshark/kubeshark \
|
||||
--set tap.snapshots.cloud.provider=gcs \
|
||||
--set tap.snapshots.cloud.gcs.bucket=BUCKET_NAME \
|
||||
--set tap.snapshots.cloud.gcs.project=PROJECT_ID \
|
||||
--set tap.annotations."iam\.gke\.io/gcp-service-account"=kubeshark-gcs@PROJECT_ID.iam.gserviceaccount.com
|
||||
```
|
||||
|
||||
No `credentialsJson` secret is needed — GKE injects credentials automatically via the Workload Identity metadata server.
|
||||
|
||||
### Example: Service Account Key
|
||||
|
||||
Create a Secret with the service account JSON key:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: kubeshark-gcs-creds
|
||||
type: Opaque
|
||||
stringData:
|
||||
SNAPSHOT_GCS_CREDENTIALS_JSON: |
|
||||
{
|
||||
"type": "service_account",
|
||||
"project_id": "my-gcp-project",
|
||||
"private_key_id": "...",
|
||||
"private_key": "-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----\n",
|
||||
"client_email": "kubeshark@my-gcp-project.iam.gserviceaccount.com",
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
Create a ConfigMap with bucket configuration:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: kubeshark-gcs-config
|
||||
data:
|
||||
SNAPSHOT_GCS_BUCKET: my-kubeshark-snapshots
|
||||
SNAPSHOT_GCS_PROJECT: my-gcp-project
|
||||
```
|
||||
|
||||
Set Helm values:
|
||||
|
||||
```yaml
|
||||
tap:
|
||||
snapshots:
|
||||
cloud:
|
||||
provider: "gcs"
|
||||
configMaps:
|
||||
- kubeshark-gcs-config
|
||||
secrets:
|
||||
- kubeshark-gcs-creds
|
||||
```
|
||||
@@ -4,9 +4,15 @@ kind: ServiceAccount
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
annotations:
|
||||
{{- if .Values.tap.annotations }}
|
||||
annotations:
|
||||
{{- toYaml .Values.tap.annotations | nindent 4 }}
|
||||
{{- end }}
|
||||
name: {{ include "kubeshark.serviceAccountName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
{{- if .Values.tap.docker.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- range .Values.tap.docker.imagePullSecrets }}
|
||||
- name: {{ . }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
@@ -4,8 +4,8 @@ kind: ClusterRole
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
annotations:
|
||||
{{- if .Values.tap.annotations }}
|
||||
annotations:
|
||||
{{- toYaml .Values.tap.annotations | nindent 4 }}
|
||||
{{- end }}
|
||||
name: kubeshark-cluster-role-{{ .Release.Namespace }}
|
||||
@@ -85,4 +85,10 @@ rules:
|
||||
- pods/log
|
||||
verbs:
|
||||
- create
|
||||
- get
|
||||
- get
|
||||
- apiGroups:
|
||||
- batch
|
||||
resources:
|
||||
- jobs
|
||||
verbs:
|
||||
- "*"
|
||||
@@ -4,8 +4,8 @@ kind: ClusterRoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
annotations:
|
||||
{{- if .Values.tap.annotations }}
|
||||
annotations:
|
||||
{{- toYaml .Values.tap.annotations | nindent 4 }}
|
||||
{{- end }}
|
||||
name: kubeshark-cluster-role-binding-{{ .Release.Namespace }}
|
||||
|
||||
@@ -3,10 +3,10 @@ apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app.kubeshark.co/app: hub
|
||||
app.kubeshark.com/app: hub
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
annotations:
|
||||
{{- if .Values.tap.annotations }}
|
||||
annotations:
|
||||
{{- toYaml .Values.tap.annotations | nindent 4 }}
|
||||
{{- end }}
|
||||
name: {{ include "kubeshark.name" . }}-hub
|
||||
@@ -15,16 +15,19 @@ spec:
|
||||
replicas: 1 # Set the desired number of replicas
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubeshark.co/app: hub
|
||||
app.kubeshark.com/app: hub
|
||||
{{- include "kubeshark.selectorLabels" . | nindent 6 }}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubeshark.co/app: hub
|
||||
app.kubeshark.com/app: hub
|
||||
{{- include "kubeshark.labels" . | nindent 8 }}
|
||||
spec:
|
||||
dnsPolicy: ClusterFirstWithHostNet
|
||||
serviceAccountName: {{ include "kubeshark.serviceAccountName" . }}
|
||||
{{- if .Values.tap.priorityClass }}
|
||||
priorityClassName: {{ .Values.tap.priorityClass | quote }}
|
||||
{{- end }}
|
||||
containers:
|
||||
- name: hub
|
||||
command:
|
||||
@@ -33,15 +36,59 @@ spec:
|
||||
- "8080"
|
||||
- -loglevel
|
||||
- '{{ .Values.logLevel | default "warning" }}'
|
||||
- -capture-stop-after
|
||||
- "{{ if hasKey .Values.tap.capture.dissection "stopAfter" }}{{ .Values.tap.capture.dissection.stopAfter }}{{ else }}5m{{ end }}"
|
||||
- -snapshot-size-limit
|
||||
- '{{ .Values.tap.snapshots.local.storageSize }}'
|
||||
- -dissector-image
|
||||
{{- if .Values.tap.docker.overrideImage.worker }}
|
||||
- '{{ .Values.tap.docker.overrideImage.worker }}'
|
||||
{{- else if .Values.tap.docker.overrideTag.worker }}
|
||||
- '{{ .Values.tap.docker.registry }}/worker:{{ .Values.tap.docker.overrideTag.worker }}'
|
||||
{{- else }}
|
||||
- '{{ .Values.tap.docker.registry }}/worker:{{ not (eq .Values.tap.docker.tag "") | ternary .Values.tap.docker.tag (include "kubeshark.defaultVersion" .) }}'
|
||||
{{- end }}
|
||||
{{- if .Values.tap.delayedDissection.cpu }}
|
||||
- -dissector-cpu
|
||||
- '{{ .Values.tap.delayedDissection.cpu }}'
|
||||
{{- end }}
|
||||
{{- if .Values.tap.delayedDissection.memory }}
|
||||
- -dissector-memory
|
||||
- '{{ .Values.tap.delayedDissection.memory }}'
|
||||
{{- end }}
|
||||
{{- if .Values.tap.gitops.enabled }}
|
||||
- -gitops
|
||||
{{- end }}
|
||||
{{- if .Values.tap.secrets }}
|
||||
- -cloud-api-url
|
||||
- '{{ .Values.cloudApiUrl }}'
|
||||
{{- if .Values.tap.snapshots.cloud.provider }}
|
||||
- -cloud-storage-provider
|
||||
- '{{ .Values.tap.snapshots.cloud.provider }}'
|
||||
{{- end }}
|
||||
{{- $hasInlineConfig := or .Values.tap.snapshots.cloud.prefix .Values.tap.snapshots.cloud.s3.bucket .Values.tap.snapshots.cloud.s3.region .Values.tap.snapshots.cloud.s3.roleArn .Values.tap.snapshots.cloud.s3.externalId .Values.tap.snapshots.cloud.azblob.storageAccount .Values.tap.snapshots.cloud.azblob.container .Values.tap.snapshots.cloud.gcs.bucket .Values.tap.snapshots.cloud.gcs.project }}
|
||||
{{- $hasInlineSecrets := or .Values.tap.snapshots.cloud.s3.accessKey .Values.tap.snapshots.cloud.s3.secretKey .Values.tap.snapshots.cloud.azblob.storageKey .Values.tap.snapshots.cloud.gcs.credentialsJson }}
|
||||
{{- if or .Values.tap.secrets .Values.tap.snapshots.cloud.configMaps .Values.tap.snapshots.cloud.secrets $hasInlineConfig $hasInlineSecrets }}
|
||||
envFrom:
|
||||
{{- range .Values.tap.secrets }}
|
||||
- secretRef:
|
||||
name: {{ . }}
|
||||
{{- end }}
|
||||
{{- range .Values.tap.snapshots.cloud.configMaps }}
|
||||
- configMapRef:
|
||||
name: {{ . }}
|
||||
{{- end }}
|
||||
{{- range .Values.tap.snapshots.cloud.secrets }}
|
||||
- secretRef:
|
||||
name: {{ . }}
|
||||
{{- end }}
|
||||
{{- if $hasInlineConfig }}
|
||||
- configMapRef:
|
||||
name: {{ include "kubeshark.name" . }}-cloud-config
|
||||
{{- end }}
|
||||
{{- if $hasInlineSecrets }}
|
||||
- secretRef:
|
||||
name: {{ include "kubeshark.name" . }}-cloud-secret
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
env:
|
||||
- name: POD_NAME
|
||||
@@ -56,8 +103,6 @@ spec:
|
||||
value: '{{ (include "sentry.enabled" .) }}'
|
||||
- name: SENTRY_ENVIRONMENT
|
||||
value: '{{ .Values.tap.sentry.environment }}'
|
||||
- name: KUBESHARK_CLOUD_API_URL
|
||||
value: 'https://api.kubeshark.co'
|
||||
- name: PROFILING_ENABLED
|
||||
value: '{{ .Values.tap.pprof.enabled }}'
|
||||
{{- if .Values.tap.docker.overrideImage.hub }}
|
||||
@@ -94,19 +139,15 @@ spec:
|
||||
{{ if ne (toString .Values.tap.resources.hub.requests.cpu) "0" }}
|
||||
cpu: {{ .Values.tap.resources.hub.requests.cpu }}
|
||||
{{ end }}
|
||||
{{ if ne (toString .Values.tap.resources.hub.requests.memor) "0" }}
|
||||
{{ if ne (toString .Values.tap.resources.hub.requests.memory) "0" }}
|
||||
memory: {{ .Values.tap.resources.hub.requests.memory }}
|
||||
{{ end }}
|
||||
volumeMounts:
|
||||
- name: saml-x509-volume
|
||||
mountPath: "/etc/saml/x509"
|
||||
readOnly: true
|
||||
{{- if .Values.tap.docker.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- range .Values.tap.docker.imagePullSecrets }}
|
||||
- name: {{ . }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
- name: snapshots-volume
|
||||
mountPath: "/app/data/snapshots"
|
||||
{{- if gt (len .Values.tap.nodeSelectorTerms.hub) 0}}
|
||||
affinity:
|
||||
nodeAffinity:
|
||||
@@ -168,3 +209,11 @@ spec:
|
||||
items:
|
||||
- key: AUTH_SAML_X509_KEY
|
||||
path: kubeshark.key
|
||||
- name: snapshots-volume
|
||||
{{- if .Values.tap.snapshots.local.storageClass }}
|
||||
persistentVolumeClaim:
|
||||
claimName: {{ include "kubeshark.name" . }}-snapshots-pvc
|
||||
{{- else }}
|
||||
emptyDir:
|
||||
sizeLimit: {{ .Values.tap.snapshots.local.storageSize }}
|
||||
{{- end }}
|
||||
|
||||
@@ -3,10 +3,10 @@ apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
app.kubeshark.co/app: hub
|
||||
app.kubeshark.com/app: hub
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
annotations:
|
||||
{{- if .Values.tap.annotations }}
|
||||
annotations:
|
||||
{{- toYaml .Values.tap.annotations | nindent 4 }}
|
||||
{{- end }}
|
||||
name: kubeshark-hub
|
||||
@@ -17,5 +17,5 @@ spec:
|
||||
port: 80
|
||||
targetPort: 8080
|
||||
selector:
|
||||
app.kubeshark.co/app: hub
|
||||
app.kubeshark.com/app: hub
|
||||
type: ClusterIP
|
||||
|
||||
@@ -2,10 +2,10 @@ apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app.kubeshark.co/app: front
|
||||
app.kubeshark.com/app: front
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
annotations:
|
||||
{{- if .Values.tap.annotations }}
|
||||
annotations:
|
||||
{{- toYaml .Values.tap.annotations | nindent 4 }}
|
||||
{{- end }}
|
||||
name: {{ include "kubeshark.name" . }}-front
|
||||
@@ -14,27 +14,27 @@ spec:
|
||||
replicas: 1 # Set the desired number of replicas
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubeshark.co/app: front
|
||||
app.kubeshark.com/app: front
|
||||
{{- include "kubeshark.selectorLabels" . | nindent 6 }}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubeshark.co/app: front
|
||||
app.kubeshark.com/app: front
|
||||
{{- include "kubeshark.labels" . | nindent 8 }}
|
||||
spec:
|
||||
containers:
|
||||
- env:
|
||||
- name: REACT_APP_AUTH_ENABLED
|
||||
value: '{{- if or (and .Values.cloudLicenseEnabled (not (empty .Values.license))) (not .Values.internetConnectivity) -}}
|
||||
{{ (and .Values.tap.auth.enabled (eq .Values.tap.auth.type "dex")) | ternary true false }}
|
||||
{{ (default false .Values.demoModeEnabled) | ternary true ((and .Values.tap.auth.enabled (eq .Values.tap.auth.type "dex")) | ternary true false) }}
|
||||
{{- else -}}
|
||||
{{ .Values.cloudLicenseEnabled | ternary "true" .Values.tap.auth.enabled }}
|
||||
{{ .Values.cloudLicenseEnabled | ternary "true" ((default false .Values.demoModeEnabled) | ternary "true" .Values.tap.auth.enabled) }}
|
||||
{{- end }}'
|
||||
- name: REACT_APP_AUTH_TYPE
|
||||
value: '{{- if and .Values.cloudLicenseEnabled (not (eq .Values.tap.auth.type "dex")) -}}
|
||||
default
|
||||
{{- else -}}
|
||||
{{ .Values.tap.auth.type }}
|
||||
{{ (default false .Values.demoModeEnabled) | ternary "default" .Values.tap.auth.type }}
|
||||
{{- end }}'
|
||||
- name: REACT_APP_COMPLETE_STREAMING_ENABLED
|
||||
value: '{{- if and (hasKey .Values.tap "dashboard") (hasKey .Values.tap.dashboard "completeStreamingEnabled") -}}
|
||||
@@ -42,33 +42,35 @@ spec:
|
||||
{{- else -}}
|
||||
true
|
||||
{{- end }}'
|
||||
- name: REACT_APP_STREAMING_TYPE
|
||||
value: '{{ default "" (((.Values).tap).dashboard).streamingType }}'
|
||||
- name: REACT_APP_AUTH_SAML_IDP_METADATA_URL
|
||||
value: '{{ not (eq .Values.tap.auth.saml.idpMetadataUrl "") | ternary .Values.tap.auth.saml.idpMetadataUrl " " }}'
|
||||
- name: REACT_APP_TIMEZONE
|
||||
value: '{{ not (eq .Values.timezone "") | ternary .Values.timezone " " }}'
|
||||
- name: REACT_APP_SCRIPTING_DISABLED
|
||||
value: '{{- if .Values.tap.liveConfigMapChangesDisabled -}}
|
||||
{{- if .Values.demoModeEnabled -}}
|
||||
{{ .Values.demoModeEnabled | ternary false true }}
|
||||
{{- else -}}
|
||||
true
|
||||
{{- end }}
|
||||
- name: REACT_APP_SCRIPTING_HIDDEN
|
||||
value: '{{- if and .Values.scripting (eq (.Values.scripting.enabled | toString) "false") -}}
|
||||
true
|
||||
{{- else -}}
|
||||
false
|
||||
{{- end }}'
|
||||
- name: REACT_APP_SCRIPTING_DISABLED
|
||||
value: '{{ default false .Values.demoModeEnabled }}'
|
||||
- name: REACT_APP_TARGETED_PODS_UPDATE_DISABLED
|
||||
value: '{{ .Values.tap.liveConfigMapChangesDisabled }}'
|
||||
value: '{{ default false .Values.demoModeEnabled }}'
|
||||
- name: REACT_APP_PRESET_FILTERS_CHANGING_ENABLED
|
||||
value: '{{ .Values.tap.liveConfigMapChangesDisabled | ternary "false" "true" }}'
|
||||
value: '{{ not (default false .Values.demoModeEnabled) }}'
|
||||
- name: REACT_APP_BPF_OVERRIDE_DISABLED
|
||||
value: '{{ eq .Values.tap.packetCapture "af_packet" | ternary "false" "true" }}'
|
||||
- name: REACT_APP_RECORDING_DISABLED
|
||||
value: '{{ .Values.tap.liveConfigMapChangesDisabled }}'
|
||||
- name: REACT_APP_STOP_TRAFFIC_CAPTURING_DISABLED
|
||||
value: '{{- if and .Values.tap.liveConfigMapChangesDisabled .Values.tap.stopped -}}
|
||||
false
|
||||
value: '{{ default false .Values.demoModeEnabled }}'
|
||||
- name: REACT_APP_DISSECTION_ENABLED
|
||||
value: '{{ .Values.tap.capture.dissection.enabled | ternary "true" "false" }}'
|
||||
- name: REACT_APP_DISSECTION_CONTROL_ENABLED
|
||||
value: '{{- if and (not .Values.demoModeEnabled) (not .Values.tap.capture.dissection.enabled) -}}
|
||||
true
|
||||
{{- else -}}
|
||||
{{ .Values.tap.liveConfigMapChangesDisabled | ternary "true" "false" }}
|
||||
{{ not (default false .Values.demoModeEnabled) | ternary false true }}
|
||||
{{- end -}}'
|
||||
- name: 'REACT_APP_CLOUD_LICENSE_ENABLED'
|
||||
value: '{{- if or (and .Values.cloudLicenseEnabled (not (empty .Values.license))) (not .Values.internetConnectivity) -}}
|
||||
@@ -76,12 +78,20 @@ spec:
|
||||
{{- else -}}
|
||||
{{ .Values.cloudLicenseEnabled }}
|
||||
{{- end }}'
|
||||
- name: 'REACT_APP_AI_ASSISTANT_ENABLED'
|
||||
value: '{{ .Values.aiAssistantEnabled | ternary "true" "false" }}'
|
||||
- name: REACT_APP_SUPPORT_CHAT_ENABLED
|
||||
value: '{{ and .Values.supportChatEnabled .Values.internetConnectivity | ternary "true" "false" }}'
|
||||
- name: REACT_APP_BETA_ENABLED
|
||||
value: '{{ default false .Values.betaEnabled | ternary "true" "false" }}'
|
||||
- name: REACT_APP_DISSECTORS_UPDATING_ENABLED
|
||||
value: '{{ .Values.tap.liveConfigMapChangesDisabled | ternary "false" "true" }}'
|
||||
value: '{{ not (default false .Values.demoModeEnabled) }}'
|
||||
- name: REACT_APP_SNAPSHOTS_UPDATING_ENABLED
|
||||
value: '{{ not (default false .Values.demoModeEnabled) }}'
|
||||
- name: REACT_APP_DEMO_MODE_ENABLED
|
||||
value: '{{ default false .Values.demoModeEnabled }}'
|
||||
- name: REACT_APP_CLUSTER_WIDE_MAP_ENABLED
|
||||
value: '{{ default false (((.Values).tap).dashboard).clusterWideMapEnabled }}'
|
||||
- name: REACT_APP_RAW_CAPTURE_ENABLED
|
||||
value: '{{ .Values.tap.capture.raw.enabled | ternary "true" "false" }}'
|
||||
- name: REACT_APP_SENTRY_ENABLED
|
||||
value: '{{ (include "sentry.enabled" .) }}'
|
||||
- name: REACT_APP_SENTRY_ENVIRONMENT
|
||||
@@ -122,12 +132,6 @@ spec:
|
||||
mountPath: /etc/nginx/conf.d/default.conf
|
||||
subPath: default.conf
|
||||
readOnly: true
|
||||
{{- if .Values.tap.docker.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- range .Values.tap.docker.imagePullSecrets }}
|
||||
- name: {{ . }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if gt (len .Values.tap.nodeSelectorTerms.front) 0}}
|
||||
affinity:
|
||||
nodeAffinity:
|
||||
@@ -181,3 +185,6 @@ spec:
|
||||
name: kubeshark-nginx-config-map
|
||||
dnsPolicy: ClusterFirstWithHostNet
|
||||
serviceAccountName: {{ include "kubeshark.serviceAccountName" . }}
|
||||
{{- if .Values.tap.priorityClass }}
|
||||
priorityClassName: {{ .Values.tap.priorityClass | quote }}
|
||||
{{- end }}
|
||||
|
||||
@@ -4,8 +4,8 @@ kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
annotations:
|
||||
{{- if .Values.tap.annotations }}
|
||||
annotations:
|
||||
{{- toYaml .Values.tap.annotations | nindent 4 }}
|
||||
{{- end }}
|
||||
name: kubeshark-front
|
||||
@@ -16,5 +16,5 @@ spec:
|
||||
port: 80
|
||||
targetPort: 8080
|
||||
selector:
|
||||
app.kubeshark.co/app: front
|
||||
app.kubeshark.com/app: front
|
||||
type: ClusterIP
|
||||
|
||||
@@ -26,8 +26,8 @@ kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
annotations:
|
||||
{{- if .Values.tap.annotations }}
|
||||
annotations:
|
||||
{{- toYaml .Values.tap.annotations | nindent 4 }}
|
||||
{{- end }}
|
||||
name: kubeshark-persistent-volume-claim
|
||||
|
||||
22
helm-chart/templates/09-snapshots-pvc.yaml
Normal file
22
helm-chart/templates/09-snapshots-pvc.yaml
Normal file
@@ -0,0 +1,22 @@
|
||||
---
|
||||
{{- if .Values.tap.snapshots.local.storageClass }}
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
{{- if .Values.tap.annotations }}
|
||||
annotations:
|
||||
{{- toYaml .Values.tap.annotations | nindent 4 }}
|
||||
{{- end }}
|
||||
name: {{ include "kubeshark.name" . }}-snapshots-pvc
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: {{ .Values.tap.snapshots.local.storageSize }}
|
||||
storageClassName: {{ .Values.tap.snapshots.local.storageClass }}
|
||||
status: {}
|
||||
{{- end }}
|
||||
@@ -3,11 +3,11 @@ apiVersion: apps/v1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
labels:
|
||||
app.kubeshark.co/app: worker
|
||||
app.kubeshark.com/app: worker
|
||||
sidecar.istio.io/inject: "false"
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
annotations:
|
||||
{{- if .Values.tap.annotations }}
|
||||
annotations:
|
||||
{{- toYaml .Values.tap.annotations | nindent 4 }}
|
||||
{{- end }}
|
||||
name: kubeshark-worker-daemon-set
|
||||
@@ -15,12 +15,12 @@ metadata:
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubeshark.co/app: worker
|
||||
app.kubeshark.com/app: worker
|
||||
{{- include "kubeshark.selectorLabels" . | nindent 6 }}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubeshark.co/app: worker
|
||||
app.kubeshark.com/app: worker
|
||||
{{- include "kubeshark.labels" . | nindent 8 }}
|
||||
name: kubeshark-worker-daemon-set
|
||||
namespace: kubeshark
|
||||
@@ -99,6 +99,16 @@ spec:
|
||||
- '{{ .Values.tap.misc.resolutionStrategy }}'
|
||||
- -staletimeout
|
||||
- '{{ .Values.tap.misc.staleTimeoutSeconds }}'
|
||||
- -tcp-flow-full-timeout
|
||||
- '{{ .Values.tap.misc.tcpFlowTimeout }}'
|
||||
- -udp-flow-full-timeout
|
||||
- '{{ .Values.tap.misc.udpFlowTimeout }}'
|
||||
- -storage-size
|
||||
- '{{ .Values.tap.storageLimit }}'
|
||||
- -capture-db-max-size
|
||||
- '{{ .Values.tap.capture.dbMaxSize }}'
|
||||
- -cloud-api-url
|
||||
- '{{ .Values.cloudApiUrl }}'
|
||||
{{- if .Values.tap.docker.overrideImage.worker }}
|
||||
image: '{{ .Values.tap.docker.overrideImage.worker }}'
|
||||
{{- else if .Values.tap.docker.overrideTag.worker }}
|
||||
@@ -107,12 +117,6 @@ spec:
|
||||
image: '{{ .Values.tap.docker.registry }}/worker:{{ not (eq .Values.tap.docker.tag "") | ternary .Values.tap.docker.tag (include "kubeshark.defaultVersion" .) }}{{ include "kubeshark.dockerTagDebugVersion" . }}'
|
||||
{{- end }}
|
||||
imagePullPolicy: {{ .Values.tap.docker.imagePullPolicy }}
|
||||
{{- if .Values.tap.docker.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- range .Values.tap.docker.imagePullSecrets }}
|
||||
- name: {{ . }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
name: sniffer
|
||||
ports:
|
||||
- containerPort: {{ .Values.tap.metrics.port }}
|
||||
@@ -131,8 +135,6 @@ spec:
|
||||
value: '{{ .Values.tap.misc.tcpStreamChannelTimeoutMs }}'
|
||||
- name: TCP_STREAM_CHANNEL_TIMEOUT_SHOW
|
||||
value: '{{ .Values.tap.misc.tcpStreamChannelTimeoutShow }}'
|
||||
- name: KUBESHARK_CLOUD_API_URL
|
||||
value: 'https://api.kubeshark.co'
|
||||
- name: PROFILING_ENABLED
|
||||
value: '{{ .Values.tap.pprof.enabled }}'
|
||||
- name: SENTRY_ENABLED
|
||||
@@ -335,8 +337,11 @@ spec:
|
||||
readOnly: true
|
||||
{{- end }}
|
||||
dnsPolicy: ClusterFirstWithHostNet
|
||||
hostNetwork: true
|
||||
hostNetwork: {{ .Values.tap.hostNetwork }}
|
||||
serviceAccountName: {{ include "kubeshark.serviceAccountName" . }}
|
||||
{{- if .Values.tap.priorityClass }}
|
||||
priorityClassName: {{ .Values.tap.priorityClass | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.tap.tolerations.workers }}
|
||||
tolerations:
|
||||
{{- range .Values.tap.tolerations.workers }}
|
||||
@@ -353,12 +358,6 @@ spec:
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.tap.docker.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- range .Values.tap.docker.imagePullSecrets }}
|
||||
- name: {{ . }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if gt (len .Values.tap.nodeSelectorTerms.workers) 0}}
|
||||
affinity:
|
||||
nodeAffinity:
|
||||
@@ -403,9 +402,11 @@ spec:
|
||||
- hostPath:
|
||||
path: /etc/os-release
|
||||
name: os-release
|
||||
{{- if .Values.tap.tls }}
|
||||
- hostPath:
|
||||
path: /
|
||||
name: root
|
||||
{{- end }}
|
||||
- name: data
|
||||
{{- if .Values.tap.persistentStorage }}
|
||||
persistentVolumeClaim:
|
||||
|
||||
@@ -28,7 +28,7 @@ spec:
|
||||
name: kubeshark-front
|
||||
port:
|
||||
number: 80
|
||||
path: /
|
||||
path: {{ default "/" (((.Values).tap).ingress).path }}
|
||||
pathType: Prefix
|
||||
{{- if .Values.tap.ingress.tls }}
|
||||
tls:
|
||||
|
||||
@@ -30,8 +30,10 @@ data:
|
||||
proxy_set_header Authorization $http_authorization;
|
||||
proxy_pass_header Authorization;
|
||||
proxy_connect_timeout 4s;
|
||||
proxy_read_timeout 120s;
|
||||
proxy_send_timeout 12s;
|
||||
# Disable buffering for gRPC/Connect streaming
|
||||
client_max_body_size 0;
|
||||
proxy_request_buffering off;
|
||||
proxy_buffering off;
|
||||
proxy_pass_request_headers on;
|
||||
}
|
||||
|
||||
@@ -86,4 +88,3 @@ data:
|
||||
root /usr/share/nginx/html;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -4,28 +4,29 @@ metadata:
|
||||
name: {{ include "kubeshark.configmapName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
app.kubeshark.co/app: hub
|
||||
app.kubeshark.com/app: hub
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
data:
|
||||
POD_REGEX: '{{ .Values.tap.regex }}'
|
||||
NAMESPACES: '{{ gt (len .Values.tap.namespaces) 0 | ternary (join "," .Values.tap.namespaces) "" }}'
|
||||
EXCLUDED_NAMESPACES: '{{ gt (len .Values.tap.excludedNamespaces) 0 | ternary (join "," .Values.tap.excludedNamespaces) "" }}'
|
||||
BPF_OVERRIDE: '{{ .Values.tap.bpfOverride }}'
|
||||
STOPPED: '{{ .Values.tap.stopped | ternary "true" "false" }}'
|
||||
DISSECTION_ENABLED: '{{ .Values.tap.capture.dissection.enabled | ternary "true" "false" }}'
|
||||
CAPTURE_SELF: '{{ .Values.tap.capture.captureSelf | ternary "true" "false" }}'
|
||||
SCRIPTING_SCRIPTS: '{}'
|
||||
SCRIPTING_ACTIVE_SCRIPTS: '{{ gt (len .Values.scripting.active) 0 | ternary (join "," .Values.scripting.active) "" }}'
|
||||
INGRESS_ENABLED: '{{ .Values.tap.ingress.enabled }}'
|
||||
INGRESS_HOST: '{{ .Values.tap.ingress.host }}'
|
||||
PROXY_FRONT_PORT: '{{ .Values.tap.proxy.front.port }}'
|
||||
AUTH_ENABLED: '{{- if and .Values.cloudLicenseEnabled (not (empty .Values.license)) -}}
|
||||
{{ and .Values.tap.auth.enabled (eq .Values.tap.auth.type "dex") | ternary true false }}
|
||||
{{ (default false .Values.demoModeEnabled) | ternary true ((and .Values.tap.auth.enabled (eq .Values.tap.auth.type "dex")) | ternary true false) }}
|
||||
{{- else -}}
|
||||
{{ .Values.cloudLicenseEnabled | ternary "true" (.Values.tap.auth.enabled | ternary "true" "") }}
|
||||
{{ .Values.cloudLicenseEnabled | ternary "true" ((default false .Values.demoModeEnabled) | ternary "true" .Values.tap.auth.enabled) }}
|
||||
{{- end }}'
|
||||
AUTH_TYPE: '{{- if and .Values.cloudLicenseEnabled (not (eq .Values.tap.auth.type "dex")) -}}
|
||||
default
|
||||
{{- else -}}
|
||||
{{ .Values.tap.auth.type }}
|
||||
{{ (default false .Values.demoModeEnabled) | ternary "default" .Values.tap.auth.type }}
|
||||
{{- end }}'
|
||||
AUTH_SAML_IDP_METADATA_URL: '{{ .Values.tap.auth.saml.idpMetadataUrl }}'
|
||||
AUTH_SAML_ROLE_ATTRIBUTE: '{{ .Values.tap.auth.saml.roleAttribute }}'
|
||||
@@ -43,23 +44,15 @@ data:
|
||||
false
|
||||
{{- end }}'
|
||||
TELEMETRY_DISABLED: '{{ not .Values.internetConnectivity | ternary "true" (not .Values.tap.telemetry.enabled | ternary "true" "false") }}'
|
||||
SCRIPTING_DISABLED: '{{- if .Values.tap.liveConfigMapChangesDisabled -}}
|
||||
{{- if .Values.demoModeEnabled -}}
|
||||
{{ .Values.demoModeEnabled | ternary false true }}
|
||||
{{- else -}}
|
||||
true
|
||||
{{- end }}
|
||||
{{- else -}}
|
||||
false
|
||||
{{- end }}'
|
||||
TARGETED_PODS_UPDATE_DISABLED: '{{ .Values.tap.liveConfigMapChangesDisabled | ternary "true" "" }}'
|
||||
PRESET_FILTERS_CHANGING_ENABLED: '{{ .Values.tap.liveConfigMapChangesDisabled | ternary "false" "true" }}'
|
||||
RECORDING_DISABLED: '{{ .Values.tap.liveConfigMapChangesDisabled | ternary "true" "" }}'
|
||||
STOP_TRAFFIC_CAPTURING_DISABLED: '{{- if and .Values.tap.liveConfigMapChangesDisabled .Values.tap.stopped -}}
|
||||
false
|
||||
{{- else -}}
|
||||
{{ .Values.tap.liveConfigMapChangesDisabled | ternary "true" "false" }}
|
||||
{{- end }}'
|
||||
SCRIPTING_DISABLED: '{{ default false .Values.demoModeEnabled }}'
|
||||
TARGETED_PODS_UPDATE_DISABLED: '{{ default false .Values.demoModeEnabled }}'
|
||||
PRESET_FILTERS_CHANGING_ENABLED: '{{ not (default false .Values.demoModeEnabled) }}'
|
||||
RECORDING_DISABLED: '{{ (default false .Values.demoModeEnabled) | ternary true false }}'
|
||||
DISSECTION_CONTROL_ENABLED: '{{- if and (not .Values.demoModeEnabled) (not .Values.tap.capture.dissection.enabled) -}}
|
||||
true
|
||||
{{- else -}}
|
||||
{{ (default false .Values.demoModeEnabled) | ternary false true }}
|
||||
{{- end }}'
|
||||
GLOBAL_FILTER: {{ include "kubeshark.escapeDoubleQuotes" .Values.tap.globalFilter | quote }}
|
||||
DEFAULT_FILTER: {{ include "kubeshark.escapeDoubleQuotes" .Values.tap.defaultFilter | quote }}
|
||||
TRAFFIC_SAMPLE_RATE: '{{ .Values.tap.misc.trafficSampleRate }}'
|
||||
@@ -72,14 +65,17 @@ data:
|
||||
{{- else -}}
|
||||
{{ .Values.cloudLicenseEnabled }}
|
||||
{{- end }}'
|
||||
AI_ASSISTANT_ENABLED: '{{ .Values.aiAssistantEnabled | ternary "true" "false" }}'
|
||||
DUPLICATE_TIMEFRAME: '{{ .Values.tap.misc.duplicateTimeframe }}'
|
||||
ENABLED_DISSECTORS: '{{ gt (len .Values.tap.enabledDissectors) 0 | ternary (join "," .Values.tap.enabledDissectors) "" }}'
|
||||
CUSTOM_MACROS: '{{ toJson .Values.tap.customMacros }}'
|
||||
DISSECTORS_UPDATING_ENABLED: '{{ .Values.tap.liveConfigMapChangesDisabled | ternary "false" "true" }}'
|
||||
DISSECTORS_UPDATING_ENABLED: '{{ not (default false .Values.demoModeEnabled) }}'
|
||||
SNAPSHOTS_UPDATING_ENABLED: '{{ not (default false .Values.demoModeEnabled) }}'
|
||||
DEMO_MODE_ENABLED: '{{ default false .Values.demoModeEnabled }}'
|
||||
DETECT_DUPLICATES: '{{ .Values.tap.misc.detectDuplicates | ternary "true" "false" }}'
|
||||
PCAP_DUMP_ENABLE: '{{ .Values.pcapdump.enabled }}'
|
||||
PCAP_TIME_INTERVAL: '{{ .Values.pcapdump.timeInterval }}'
|
||||
PCAP_MAX_TIME: '{{ .Values.pcapdump.maxTime }}'
|
||||
PCAP_MAX_SIZE: '{{ .Values.pcapdump.maxSize }}'
|
||||
PORT_MAPPING: '{{ toJson .Values.tap.portMapping }}'
|
||||
RAW_CAPTURE_ENABLED: '{{ .Values.tap.capture.raw.enabled | ternary "true" "false" }}'
|
||||
RAW_CAPTURE_STORAGE_SIZE: '{{ .Values.tap.capture.raw.storageSize }}'
|
||||
|
||||
@@ -4,7 +4,7 @@ metadata:
|
||||
name: {{ include "kubeshark.secretName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
app.kubeshark.co/app: hub
|
||||
app.kubeshark.com/app: hub
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
stringData:
|
||||
LICENSE: '{{ .Values.license }}'
|
||||
@@ -20,7 +20,7 @@ metadata:
|
||||
name: kubeshark-saml-x509-crt-secret
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
app.kubeshark.co/app: hub
|
||||
app.kubeshark.com/app: hub
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
stringData:
|
||||
AUTH_SAML_X509_CRT: |
|
||||
@@ -34,7 +34,7 @@ metadata:
|
||||
name: kubeshark-saml-x509-key-secret
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
app.kubeshark.co/app: hub
|
||||
app.kubeshark.com/app: hub
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
stringData:
|
||||
AUTH_SAML_X509_KEY: |
|
||||
|
||||
@@ -14,7 +14,7 @@ metadata:
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
selector:
|
||||
app.kubeshark.co/app: worker
|
||||
app.kubeshark.com/app: worker
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
ports:
|
||||
- name: metrics
|
||||
|
||||
@@ -14,7 +14,7 @@ metadata:
|
||||
namespace: {{ .Release.Namespace }}
|
||||
spec:
|
||||
selector:
|
||||
app.kubeshark.co/app: hub
|
||||
app.kubeshark.com/app: hub
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
ports:
|
||||
- name: metrics
|
||||
|
||||
@@ -3,8 +3,8 @@ kind: NetworkPolicy
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
annotations:
|
||||
{{- if .Values.tap.annotations }}
|
||||
annotations:
|
||||
{{- toYaml .Values.tap.annotations | nindent 4 }}
|
||||
{{- end }}
|
||||
name: kubeshark-hub-network-policy
|
||||
@@ -12,7 +12,7 @@ metadata:
|
||||
spec:
|
||||
podSelector:
|
||||
matchLabels:
|
||||
app.kubeshark.co/app: hub
|
||||
app.kubeshark.com/app: hub
|
||||
policyTypes:
|
||||
- Ingress
|
||||
- Egress
|
||||
@@ -40,7 +40,7 @@ metadata:
|
||||
spec:
|
||||
podSelector:
|
||||
matchLabels:
|
||||
app.kubeshark.co/app: front
|
||||
app.kubeshark.com/app: front
|
||||
policyTypes:
|
||||
- Ingress
|
||||
- Egress
|
||||
@@ -65,7 +65,7 @@ metadata:
|
||||
spec:
|
||||
podSelector:
|
||||
matchLabels:
|
||||
app.kubeshark.co/app: dex
|
||||
app.kubeshark.com/app: dex
|
||||
policyTypes:
|
||||
- Ingress
|
||||
- Egress
|
||||
@@ -90,7 +90,7 @@ metadata:
|
||||
spec:
|
||||
podSelector:
|
||||
matchLabels:
|
||||
app.kubeshark.co/app: worker
|
||||
app.kubeshark.com/app: worker
|
||||
policyTypes:
|
||||
- Ingress
|
||||
- Egress
|
||||
|
||||
@@ -10,6 +10,9 @@ spec:
|
||||
template:
|
||||
spec:
|
||||
serviceAccountName: {{ include "kubeshark.serviceAccountName" . }}
|
||||
{{- if .Values.tap.priorityClass }}
|
||||
priorityClassName: {{ .Values.tap.priorityClass | quote }}
|
||||
{{- end }}
|
||||
restartPolicy: Never
|
||||
containers:
|
||||
- name: cleanup
|
||||
@@ -21,4 +24,4 @@ spec:
|
||||
image: '{{ .Values.tap.docker.registry }}/hub:{{ not (eq .Values.tap.docker.tag "") | ternary .Values.tap.docker.tag (include "kubeshark.defaultVersion" .) }}'
|
||||
{{- end }}
|
||||
command: ["/app/cleanup"]
|
||||
{{ end -}}
|
||||
{{ end -}}
|
||||
|
||||
@@ -5,10 +5,10 @@ apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app.kubeshark.co/app: dex
|
||||
app.kubeshark.com/app: dex
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
annotations:
|
||||
{{- if .Values.tap.annotations }}
|
||||
annotations:
|
||||
{{- toYaml .Values.tap.annotations | nindent 4 }}
|
||||
{{- end }}
|
||||
name: {{ include "kubeshark.name" . }}-dex
|
||||
@@ -17,12 +17,12 @@ spec:
|
||||
replicas: 1 # Set the desired number of replicas
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubeshark.co/app: dex
|
||||
app.kubeshark.com/app: dex
|
||||
{{- include "kubeshark.selectorLabels" . | nindent 6 }}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubeshark.co/app: dex
|
||||
app.kubeshark.com/app: dex
|
||||
{{- include "kubeshark.labels" . | nindent 8 }}
|
||||
spec:
|
||||
containers:
|
||||
@@ -69,12 +69,6 @@ spec:
|
||||
requests:
|
||||
cpu: 50m
|
||||
memory: 50Mi
|
||||
{{- if .Values.tap.docker.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- range .Values.tap.docker.imagePullSecrets }}
|
||||
- name: {{ . }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if gt (len .Values.tap.nodeSelectorTerms.dex) 0}}
|
||||
affinity:
|
||||
nodeAffinity:
|
||||
@@ -112,5 +106,7 @@ spec:
|
||||
secretName: kubeshark-dex-conf-secret
|
||||
dnsPolicy: ClusterFirstWithHostNet
|
||||
serviceAccountName: {{ include "kubeshark.serviceAccountName" . }}
|
||||
|
||||
{{- if .Values.tap.priorityClass }}
|
||||
priorityClassName: {{ .Values.tap.priorityClass | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
@@ -5,10 +5,10 @@ apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
app.kubeshark.co/app: dex
|
||||
app.kubeshark.com/app: dex
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
annotations:
|
||||
{{- if .Values.tap.annotations }}
|
||||
annotations:
|
||||
{{- toYaml .Values.tap.annotations | nindent 4 }}
|
||||
{{- end }}
|
||||
name: kubeshark-dex
|
||||
@@ -19,7 +19,7 @@ spec:
|
||||
port: 80
|
||||
targetPort: 5556
|
||||
selector:
|
||||
app.kubeshark.co/app: dex
|
||||
app.kubeshark.com/app: dex
|
||||
type: ClusterIP
|
||||
|
||||
{{- end }}
|
||||
|
||||
@@ -6,7 +6,7 @@ metadata:
|
||||
name: kubeshark-dex-conf-secret
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
app.kubeshark.co/app: hub
|
||||
app.kubeshark.com/app: hub
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
data:
|
||||
dex-config.yaml: {{ .Values.tap.auth.dexConfig | toYaml | b64enc | quote }}
|
||||
|
||||
64
helm-chart/templates/21-cloud-storage.yaml
Normal file
64
helm-chart/templates/21-cloud-storage.yaml
Normal file
@@ -0,0 +1,64 @@
|
||||
{{- $hasConfigValues := or .Values.tap.snapshots.cloud.prefix .Values.tap.snapshots.cloud.s3.bucket .Values.tap.snapshots.cloud.s3.region .Values.tap.snapshots.cloud.s3.roleArn .Values.tap.snapshots.cloud.s3.externalId .Values.tap.snapshots.cloud.azblob.storageAccount .Values.tap.snapshots.cloud.azblob.container .Values.tap.snapshots.cloud.gcs.bucket .Values.tap.snapshots.cloud.gcs.project -}}
|
||||
{{- $hasSecretValues := or .Values.tap.snapshots.cloud.s3.accessKey .Values.tap.snapshots.cloud.s3.secretKey .Values.tap.snapshots.cloud.azblob.storageKey .Values.tap.snapshots.cloud.gcs.credentialsJson -}}
|
||||
{{- if $hasConfigValues }}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
name: {{ include "kubeshark.name" . }}-cloud-config
|
||||
namespace: {{ .Release.Namespace }}
|
||||
data:
|
||||
{{- if .Values.tap.snapshots.cloud.prefix }}
|
||||
SNAPSHOT_CLOUD_PREFIX: {{ .Values.tap.snapshots.cloud.prefix | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.tap.snapshots.cloud.s3.bucket }}
|
||||
SNAPSHOT_AWS_BUCKET: {{ .Values.tap.snapshots.cloud.s3.bucket | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.tap.snapshots.cloud.s3.region }}
|
||||
SNAPSHOT_AWS_REGION: {{ .Values.tap.snapshots.cloud.s3.region | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.tap.snapshots.cloud.s3.roleArn }}
|
||||
SNAPSHOT_AWS_ROLE_ARN: {{ .Values.tap.snapshots.cloud.s3.roleArn | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.tap.snapshots.cloud.s3.externalId }}
|
||||
SNAPSHOT_AWS_EXTERNAL_ID: {{ .Values.tap.snapshots.cloud.s3.externalId | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.tap.snapshots.cloud.azblob.storageAccount }}
|
||||
SNAPSHOT_AZBLOB_STORAGE_ACCOUNT: {{ .Values.tap.snapshots.cloud.azblob.storageAccount | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.tap.snapshots.cloud.azblob.container }}
|
||||
SNAPSHOT_AZBLOB_CONTAINER: {{ .Values.tap.snapshots.cloud.azblob.container | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.tap.snapshots.cloud.gcs.bucket }}
|
||||
SNAPSHOT_GCS_BUCKET: {{ .Values.tap.snapshots.cloud.gcs.bucket | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.tap.snapshots.cloud.gcs.project }}
|
||||
SNAPSHOT_GCS_PROJECT: {{ .Values.tap.snapshots.cloud.gcs.project | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if $hasSecretValues }}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
name: {{ include "kubeshark.name" . }}-cloud-secret
|
||||
namespace: {{ .Release.Namespace }}
|
||||
type: Opaque
|
||||
stringData:
|
||||
{{- if .Values.tap.snapshots.cloud.s3.accessKey }}
|
||||
SNAPSHOT_AWS_ACCESS_KEY: {{ .Values.tap.snapshots.cloud.s3.accessKey | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.tap.snapshots.cloud.s3.secretKey }}
|
||||
SNAPSHOT_AWS_SECRET_KEY: {{ .Values.tap.snapshots.cloud.s3.secretKey | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.tap.snapshots.cloud.azblob.storageKey }}
|
||||
SNAPSHOT_AZBLOB_STORAGE_KEY: {{ .Values.tap.snapshots.cloud.azblob.storageKey | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.tap.snapshots.cloud.gcs.credentialsJson }}
|
||||
SNAPSHOT_GCS_CREDENTIALS_JSON: {{ .Values.tap.snapshots.cloud.gcs.credentialsJson | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
@@ -28,9 +28,12 @@ Notices:
|
||||
- Support chat using Intercom is enabled. It can be disabled using `--set supportChatEnabled=false`
|
||||
{{- end }}
|
||||
{{- if eq .Values.license ""}}
|
||||
- No license key was detected. You can either log-in/sign-up through the dashboard, or download the license key from https://console.kubeshark.co/ and add it as `LICENSE` via mounted secret (`tap.secrets`).
|
||||
- No license key was detected.
|
||||
- Authenticate through the dashboard to activate a complementary COMMUNITY license.
|
||||
- If you have an Enterprise license, download the license key from https://console.kubeshark.com/
|
||||
- An Enterprise license-key can be added as 'license: <license>' in helm values or as `--set license=<license>` or as `LICENSE` via mounted secret (`tap.secrets`).
|
||||
- Contact us to get an Enterprise license: https://kubeshark.com/contact-us.
|
||||
{{- end }}
|
||||
|
||||
{{ if .Values.tap.ingress.enabled }}
|
||||
|
||||
You can now access the application through the following URL:
|
||||
@@ -42,8 +45,9 @@ To access the application, follow these steps:
|
||||
1. Perform port forwarding with the following commands:
|
||||
|
||||
kubectl port-forward -n {{ .Release.Namespace }} service/kubeshark-front 8899:80
|
||||
you could also run: `kubeshark proxy` (which simply manages the port-forward connection)
|
||||
|
||||
2. Once port forwarding is done, you can access the application by visiting the following URL in your web browser:
|
||||
http://0.0.0.0:8899{{ default "" (((.Values.tap).routing).front).basePath }}/
|
||||
http://127.0.0.1:8899{{ default "" (((.Values.tap).routing).front).basePath }}/
|
||||
|
||||
{{- end }}
|
||||
|
||||
@@ -80,7 +80,7 @@ Create docker tag default version
|
||||
*/}}
|
||||
{{- define "kubeshark.defaultVersion" -}}
|
||||
{{- $defaultVersion := (printf "v%s" .Chart.Version) -}}
|
||||
{{- if not .Values.tap.docker.tagLocked }}
|
||||
{{- if .Values.tap.docker.tagLocked }}
|
||||
{{- $defaultVersion = regexReplaceAll "^([^.]+\\.[^.]+).*" $defaultVersion "$1" -}}
|
||||
{{- end }}
|
||||
{{- $defaultVersion }}
|
||||
|
||||
248
helm-chart/tests/cloud_storage_test.yaml
Normal file
248
helm-chart/tests/cloud_storage_test.yaml
Normal file
@@ -0,0 +1,248 @@
|
||||
suite: cloud storage template
|
||||
templates:
|
||||
- templates/21-cloud-storage.yaml
|
||||
tests:
|
||||
- it: should render nothing with default values
|
||||
asserts:
|
||||
- hasDocuments:
|
||||
count: 0
|
||||
|
||||
- it: should render ConfigMap with S3 config only
|
||||
set:
|
||||
tap.snapshots.cloud.s3.bucket: my-bucket
|
||||
tap.snapshots.cloud.s3.region: us-east-1
|
||||
asserts:
|
||||
- hasDocuments:
|
||||
count: 1
|
||||
- isKind:
|
||||
of: ConfigMap
|
||||
documentIndex: 0
|
||||
- equal:
|
||||
path: metadata.name
|
||||
value: RELEASE-NAME-cloud-config
|
||||
documentIndex: 0
|
||||
- equal:
|
||||
path: data.SNAPSHOT_AWS_BUCKET
|
||||
value: "my-bucket"
|
||||
documentIndex: 0
|
||||
- equal:
|
||||
path: data.SNAPSHOT_AWS_REGION
|
||||
value: "us-east-1"
|
||||
documentIndex: 0
|
||||
- notExists:
|
||||
path: data.SNAPSHOT_AWS_ACCESS_KEY
|
||||
documentIndex: 0
|
||||
|
||||
- it: should render ConfigMap and Secret with S3 config and credentials
|
||||
set:
|
||||
tap.snapshots.cloud.s3.bucket: my-bucket
|
||||
tap.snapshots.cloud.s3.region: us-east-1
|
||||
tap.snapshots.cloud.s3.accessKey: AKIAIOSFODNN7EXAMPLE
|
||||
tap.snapshots.cloud.s3.secretKey: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
|
||||
asserts:
|
||||
- hasDocuments:
|
||||
count: 2
|
||||
- isKind:
|
||||
of: ConfigMap
|
||||
documentIndex: 0
|
||||
- equal:
|
||||
path: data.SNAPSHOT_AWS_BUCKET
|
||||
value: "my-bucket"
|
||||
documentIndex: 0
|
||||
- equal:
|
||||
path: data.SNAPSHOT_AWS_REGION
|
||||
value: "us-east-1"
|
||||
documentIndex: 0
|
||||
- isKind:
|
||||
of: Secret
|
||||
documentIndex: 1
|
||||
- equal:
|
||||
path: metadata.name
|
||||
value: RELEASE-NAME-cloud-secret
|
||||
documentIndex: 1
|
||||
- equal:
|
||||
path: stringData.SNAPSHOT_AWS_ACCESS_KEY
|
||||
value: "AKIAIOSFODNN7EXAMPLE"
|
||||
documentIndex: 1
|
||||
- equal:
|
||||
path: stringData.SNAPSHOT_AWS_SECRET_KEY
|
||||
value: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
|
||||
documentIndex: 1
|
||||
|
||||
- it: should render ConfigMap with Azure Blob config only
|
||||
set:
|
||||
tap.snapshots.cloud.azblob.storageAccount: myaccount
|
||||
tap.snapshots.cloud.azblob.container: mycontainer
|
||||
asserts:
|
||||
- hasDocuments:
|
||||
count: 1
|
||||
- isKind:
|
||||
of: ConfigMap
|
||||
documentIndex: 0
|
||||
- equal:
|
||||
path: data.SNAPSHOT_AZBLOB_STORAGE_ACCOUNT
|
||||
value: "myaccount"
|
||||
documentIndex: 0
|
||||
- equal:
|
||||
path: data.SNAPSHOT_AZBLOB_CONTAINER
|
||||
value: "mycontainer"
|
||||
documentIndex: 0
|
||||
|
||||
- it: should render ConfigMap and Secret with Azure Blob config and storage key
|
||||
set:
|
||||
tap.snapshots.cloud.azblob.storageAccount: myaccount
|
||||
tap.snapshots.cloud.azblob.container: mycontainer
|
||||
tap.snapshots.cloud.azblob.storageKey: c29tZWtleQ==
|
||||
asserts:
|
||||
- hasDocuments:
|
||||
count: 2
|
||||
- isKind:
|
||||
of: ConfigMap
|
||||
documentIndex: 0
|
||||
- equal:
|
||||
path: data.SNAPSHOT_AZBLOB_STORAGE_ACCOUNT
|
||||
value: "myaccount"
|
||||
documentIndex: 0
|
||||
- isKind:
|
||||
of: Secret
|
||||
documentIndex: 1
|
||||
- equal:
|
||||
path: stringData.SNAPSHOT_AZBLOB_STORAGE_KEY
|
||||
value: "c29tZWtleQ=="
|
||||
documentIndex: 1
|
||||
|
||||
- it: should render ConfigMap with GCS config only
|
||||
set:
|
||||
tap.snapshots.cloud.gcs.bucket: my-gcs-bucket
|
||||
tap.snapshots.cloud.gcs.project: my-gcp-project
|
||||
asserts:
|
||||
- hasDocuments:
|
||||
count: 1
|
||||
- isKind:
|
||||
of: ConfigMap
|
||||
documentIndex: 0
|
||||
- equal:
|
||||
path: data.SNAPSHOT_GCS_BUCKET
|
||||
value: "my-gcs-bucket"
|
||||
documentIndex: 0
|
||||
- equal:
|
||||
path: data.SNAPSHOT_GCS_PROJECT
|
||||
value: "my-gcp-project"
|
||||
documentIndex: 0
|
||||
- notExists:
|
||||
path: data.SNAPSHOT_GCS_CREDENTIALS_JSON
|
||||
documentIndex: 0
|
||||
|
||||
- it: should render ConfigMap and Secret with GCS config and credentials
|
||||
set:
|
||||
tap.snapshots.cloud.gcs.bucket: my-gcs-bucket
|
||||
tap.snapshots.cloud.gcs.project: my-gcp-project
|
||||
tap.snapshots.cloud.gcs.credentialsJson: '{"type":"service_account"}'
|
||||
asserts:
|
||||
- hasDocuments:
|
||||
count: 2
|
||||
- isKind:
|
||||
of: ConfigMap
|
||||
documentIndex: 0
|
||||
- equal:
|
||||
path: data.SNAPSHOT_GCS_BUCKET
|
||||
value: "my-gcs-bucket"
|
||||
documentIndex: 0
|
||||
- equal:
|
||||
path: data.SNAPSHOT_GCS_PROJECT
|
||||
value: "my-gcp-project"
|
||||
documentIndex: 0
|
||||
- isKind:
|
||||
of: Secret
|
||||
documentIndex: 1
|
||||
- equal:
|
||||
path: metadata.name
|
||||
value: RELEASE-NAME-cloud-secret
|
||||
documentIndex: 1
|
||||
- equal:
|
||||
path: stringData.SNAPSHOT_GCS_CREDENTIALS_JSON
|
||||
value: '{"type":"service_account"}'
|
||||
documentIndex: 1
|
||||
|
||||
- it: should render ConfigMap with GCS bucket only (no project)
|
||||
set:
|
||||
tap.snapshots.cloud.gcs.bucket: my-gcs-bucket
|
||||
asserts:
|
||||
- hasDocuments:
|
||||
count: 1
|
||||
- isKind:
|
||||
of: ConfigMap
|
||||
documentIndex: 0
|
||||
- equal:
|
||||
path: data.SNAPSHOT_GCS_BUCKET
|
||||
value: "my-gcs-bucket"
|
||||
documentIndex: 0
|
||||
- notExists:
|
||||
path: data.SNAPSHOT_GCS_PROJECT
|
||||
documentIndex: 0
|
||||
|
||||
- it: should render ConfigMap with only prefix
|
||||
set:
|
||||
tap.snapshots.cloud.prefix: snapshots/prod
|
||||
asserts:
|
||||
- hasDocuments:
|
||||
count: 1
|
||||
- isKind:
|
||||
of: ConfigMap
|
||||
documentIndex: 0
|
||||
- equal:
|
||||
path: data.SNAPSHOT_CLOUD_PREFIX
|
||||
value: "snapshots/prod"
|
||||
documentIndex: 0
|
||||
- notExists:
|
||||
path: data.SNAPSHOT_AWS_BUCKET
|
||||
documentIndex: 0
|
||||
- notExists:
|
||||
path: data.SNAPSHOT_AZBLOB_STORAGE_ACCOUNT
|
||||
documentIndex: 0
|
||||
- notExists:
|
||||
path: data.SNAPSHOT_GCS_BUCKET
|
||||
documentIndex: 0
|
||||
|
||||
- it: should render ConfigMap with role ARN without credentials (IAM auth)
|
||||
set:
|
||||
tap.snapshots.cloud.s3.bucket: my-bucket
|
||||
tap.snapshots.cloud.s3.region: us-east-1
|
||||
tap.snapshots.cloud.s3.roleArn: arn:aws:iam::123456789012:role/my-role
|
||||
asserts:
|
||||
- hasDocuments:
|
||||
count: 1
|
||||
- isKind:
|
||||
of: ConfigMap
|
||||
documentIndex: 0
|
||||
- equal:
|
||||
path: data.SNAPSHOT_AWS_ROLE_ARN
|
||||
value: "arn:aws:iam::123456789012:role/my-role"
|
||||
documentIndex: 0
|
||||
- equal:
|
||||
path: data.SNAPSHOT_AWS_BUCKET
|
||||
value: "my-bucket"
|
||||
documentIndex: 0
|
||||
|
||||
- it: should render ConfigMap with externalId
|
||||
set:
|
||||
tap.snapshots.cloud.s3.bucket: my-bucket
|
||||
tap.snapshots.cloud.s3.externalId: ext-12345
|
||||
asserts:
|
||||
- hasDocuments:
|
||||
count: 1
|
||||
- equal:
|
||||
path: data.SNAPSHOT_AWS_EXTERNAL_ID
|
||||
value: "ext-12345"
|
||||
documentIndex: 0
|
||||
|
||||
- it: should set correct namespace
|
||||
release:
|
||||
namespace: kubeshark-ns
|
||||
set:
|
||||
tap.snapshots.cloud.s3.bucket: my-bucket
|
||||
asserts:
|
||||
- equal:
|
||||
path: metadata.namespace
|
||||
value: kubeshark-ns
|
||||
documentIndex: 0
|
||||
9
helm-chart/tests/fixtures/values-azblob.yaml
vendored
Normal file
9
helm-chart/tests/fixtures/values-azblob.yaml
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
tap:
|
||||
snapshots:
|
||||
cloud:
|
||||
provider: azblob
|
||||
prefix: snapshots/
|
||||
azblob:
|
||||
storageAccount: kubesharkstore
|
||||
container: snapshots
|
||||
storageKey: c29tZWtleWhlcmU=
|
||||
8
helm-chart/tests/fixtures/values-cloud-refs.yaml
vendored
Normal file
8
helm-chart/tests/fixtures/values-cloud-refs.yaml
vendored
Normal file
@@ -0,0 +1,8 @@
|
||||
tap:
|
||||
snapshots:
|
||||
cloud:
|
||||
provider: s3
|
||||
configMaps:
|
||||
- my-cloud-config
|
||||
secrets:
|
||||
- my-cloud-secret
|
||||
9
helm-chart/tests/fixtures/values-gcs.yaml
vendored
Normal file
9
helm-chart/tests/fixtures/values-gcs.yaml
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
tap:
|
||||
snapshots:
|
||||
cloud:
|
||||
provider: gcs
|
||||
prefix: snapshots/
|
||||
gcs:
|
||||
bucket: kubeshark-snapshots
|
||||
project: my-gcp-project
|
||||
credentialsJson: '{"type":"service_account","project_id":"my-gcp-project"}'
|
||||
10
helm-chart/tests/fixtures/values-s3.yaml
vendored
Normal file
10
helm-chart/tests/fixtures/values-s3.yaml
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
tap:
|
||||
snapshots:
|
||||
cloud:
|
||||
provider: s3
|
||||
prefix: snapshots/
|
||||
s3:
|
||||
bucket: kubeshark-snapshots
|
||||
region: us-east-1
|
||||
accessKey: AKIAIOSFODNN7EXAMPLE
|
||||
secretKey: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
|
||||
167
helm-chart/tests/hub_deployment_test.yaml
Normal file
167
helm-chart/tests/hub_deployment_test.yaml
Normal file
@@ -0,0 +1,167 @@
|
||||
suite: hub deployment cloud integration
|
||||
templates:
|
||||
- templates/04-hub-deployment.yaml
|
||||
tests:
|
||||
- it: should not render envFrom with default values
|
||||
asserts:
|
||||
- isKind:
|
||||
of: Deployment
|
||||
- notContains:
|
||||
path: spec.template.spec.containers[0].envFrom
|
||||
any: true
|
||||
content:
|
||||
configMapRef:
|
||||
name: RELEASE-NAME-cloud-config
|
||||
|
||||
- it: should render envFrom with inline S3 config
|
||||
set:
|
||||
tap.snapshots.cloud.s3.bucket: my-bucket
|
||||
tap.snapshots.cloud.s3.region: us-east-1
|
||||
asserts:
|
||||
- contains:
|
||||
path: spec.template.spec.containers[0].envFrom
|
||||
content:
|
||||
configMapRef:
|
||||
name: RELEASE-NAME-cloud-config
|
||||
|
||||
- it: should render envFrom secret ref with inline credentials
|
||||
set:
|
||||
tap.snapshots.cloud.s3.bucket: my-bucket
|
||||
tap.snapshots.cloud.s3.accessKey: AKIAIOSFODNN7EXAMPLE
|
||||
tap.snapshots.cloud.s3.secretKey: secret
|
||||
asserts:
|
||||
- contains:
|
||||
path: spec.template.spec.containers[0].envFrom
|
||||
content:
|
||||
configMapRef:
|
||||
name: RELEASE-NAME-cloud-config
|
||||
- contains:
|
||||
path: spec.template.spec.containers[0].envFrom
|
||||
content:
|
||||
secretRef:
|
||||
name: RELEASE-NAME-cloud-secret
|
||||
|
||||
- it: should render envFrom with inline GCS config
|
||||
set:
|
||||
tap.snapshots.cloud.gcs.bucket: my-gcs-bucket
|
||||
tap.snapshots.cloud.gcs.project: my-gcp-project
|
||||
asserts:
|
||||
- contains:
|
||||
path: spec.template.spec.containers[0].envFrom
|
||||
content:
|
||||
configMapRef:
|
||||
name: RELEASE-NAME-cloud-config
|
||||
|
||||
- it: should render envFrom secret ref with inline GCS credentials
|
||||
set:
|
||||
tap.snapshots.cloud.gcs.bucket: my-gcs-bucket
|
||||
tap.snapshots.cloud.gcs.credentialsJson: '{"type":"service_account"}'
|
||||
asserts:
|
||||
- contains:
|
||||
path: spec.template.spec.containers[0].envFrom
|
||||
content:
|
||||
configMapRef:
|
||||
name: RELEASE-NAME-cloud-config
|
||||
- contains:
|
||||
path: spec.template.spec.containers[0].envFrom
|
||||
content:
|
||||
secretRef:
|
||||
name: RELEASE-NAME-cloud-secret
|
||||
|
||||
- it: should render cloud-storage-provider arg when provider is gcs
|
||||
set:
|
||||
tap.snapshots.cloud.provider: gcs
|
||||
asserts:
|
||||
- contains:
|
||||
path: spec.template.spec.containers[0].command
|
||||
content: -cloud-storage-provider
|
||||
- contains:
|
||||
path: spec.template.spec.containers[0].command
|
||||
content: gcs
|
||||
|
||||
- it: should render envFrom with external configMaps
|
||||
set:
|
||||
tap.snapshots.cloud.configMaps:
|
||||
- my-cloud-config
|
||||
- my-other-config
|
||||
asserts:
|
||||
- contains:
|
||||
path: spec.template.spec.containers[0].envFrom
|
||||
content:
|
||||
configMapRef:
|
||||
name: my-cloud-config
|
||||
- contains:
|
||||
path: spec.template.spec.containers[0].envFrom
|
||||
content:
|
||||
configMapRef:
|
||||
name: my-other-config
|
||||
|
||||
- it: should render envFrom with external secrets
|
||||
set:
|
||||
tap.snapshots.cloud.secrets:
|
||||
- my-cloud-secret
|
||||
asserts:
|
||||
- contains:
|
||||
path: spec.template.spec.containers[0].envFrom
|
||||
content:
|
||||
secretRef:
|
||||
name: my-cloud-secret
|
||||
|
||||
- it: should render cloud-storage-provider arg when provider is set
|
||||
set:
|
||||
tap.snapshots.cloud.provider: s3
|
||||
asserts:
|
||||
- contains:
|
||||
path: spec.template.spec.containers[0].command
|
||||
content: -cloud-storage-provider
|
||||
- contains:
|
||||
path: spec.template.spec.containers[0].command
|
||||
content: s3
|
||||
|
||||
- it: should not render cloud-storage-provider arg with default values
|
||||
asserts:
|
||||
- notContains:
|
||||
path: spec.template.spec.containers[0].command
|
||||
content: -cloud-storage-provider
|
||||
|
||||
- it: should render envFrom with tap.secrets
|
||||
set:
|
||||
tap.secrets:
|
||||
- my-existing-secret
|
||||
asserts:
|
||||
- contains:
|
||||
path: spec.template.spec.containers[0].envFrom
|
||||
content:
|
||||
secretRef:
|
||||
name: my-existing-secret
|
||||
|
||||
- it: should render both inline and external refs together
|
||||
set:
|
||||
tap.snapshots.cloud.s3.bucket: my-bucket
|
||||
tap.snapshots.cloud.s3.accessKey: key
|
||||
tap.snapshots.cloud.s3.secretKey: secret
|
||||
tap.snapshots.cloud.configMaps:
|
||||
- ext-config
|
||||
tap.snapshots.cloud.secrets:
|
||||
- ext-secret
|
||||
asserts:
|
||||
- contains:
|
||||
path: spec.template.spec.containers[0].envFrom
|
||||
content:
|
||||
configMapRef:
|
||||
name: ext-config
|
||||
- contains:
|
||||
path: spec.template.spec.containers[0].envFrom
|
||||
content:
|
||||
secretRef:
|
||||
name: ext-secret
|
||||
- contains:
|
||||
path: spec.template.spec.containers[0].envFrom
|
||||
content:
|
||||
configMapRef:
|
||||
name: RELEASE-NAME-cloud-config
|
||||
- contains:
|
||||
path: spec.template.spec.containers[0].envFrom
|
||||
content:
|
||||
secretRef:
|
||||
name: RELEASE-NAME-cloud-secret
|
||||
@@ -1,8 +1,7 @@
|
||||
# find a detailed description here: https://github.com/kubeshark/kubeshark/blob/master/helm-chart/README.md
|
||||
tap:
|
||||
docker:
|
||||
registry: docker.io/kubeshark
|
||||
tag: v52.7
|
||||
tag: ""
|
||||
tagLocked: true
|
||||
imagePullPolicy: Always
|
||||
imagePullSecrets: []
|
||||
@@ -26,17 +25,53 @@ tap:
|
||||
namespaces: []
|
||||
excludedNamespaces: []
|
||||
bpfOverride: ""
|
||||
stopped: false
|
||||
capture:
|
||||
dissection:
|
||||
enabled: true
|
||||
stopAfter: 5m
|
||||
captureSelf: false
|
||||
raw:
|
||||
enabled: true
|
||||
storageSize: 1Gi
|
||||
dbMaxSize: 500Mi
|
||||
delayedDissection:
|
||||
cpu: "1"
|
||||
memory: 4Gi
|
||||
snapshots:
|
||||
local:
|
||||
storageClass: ""
|
||||
storageSize: 20Gi
|
||||
cloud:
|
||||
provider: ""
|
||||
prefix: ""
|
||||
configMaps: []
|
||||
secrets: []
|
||||
s3:
|
||||
bucket: ""
|
||||
region: ""
|
||||
accessKey: ""
|
||||
secretKey: ""
|
||||
roleArn: ""
|
||||
externalId: ""
|
||||
azblob:
|
||||
storageAccount: ""
|
||||
container: ""
|
||||
storageKey: ""
|
||||
gcs:
|
||||
bucket: ""
|
||||
project: ""
|
||||
credentialsJson: ""
|
||||
release:
|
||||
repo: https://helm.kubeshark.co
|
||||
repo: https://helm.kubeshark.com
|
||||
name: kubeshark
|
||||
namespace: default
|
||||
helmChartPath: ""
|
||||
persistentStorage: false
|
||||
persistentStorageStatic: false
|
||||
persistentStoragePvcVolumeMode: FileSystem
|
||||
efsFileSytemIdAndPath: ""
|
||||
secrets: []
|
||||
storageLimit: 5Gi
|
||||
storageLimit: 10Gi
|
||||
storageClass: standard
|
||||
dryRun: false
|
||||
dns:
|
||||
@@ -132,20 +167,25 @@ tap:
|
||||
canDelete: true
|
||||
canUpdateTargetedPods: true
|
||||
canStopTrafficCapturing: true
|
||||
canControlDissection: true
|
||||
showAdminConsoleLink: true
|
||||
ingress:
|
||||
enabled: false
|
||||
className: ""
|
||||
host: ks.svc.cluster.local
|
||||
path: /
|
||||
tls: []
|
||||
annotations: {}
|
||||
priorityClass: ""
|
||||
routing:
|
||||
front:
|
||||
basePath: ""
|
||||
ipv6: true
|
||||
debug: false
|
||||
dashboard:
|
||||
streamingType: connect-rpc
|
||||
completeStreamingEnabled: true
|
||||
clusterWideMapEnabled: false
|
||||
telemetry:
|
||||
enabled: true
|
||||
resourceGuard:
|
||||
@@ -157,8 +197,7 @@ tap:
|
||||
sentry:
|
||||
enabled: false
|
||||
environment: production
|
||||
defaultFilter: "!dns and !error"
|
||||
liveConfigMapChangesDisabled: false
|
||||
defaultFilter: ""
|
||||
globalFilter: ""
|
||||
enabledDissectors:
|
||||
- amqp
|
||||
@@ -171,6 +210,10 @@ tap:
|
||||
- ldap
|
||||
- radius
|
||||
- diameter
|
||||
- udp-flow
|
||||
- tcp-flow
|
||||
- udp-conn
|
||||
- tcp-conn
|
||||
portMapping:
|
||||
http:
|
||||
- 80
|
||||
@@ -197,8 +240,8 @@ tap:
|
||||
view: flamegraph
|
||||
misc:
|
||||
jsonTTL: 5m
|
||||
pcapTTL: 10s
|
||||
pcapErrorTTL: 60s
|
||||
pcapTTL: "0"
|
||||
pcapErrorTTL: "0"
|
||||
trafficSampleRate: 100
|
||||
tcpStreamChannelTimeoutMs: 10000
|
||||
tcpStreamChannelTimeoutShow: false
|
||||
@@ -206,6 +249,8 @@ tap:
|
||||
duplicateTimeframe: 200ms
|
||||
detectDuplicates: false
|
||||
staleTimeoutSeconds: 30
|
||||
tcpFlowTimeout: 1200
|
||||
udpFlowTimeout: 1200
|
||||
securityContext:
|
||||
privileged: true
|
||||
appArmorProfile:
|
||||
@@ -230,11 +275,12 @@ tap:
|
||||
- SYS_RESOURCE
|
||||
- IPC_LOCK
|
||||
mountBpf: true
|
||||
hostNetwork: true
|
||||
logs:
|
||||
file: ""
|
||||
grep: ""
|
||||
pcapdump:
|
||||
enabled: true
|
||||
enabled: false
|
||||
timeInterval: 1m
|
||||
maxTime: 1h
|
||||
maxSize: 500MB
|
||||
@@ -247,12 +293,14 @@ kube:
|
||||
dumpLogs: false
|
||||
headless: false
|
||||
license: ""
|
||||
cloudApiUrl: https://api.kubeshark.com
|
||||
cloudLicenseEnabled: true
|
||||
aiAssistantEnabled: true
|
||||
demoModeEnabled: false
|
||||
supportChatEnabled: false
|
||||
betaEnabled: false
|
||||
internetConnectivity: true
|
||||
scripting:
|
||||
enabled: false
|
||||
env: {}
|
||||
source: ""
|
||||
sources: []
|
||||
|
||||
57
integration/README.md
Normal file
57
integration/README.md
Normal file
@@ -0,0 +1,57 @@
|
||||
# Integration Tests
|
||||
|
||||
This directory contains integration tests that run against a real Kubernetes cluster.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
1. **Kubernetes cluster** - A running cluster accessible via `kubectl`
|
||||
2. **kubectl** - Configured with appropriate context
|
||||
3. **Go 1.21+** - For running tests
|
||||
|
||||
## Running Tests
|
||||
|
||||
```bash
|
||||
# Run all integration tests
|
||||
make test-integration
|
||||
|
||||
# Run specific command tests
|
||||
make test-integration-mcp
|
||||
|
||||
# Run with verbose output
|
||||
make test-integration-verbose
|
||||
|
||||
# Run with custom timeout (default: 5m)
|
||||
INTEGRATION_TIMEOUT=10m make test-integration
|
||||
```
|
||||
|
||||
## Environment Variables
|
||||
|
||||
| Variable | Default | Description |
|
||||
|----------|---------|-------------|
|
||||
| `KUBESHARK_BINARY` | Auto-built | Path to pre-built kubeshark binary |
|
||||
| `INTEGRATION_TIMEOUT` | `5m` | Test timeout duration |
|
||||
| `KUBECONFIG` | `~/.kube/config` | Kubernetes config file |
|
||||
| `INTEGRATION_SKIP_CLEANUP` | `false` | Skip cleanup after tests (for debugging) |
|
||||
|
||||
## Test Structure
|
||||
|
||||
```
|
||||
integration/
|
||||
├── README.md # This file
|
||||
├── common_test.go # Shared test helpers
|
||||
├── mcp_test.go # MCP command integration tests
|
||||
├── tap_test.go # Tap command tests (future)
|
||||
└── ... # Additional command tests
|
||||
```
|
||||
|
||||
## Writing New Tests
|
||||
|
||||
1. Create `<command>_test.go` with build tag `//go:build integration`
|
||||
2. Use helpers from `common_test.go`: `requireKubernetesCluster(t)`, `getKubesharkBinary(t)`, `cleanupKubeshark(t, binary)`
|
||||
|
||||
## CI/CD Integration
|
||||
|
||||
```bash
|
||||
# JSON output for CI parsing
|
||||
go test -tags=integration -json ./integration/...
|
||||
```
|
||||
217
integration/common_test.go
Normal file
217
integration/common_test.go
Normal file
@@ -0,0 +1,217 @@
|
||||
//go:build integration
|
||||
|
||||
// Package integration contains integration tests that run against a real Kubernetes cluster.
|
||||
// Run with: go test -tags=integration ./integration/...
|
||||
package integration
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
binaryName = "kubeshark"
|
||||
defaultTimeout = 2 * time.Minute
|
||||
startupTimeout = 3 * time.Minute
|
||||
)
|
||||
|
||||
var (
|
||||
// binaryPath caches the built binary path
|
||||
binaryPath string
|
||||
buildOnce sync.Once
|
||||
buildErr error
|
||||
)
|
||||
|
||||
// requireKubernetesCluster skips the test if no Kubernetes cluster is available.
|
||||
func requireKubernetesCluster(t *testing.T) {
|
||||
t.Helper()
|
||||
if !hasKubernetesCluster() {
|
||||
t.Skip("Skipping: no Kubernetes cluster available")
|
||||
}
|
||||
}
|
||||
|
||||
// hasKubernetesCluster returns true if a Kubernetes cluster is accessible.
|
||||
func hasKubernetesCluster() bool {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
return exec.CommandContext(ctx, "kubectl", "cluster-info").Run() == nil
|
||||
}
|
||||
|
||||
// getKubesharkBinary returns the path to the kubeshark binary, building it if necessary.
|
||||
func getKubesharkBinary(t *testing.T) string {
|
||||
t.Helper()
|
||||
|
||||
// Check if binary path is provided via environment
|
||||
if envBinary := os.Getenv("KUBESHARK_BINARY"); envBinary != "" {
|
||||
if _, err := os.Stat(envBinary); err == nil {
|
||||
return envBinary
|
||||
}
|
||||
t.Fatalf("KUBESHARK_BINARY set but file not found: %s", envBinary)
|
||||
}
|
||||
|
||||
// Build once per test run
|
||||
buildOnce.Do(func() {
|
||||
binaryPath, buildErr = buildBinary(t)
|
||||
})
|
||||
|
||||
if buildErr != nil {
|
||||
t.Fatalf("Failed to build binary: %v", buildErr)
|
||||
}
|
||||
|
||||
return binaryPath
|
||||
}
|
||||
|
||||
// buildBinary compiles the binary and returns its path.
|
||||
func buildBinary(t *testing.T) (string, error) {
|
||||
t.Helper()
|
||||
|
||||
// Find project root (directory containing go.mod)
|
||||
projectRoot, err := findProjectRoot()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("finding project root: %w", err)
|
||||
}
|
||||
|
||||
outputPath := filepath.Join(projectRoot, "bin", binaryName+"_integration_test")
|
||||
|
||||
t.Logf("Building %s binary at %s", binaryName, outputPath)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
|
||||
defer cancel()
|
||||
|
||||
cmd := exec.CommandContext(ctx, "go", "build",
|
||||
"-o", outputPath,
|
||||
filepath.Join(projectRoot, binaryName+".go"),
|
||||
)
|
||||
cmd.Dir = projectRoot
|
||||
|
||||
output, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("build failed: %w\nOutput: %s", err, output)
|
||||
}
|
||||
|
||||
return outputPath, nil
|
||||
}
|
||||
|
||||
// findProjectRoot locates the project root by finding go.mod
|
||||
func findProjectRoot() (string, error) {
|
||||
dir, err := os.Getwd()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
for {
|
||||
if _, err := os.Stat(filepath.Join(dir, "go.mod")); err == nil {
|
||||
return dir, nil
|
||||
}
|
||||
|
||||
parent := filepath.Dir(dir)
|
||||
if parent == dir {
|
||||
return "", fmt.Errorf("could not find go.mod in any parent directory")
|
||||
}
|
||||
dir = parent
|
||||
}
|
||||
}
|
||||
|
||||
// runKubeshark executes the kubeshark binary with the given arguments.
|
||||
// Returns combined stdout/stderr and any error.
|
||||
func runKubeshark(t *testing.T, binary string, args ...string) (string, error) {
|
||||
t.Helper()
|
||||
return runKubesharkWithTimeout(t, binary, defaultTimeout, args...)
|
||||
}
|
||||
|
||||
// runKubesharkWithTimeout executes the kubeshark binary with a custom timeout.
|
||||
func runKubesharkWithTimeout(t *testing.T, binary string, timeout time.Duration, args ...string) (string, error) {
|
||||
t.Helper()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), timeout)
|
||||
defer cancel()
|
||||
|
||||
t.Logf("Running: %s %s", binary, strings.Join(args, " "))
|
||||
|
||||
cmd := exec.CommandContext(ctx, binary, args...)
|
||||
|
||||
var stdout, stderr bytes.Buffer
|
||||
cmd.Stdout = &stdout
|
||||
cmd.Stderr = &stderr
|
||||
|
||||
err := cmd.Run()
|
||||
|
||||
output := stdout.String()
|
||||
if stderr.Len() > 0 {
|
||||
output += "\n[stderr]\n" + stderr.String()
|
||||
}
|
||||
|
||||
if ctx.Err() == context.DeadlineExceeded {
|
||||
return output, fmt.Errorf("command timed out after %v", timeout)
|
||||
}
|
||||
|
||||
return output, err
|
||||
}
|
||||
|
||||
// cleanupKubeshark ensures Kubeshark is not running in the cluster.
|
||||
func cleanupKubeshark(t *testing.T, binary string) {
|
||||
t.Helper()
|
||||
|
||||
if os.Getenv("INTEGRATION_SKIP_CLEANUP") == "true" {
|
||||
t.Log("Skipping cleanup (INTEGRATION_SKIP_CLEANUP=true)")
|
||||
return
|
||||
}
|
||||
|
||||
t.Log("Cleaning up any existing Kubeshark installation...")
|
||||
|
||||
// Run clean command, ignore errors (might not be installed)
|
||||
_, _ = runKubeshark(t, binary, "clean")
|
||||
|
||||
// Wait a moment for resources to be deleted
|
||||
time.Sleep(2 * time.Second)
|
||||
}
|
||||
|
||||
// waitForKubesharkReady waits for Kubeshark to be ready after starting.
|
||||
func waitForKubesharkReady(t *testing.T, binary string, timeout time.Duration) error {
|
||||
t.Helper()
|
||||
|
||||
t.Log("Waiting for Kubeshark to be ready...")
|
||||
|
||||
deadline := time.Now().Add(timeout)
|
||||
|
||||
for time.Now().Before(deadline) {
|
||||
// Check if pods are running
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
cmd := exec.CommandContext(ctx, "kubectl", "get", "pods", "-l", "app.kubernetes.io/name=kubeshark", "-o", "jsonpath={.items[*].status.phase}")
|
||||
output, err := cmd.Output()
|
||||
cancel()
|
||||
|
||||
if err == nil && strings.Contains(string(output), "Running") {
|
||||
t.Log("Kubeshark is ready")
|
||||
return nil
|
||||
}
|
||||
|
||||
time.Sleep(5 * time.Second)
|
||||
}
|
||||
|
||||
return fmt.Errorf("timeout waiting for Kubeshark to be ready")
|
||||
}
|
||||
|
||||
// isKubesharkRunning checks if Kubeshark is currently running in the cluster.
|
||||
func isKubesharkRunning(t *testing.T) bool {
|
||||
t.Helper()
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
cmd := exec.CommandContext(ctx, "kubectl", "get", "pods", "-l", "app.kubernetes.io/name=kubeshark", "-o", "name")
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return strings.TrimSpace(string(output)) != ""
|
||||
}
|
||||
529
integration/mcp_test.go
Normal file
529
integration/mcp_test.go
Normal file
@@ -0,0 +1,529 @@
|
||||
//go:build integration
|
||||
|
||||
package integration
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
// MCPRequest represents a JSON-RPC request
|
||||
type MCPRequest struct {
|
||||
JSONRPC string `json:"jsonrpc"`
|
||||
ID int `json:"id"`
|
||||
Method string `json:"method"`
|
||||
Params interface{} `json:"params,omitempty"`
|
||||
}
|
||||
|
||||
// MCPResponse represents a JSON-RPC response
|
||||
type MCPResponse struct {
|
||||
JSONRPC string `json:"jsonrpc"`
|
||||
ID int `json:"id"`
|
||||
Result json.RawMessage `json:"result,omitempty"`
|
||||
Error *MCPError `json:"error,omitempty"`
|
||||
}
|
||||
|
||||
// MCPError represents a JSON-RPC error
|
||||
type MCPError struct {
|
||||
Code int `json:"code"`
|
||||
Message string `json:"message"`
|
||||
}
|
||||
|
||||
// mcpSession represents a running MCP server session
|
||||
type mcpSession struct {
|
||||
cmd *exec.Cmd
|
||||
stdin io.WriteCloser
|
||||
stdout *bufio.Reader
|
||||
stderr *bytes.Buffer // Captured stderr for debugging
|
||||
cancel context.CancelFunc
|
||||
}
|
||||
|
||||
// startMCPSession starts an MCP server and returns a session for sending requests.
|
||||
// By default, starts in read-only mode (no --allow-destructive).
|
||||
func startMCPSession(t *testing.T, binary string, args ...string) *mcpSession {
|
||||
t.Helper()
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
cmdArgs := append([]string{"mcp"}, args...)
|
||||
cmd := exec.CommandContext(ctx, binary, cmdArgs...)
|
||||
|
||||
stdin, err := cmd.StdinPipe()
|
||||
if err != nil {
|
||||
cancel()
|
||||
t.Fatalf("Failed to create stdin pipe: %v", err)
|
||||
}
|
||||
|
||||
stdout, err := cmd.StdoutPipe()
|
||||
if err != nil {
|
||||
cancel()
|
||||
t.Fatalf("Failed to create stdout pipe: %v", err)
|
||||
}
|
||||
|
||||
// Capture stderr for debugging
|
||||
var stderrBuf bytes.Buffer
|
||||
cmd.Stderr = &stderrBuf
|
||||
|
||||
if err := cmd.Start(); err != nil {
|
||||
cancel()
|
||||
t.Fatalf("Failed to start MCP server: %v", err)
|
||||
}
|
||||
|
||||
return &mcpSession{
|
||||
cmd: cmd,
|
||||
stdin: stdin,
|
||||
stdout: bufio.NewReader(stdout),
|
||||
stderr: &stderrBuf,
|
||||
cancel: cancel,
|
||||
}
|
||||
}
|
||||
|
||||
// startMCPSessionWithDestructive starts an MCP server with --allow-destructive flag.
|
||||
func startMCPSessionWithDestructive(t *testing.T, binary string, args ...string) *mcpSession {
|
||||
t.Helper()
|
||||
allArgs := append([]string{"--allow-destructive"}, args...)
|
||||
return startMCPSession(t, binary, allArgs...)
|
||||
}
|
||||
|
||||
// sendRequest sends a JSON-RPC request and returns the response (30s timeout).
|
||||
func (s *mcpSession) sendRequest(t *testing.T, req MCPRequest) MCPResponse {
|
||||
t.Helper()
|
||||
return s.sendRequestWithTimeout(t, req, 30*time.Second)
|
||||
}
|
||||
|
||||
// sendRequestWithTimeout sends a JSON-RPC request with a custom timeout.
|
||||
func (s *mcpSession) sendRequestWithTimeout(t *testing.T, req MCPRequest, timeout time.Duration) MCPResponse {
|
||||
t.Helper()
|
||||
|
||||
reqBytes, err := json.Marshal(req)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to marshal request: %v", err)
|
||||
}
|
||||
|
||||
t.Logf("Sending: %s", string(reqBytes))
|
||||
|
||||
if _, err := s.stdin.Write(append(reqBytes, '\n')); err != nil {
|
||||
t.Fatalf("Failed to write request: %v", err)
|
||||
}
|
||||
|
||||
// Read response with timeout
|
||||
responseChan := make(chan string, 1)
|
||||
errChan := make(chan error, 1)
|
||||
|
||||
go func() {
|
||||
line, err := s.stdout.ReadString('\n')
|
||||
if err != nil {
|
||||
errChan <- err
|
||||
return
|
||||
}
|
||||
responseChan <- line
|
||||
}()
|
||||
|
||||
select {
|
||||
case line := <-responseChan:
|
||||
t.Logf("Received: %s", strings.TrimSpace(line))
|
||||
var resp MCPResponse
|
||||
if err := json.Unmarshal([]byte(line), &resp); err != nil {
|
||||
t.Fatalf("Failed to unmarshal response: %v\nResponse: %s", err, line)
|
||||
}
|
||||
return resp
|
||||
case err := <-errChan:
|
||||
t.Fatalf("Failed to read response: %v", err)
|
||||
return MCPResponse{}
|
||||
case <-time.After(timeout):
|
||||
t.Fatalf("Timeout waiting for MCP response after %v", timeout)
|
||||
return MCPResponse{}
|
||||
}
|
||||
}
|
||||
|
||||
// callTool invokes an MCP tool and returns the response (30s timeout).
|
||||
func (s *mcpSession) callTool(t *testing.T, id int, toolName string, args map[string]interface{}) MCPResponse {
|
||||
t.Helper()
|
||||
return s.callToolWithTimeout(t, id, toolName, args, 30*time.Second)
|
||||
}
|
||||
|
||||
// callToolWithTimeout invokes an MCP tool with a custom timeout.
|
||||
func (s *mcpSession) callToolWithTimeout(t *testing.T, id int, toolName string, args map[string]interface{}, timeout time.Duration) MCPResponse {
|
||||
t.Helper()
|
||||
|
||||
return s.sendRequestWithTimeout(t, MCPRequest{
|
||||
JSONRPC: "2.0",
|
||||
ID: id,
|
||||
Method: "tools/call",
|
||||
Params: map[string]interface{}{
|
||||
"name": toolName,
|
||||
"arguments": args,
|
||||
},
|
||||
}, timeout)
|
||||
}
|
||||
|
||||
// close terminates the MCP session.
|
||||
func (s *mcpSession) close() {
|
||||
s.cancel()
|
||||
_ = s.cmd.Wait()
|
||||
}
|
||||
|
||||
// getStderr returns any captured stderr output (useful for debugging failures).
|
||||
func (s *mcpSession) getStderr() string {
|
||||
if s.stderr == nil {
|
||||
return ""
|
||||
}
|
||||
return s.stderr.String()
|
||||
}
|
||||
|
||||
// initialize sends the MCP initialize request and returns the response.
|
||||
func (s *mcpSession) initialize(t *testing.T, id int) MCPResponse {
|
||||
t.Helper()
|
||||
return s.sendRequest(t, MCPRequest{
|
||||
JSONRPC: "2.0",
|
||||
ID: id,
|
||||
Method: "initialize",
|
||||
Params: map[string]interface{}{
|
||||
"protocolVersion": "2024-11-05",
|
||||
"capabilities": map[string]interface{}{},
|
||||
"clientInfo": map[string]interface{}{"name": "test", "version": "1.0"},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// TestMCP_Initialize tests the MCP initialization handshake.
|
||||
func TestMCP_Initialize(t *testing.T) {
|
||||
requireKubernetesCluster(t)
|
||||
session := startMCPSession(t, getKubesharkBinary(t))
|
||||
defer session.close()
|
||||
|
||||
resp := session.initialize(t, 1)
|
||||
if resp.Error != nil {
|
||||
t.Fatalf("Initialize failed: %s", resp.Error.Message)
|
||||
}
|
||||
|
||||
var result map[string]interface{}
|
||||
if err := json.Unmarshal(resp.Result, &result); err != nil {
|
||||
t.Fatalf("Failed to parse result: %v", err)
|
||||
}
|
||||
|
||||
if _, ok := result["capabilities"]; !ok {
|
||||
t.Error("Response missing capabilities")
|
||||
}
|
||||
if _, ok := result["serverInfo"]; !ok {
|
||||
t.Error("Response missing serverInfo")
|
||||
}
|
||||
}
|
||||
|
||||
// TestMCP_ToolsList_ReadOnly tests that tools/list returns only safe tools in read-only mode.
|
||||
func TestMCP_ToolsList_ReadOnly(t *testing.T) {
|
||||
requireKubernetesCluster(t)
|
||||
session := startMCPSession(t, getKubesharkBinary(t))
|
||||
defer session.close()
|
||||
|
||||
session.initialize(t, 1)
|
||||
resp := session.sendRequest(t, MCPRequest{JSONRPC: "2.0", ID: 2, Method: "tools/list"})
|
||||
if resp.Error != nil {
|
||||
t.Fatalf("tools/list failed: %s", resp.Error.Message)
|
||||
}
|
||||
|
||||
var result struct {
|
||||
Tools []struct{ Name string `json:"name"` } `json:"tools"`
|
||||
}
|
||||
if err := json.Unmarshal(resp.Result, &result); err != nil {
|
||||
t.Fatalf("Failed to parse result: %v", err)
|
||||
}
|
||||
|
||||
toolNames := make(map[string]bool)
|
||||
for _, tool := range result.Tools {
|
||||
toolNames[tool.Name] = true
|
||||
}
|
||||
|
||||
if !toolNames["check_kubeshark_status"] {
|
||||
t.Error("Missing expected tool: check_kubeshark_status")
|
||||
}
|
||||
if toolNames["start_kubeshark"] || toolNames["stop_kubeshark"] {
|
||||
t.Error("Destructive tools should not be available in read-only mode")
|
||||
}
|
||||
}
|
||||
|
||||
// TestMCP_ToolsList_WithDestructive tests that tools/list includes destructive tools when flag is set.
|
||||
func TestMCP_ToolsList_WithDestructive(t *testing.T) {
|
||||
requireKubernetesCluster(t)
|
||||
session := startMCPSessionWithDestructive(t, getKubesharkBinary(t))
|
||||
defer session.close()
|
||||
|
||||
session.initialize(t, 1)
|
||||
resp := session.sendRequest(t, MCPRequest{JSONRPC: "2.0", ID: 2, Method: "tools/list"})
|
||||
if resp.Error != nil {
|
||||
t.Fatalf("tools/list failed: %s", resp.Error.Message)
|
||||
}
|
||||
|
||||
var result struct {
|
||||
Tools []struct{ Name string `json:"name"` } `json:"tools"`
|
||||
}
|
||||
if err := json.Unmarshal(resp.Result, &result); err != nil {
|
||||
t.Fatalf("Failed to parse result: %v", err)
|
||||
}
|
||||
|
||||
toolNames := make(map[string]bool)
|
||||
for _, tool := range result.Tools {
|
||||
toolNames[tool.Name] = true
|
||||
}
|
||||
|
||||
for _, expected := range []string{"check_kubeshark_status", "start_kubeshark", "stop_kubeshark"} {
|
||||
if !toolNames[expected] {
|
||||
t.Errorf("Missing expected tool: %s", expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestMCP_CheckKubesharkStatus_NotRunning tests check_kubeshark_status when Kubeshark is not running.
|
||||
func TestMCP_CheckKubesharkStatus_NotRunning(t *testing.T) {
|
||||
requireKubernetesCluster(t)
|
||||
binary := getKubesharkBinary(t)
|
||||
cleanupKubeshark(t, binary)
|
||||
|
||||
session := startMCPSession(t, binary)
|
||||
defer session.close()
|
||||
|
||||
session.initialize(t, 1)
|
||||
resp := session.callTool(t, 2, "check_kubeshark_status", nil)
|
||||
if resp.Error != nil {
|
||||
t.Fatalf("check_kubeshark_status failed: %s", resp.Error.Message)
|
||||
}
|
||||
|
||||
var result struct {
|
||||
Content []struct{ Text string `json:"text"` } `json:"content"`
|
||||
}
|
||||
if err := json.Unmarshal(resp.Result, &result); err != nil {
|
||||
t.Fatalf("Failed to parse result: %v", err)
|
||||
}
|
||||
|
||||
if len(result.Content) == 0 || (!strings.Contains(result.Content[0].Text, "not running") && !strings.Contains(result.Content[0].Text, "NOT")) {
|
||||
t.Errorf("Expected 'not running' status")
|
||||
}
|
||||
}
|
||||
|
||||
// TestMCP_StartKubeshark tests the start_kubeshark tool.
|
||||
func TestMCP_StartKubeshark(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping in short mode")
|
||||
}
|
||||
requireKubernetesCluster(t)
|
||||
binary := getKubesharkBinary(t)
|
||||
cleanupKubeshark(t, binary)
|
||||
t.Cleanup(func() { cleanupKubeshark(t, binary) })
|
||||
|
||||
session := startMCPSessionWithDestructive(t, binary)
|
||||
defer session.close()
|
||||
|
||||
session.initialize(t, 1)
|
||||
resp := session.callToolWithTimeout(t, 2, "start_kubeshark", nil, 3*time.Minute)
|
||||
if resp.Error != nil {
|
||||
t.Fatalf("start_kubeshark failed: %s", resp.Error.Message)
|
||||
}
|
||||
|
||||
if !isKubesharkRunning(t) {
|
||||
t.Error("Kubeshark should be running after start_kubeshark")
|
||||
}
|
||||
}
|
||||
|
||||
// TestMCP_StartKubeshark_WithoutFlag tests that start_kubeshark fails without --allow-destructive.
|
||||
func TestMCP_StartKubeshark_WithoutFlag(t *testing.T) {
|
||||
requireKubernetesCluster(t)
|
||||
session := startMCPSession(t, getKubesharkBinary(t))
|
||||
defer session.close()
|
||||
|
||||
session.initialize(t, 1)
|
||||
resp := session.callTool(t, 2, "start_kubeshark", nil)
|
||||
|
||||
var result struct {
|
||||
Content []struct{ Text string `json:"text"` } `json:"content"`
|
||||
IsError bool `json:"isError"`
|
||||
}
|
||||
if err := json.Unmarshal(resp.Result, &result); err != nil {
|
||||
t.Fatalf("Failed to parse result: %v", err)
|
||||
}
|
||||
|
||||
if !result.IsError {
|
||||
t.Error("Expected isError=true without --allow-destructive")
|
||||
}
|
||||
}
|
||||
|
||||
// TestMCP_StopKubeshark tests the stop_kubeshark tool.
|
||||
func TestMCP_StopKubeshark(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping in short mode")
|
||||
}
|
||||
requireKubernetesCluster(t)
|
||||
binary := getKubesharkBinary(t)
|
||||
|
||||
session := startMCPSessionWithDestructive(t, binary)
|
||||
defer session.close()
|
||||
|
||||
session.initialize(t, 0)
|
||||
|
||||
// Start Kubeshark if not running
|
||||
if !isKubesharkRunning(t) {
|
||||
resp := session.callToolWithTimeout(t, 1, "start_kubeshark", nil, 2*time.Minute)
|
||||
if resp.Error != nil {
|
||||
t.Skipf("Could not start Kubeshark: %v", resp.Error.Message)
|
||||
}
|
||||
}
|
||||
|
||||
resp := session.callToolWithTimeout(t, 2, "stop_kubeshark", nil, 2*time.Minute)
|
||||
if resp.Error != nil {
|
||||
t.Fatalf("stop_kubeshark failed: %s", resp.Error.Message)
|
||||
}
|
||||
|
||||
time.Sleep(5 * time.Second)
|
||||
if isKubesharkRunning(t) {
|
||||
t.Error("Kubeshark should not be running after stop_kubeshark")
|
||||
}
|
||||
}
|
||||
|
||||
// TestMCP_StopKubeshark_WithoutFlag tests that stop_kubeshark fails without --allow-destructive.
|
||||
func TestMCP_StopKubeshark_WithoutFlag(t *testing.T) {
|
||||
requireKubernetesCluster(t)
|
||||
session := startMCPSession(t, getKubesharkBinary(t))
|
||||
defer session.close()
|
||||
|
||||
session.initialize(t, 1)
|
||||
resp := session.callTool(t, 2, "stop_kubeshark", nil)
|
||||
|
||||
var result struct {
|
||||
IsError bool `json:"isError"`
|
||||
}
|
||||
if err := json.Unmarshal(resp.Result, &result); err != nil {
|
||||
t.Fatalf("Failed to parse result: %v", err)
|
||||
}
|
||||
|
||||
if !result.IsError {
|
||||
t.Error("Expected isError=true without --allow-destructive")
|
||||
}
|
||||
}
|
||||
|
||||
// TestMCP_FullLifecycle tests the complete lifecycle: check -> start -> check -> stop -> check
|
||||
func TestMCP_FullLifecycle(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Skipping in short mode")
|
||||
}
|
||||
requireKubernetesCluster(t)
|
||||
binary := getKubesharkBinary(t)
|
||||
cleanupKubeshark(t, binary)
|
||||
|
||||
session := startMCPSessionWithDestructive(t, binary)
|
||||
defer session.close()
|
||||
|
||||
session.initialize(t, 1)
|
||||
|
||||
// Check -> Start -> Check -> Stop -> Check
|
||||
if resp := session.callTool(t, 2, "check_kubeshark_status", nil); resp.Error != nil {
|
||||
t.Fatalf("Initial status check failed: %s", resp.Error.Message)
|
||||
}
|
||||
|
||||
if resp := session.callToolWithTimeout(t, 3, "start_kubeshark", nil, 3*time.Minute); resp.Error != nil {
|
||||
t.Fatalf("Start failed: %s", resp.Error.Message)
|
||||
}
|
||||
if err := waitForKubesharkReady(t, binary, startupTimeout); err != nil {
|
||||
t.Fatalf("Kubeshark did not become ready: %v", err)
|
||||
}
|
||||
|
||||
if resp := session.callTool(t, 4, "check_kubeshark_status", nil); resp.Error != nil {
|
||||
t.Fatalf("Status check after start failed: %s", resp.Error.Message)
|
||||
}
|
||||
|
||||
if resp := session.callToolWithTimeout(t, 5, "stop_kubeshark", nil, 2*time.Minute); resp.Error != nil {
|
||||
t.Fatalf("Stop failed: %s", resp.Error.Message)
|
||||
}
|
||||
time.Sleep(5 * time.Second)
|
||||
|
||||
if resp := session.callTool(t, 6, "check_kubeshark_status", nil); resp.Error != nil {
|
||||
t.Fatalf("Final status check failed: %s", resp.Error.Message)
|
||||
}
|
||||
}
|
||||
|
||||
// TestMCP_APIToolsRequireKubeshark tests that API tools return helpful errors when Kubeshark isn't running.
|
||||
func TestMCP_APIToolsRequireKubeshark(t *testing.T) {
|
||||
requireKubernetesCluster(t)
|
||||
binary := getKubesharkBinary(t)
|
||||
cleanupKubeshark(t, binary)
|
||||
|
||||
session := startMCPSession(t, binary)
|
||||
defer session.close()
|
||||
|
||||
session.initialize(t, 1)
|
||||
|
||||
for i, tool := range []string{"list_workloads", "list_api_calls", "get_api_stats"} {
|
||||
resp := session.callTool(t, i+2, tool, nil)
|
||||
// Either error or helpful message is acceptable
|
||||
if resp.Error != nil {
|
||||
t.Logf("%s returned error (expected): %s", tool, resp.Error.Message)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestMCP_SetFlags tests that --set flags are passed correctly.
|
||||
func TestMCP_SetFlags(t *testing.T) {
|
||||
requireKubernetesCluster(t)
|
||||
session := startMCPSession(t, getKubesharkBinary(t), "--set", "tap.namespaces={default}")
|
||||
defer session.close()
|
||||
|
||||
session.initialize(t, 1)
|
||||
resp := session.sendRequest(t, MCPRequest{JSONRPC: "2.0", ID: 2, Method: "tools/list"})
|
||||
if resp.Error != nil {
|
||||
t.Fatalf("tools/list failed with --set flags: %s", resp.Error.Message)
|
||||
}
|
||||
}
|
||||
|
||||
// BenchmarkMCP_CheckStatus benchmarks the check_kubeshark_status tool.
|
||||
func BenchmarkMCP_CheckStatus(b *testing.B) {
|
||||
if testing.Short() {
|
||||
b.Skip("Skipping benchmark in short mode")
|
||||
}
|
||||
if !hasKubernetesCluster() {
|
||||
b.Skip("Skipping: no Kubernetes cluster available")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
cmd := exec.CommandContext(ctx, getKubesharkBinary(b), "mcp")
|
||||
stdin, _ := cmd.StdinPipe()
|
||||
stdout, _ := cmd.StdoutPipe()
|
||||
reader := bufio.NewReader(stdout)
|
||||
|
||||
if err := cmd.Start(); err != nil {
|
||||
b.Fatalf("Failed to start MCP: %v", err)
|
||||
}
|
||||
defer func() { cancel(); _ = cmd.Wait() }()
|
||||
|
||||
// Initialize
|
||||
initReq, _ := json.Marshal(MCPRequest{
|
||||
JSONRPC: "2.0", ID: 0, Method: "initialize",
|
||||
Params: map[string]interface{}{
|
||||
"protocolVersion": "2024-11-05",
|
||||
"capabilities": map[string]interface{}{},
|
||||
"clientInfo": map[string]interface{}{"name": "bench", "version": "1.0"},
|
||||
},
|
||||
})
|
||||
_, _ = stdin.Write(append(initReq, '\n'))
|
||||
_, _ = reader.ReadString('\n')
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
req, _ := json.Marshal(MCPRequest{
|
||||
JSONRPC: "2.0", ID: i + 1, Method: "tools/call",
|
||||
Params: map[string]interface{}{"name": "check_kubeshark_status", "arguments": map[string]interface{}{}},
|
||||
})
|
||||
if _, err := stdin.Write(append(req, '\n')); err != nil {
|
||||
b.Fatalf("Write failed: %v", err)
|
||||
}
|
||||
if _, err := reader.ReadString('\n'); err != nil {
|
||||
b.Fatalf("Read failed: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -8,5 +8,5 @@ const (
|
||||
HubServiceName = HubPodName
|
||||
K8sAllNamespaces = ""
|
||||
MinKubernetesServerVersion = "1.16.0"
|
||||
AppLabelKey = "app.kubeshark.co/app"
|
||||
AppLabelKey = "app.kubeshark.com/app"
|
||||
)
|
||||
|
||||
@@ -67,7 +67,10 @@ func (h *Helm) Install() (rel *release.Release, err error) {
|
||||
client.Namespace = h.releaseNamespace
|
||||
client.ReleaseName = h.releaseName
|
||||
|
||||
chartPath := os.Getenv(fmt.Sprintf("%s_HELM_CHART_PATH", strings.ToUpper(misc.Program)))
|
||||
chartPath := config.Config.Tap.Release.HelmChartPath
|
||||
if chartPath == "" {
|
||||
chartPath = os.Getenv(fmt.Sprintf("%s_HELM_CHART_PATH", strings.ToUpper(misc.Program)))
|
||||
}
|
||||
if chartPath == "" {
|
||||
var chartURL string
|
||||
chartURL, err = repo.FindChartInRepoURL(h.repo, h.releaseName, "", "", "", "", getter.All(&cli.EnvSettings{}))
|
||||
|
||||
@@ -4,18 +4,17 @@ apiVersion: networking.k8s.io/v1
|
||||
kind: NetworkPolicy
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: kubeshark-52.7.5
|
||||
helm.sh/chart: kubeshark-53.1.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.7.5"
|
||||
app.kubernetes.io/version: "53.1.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
annotations:
|
||||
name: kubeshark-hub-network-policy
|
||||
namespace: default
|
||||
spec:
|
||||
podSelector:
|
||||
matchLabels:
|
||||
app.kubeshark.co/app: hub
|
||||
app.kubeshark.com/app: hub
|
||||
policyTypes:
|
||||
- Ingress
|
||||
- Egress
|
||||
@@ -34,10 +33,10 @@ apiVersion: networking.k8s.io/v1
|
||||
kind: NetworkPolicy
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: kubeshark-52.7.5
|
||||
helm.sh/chart: kubeshark-53.1.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.7.5"
|
||||
app.kubernetes.io/version: "53.1.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
annotations:
|
||||
name: kubeshark-front-network-policy
|
||||
@@ -45,7 +44,7 @@ metadata:
|
||||
spec:
|
||||
podSelector:
|
||||
matchLabels:
|
||||
app.kubeshark.co/app: front
|
||||
app.kubeshark.com/app: front
|
||||
policyTypes:
|
||||
- Ingress
|
||||
- Egress
|
||||
@@ -61,10 +60,10 @@ apiVersion: networking.k8s.io/v1
|
||||
kind: NetworkPolicy
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: kubeshark-52.7.5
|
||||
helm.sh/chart: kubeshark-53.1.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.7.5"
|
||||
app.kubernetes.io/version: "53.1.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
annotations:
|
||||
name: kubeshark-dex-network-policy
|
||||
@@ -72,7 +71,7 @@ metadata:
|
||||
spec:
|
||||
podSelector:
|
||||
matchLabels:
|
||||
app.kubeshark.co/app: dex
|
||||
app.kubeshark.com/app: dex
|
||||
policyTypes:
|
||||
- Ingress
|
||||
- Egress
|
||||
@@ -88,10 +87,10 @@ apiVersion: networking.k8s.io/v1
|
||||
kind: NetworkPolicy
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: kubeshark-52.7.5
|
||||
helm.sh/chart: kubeshark-53.1.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.7.5"
|
||||
app.kubernetes.io/version: "53.1.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
annotations:
|
||||
name: kubeshark-worker-network-policy
|
||||
@@ -99,7 +98,7 @@ metadata:
|
||||
spec:
|
||||
podSelector:
|
||||
matchLabels:
|
||||
app.kubeshark.co/app: worker
|
||||
app.kubeshark.com/app: worker
|
||||
policyTypes:
|
||||
- Ingress
|
||||
- Egress
|
||||
@@ -117,12 +116,11 @@ apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: kubeshark-52.7.5
|
||||
helm.sh/chart: kubeshark-53.1.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.7.5"
|
||||
app.kubernetes.io/version: "53.1.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
annotations:
|
||||
name: kubeshark-service-account
|
||||
namespace: default
|
||||
---
|
||||
@@ -133,11 +131,11 @@ metadata:
|
||||
name: kubeshark-secret
|
||||
namespace: default
|
||||
labels:
|
||||
app.kubeshark.co/app: hub
|
||||
helm.sh/chart: kubeshark-52.7.5
|
||||
app.kubeshark.com/app: hub
|
||||
helm.sh/chart: kubeshark-53.1.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.7.5"
|
||||
app.kubernetes.io/version: "53.1.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
stringData:
|
||||
LICENSE: ''
|
||||
@@ -152,11 +150,11 @@ metadata:
|
||||
name: kubeshark-saml-x509-crt-secret
|
||||
namespace: default
|
||||
labels:
|
||||
app.kubeshark.co/app: hub
|
||||
helm.sh/chart: kubeshark-52.7.5
|
||||
app.kubeshark.com/app: hub
|
||||
helm.sh/chart: kubeshark-53.1.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.7.5"
|
||||
app.kubernetes.io/version: "53.1.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
stringData:
|
||||
AUTH_SAML_X509_CRT: |
|
||||
@@ -168,11 +166,11 @@ metadata:
|
||||
name: kubeshark-saml-x509-key-secret
|
||||
namespace: default
|
||||
labels:
|
||||
app.kubeshark.co/app: hub
|
||||
helm.sh/chart: kubeshark-52.7.5
|
||||
app.kubeshark.com/app: hub
|
||||
helm.sh/chart: kubeshark-53.1.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.7.5"
|
||||
app.kubernetes.io/version: "53.1.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
stringData:
|
||||
AUTH_SAML_X509_KEY: |
|
||||
@@ -184,10 +182,10 @@ metadata:
|
||||
name: kubeshark-nginx-config-map
|
||||
namespace: default
|
||||
labels:
|
||||
helm.sh/chart: kubeshark-52.7.5
|
||||
helm.sh/chart: kubeshark-53.1.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.7.5"
|
||||
app.kubernetes.io/version: "53.1.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
data:
|
||||
default.conf: |
|
||||
@@ -211,8 +209,10 @@ data:
|
||||
proxy_set_header Authorization $http_authorization;
|
||||
proxy_pass_header Authorization;
|
||||
proxy_connect_timeout 4s;
|
||||
proxy_read_timeout 120s;
|
||||
proxy_send_timeout 12s;
|
||||
# Disable buffering for gRPC/Connect streaming
|
||||
client_max_body_size 0;
|
||||
proxy_request_buffering off;
|
||||
proxy_buffering off;
|
||||
proxy_pass_request_headers on;
|
||||
}
|
||||
|
||||
@@ -247,18 +247,19 @@ metadata:
|
||||
name: kubeshark-config-map
|
||||
namespace: default
|
||||
labels:
|
||||
app.kubeshark.co/app: hub
|
||||
helm.sh/chart: kubeshark-52.7.5
|
||||
app.kubeshark.com/app: hub
|
||||
helm.sh/chart: kubeshark-53.1.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.7.5"
|
||||
app.kubernetes.io/version: "53.1.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
data:
|
||||
POD_REGEX: '.*'
|
||||
NAMESPACES: ''
|
||||
EXCLUDED_NAMESPACES: ''
|
||||
BPF_OVERRIDE: ''
|
||||
STOPPED: 'false'
|
||||
DISSECTION_ENABLED: 'true'
|
||||
CAPTURE_SELF: 'false'
|
||||
SCRIPTING_SCRIPTS: '{}'
|
||||
SCRIPTING_ACTIVE_SCRIPTS: ''
|
||||
INGRESS_ENABLED: 'false'
|
||||
@@ -268,7 +269,7 @@ data:
|
||||
AUTH_TYPE: 'default'
|
||||
AUTH_SAML_IDP_METADATA_URL: ''
|
||||
AUTH_SAML_ROLE_ATTRIBUTE: 'role'
|
||||
AUTH_SAML_ROLES: '{"admin":{"canDownloadPCAP":true,"canStopTrafficCapturing":true,"canUpdateTargetedPods":true,"canUseScripting":true,"filter":"","scriptingPermissions":{"canActivate":true,"canDelete":true,"canSave":true},"showAdminConsoleLink":true}}'
|
||||
AUTH_SAML_ROLES: '{"admin":{"canControlDissection":true,"canDownloadPCAP":true,"canStopTrafficCapturing":true,"canUpdateTargetedPods":true,"canUseScripting":true,"filter":"","scriptingPermissions":{"canActivate":true,"canDelete":true,"canSave":true},"showAdminConsoleLink":true}}'
|
||||
AUTH_OIDC_ISSUER: 'not set'
|
||||
AUTH_OIDC_REFRESH_TOKEN_LIFETIME: '3960h'
|
||||
AUTH_OIDC_STATE_PARAM_EXPIRY: '10m'
|
||||
@@ -278,38 +279,38 @@ data:
|
||||
TARGETED_PODS_UPDATE_DISABLED: ''
|
||||
PRESET_FILTERS_CHANGING_ENABLED: 'true'
|
||||
RECORDING_DISABLED: ''
|
||||
STOP_TRAFFIC_CAPTURING_DISABLED: 'false'
|
||||
DISSECTION_CONTROL_ENABLED: 'true'
|
||||
GLOBAL_FILTER: ""
|
||||
DEFAULT_FILTER: "!dns and !error"
|
||||
DEFAULT_FILTER: ""
|
||||
TRAFFIC_SAMPLE_RATE: '100'
|
||||
JSON_TTL: '5m'
|
||||
PCAP_TTL: '10s'
|
||||
PCAP_ERROR_TTL: '60s'
|
||||
PCAP_TTL: '0'
|
||||
PCAP_ERROR_TTL: '0'
|
||||
TIMEZONE: ' '
|
||||
CLOUD_LICENSE_ENABLED: 'true'
|
||||
AI_ASSISTANT_ENABLED: 'true'
|
||||
DUPLICATE_TIMEFRAME: '200ms'
|
||||
ENABLED_DISSECTORS: 'amqp,dns,http,icmp,kafka,redis,ws,ldap,radius,diameter'
|
||||
ENABLED_DISSECTORS: 'amqp,dns,http,icmp,kafka,redis,ws,ldap,radius,diameter,udp-flow,tcp-flow,udp-conn,tcp-conn'
|
||||
CUSTOM_MACROS: '{"https":"tls and (http or http2)"}'
|
||||
DISSECTORS_UPDATING_ENABLED: 'true'
|
||||
DETECT_DUPLICATES: 'false'
|
||||
PCAP_DUMP_ENABLE: 'true'
|
||||
PCAP_DUMP_ENABLE: 'false'
|
||||
PCAP_TIME_INTERVAL: '1m'
|
||||
PCAP_MAX_TIME: '1h'
|
||||
PCAP_MAX_SIZE: '500MB'
|
||||
PORT_MAPPING: '{"amqp":[5671,5672],"diameter":[3868],"http":[80,443,8080],"kafka":[9092],"ldap":[389],"redis":[6379]}'
|
||||
RAW_CAPTURE_ENABLED: 'true'
|
||||
RAW_CAPTURE_STORAGE_SIZE: '1Gi'
|
||||
---
|
||||
# Source: kubeshark/templates/02-cluster-role.yaml
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: kubeshark-52.7.5
|
||||
helm.sh/chart: kubeshark-53.1.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.7.5"
|
||||
app.kubernetes.io/version: "53.1.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
annotations:
|
||||
name: kubeshark-cluster-role-default
|
||||
namespace: default
|
||||
rules:
|
||||
@@ -352,12 +353,11 @@ apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: kubeshark-52.7.5
|
||||
helm.sh/chart: kubeshark-53.1.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.7.5"
|
||||
app.kubernetes.io/version: "53.1.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
annotations:
|
||||
name: kubeshark-cluster-role-binding-default
|
||||
namespace: default
|
||||
roleRef:
|
||||
@@ -374,10 +374,10 @@ apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: kubeshark-52.7.5
|
||||
helm.sh/chart: kubeshark-53.1.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.7.5"
|
||||
app.kubernetes.io/version: "53.1.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
annotations:
|
||||
name: kubeshark-self-config-role
|
||||
@@ -412,16 +412,22 @@ rules:
|
||||
verbs:
|
||||
- create
|
||||
- get
|
||||
- apiGroups:
|
||||
- batch
|
||||
resources:
|
||||
- jobs
|
||||
verbs:
|
||||
- "*"
|
||||
---
|
||||
# Source: kubeshark/templates/03-cluster-role-binding.yaml
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: kubeshark-52.7.5
|
||||
helm.sh/chart: kubeshark-53.1.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.7.5"
|
||||
app.kubernetes.io/version: "53.1.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
annotations:
|
||||
name: kubeshark-self-config-role-binding
|
||||
@@ -440,13 +446,12 @@ apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
app.kubeshark.co/app: hub
|
||||
helm.sh/chart: kubeshark-52.7.5
|
||||
app.kubeshark.com/app: hub
|
||||
helm.sh/chart: kubeshark-53.1.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.7.5"
|
||||
app.kubernetes.io/version: "53.1.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
annotations:
|
||||
name: kubeshark-hub
|
||||
namespace: default
|
||||
spec:
|
||||
@@ -455,7 +460,7 @@ spec:
|
||||
port: 80
|
||||
targetPort: 8080
|
||||
selector:
|
||||
app.kubeshark.co/app: hub
|
||||
app.kubeshark.com/app: hub
|
||||
type: ClusterIP
|
||||
---
|
||||
# Source: kubeshark/templates/07-front-service.yaml
|
||||
@@ -463,12 +468,11 @@ apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: kubeshark-52.7.5
|
||||
helm.sh/chart: kubeshark-53.1.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.7.5"
|
||||
app.kubernetes.io/version: "53.1.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
annotations:
|
||||
name: kubeshark-front
|
||||
namespace: default
|
||||
spec:
|
||||
@@ -477,7 +481,7 @@ spec:
|
||||
port: 80
|
||||
targetPort: 8080
|
||||
selector:
|
||||
app.kubeshark.co/app: front
|
||||
app.kubeshark.com/app: front
|
||||
type: ClusterIP
|
||||
---
|
||||
# Source: kubeshark/templates/15-worker-service-metrics.yaml
|
||||
@@ -485,10 +489,10 @@ kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: kubeshark-52.7.5
|
||||
helm.sh/chart: kubeshark-53.1.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.7.5"
|
||||
app.kubernetes.io/version: "53.1.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
annotations:
|
||||
prometheus.io/scrape: 'true'
|
||||
@@ -497,11 +501,11 @@ metadata:
|
||||
namespace: default
|
||||
spec:
|
||||
selector:
|
||||
app.kubeshark.co/app: worker
|
||||
helm.sh/chart: kubeshark-52.7.5
|
||||
app.kubeshark.com/app: worker
|
||||
helm.sh/chart: kubeshark-53.1.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.7.5"
|
||||
app.kubernetes.io/version: "53.1.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
ports:
|
||||
- name: metrics
|
||||
@@ -514,10 +518,10 @@ kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: kubeshark-52.7.5
|
||||
helm.sh/chart: kubeshark-53.1.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.7.5"
|
||||
app.kubernetes.io/version: "53.1.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
annotations:
|
||||
prometheus.io/scrape: 'true'
|
||||
@@ -526,11 +530,11 @@ metadata:
|
||||
namespace: default
|
||||
spec:
|
||||
selector:
|
||||
app.kubeshark.co/app: hub
|
||||
helm.sh/chart: kubeshark-52.7.5
|
||||
app.kubeshark.com/app: hub
|
||||
helm.sh/chart: kubeshark-53.1.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.7.5"
|
||||
app.kubernetes.io/version: "53.1.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
ports:
|
||||
- name: metrics
|
||||
@@ -543,30 +547,29 @@ apiVersion: apps/v1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
labels:
|
||||
app.kubeshark.co/app: worker
|
||||
app.kubeshark.com/app: worker
|
||||
sidecar.istio.io/inject: "false"
|
||||
helm.sh/chart: kubeshark-52.7.5
|
||||
helm.sh/chart: kubeshark-53.1.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.7.5"
|
||||
app.kubernetes.io/version: "53.1.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
annotations:
|
||||
name: kubeshark-worker-daemon-set
|
||||
namespace: default
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubeshark.co/app: worker
|
||||
app.kubeshark.com/app: worker
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubeshark.co/app: worker
|
||||
helm.sh/chart: kubeshark-52.7.5
|
||||
app.kubeshark.com/app: worker
|
||||
helm.sh/chart: kubeshark-53.1.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.7.5"
|
||||
app.kubernetes.io/version: "53.1.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
name: kubeshark-worker-daemon-set
|
||||
namespace: kubeshark
|
||||
@@ -576,7 +579,7 @@ spec:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- mkdir -p /sys/fs/bpf && mount | grep -q '/sys/fs/bpf' || mount -t bpf bpf /sys/fs/bpf
|
||||
image: 'docker.io/kubeshark/worker:v52.7.5'
|
||||
image: 'docker.io/kubeshark/worker:v53.1'
|
||||
imagePullPolicy: Always
|
||||
name: mount-bpf
|
||||
securityContext:
|
||||
@@ -605,7 +608,17 @@ spec:
|
||||
- 'auto'
|
||||
- -staletimeout
|
||||
- '30'
|
||||
image: 'docker.io/kubeshark/worker:v52.7.5'
|
||||
- -tcp-flow-full-timeout
|
||||
- '1200'
|
||||
- -udp-flow-full-timeout
|
||||
- '1200'
|
||||
- -storage-size
|
||||
- '10Gi'
|
||||
- -capture-db-max-size
|
||||
- '500Mi'
|
||||
- -cloud-api-url
|
||||
- 'https://api.kubeshark.com'
|
||||
image: 'docker.io/kubeshark/worker:v53.1'
|
||||
imagePullPolicy: Always
|
||||
name: sniffer
|
||||
ports:
|
||||
@@ -625,8 +638,6 @@ spec:
|
||||
value: '10000'
|
||||
- name: TCP_STREAM_CHANNEL_TIMEOUT_SHOW
|
||||
value: 'false'
|
||||
- name: KUBESHARK_CLOUD_API_URL
|
||||
value: 'https://api.kubeshark.co'
|
||||
- name: PROFILING_ENABLED
|
||||
value: 'false'
|
||||
- name: SENTRY_ENABLED
|
||||
@@ -679,7 +690,7 @@ spec:
|
||||
- -disable-tls-log
|
||||
- -loglevel
|
||||
- 'warning'
|
||||
image: 'docker.io/kubeshark/worker:v52.7.5'
|
||||
image: 'docker.io/kubeshark/worker:v53.1'
|
||||
imagePullPolicy: Always
|
||||
name: tracer
|
||||
env:
|
||||
@@ -763,37 +774,36 @@ spec:
|
||||
name: root
|
||||
- name: data
|
||||
emptyDir:
|
||||
sizeLimit: 5Gi
|
||||
sizeLimit: 10Gi
|
||||
---
|
||||
# Source: kubeshark/templates/04-hub-deployment.yaml
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app.kubeshark.co/app: hub
|
||||
helm.sh/chart: kubeshark-52.7.5
|
||||
app.kubeshark.com/app: hub
|
||||
helm.sh/chart: kubeshark-53.1.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.7.5"
|
||||
app.kubernetes.io/version: "53.1.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
annotations:
|
||||
name: kubeshark-hub
|
||||
namespace: default
|
||||
spec:
|
||||
replicas: 1 # Set the desired number of replicas
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubeshark.co/app: hub
|
||||
app.kubeshark.com/app: hub
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubeshark.co/app: hub
|
||||
helm.sh/chart: kubeshark-52.7.5
|
||||
app.kubeshark.com/app: hub
|
||||
helm.sh/chart: kubeshark-53.1.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.7.5"
|
||||
app.kubernetes.io/version: "53.1.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
spec:
|
||||
dnsPolicy: ClusterFirstWithHostNet
|
||||
@@ -806,6 +816,18 @@ spec:
|
||||
- "8080"
|
||||
- -loglevel
|
||||
- 'warning'
|
||||
- -capture-stop-after
|
||||
- "5m"
|
||||
- -snapshot-size-limit
|
||||
- ''
|
||||
- -dissector-image
|
||||
- 'docker.io/kubeshark/worker:v53.1'
|
||||
- -dissector-cpu
|
||||
- '1'
|
||||
- -dissector-memory
|
||||
- '4Gi'
|
||||
- -cloud-api-url
|
||||
- 'https://api.kubeshark.com'
|
||||
env:
|
||||
- name: POD_NAME
|
||||
valueFrom:
|
||||
@@ -819,11 +841,9 @@ spec:
|
||||
value: 'false'
|
||||
- name: SENTRY_ENVIRONMENT
|
||||
value: 'production'
|
||||
- name: KUBESHARK_CLOUD_API_URL
|
||||
value: 'https://api.kubeshark.co'
|
||||
- name: PROFILING_ENABLED
|
||||
value: 'false'
|
||||
image: 'docker.io/kubeshark/hub:v52.7.5'
|
||||
image: 'docker.io/kubeshark/hub:v53.1'
|
||||
imagePullPolicy: Always
|
||||
readinessProbe:
|
||||
periodSeconds: 5
|
||||
@@ -856,6 +876,8 @@ spec:
|
||||
- name: saml-x509-volume
|
||||
mountPath: "/etc/saml/x509"
|
||||
readOnly: true
|
||||
- name: snapshots-volume
|
||||
mountPath: "/app/data/snapshots"
|
||||
affinity:
|
||||
nodeAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
@@ -879,36 +901,38 @@ spec:
|
||||
items:
|
||||
- key: AUTH_SAML_X509_KEY
|
||||
path: kubeshark.key
|
||||
- name: snapshots-volume
|
||||
emptyDir:
|
||||
sizeLimit: 20Gi
|
||||
---
|
||||
# Source: kubeshark/templates/06-front-deployment.yaml
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app.kubeshark.co/app: front
|
||||
helm.sh/chart: kubeshark-52.7.5
|
||||
app.kubeshark.com/app: front
|
||||
helm.sh/chart: kubeshark-53.1.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.7.5"
|
||||
app.kubernetes.io/version: "53.1.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
annotations:
|
||||
name: kubeshark-front
|
||||
namespace: default
|
||||
spec:
|
||||
replicas: 1 # Set the desired number of replicas
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubeshark.co/app: front
|
||||
app.kubeshark.com/app: front
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubeshark.co/app: front
|
||||
helm.sh/chart: kubeshark-52.7.5
|
||||
app.kubeshark.com/app: front
|
||||
helm.sh/chart: kubeshark-53.1.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.7.5"
|
||||
app.kubernetes.io/version: "53.1.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
spec:
|
||||
containers:
|
||||
@@ -919,10 +943,14 @@ spec:
|
||||
value: 'default'
|
||||
- name: REACT_APP_COMPLETE_STREAMING_ENABLED
|
||||
value: 'true'
|
||||
- name: REACT_APP_STREAMING_TYPE
|
||||
value: 'connect-rpc'
|
||||
- name: REACT_APP_AUTH_SAML_IDP_METADATA_URL
|
||||
value: ' '
|
||||
- name: REACT_APP_TIMEZONE
|
||||
value: ' '
|
||||
- name: REACT_APP_SCRIPTING_HIDDEN
|
||||
value: 'true'
|
||||
- name: REACT_APP_SCRIPTING_DISABLED
|
||||
value: 'false'
|
||||
- name: REACT_APP_TARGETED_PODS_UPDATE_DISABLED
|
||||
@@ -933,21 +961,25 @@ spec:
|
||||
value: 'true'
|
||||
- name: REACT_APP_RECORDING_DISABLED
|
||||
value: 'false'
|
||||
- name: REACT_APP_STOP_TRAFFIC_CAPTURING_DISABLED
|
||||
value: 'false'
|
||||
- name: 'REACT_APP_CLOUD_LICENSE_ENABLED'
|
||||
- name: REACT_APP_DISSECTION_ENABLED
|
||||
value: 'true'
|
||||
- name: 'REACT_APP_AI_ASSISTANT_ENABLED'
|
||||
- name: REACT_APP_DISSECTION_CONTROL_ENABLED
|
||||
value: 'true'
|
||||
- name: 'REACT_APP_CLOUD_LICENSE_ENABLED'
|
||||
value: 'true'
|
||||
- name: REACT_APP_SUPPORT_CHAT_ENABLED
|
||||
value: 'false'
|
||||
- name: REACT_APP_BETA_ENABLED
|
||||
value: 'false'
|
||||
- name: REACT_APP_DISSECTORS_UPDATING_ENABLED
|
||||
value: 'true'
|
||||
- name: REACT_APP_RAW_CAPTURE_ENABLED
|
||||
value: 'true'
|
||||
- name: REACT_APP_SENTRY_ENABLED
|
||||
value: 'false'
|
||||
- name: REACT_APP_SENTRY_ENVIRONMENT
|
||||
value: 'production'
|
||||
image: 'docker.io/kubeshark/front:v52.7.5'
|
||||
image: 'docker.io/kubeshark/front:v53.1'
|
||||
imagePullPolicy: Always
|
||||
name: kubeshark-front
|
||||
livenessProbe:
|
||||
|
||||
@@ -5,7 +5,7 @@ metadata:
|
||||
spec:
|
||||
acme:
|
||||
server: https://acme-v02.api.letsencrypt.org/directory
|
||||
email: info@kubeshark.co
|
||||
email: info@kubeshark.com
|
||||
privateKeySecretRef:
|
||||
name: letsencrypt-prod-key
|
||||
solvers:
|
||||
|
||||
204
mcp/README.md
Normal file
204
mcp/README.md
Normal file
@@ -0,0 +1,204 @@
|
||||
# Kubeshark MCP Server
|
||||
|
||||
[Kubeshark](https://kubeshark.com) MCP (Model Context Protocol) server enables AI assistants like Claude Desktop, Cursor, and other MCP-compatible clients to query real-time Kubernetes network traffic.
|
||||
|
||||
## Features
|
||||
|
||||
- **L7 API Traffic Analysis**: Query HTTP, gRPC, Redis, Kafka, DNS transactions
|
||||
- **L4 Network Flows**: View TCP/UDP flows with traffic statistics
|
||||
- **Cluster Management**: Start/stop Kubeshark deployments (with safety controls)
|
||||
- **PCAP Snapshots**: Create and export network captures
|
||||
- **Built-in Prompts**: Pre-configured prompts for common analysis tasks
|
||||
|
||||
## Installation
|
||||
|
||||
### 1. Install Kubeshark CLI
|
||||
|
||||
```bash
|
||||
# macOS
|
||||
brew install kubeshark
|
||||
|
||||
# Linux
|
||||
sh <(curl -Ls https://kubeshark.com/install)
|
||||
|
||||
# Windows (PowerShell)
|
||||
choco install kubeshark
|
||||
```
|
||||
|
||||
Or download from [GitHub Releases](https://github.com/kubeshark/kubeshark/releases).
|
||||
|
||||
### 2. Configure Claude Desktop
|
||||
|
||||
Add to your Claude Desktop configuration:
|
||||
|
||||
**macOS**: `~/Library/Application Support/Claude/claude_desktop_config.json`
|
||||
**Windows**: `%APPDATA%\Claude\claude_desktop_config.json`
|
||||
|
||||
#### URL Mode (Recommended for existing deployments)
|
||||
|
||||
```json
|
||||
{
|
||||
"mcpServers": {
|
||||
"kubeshark": {
|
||||
"command": "kubeshark",
|
||||
"args": ["mcp", "--url", "https://kubeshark.example.com"]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### Proxy Mode (Requires kubectl access)
|
||||
|
||||
```json
|
||||
{
|
||||
"mcpServers": {
|
||||
"kubeshark": {
|
||||
"command": "kubeshark",
|
||||
"args": ["mcp", "--kubeconfig", "/path/to/.kube/config"]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
or:
|
||||
|
||||
```json
|
||||
{
|
||||
"mcpServers": {
|
||||
"kubeshark": {
|
||||
"command": "kubeshark",
|
||||
"args": ["mcp"]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### With Destructive Operations
|
||||
|
||||
```json
|
||||
{
|
||||
"mcpServers": {
|
||||
"kubeshark": {
|
||||
"command": "kubeshark",
|
||||
"args": ["mcp", "--allow-destructive", "--kubeconfig", "/path/to/.kube/config"]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 3. Generate Configuration
|
||||
|
||||
Use the CLI to generate configuration:
|
||||
|
||||
```bash
|
||||
kubeshark mcp --mcp-config --url https://kubeshark.example.com
|
||||
```
|
||||
|
||||
## Available Tools
|
||||
|
||||
### Traffic Analysis (All Modes)
|
||||
|
||||
| Tool | Description |
|
||||
|------|-------------|
|
||||
| `list_workloads` | List pods, services, namespaces with observed traffic |
|
||||
| `list_api_calls` | Query L7 API transactions with KFL filtering |
|
||||
| `get_api_call` | Get detailed info about a specific API call |
|
||||
| `get_api_stats` | Get aggregated API statistics |
|
||||
| `list_l4_flows` | List L4 (TCP/UDP) network flows |
|
||||
| `get_l4_flow_summary` | Get L4 connectivity summary |
|
||||
| `list_snapshots` | List all PCAP snapshots |
|
||||
| `create_snapshot` | Create a new PCAP snapshot |
|
||||
| `get_dissection_status` | Check L7 protocol parsing status |
|
||||
| `enable_dissection` | Enable L7 protocol dissection |
|
||||
| `disable_dissection` | Disable L7 protocol dissection |
|
||||
|
||||
### Cluster Management (Proxy Mode Only)
|
||||
|
||||
| Tool | Description | Requires |
|
||||
|------|-------------|----------|
|
||||
| `check_kubeshark_status` | Check if Kubeshark is running | - |
|
||||
| `start_kubeshark` | Deploy Kubeshark to cluster | `--allow-destructive` |
|
||||
| `stop_kubeshark` | Remove Kubeshark from cluster | `--allow-destructive` |
|
||||
|
||||
## Available Prompts
|
||||
|
||||
| Prompt | Description |
|
||||
|--------|-------------|
|
||||
| `analyze_traffic` | Analyze API traffic patterns and identify issues |
|
||||
| `find_errors` | Find and summarize API errors and failures |
|
||||
| `trace_request` | Trace a request path through microservices |
|
||||
| `show_topology` | Show service communication topology |
|
||||
| `latency_analysis` | Analyze latency patterns and identify slow endpoints |
|
||||
| `security_audit` | Audit traffic for security concerns |
|
||||
| `compare_traffic` | Compare traffic patterns between time periods |
|
||||
| `debug_connection` | Debug connectivity issues between services |
|
||||
|
||||
## Example Conversations
|
||||
|
||||
```
|
||||
User: Show me all HTTP 500 errors in the last hour
|
||||
|
||||
Claude: I'll query the API traffic for 500 errors.
|
||||
[Calling list_api_calls with kfl="http and response.status == 500"]
|
||||
|
||||
Found 12 HTTP 500 errors:
|
||||
1. POST /api/checkout -> payment-service (500)
|
||||
Time: 10:23:45 | Latency: 2340ms
|
||||
...
|
||||
```
|
||||
|
||||
```
|
||||
User: What services are communicating with the database?
|
||||
|
||||
Claude: Let me check the L4 flows to the database.
|
||||
[Calling list_l4_flows with dst_filter="postgres"]
|
||||
|
||||
Found 5 services connecting to postgres:5432:
|
||||
- orders-service: 456KB transferred
|
||||
- users-service: 123KB transferred
|
||||
...
|
||||
```
|
||||
|
||||
## CLI Options
|
||||
|
||||
| Option | Description |
|
||||
|--------|-------------|
|
||||
| `--url` | Direct URL to Kubeshark Hub |
|
||||
| `--kubeconfig` | Path to kubeconfig file |
|
||||
| `--allow-destructive` | Enable start/stop operations |
|
||||
| `--list-tools` | List available tools and exit |
|
||||
| `--mcp-config` | Print Claude Desktop config JSON |
|
||||
|
||||
## KFL (Kubeshark Filter Language)
|
||||
|
||||
Query traffic using KFL syntax:
|
||||
|
||||
```
|
||||
# HTTP requests to a specific path
|
||||
http and request.path == "/api/users"
|
||||
|
||||
# Errors only
|
||||
response.status >= 400
|
||||
|
||||
# Specific source pod
|
||||
src.pod.name == "frontend-.*"
|
||||
|
||||
# Multiple conditions
|
||||
http and src.namespace == "default" and response.status == 500
|
||||
```
|
||||
|
||||
## MCP Registry
|
||||
|
||||
Kubeshark is published to the [MCP Registry](https://registry.modelcontextprotocol.io/) automatically on each release.
|
||||
|
||||
The `server.json` in this directory is a reference file. The actual registry metadata (version, SHA256 hashes) is auto-generated during the release workflow. See [`.github/workflows/release.yml`](../.github/workflows/release.yml) for details.
|
||||
|
||||
## Links
|
||||
|
||||
- [Documentation](https://docs.kubeshark.com/en/mcp)
|
||||
- [GitHub](https://github.com/kubeshark/kubeshark)
|
||||
- [Website](https://kubeshark.com)
|
||||
- [MCP Registry](https://registry.modelcontextprotocol.io/)
|
||||
|
||||
## License
|
||||
|
||||
Apache-2.0
|
||||
178
mcp/server.json
Normal file
178
mcp/server.json
Normal file
@@ -0,0 +1,178 @@
|
||||
{
|
||||
"$schema": "https://static.modelcontextprotocol.io/schemas/2025-12-11/server.schema.json",
|
||||
"name": "io.github.kubeshark/mcp",
|
||||
"displayName": "Kubeshark",
|
||||
"description": "Real-time Kubernetes network traffic visibility and API analysis for HTTP, gRPC, Redis, Kafka, DNS.",
|
||||
"icon": "https://raw.githubusercontent.com/kubeshark/assets/refs/heads/master/logo/ico/icon.ico",
|
||||
"repository": {
|
||||
"url": "https://github.com/kubeshark/kubeshark",
|
||||
"source": "github"
|
||||
},
|
||||
"homepage": "https://kubeshark.com",
|
||||
"license": "Apache-2.0",
|
||||
"version": "AUTO_GENERATED_AT_RELEASE",
|
||||
"authors": [
|
||||
{
|
||||
"name": "Kubeshark",
|
||||
"url": "https://kubeshark.com"
|
||||
}
|
||||
],
|
||||
"categories": [
|
||||
"kubernetes",
|
||||
"networking",
|
||||
"observability",
|
||||
"debugging",
|
||||
"security"
|
||||
],
|
||||
"_note": "version and packages.fileSha256 are auto-generated at release time by .github/workflows/release.yml",
|
||||
"tags": ["kubernetes", "network", "traffic", "api", "http", "grpc", "kafka", "redis", "dns", "pcap", "wireshark", "tcpdump", "observability", "debugging", "microservices"],
|
||||
"packages": [
|
||||
{
|
||||
"registryType": "mcpb",
|
||||
"identifier": "https://github.com/kubeshark/kubeshark/releases/download/vX.Y.Z/kubeshark-mcp_darwin_arm64.mcpb",
|
||||
"fileSha256": "AUTO_GENERATED",
|
||||
"transport": { "type": "stdio" }
|
||||
},
|
||||
{
|
||||
"registryType": "mcpb",
|
||||
"identifier": "https://github.com/kubeshark/kubeshark/releases/download/vX.Y.Z/kubeshark-mcp_darwin_amd64.mcpb",
|
||||
"fileSha256": "AUTO_GENERATED",
|
||||
"transport": { "type": "stdio" }
|
||||
},
|
||||
{
|
||||
"registryType": "mcpb",
|
||||
"identifier": "https://github.com/kubeshark/kubeshark/releases/download/vX.Y.Z/kubeshark-mcp_linux_arm64.mcpb",
|
||||
"fileSha256": "AUTO_GENERATED",
|
||||
"transport": { "type": "stdio" }
|
||||
},
|
||||
{
|
||||
"registryType": "mcpb",
|
||||
"identifier": "https://github.com/kubeshark/kubeshark/releases/download/vX.Y.Z/kubeshark-mcp_linux_amd64.mcpb",
|
||||
"fileSha256": "AUTO_GENERATED",
|
||||
"transport": { "type": "stdio" }
|
||||
},
|
||||
{
|
||||
"registryType": "mcpb",
|
||||
"identifier": "https://github.com/kubeshark/kubeshark/releases/download/vX.Y.Z/kubeshark-mcp_windows_amd64.mcpb",
|
||||
"fileSha256": "AUTO_GENERATED",
|
||||
"transport": { "type": "stdio" }
|
||||
}
|
||||
],
|
||||
"tools": [
|
||||
{
|
||||
"name": "check_kubeshark_status",
|
||||
"description": "Check if Kubeshark is currently running in the cluster. Read-only operation.",
|
||||
"mode": "proxy"
|
||||
},
|
||||
{
|
||||
"name": "start_kubeshark",
|
||||
"description": "Deploy Kubeshark to the Kubernetes cluster. Requires --allow-destructive flag.",
|
||||
"mode": "proxy",
|
||||
"destructive": true
|
||||
},
|
||||
{
|
||||
"name": "stop_kubeshark",
|
||||
"description": "Remove Kubeshark from the Kubernetes cluster. Requires --allow-destructive flag.",
|
||||
"mode": "proxy",
|
||||
"destructive": true
|
||||
},
|
||||
{
|
||||
"name": "list_workloads",
|
||||
"description": "List pods, services, namespaces, and nodes with observed L7 traffic.",
|
||||
"mode": "all"
|
||||
},
|
||||
{
|
||||
"name": "list_api_calls",
|
||||
"description": "Query L7 API transactions (HTTP, gRPC, Redis, Kafka, DNS) with KFL filtering.",
|
||||
"mode": "all"
|
||||
},
|
||||
{
|
||||
"name": "get_api_call",
|
||||
"description": "Get detailed information about a specific API call including headers and body.",
|
||||
"mode": "all"
|
||||
},
|
||||
{
|
||||
"name": "get_api_stats",
|
||||
"description": "Get aggregated API statistics and metrics.",
|
||||
"mode": "all"
|
||||
},
|
||||
{
|
||||
"name": "list_l4_flows",
|
||||
"description": "List L4 (TCP/UDP) network flows with traffic statistics.",
|
||||
"mode": "all"
|
||||
},
|
||||
{
|
||||
"name": "get_l4_flow_summary",
|
||||
"description": "Get L4 connectivity summary including top talkers and cross-namespace traffic.",
|
||||
"mode": "all"
|
||||
},
|
||||
{
|
||||
"name": "list_snapshots",
|
||||
"description": "List all PCAP snapshots.",
|
||||
"mode": "all"
|
||||
},
|
||||
{
|
||||
"name": "create_snapshot",
|
||||
"description": "Create a new PCAP snapshot of captured traffic.",
|
||||
"mode": "all"
|
||||
},
|
||||
{
|
||||
"name": "get_dissection_status",
|
||||
"description": "Check L7 protocol parsing status.",
|
||||
"mode": "all"
|
||||
},
|
||||
{
|
||||
"name": "enable_dissection",
|
||||
"description": "Enable L7 protocol dissection.",
|
||||
"mode": "all"
|
||||
},
|
||||
{
|
||||
"name": "disable_dissection",
|
||||
"description": "Disable L7 protocol dissection.",
|
||||
"mode": "all"
|
||||
}
|
||||
],
|
||||
"prompts": [
|
||||
{ "name": "analyze_traffic", "description": "Analyze API traffic patterns and identify issues" },
|
||||
{ "name": "find_errors", "description": "Find and summarize API errors and failures" },
|
||||
{ "name": "trace_request", "description": "Trace a request path through microservices" },
|
||||
{ "name": "show_topology", "description": "Show service communication topology" },
|
||||
{ "name": "latency_analysis", "description": "Analyze latency patterns and identify slow endpoints" },
|
||||
{ "name": "security_audit", "description": "Audit traffic for security concerns" },
|
||||
{ "name": "compare_traffic", "description": "Compare traffic patterns between time periods" },
|
||||
{ "name": "debug_connection", "description": "Debug connectivity issues between services" }
|
||||
],
|
||||
"configuration": {
|
||||
"properties": {
|
||||
"url": {
|
||||
"type": "string",
|
||||
"description": "Direct URL to Kubeshark Hub (e.g., https://kubeshark.example.com). When set, connects directly without kubectl/proxy.",
|
||||
"examples": ["https://kubeshark.example.com", "http://localhost:8899"]
|
||||
},
|
||||
"kubeconfig": {
|
||||
"type": "string",
|
||||
"description": "Path to kubeconfig file for proxy mode.",
|
||||
"examples": ["~/.kube/config", "/path/to/.kube/config"]
|
||||
},
|
||||
"allow-destructive": {
|
||||
"type": "boolean",
|
||||
"description": "Enable destructive operations (start_kubeshark, stop_kubeshark). Default: false for safety.",
|
||||
"default": false
|
||||
}
|
||||
}
|
||||
},
|
||||
"modes": {
|
||||
"url": {
|
||||
"description": "Connect directly to an existing Kubeshark deployment via URL. Cluster management tools are disabled.",
|
||||
"args": ["mcp", "--url", "${url}"]
|
||||
},
|
||||
"proxy": {
|
||||
"description": "Connect via kubectl port-forward. Requires kubeconfig access to the cluster.",
|
||||
"args": ["mcp", "--kubeconfig", "${kubeconfig}"]
|
||||
},
|
||||
"proxy-destructive": {
|
||||
"description": "Proxy mode with destructive operations enabled.",
|
||||
"args": ["mcp", "--kubeconfig", "${kubeconfig}", "--allow-destructive"]
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -10,8 +10,8 @@ var (
|
||||
Software = "Kubeshark"
|
||||
Program = "kubeshark"
|
||||
Description = "The API Traffic Analyzer for Kubernetes"
|
||||
Website = "https://kubeshark.co"
|
||||
Email = "info@kubeshark.co"
|
||||
Website = "https://kubeshark.com"
|
||||
Email = "support@kubeshark.com"
|
||||
Ver = "0.0.0"
|
||||
Branch = "master"
|
||||
GitCommitHash = "" // this var is overridden using ldflags in makefile when building
|
||||
|
||||
331
skills/kfl/SKILL.md
Normal file
331
skills/kfl/SKILL.md
Normal file
@@ -0,0 +1,331 @@
|
||||
---
|
||||
name: kfl
|
||||
description: >
|
||||
KFL2 (Kubeshark Filter Language) expert. Use this skill whenever the user needs to
|
||||
write, debug, or optimize KFL filters for Kubeshark traffic queries. Trigger on any
|
||||
mention of KFL, CEL filters, traffic filtering, display filters, query syntax,
|
||||
filter expressions, "how do I filter", "show me only", "find traffic where",
|
||||
protocol-specific queries (HTTP status codes, DNS lookups, Redis commands, Kafka topics),
|
||||
Kubernetes-aware filtering (by namespace, pod, service, label, annotation),
|
||||
L4 connection/flow filters, capture source filters, time-based queries, or any
|
||||
request to slice/search/narrow network traffic in Kubeshark. Also trigger when other
|
||||
skills need help constructing filters — KFL is the query language for all Kubeshark
|
||||
traffic analysis.
|
||||
---
|
||||
|
||||
# KFL2 — Kubeshark Filter Language
|
||||
|
||||
You are a KFL2 expert. KFL2 is built on Google's CEL (Common Expression Language)
|
||||
and is the query language for all Kubeshark traffic analysis. It operates as a
|
||||
**display filter** — it doesn't affect what's captured, only what you see.
|
||||
|
||||
Think of KFL the way you think of SQL for databases or Google search syntax for
|
||||
the web. Kubeshark captures and indexes all cluster traffic; KFL is how you
|
||||
search it.
|
||||
|
||||
For the complete variable and field reference, see `references/kfl2-reference.md`.
|
||||
|
||||
## Core Syntax
|
||||
|
||||
KFL expressions are boolean CEL expressions. An empty filter matches everything.
|
||||
|
||||
### Operators
|
||||
|
||||
| Category | Operators |
|
||||
|----------|-----------|
|
||||
| Comparison | `==`, `!=`, `<`, `<=`, `>`, `>=` |
|
||||
| Logical | `&&`, `\|\|`, `!` |
|
||||
| Arithmetic | `+`, `-`, `*`, `/`, `%` |
|
||||
| Membership | `in` |
|
||||
| Ternary | `condition ? true_val : false_val` |
|
||||
|
||||
### String Functions
|
||||
|
||||
```
|
||||
str.contains(substring) // Substring search
|
||||
str.startsWith(prefix) // Prefix match
|
||||
str.endsWith(suffix) // Suffix match
|
||||
str.matches(regex) // Regex match
|
||||
size(str) // String length
|
||||
```
|
||||
|
||||
### Collection Functions
|
||||
|
||||
```
|
||||
size(collection) // List/map/string length
|
||||
key in map // Key existence
|
||||
map[key] // Value access
|
||||
map_get(map, key, default) // Safe access with default
|
||||
value in list // List membership
|
||||
```
|
||||
|
||||
### Time Functions
|
||||
|
||||
```
|
||||
timestamp("2026-03-14T22:00:00Z") // Parse ISO timestamp
|
||||
duration("5m") // Parse duration
|
||||
now() // Current time (snapshot at filter creation)
|
||||
```
|
||||
|
||||
## Protocol Detection
|
||||
|
||||
Boolean flags that indicate which protocol was detected. Use these as the first
|
||||
filter term — they're fast and narrow the search space immediately.
|
||||
|
||||
| Flag | Protocol | Flag | Protocol |
|
||||
|------|----------|------|----------|
|
||||
| `http` | HTTP/1.1, HTTP/2 | `redis` | Redis |
|
||||
| `dns` | DNS | `kafka` | Kafka |
|
||||
| `tls` | TLS/SSL | `amqp` | AMQP |
|
||||
| `tcp` | TCP | `ldap` | LDAP |
|
||||
| `udp` | UDP | `ws` | WebSocket |
|
||||
| `sctp` | SCTP | `gql` | GraphQL (v1+v2) |
|
||||
| `icmp` | ICMP | `gqlv1` / `gqlv2` | GraphQL version-specific |
|
||||
| `radius` | RADIUS | `conn` / `flow` | L4 connection/flow tracking |
|
||||
| `diameter` | Diameter | `tcp_conn` / `udp_conn` | Transport-specific connections |
|
||||
|
||||
## Kubernetes Context
|
||||
|
||||
The most common starting point. Filter by where traffic originates or terminates.
|
||||
|
||||
### Pod and Service Fields
|
||||
|
||||
```
|
||||
src.pod.name == "orders-594487879c-7ddxf"
|
||||
dst.pod.namespace == "production"
|
||||
src.service.name == "api-gateway"
|
||||
dst.service.namespace == "payments"
|
||||
```
|
||||
|
||||
Pod fields fall back to service data when pod info is unavailable, so
|
||||
`dst.pod.namespace` works even for service-level entries.
|
||||
|
||||
### Aggregate Collections
|
||||
|
||||
Match against any direction (src or dst):
|
||||
|
||||
```
|
||||
"production" in namespaces // Any namespace match
|
||||
"orders" in pods // Any pod name match
|
||||
"api-gateway" in services // Any service name match
|
||||
```
|
||||
|
||||
### Labels and Annotations
|
||||
|
||||
```
|
||||
local_labels["app"] == "checkout"
|
||||
remote_labels["version"] == "canary"
|
||||
"tier" in local_labels // Label existence
|
||||
map_get(local_labels, "env", "") == "prod" // Safe access
|
||||
```
|
||||
|
||||
### Node and Process
|
||||
|
||||
```
|
||||
node_name == "ip-10-0-25-170.ec2.internal"
|
||||
local_process_name == "nginx"
|
||||
remote_process_name.contains("postgres")
|
||||
```
|
||||
|
||||
### DNS Resolution
|
||||
|
||||
```
|
||||
src.dns == "api.example.com"
|
||||
dst.dns.contains("redis")
|
||||
```
|
||||
|
||||
## HTTP Filtering
|
||||
|
||||
HTTP is the most common protocol for API-level investigation.
|
||||
|
||||
### Fields
|
||||
|
||||
| Field | Type | Example |
|
||||
|-------|------|---------|
|
||||
| `method` | string | `"GET"`, `"POST"`, `"PUT"`, `"DELETE"` |
|
||||
| `url` | string | Full path + query: `"/api/users?id=123"` |
|
||||
| `path` | string | Path only: `"/api/users"` |
|
||||
| `status_code` | int | `200`, `404`, `500` |
|
||||
| `http_version` | string | `"HTTP/1.1"`, `"HTTP/2"` |
|
||||
| `request.headers` | map | `request.headers["content-type"]` |
|
||||
| `response.headers` | map | `response.headers["server"]` |
|
||||
| `request.cookies` | map | `request.cookies["session"]` |
|
||||
| `response.cookies` | map | `response.cookies["token"]` |
|
||||
| `query_string` | map | `query_string["id"]` |
|
||||
| `request_body_size` | int | Request body bytes |
|
||||
| `response_body_size` | int | Response body bytes |
|
||||
| `elapsed_time` | int | Duration in **microseconds** |
|
||||
|
||||
### Common Patterns
|
||||
|
||||
```
|
||||
// Error investigation
|
||||
http && status_code >= 500 // Server errors
|
||||
http && status_code == 429 // Rate limiting
|
||||
http && status_code >= 400 && status_code < 500 // Client errors
|
||||
|
||||
// Endpoint targeting
|
||||
http && method == "POST" && path.contains("/orders")
|
||||
http && url.matches(".*/api/v[0-9]+/users.*")
|
||||
|
||||
// Performance
|
||||
http && elapsed_time > 5000000 // > 5 seconds
|
||||
http && response_body_size > 1000000 // > 1MB responses
|
||||
|
||||
// Header inspection
|
||||
http && "authorization" in request.headers
|
||||
http && request.headers["content-type"] == "application/json"
|
||||
|
||||
// GraphQL (subset of HTTP)
|
||||
gql && method == "POST" && status_code >= 400
|
||||
```
|
||||
|
||||
## DNS Filtering
|
||||
|
||||
DNS issues are often the hidden root cause of outages.
|
||||
|
||||
| Field | Type | Description |
|
||||
|-------|------|-------------|
|
||||
| `dns_questions` | []string | Question domain names |
|
||||
| `dns_answers` | []string | Answer domain names |
|
||||
| `dns_question_types` | []string | Record types: A, AAAA, CNAME, MX, TXT, SRV, PTR |
|
||||
| `dns_request` | bool | Is request |
|
||||
| `dns_response` | bool | Is response |
|
||||
| `dns_request_length` | int | Request size |
|
||||
| `dns_response_length` | int | Response size |
|
||||
|
||||
```
|
||||
dns && "api.external-service.com" in dns_questions
|
||||
dns && dns_response && status_code != 0 // Failed lookups
|
||||
dns && "A" in dns_question_types // A record queries
|
||||
dns && size(dns_questions) > 1 // Multi-question
|
||||
```
|
||||
|
||||
## Database and Messaging Protocols
|
||||
|
||||
### Redis
|
||||
|
||||
```
|
||||
redis && redis_type == "GET" // Command type
|
||||
redis && redis_key.startsWith("session:") // Key pattern
|
||||
redis && redis_command.contains("DEL") // Command search
|
||||
redis && redis_total_size > 10000 // Large operations
|
||||
```
|
||||
|
||||
### Kafka
|
||||
|
||||
```
|
||||
kafka && kafka_api_key_name == "PRODUCE" // Produce operations
|
||||
kafka && kafka_client_id == "payment-processor" // Client filtering
|
||||
kafka && kafka_request_summary.contains("orders") // Topic filtering
|
||||
kafka && kafka_size > 10000 // Large messages
|
||||
```
|
||||
|
||||
### AMQP
|
||||
|
||||
```
|
||||
amqp && amqp_method == "basic.publish"
|
||||
amqp && amqp_summary.contains("order")
|
||||
```
|
||||
|
||||
### LDAP
|
||||
|
||||
```
|
||||
ldap && ldap_type == "bind" // Bind requests
|
||||
ldap && ldap_summary.contains("search")
|
||||
```
|
||||
|
||||
## Transport Layer (L4)
|
||||
|
||||
### TCP/UDP Fields
|
||||
|
||||
```
|
||||
tcp && tcp_error_type != "" // TCP errors
|
||||
udp && udp_length > 1000 // Large UDP packets
|
||||
```
|
||||
|
||||
### Connection Tracking
|
||||
|
||||
```
|
||||
conn && conn_state == "open" // Active connections
|
||||
conn && conn_local_bytes > 1000000 // High-volume
|
||||
conn && "HTTP" in conn_l7_detected // L7 protocol detection
|
||||
tcp_conn && conn_state == "closed" // Closed TCP connections
|
||||
```
|
||||
|
||||
### Flow Tracking (with Rate Metrics)
|
||||
|
||||
```
|
||||
flow && flow_local_pps > 1000 // High packet rate
|
||||
flow && flow_local_bps > 1000000 // High bandwidth
|
||||
flow && flow_state == "closed" && "TLS" in flow_l7_detected
|
||||
tcp_flow && flow_local_bps > 5000000 // High-throughput TCP
|
||||
```
|
||||
|
||||
## Network Layer
|
||||
|
||||
```
|
||||
src.ip == "10.0.53.101"
|
||||
dst.ip.startsWith("192.168.")
|
||||
src.port == 8080
|
||||
dst.port >= 8000 && dst.port <= 9000
|
||||
```
|
||||
|
||||
## Capture Source
|
||||
|
||||
Filter by how traffic was captured:
|
||||
|
||||
```
|
||||
capture_source == "ebpf" // eBPF captured
|
||||
capture_source == "ebpf_tls" // TLS decryption via eBPF
|
||||
capture_source == "af_packet" // AF_PACKET captured
|
||||
capture_backend == "ebpf" // eBPF backend family
|
||||
```
|
||||
|
||||
## Time-Based Filtering
|
||||
|
||||
```
|
||||
timestamp > timestamp("2026-03-14T22:00:00Z")
|
||||
timestamp >= timestamp("2026-03-14T22:00:00Z") && timestamp <= timestamp("2026-03-14T23:00:00Z")
|
||||
timestamp > now() - duration("5m") // Last 5 minutes
|
||||
elapsed_time > 2000000 // Older than 2 seconds
|
||||
```
|
||||
|
||||
## Building Filters: Progressive Narrowing
|
||||
|
||||
The most effective investigation technique — start broad, add constraints:
|
||||
|
||||
```
|
||||
// Step 1: Protocol + namespace
|
||||
http && dst.pod.namespace == "production"
|
||||
|
||||
// Step 2: Add error condition
|
||||
http && dst.pod.namespace == "production" && status_code >= 500
|
||||
|
||||
// Step 3: Narrow to service
|
||||
http && dst.pod.namespace == "production" && status_code >= 500 && dst.service.name == "payment-service"
|
||||
|
||||
// Step 4: Narrow to endpoint
|
||||
http && dst.pod.namespace == "production" && status_code >= 500 && dst.service.name == "payment-service" && path.contains("/charge")
|
||||
|
||||
// Step 5: Add timing
|
||||
http && dst.pod.namespace == "production" && status_code >= 500 && dst.service.name == "payment-service" && path.contains("/charge") && elapsed_time > 2000000
|
||||
```
|
||||
|
||||
## Performance Tips
|
||||
|
||||
1. **Protocol flags first** — `http && ...` is faster than `... && http`
|
||||
2. **`startsWith`/`endsWith` over `contains`** — prefix/suffix checks are faster
|
||||
3. **Specific ports before string ops** — `dst.port == 80` is cheaper than `url.contains(...)`
|
||||
4. **Use `map_get` for labels** — avoids errors on missing keys
|
||||
5. **Keep filters simple** — CEL short-circuits on `&&`, so put cheap checks first
|
||||
|
||||
## Type Safety
|
||||
|
||||
KFL2 is statically typed. Common gotchas:
|
||||
|
||||
- `status_code` is `int`, not string — use `status_code == 200`, not `"200"`
|
||||
- `elapsed_time` is in **microseconds** — 5 seconds = `5000000`
|
||||
- `timestamp` requires `timestamp()` function — not a raw string
|
||||
- Map access on missing keys errors — use `key in map` or `map_get()` first
|
||||
- List membership uses `value in list` — not `list.contains(value)`
|
||||
375
skills/kfl/references/kfl2-reference.md
Normal file
375
skills/kfl/references/kfl2-reference.md
Normal file
@@ -0,0 +1,375 @@
|
||||
# KFL2 Complete Variable and Field Reference
|
||||
|
||||
This is the exhaustive reference for every variable available in KFL2 filters.
|
||||
KFL2 is built on Google's CEL (Common Expression Language) and evaluates against
|
||||
Kubeshark's protobuf-based `BaseEntry` structure.
|
||||
|
||||
## Network-Level Variables
|
||||
|
||||
| Variable | Type | Description | Example |
|
||||
|----------|------|-------------|---------|
|
||||
| `src.ip` | string | Source IP address | `"10.0.53.101"` |
|
||||
| `dst.ip` | string | Destination IP address | `"192.168.1.1"` |
|
||||
| `src.port` | int | Source port number | `43210` |
|
||||
| `dst.port` | int | Destination port number | `8080` |
|
||||
| `protocol` | string | Detected protocol type | `"HTTP"`, `"DNS"` |
|
||||
|
||||
## Identity and Metadata Variables
|
||||
|
||||
| Variable | Type | Description |
|
||||
|----------|------|-------------|
|
||||
| `id` | int | BaseEntry unique identifier (assigned by sniffer) |
|
||||
| `node_id` | string | Node identifier (assigned by hub) |
|
||||
| `index` | int | Entry index for stream uniqueness |
|
||||
| `stream` | string | Stream identifier (hex string) |
|
||||
| `timestamp` | timestamp | Event time (UTC), use with `timestamp()` function |
|
||||
| `elapsed_time` | int | Age since timestamp in microseconds |
|
||||
| `worker` | string | Worker identifier |
|
||||
|
||||
## Cross-Reference Variables
|
||||
|
||||
| Variable | Type | Description |
|
||||
|----------|------|-------------|
|
||||
| `conn_id` | int | L7 to L4 connection cross-reference ID |
|
||||
| `flow_id` | int | L7 to L4 flow cross-reference ID |
|
||||
| `has_pcap` | bool | Whether PCAP data is available for this entry |
|
||||
|
||||
## Capture Source Variables
|
||||
|
||||
| Variable | Type | Description | Values |
|
||||
|----------|------|-------------|--------|
|
||||
| `capture_source` | string | Canonical capture source | `"unspecified"`, `"af_packet"`, `"ebpf"`, `"ebpf_tls"` |
|
||||
| `capture_backend` | string | Backend family | `"af_packet"`, `"ebpf"` |
|
||||
| `capture_source_code` | int | Numeric enum | 0=unspecified, 1=af_packet, 2=ebpf, 3=ebpf_tls |
|
||||
| `capture` | map | Nested map access | `capture["source"]`, `capture["backend"]` |
|
||||
|
||||
## Protocol Detection Flags
|
||||
|
||||
Boolean variables indicating detected protocol. Use as first filter term for performance.
|
||||
|
||||
| Variable | Protocol | Variable | Protocol |
|
||||
|----------|----------|----------|----------|
|
||||
| `http` | HTTP/1.1, HTTP/2 | `redis` | Redis |
|
||||
| `dns` | DNS | `kafka` | Kafka |
|
||||
| `tls` | TLS/SSL handshake | `amqp` | AMQP messaging |
|
||||
| `tcp` | TCP transport | `ldap` | LDAP directory |
|
||||
| `udp` | UDP transport | `ws` | WebSocket |
|
||||
| `sctp` | SCTP streaming | `gql` | GraphQL (v1 or v2) |
|
||||
| `icmp` | ICMP | `gqlv1` | GraphQL v1 only |
|
||||
| `radius` | RADIUS auth | `gqlv2` | GraphQL v2 only |
|
||||
| `diameter` | Diameter | `conn` | L4 connection tracking |
|
||||
| `flow` | L4 flow tracking | `tcp_conn` | TCP connection tracking |
|
||||
| `tcp_flow` | TCP flow tracking | `udp_conn` | UDP connection tracking |
|
||||
| `udp_flow` | UDP flow tracking | | |
|
||||
|
||||
## HTTP Variables
|
||||
|
||||
| Variable | Type | Description | Example |
|
||||
|----------|------|-------------|---------|
|
||||
| `method` | string | HTTP method | `"GET"`, `"POST"`, `"PUT"`, `"DELETE"`, `"PATCH"` |
|
||||
| `url` | string | Full URL path and query string | `"/api/users?id=123"` |
|
||||
| `path` | string | URL path component (no query) | `"/api/users"` |
|
||||
| `status_code` | int | HTTP response status code | `200`, `404`, `500` |
|
||||
| `http_version` | string | HTTP protocol version | `"HTTP/1.1"`, `"HTTP/2"` |
|
||||
| `query_string` | map[string]string | Parsed URL query parameters | `query_string["id"]` → `"123"` |
|
||||
| `request.headers` | map[string]string | Request HTTP headers | `request.headers["content-type"]` |
|
||||
| `response.headers` | map[string]string | Response HTTP headers | `response.headers["server"]` |
|
||||
| `request.cookies` | map[string]string | Request cookies | `request.cookies["session"]` |
|
||||
| `response.cookies` | map[string]string | Response cookies | `response.cookies["token"]` |
|
||||
| `request_headers_size` | int | Request headers size in bytes | |
|
||||
| `request_body_size` | int | Request body size in bytes | |
|
||||
| `response_headers_size` | int | Response headers size in bytes | |
|
||||
| `response_body_size` | int | Response body size in bytes | |
|
||||
|
||||
GraphQL requests have `gql` (or `gqlv1`/`gqlv2`) set to true and all HTTP
|
||||
variables available.
|
||||
|
||||
## DNS Variables
|
||||
|
||||
| Variable | Type | Description | Example |
|
||||
|----------|------|-------------|---------|
|
||||
| `dns_questions` | []string | Question domain names (request + response) | `["example.com"]` |
|
||||
| `dns_answers` | []string | Answer domain names | `["1.2.3.4"]` |
|
||||
| `dns_question_types` | []string | Record types in questions | `["A"]`, `["AAAA"]`, `["CNAME"]` |
|
||||
| `dns_request` | bool | Is DNS request message | |
|
||||
| `dns_response` | bool | Is DNS response message | |
|
||||
| `dns_request_length` | int | DNS request size in bytes (0 if absent) | |
|
||||
| `dns_response_length` | int | DNS response size in bytes (0 if absent) | |
|
||||
| `dns_total_size` | int | Sum of request + response sizes | |
|
||||
|
||||
Supported question types: A, AAAA, NS, CNAME, SOA, MX, TXT, SRV, PTR, ANY.
|
||||
|
||||
## TLS Variables
|
||||
|
||||
| Variable | Type | Description | Example |
|
||||
|----------|------|-------------|---------|
|
||||
| `tls` | bool | TLS payload detected | |
|
||||
| `tls_summary` | string | TLS handshake summary | `"ClientHello"`, `"ServerHello"` |
|
||||
| `tls_info` | string | TLS connection details | `"TLS 1.3, AES-256-GCM"` |
|
||||
| `tls_request_size` | int | TLS request size in bytes | |
|
||||
| `tls_response_size` | int | TLS response size in bytes | |
|
||||
| `tls_total_size` | int | Sum of request + response (computed if not provided) | |
|
||||
|
||||
## TCP Variables
|
||||
|
||||
| Variable | Type | Description |
|
||||
|----------|------|-------------|
|
||||
| `tcp` | bool | TCP payload detected |
|
||||
| `tcp_method` | string | TCP method information |
|
||||
| `tcp_payload` | bytes | Raw TCP payload data |
|
||||
| `tcp_error_type` | string | TCP error type (empty if none) |
|
||||
| `tcp_error_message` | string | TCP error message (empty if none) |
|
||||
|
||||
## UDP Variables
|
||||
|
||||
| Variable | Type | Description |
|
||||
|----------|------|-------------|
|
||||
| `udp` | bool | UDP payload detected |
|
||||
| `udp_length` | int | UDP packet length |
|
||||
| `udp_checksum` | int | UDP checksum value |
|
||||
| `udp_payload` | bytes | Raw UDP payload data |
|
||||
|
||||
## SCTP Variables
|
||||
|
||||
| Variable | Type | Description |
|
||||
|----------|------|-------------|
|
||||
| `sctp` | bool | SCTP payload detected |
|
||||
| `sctp_checksum` | int | SCTP checksum value |
|
||||
| `sctp_chunk_type` | string | SCTP chunk type |
|
||||
| `sctp_length` | int | SCTP chunk length |
|
||||
|
||||
## ICMP Variables
|
||||
|
||||
| Variable | Type | Description |
|
||||
|----------|------|-------------|
|
||||
| `icmp` | bool | ICMP payload detected |
|
||||
| `icmp_type` | string | ICMP type code |
|
||||
| `icmp_version` | int | ICMP version (4 or 6) |
|
||||
| `icmp_length` | int | ICMP message length |
|
||||
|
||||
## WebSocket Variables
|
||||
|
||||
| Variable | Type | Description | Values |
|
||||
|----------|------|-------------|--------|
|
||||
| `ws` | bool | WebSocket payload detected | |
|
||||
| `ws_opcode` | string | WebSocket operation code | `"text"`, `"binary"`, `"close"`, `"ping"`, `"pong"` |
|
||||
| `ws_request` | bool | Is WebSocket request | |
|
||||
| `ws_response` | bool | Is WebSocket response | |
|
||||
| `ws_request_payload_data` | string | Request payload (safely truncated) | |
|
||||
| `ws_request_payload_length` | int | Request payload length in bytes | |
|
||||
| `ws_response_payload_length` | int | Response payload length in bytes | |
|
||||
|
||||
## Redis Variables
|
||||
|
||||
| Variable | Type | Description | Example |
|
||||
|----------|------|-------------|---------|
|
||||
| `redis` | bool | Redis payload detected | |
|
||||
| `redis_type` | string | Redis command verb | `"GET"`, `"SET"`, `"DEL"`, `"HGET"` |
|
||||
| `redis_command` | string | Full Redis command line | `"GET session:1234"` |
|
||||
| `redis_key` | string | Key (truncated to 64 bytes) | `"session:1234"` |
|
||||
| `redis_request_size` | int | Request size (0 if absent) | |
|
||||
| `redis_response_size` | int | Response size (0 if absent) | |
|
||||
| `redis_total_size` | int | Sum of request + response | |
|
||||
|
||||
## Kafka Variables
|
||||
|
||||
| Variable | Type | Description | Example |
|
||||
|----------|------|-------------|---------|
|
||||
| `kafka` | bool | Kafka payload detected | |
|
||||
| `kafka_api_key` | int | Kafka API key number | 0=FETCH, 1=PRODUCE |
|
||||
| `kafka_api_key_name` | string | Human-readable API operation | `"PRODUCE"`, `"FETCH"` |
|
||||
| `kafka_client_id` | string | Kafka client identifier | `"payment-processor"` |
|
||||
| `kafka_size` | int | Message size (request preferred, else response) | |
|
||||
| `kafka_request` | bool | Is Kafka request | |
|
||||
| `kafka_response` | bool | Is Kafka response | |
|
||||
| `kafka_request_summary` | string | Request summary/topic | `"orders-topic"` |
|
||||
| `kafka_request_size` | int | Request size (0 if absent) | |
|
||||
| `kafka_response_size` | int | Response size (0 if absent) | |
|
||||
|
||||
## AMQP Variables
|
||||
|
||||
| Variable | Type | Description | Example |
|
||||
|----------|------|-------------|---------|
|
||||
| `amqp` | bool | AMQP payload detected | |
|
||||
| `amqp_method` | string | AMQP method name | `"basic.publish"`, `"channel.open"` |
|
||||
| `amqp_summary` | string | Operation summary | |
|
||||
| `amqp_request` | bool | Is AMQP request | |
|
||||
| `amqp_response` | bool | Is AMQP response | |
|
||||
| `amqp_request_length` | int | Request length (0 if absent) | |
|
||||
| `amqp_response_length` | int | Response length (0 if absent) | |
|
||||
| `amqp_total_size` | int | Sum of request + response | |
|
||||
|
||||
## LDAP Variables
|
||||
|
||||
| Variable | Type | Description |
|
||||
|----------|------|-------------|
|
||||
| `ldap` | bool | LDAP payload detected |
|
||||
| `ldap_type` | string | LDAP operation type (request preferred) |
|
||||
| `ldap_summary` | string | Operation summary |
|
||||
| `ldap_request` | bool | Is LDAP request |
|
||||
| `ldap_response` | bool | Is LDAP response |
|
||||
| `ldap_request_length` | int | Request length (0 if absent) |
|
||||
| `ldap_response_length` | int | Response length (0 if absent) |
|
||||
| `ldap_total_size` | int | Sum of request + response |
|
||||
|
||||
## RADIUS Variables
|
||||
|
||||
| Variable | Type | Description | Example |
|
||||
|----------|------|-------------|---------|
|
||||
| `radius` | bool | RADIUS payload detected | |
|
||||
| `radius_code` | int | RADIUS code (request preferred) | |
|
||||
| `radius_code_name` | string | Code name | `"Access-Request"` |
|
||||
| `radius_request` | bool | Is RADIUS request | |
|
||||
| `radius_response` | bool | Is RADIUS response | |
|
||||
| `radius_request_authenticator` | string | Request authenticator (hex) | |
|
||||
| `radius_request_length` | int | Request size (0 if absent) | |
|
||||
| `radius_response_length` | int | Response size (0 if absent) | |
|
||||
| `radius_total_size` | int | Sum of request + response | |
|
||||
|
||||
## Diameter Variables
|
||||
|
||||
| Variable | Type | Description |
|
||||
|----------|------|-------------|
|
||||
| `diameter` | bool | Diameter payload detected |
|
||||
| `diameter_method` | string | Method name (request preferred) |
|
||||
| `diameter_summary` | string | Operation summary |
|
||||
| `diameter_request` | bool | Is Diameter request |
|
||||
| `diameter_response` | bool | Is Diameter response |
|
||||
| `diameter_request_length` | int | Request size (0 if absent) |
|
||||
| `diameter_response_length` | int | Response size (0 if absent) |
|
||||
| `diameter_total_size` | int | Sum of request + response |
|
||||
|
||||
## L4 Connection Tracking Variables
|
||||
|
||||
| Variable | Type | Description | Example |
|
||||
|----------|------|-------------|---------|
|
||||
| `conn` | bool | Connection tracking entry | |
|
||||
| `conn_state` | string | Connection state | `"open"`, `"in_progress"`, `"closed"` |
|
||||
| `conn_local_pkts` | int | Packets from local peer | |
|
||||
| `conn_local_bytes` | int | Bytes from local peer | |
|
||||
| `conn_remote_pkts` | int | Packets from remote peer | |
|
||||
| `conn_remote_bytes` | int | Bytes from remote peer | |
|
||||
| `conn_l7_detected` | []string | L7 protocols detected on connection | `["HTTP", "TLS"]` |
|
||||
| `conn_group_id` | int | Connection group identifier | |
|
||||
|
||||
## L4 Flow Tracking Variables
|
||||
|
||||
Flows extend connections with rate metrics (packets/bytes per second).
|
||||
|
||||
| Variable | Type | Description |
|
||||
|----------|------|-------------|
|
||||
| `flow` | bool | Flow tracking entry |
|
||||
| `flow_state` | string | Flow state (`"open"`, `"in_progress"`, `"closed"`) |
|
||||
| `flow_local_pkts` | int | Packets from local peer |
|
||||
| `flow_local_bytes` | int | Bytes from local peer |
|
||||
| `flow_remote_pkts` | int | Packets from remote peer |
|
||||
| `flow_remote_bytes` | int | Bytes from remote peer |
|
||||
| `flow_local_pps` | int | Local packets per second |
|
||||
| `flow_local_bps` | int | Local bytes per second |
|
||||
| `flow_remote_pps` | int | Remote packets per second |
|
||||
| `flow_remote_bps` | int | Remote bytes per second |
|
||||
| `flow_l7_detected` | []string | L7 protocols detected on flow |
|
||||
| `flow_group_id` | int | Flow group identifier |
|
||||
|
||||
## Kubernetes Variables
|
||||
|
||||
### Pod and Service (Directional)
|
||||
|
||||
| Variable | Type | Description |
|
||||
|----------|------|-------------|
|
||||
| `src.pod.name` | string | Source pod name |
|
||||
| `src.pod.namespace` | string | Source pod namespace |
|
||||
| `dst.pod.name` | string | Destination pod name |
|
||||
| `dst.pod.namespace` | string | Destination pod namespace |
|
||||
| `src.service.name` | string | Source service name |
|
||||
| `src.service.namespace` | string | Source service namespace |
|
||||
| `dst.service.name` | string | Destination service name |
|
||||
| `dst.service.namespace` | string | Destination service namespace |
|
||||
|
||||
**Fallback behavior**: Pod namespace/name fields automatically fall back to
|
||||
service data when pod info is unavailable. This means `dst.pod.namespace` works
|
||||
even when only service-level resolution exists.
|
||||
|
||||
### Aggregate Collections (Non-Directional)
|
||||
|
||||
| Variable | Type | Description |
|
||||
|----------|------|-------------|
|
||||
| `namespaces` | []string | All namespaces (src + dst, pod + service) |
|
||||
| `pods` | []string | All pod names (src + dst) |
|
||||
| `services` | []string | All service names (src + dst) |
|
||||
|
||||
### Labels and Annotations
|
||||
|
||||
| Variable | Type | Description |
|
||||
|----------|------|-------------|
|
||||
| `local_labels` | map[string]string | Kubernetes labels of local peer |
|
||||
| `local_annotations` | map[string]string | Kubernetes annotations of local peer |
|
||||
| `remote_labels` | map[string]string | Kubernetes labels of remote peer |
|
||||
| `remote_annotations` | map[string]string | Kubernetes annotations of remote peer |
|
||||
|
||||
Use `map_get(local_labels, "key", "default")` for safe access that won't error
|
||||
on missing keys.
|
||||
|
||||
### Node Information
|
||||
|
||||
| Variable | Type | Description |
|
||||
|----------|------|-------------|
|
||||
| `node` | map | Nested: `node["name"]`, `node["ip"]` |
|
||||
| `node_name` | string | Node name (flat alias) |
|
||||
| `node_ip` | string | Node IP (flat alias) |
|
||||
| `local_node_name` | string | Node name of local peer |
|
||||
| `remote_node_name` | string | Node name of remote peer |
|
||||
|
||||
### Process Information
|
||||
|
||||
| Variable | Type | Description |
|
||||
|----------|------|-------------|
|
||||
| `local_process_name` | string | Process name on local peer |
|
||||
| `remote_process_name` | string | Process name on remote peer |
|
||||
|
||||
### DNS Resolution
|
||||
|
||||
| Variable | Type | Description |
|
||||
|----------|------|-------------|
|
||||
| `src.dns` | string | DNS resolution of source IP |
|
||||
| `dst.dns` | string | DNS resolution of destination IP |
|
||||
| `dns_resolutions` | []string | All DNS resolutions (deduplicated) |
|
||||
|
||||
### Resolution Status
|
||||
|
||||
| Variable | Type | Values |
|
||||
|----------|------|--------|
|
||||
| `local_resolution_status` | string | `""` (resolved), `"no_node_mapping"`, `"rpc_error"`, `"rpc_empty"`, `"cache_miss"`, `"queue_full"` |
|
||||
| `remote_resolution_status` | string | Same as above |
|
||||
|
||||
## Default Values
|
||||
|
||||
When a variable is not present in an entry, KFL2 uses these defaults:
|
||||
|
||||
| Type | Default |
|
||||
|------|---------|
|
||||
| string | `""` |
|
||||
| int | `0` |
|
||||
| bool | `false` |
|
||||
| list | `[]` |
|
||||
| map | `{}` |
|
||||
| bytes | `[]` |
|
||||
|
||||
## Protocol Variable Precedence
|
||||
|
||||
For protocols with request/response pairs (Kafka, RADIUS, Diameter), merged
|
||||
fields prefer the **request** side. If no request exists, the response value
|
||||
is used. Size totals are always computed as `request_size + response_size`.
|
||||
|
||||
## CEL Language Features
|
||||
|
||||
KFL2 supports the full CEL specification:
|
||||
|
||||
- **Short-circuit evaluation**: `&&` stops on first false, `||` stops on first true
|
||||
- **Ternary**: `condition ? value_if_true : value_if_false`
|
||||
- **Regex**: `str.matches("pattern")` uses RE2 syntax
|
||||
- **Type coercion**: Timestamps require `timestamp()`, durations require `duration()`
|
||||
- **Null safety**: Use `in` operator or `map_get()` before accessing map keys
|
||||
|
||||
For the full CEL specification, see the
|
||||
[CEL Language Definition](https://github.com/google/cel-spec/blob/master/doc/langdef.md).
|
||||
409
skills/network-rca/SKILL.md
Normal file
409
skills/network-rca/SKILL.md
Normal file
@@ -0,0 +1,409 @@
|
||||
---
|
||||
name: network-rca
|
||||
description: >
|
||||
Kubernetes network root cause analysis skill powered by Kubeshark MCP. Use this skill
|
||||
whenever the user wants to investigate past incidents, perform retrospective traffic
|
||||
analysis, take or manage traffic snapshots, extract PCAPs, dissect L7 API calls from
|
||||
historical captures, compare traffic patterns over time, detect drift or anomalies
|
||||
between snapshots, or do any kind of forensic network analysis in Kubernetes.
|
||||
Also trigger when the user mentions snapshots, raw capture, PCAP extraction,
|
||||
traffic replay, postmortem analysis, "what happened yesterday/last week",
|
||||
root cause analysis, RCA, cloud snapshot storage, snapshot dissection, or KFL filters
|
||||
for historical traffic. Even if the user just says "figure out what went wrong"
|
||||
or "compare today's traffic to yesterday" in a Kubernetes context, use this skill.
|
||||
---
|
||||
|
||||
# Network Root Cause Analysis with Kubeshark MCP
|
||||
|
||||
You are a Kubernetes network forensics specialist. Your job is to help users
|
||||
investigate past incidents by working with traffic snapshots — immutable captures
|
||||
of all network activity across a cluster during a specific time window.
|
||||
|
||||
Kubeshark is a search engine for network traffic. Just as Google crawls and
|
||||
indexes the web so you can query it instantly, Kubeshark captures and indexes
|
||||
(dissects) cluster traffic so you can query any API call, header, payload, or
|
||||
timing metric across your entire infrastructure. Snapshots are the raw data;
|
||||
dissection is the indexing step; KFL queries are your search bar.
|
||||
|
||||
Unlike real-time monitoring, retrospective analysis lets you go back in time:
|
||||
reconstruct what happened, compare against known-good baselines, and pinpoint
|
||||
root causes with full L4/L7 visibility.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
Before starting any analysis, verify the environment is ready.
|
||||
|
||||
### Kubeshark MCP Health Check
|
||||
|
||||
Confirm the Kubeshark MCP is accessible and tools are available. Look for tools
|
||||
like `list_api_calls`, `list_l4_flows`, `create_snapshot`, etc.
|
||||
|
||||
**Tool**: `check_kubeshark_status`
|
||||
|
||||
If tools like `list_api_calls` or `list_l4_flows` are missing from the response,
|
||||
something is wrong with the MCP connection. Guide the user through setup
|
||||
(see Setup Reference at the bottom).
|
||||
|
||||
### Raw Capture Must Be Enabled
|
||||
|
||||
Retrospective analysis depends on raw capture — Kubeshark's kernel-level (eBPF)
|
||||
packet recording that stores traffic at the node level. Without it, snapshots
|
||||
have nothing to work with.
|
||||
|
||||
Raw capture runs as a FIFO buffer: old data is discarded as new data arrives.
|
||||
The buffer size determines how far back you can go. Larger buffer = wider
|
||||
snapshot window.
|
||||
|
||||
```yaml
|
||||
tap:
|
||||
capture:
|
||||
raw:
|
||||
enabled: true
|
||||
storageSize: 10Gi # Per-node FIFO buffer
|
||||
```
|
||||
|
||||
If raw capture isn't enabled, inform the user that retrospective analysis
|
||||
requires it and share the configuration above.
|
||||
|
||||
### Snapshot Storage
|
||||
|
||||
Snapshots are assembled on the Hub's storage, which is ephemeral by default.
|
||||
For serious forensic work, persistent storage is recommended:
|
||||
|
||||
```yaml
|
||||
tap:
|
||||
snapshots:
|
||||
local:
|
||||
storageClass: gp2
|
||||
storageSize: 1000Gi
|
||||
```
|
||||
|
||||
## Core Workflow
|
||||
|
||||
The general flow for any RCA investigation:
|
||||
|
||||
1. **Determine time window** — When did the issue occur? Use `get_data_boundaries`
|
||||
to see what raw capture data is available.
|
||||
2. **Create or locate a snapshot** — Either take a new snapshot covering the
|
||||
incident window, or find an existing one with `list_snapshots`.
|
||||
3. **Dissect the snapshot** — Activate L7 dissection so you can query API calls,
|
||||
not just raw packets.
|
||||
4. **Investigate** — Use KFL filters to slice through the traffic. Start broad,
|
||||
narrow progressively.
|
||||
5. **Extract evidence** — Export filtered PCAPs, resolve workload IPs, pull
|
||||
specific API call details.
|
||||
6. **Compare** (optional) — Diff against a known-good snapshot to identify
|
||||
what changed.
|
||||
|
||||
## Snapshot Operations
|
||||
|
||||
### Check Data Boundaries
|
||||
|
||||
Before creating a snapshot, check what raw capture data exists across the cluster.
|
||||
|
||||
**Tool**: `get_data_boundaries`
|
||||
|
||||
This returns the time window available per node. You can only create snapshots
|
||||
within these boundaries — data outside the window has already been rotated out
|
||||
of the FIFO buffer.
|
||||
|
||||
**Example response**:
|
||||
```
|
||||
Cluster-wide:
|
||||
Oldest: 2026-03-14 16:12:34 UTC
|
||||
Newest: 2026-03-14 18:05:20 UTC
|
||||
|
||||
Per node:
|
||||
┌─────────────────────────────┬──────────┬──────────┐
|
||||
│ Node │ Oldest │ Newest │
|
||||
├─────────────────────────────┼──────────┼──────────┤
|
||||
│ ip-10-0-25-170.ec2.internal │ 16:12:34 │ 18:03:39 │
|
||||
│ ip-10-0-32-115.ec2.internal │ 16:13:45 │ 18:05:20 │
|
||||
└─────────────────────────────┴──────────┴──────────┘
|
||||
```
|
||||
|
||||
If the user's incident falls outside the available window, let them know the
|
||||
data has been rotated out. Suggest increasing `storageSize` for future coverage.
|
||||
|
||||
### Create a Snapshot
|
||||
|
||||
**Tool**: `create_snapshot`
|
||||
|
||||
Specify nodes (or cluster-wide) and a time window within the data boundaries.
|
||||
Snapshots include everything needed to reconstruct the traffic picture:
|
||||
raw capture files, Kubernetes pod events, and eBPF cgroup events.
|
||||
|
||||
Snapshots take time to build. After creating one, check its status.
|
||||
|
||||
**Tool**: `get_snapshot`
|
||||
|
||||
Wait until status is `completed` before proceeding with dissection or PCAP export.
|
||||
|
||||
### List Existing Snapshots
|
||||
|
||||
**Tool**: `list_snapshots`
|
||||
|
||||
Shows all snapshots on the local Hub, with name, size, status, and node count.
|
||||
Use this when the user wants to work with a previously captured snapshot.
|
||||
|
||||
### Cloud Storage
|
||||
|
||||
Snapshots on the Hub are ephemeral and space-limited. Cloud storage (S3, GCS,
|
||||
Azure Blob) provides long-term retention. Snapshots can be downloaded to any
|
||||
cluster with Kubeshark — not necessarily the original cluster. This means you can
|
||||
download a production snapshot to a local KinD cluster for safe analysis.
|
||||
|
||||
**Check cloud status**: `get_cloud_storage_status`
|
||||
**Upload to cloud**: `upload_snapshot_to_cloud`
|
||||
**Download from cloud**: `download_snapshot_from_cloud`
|
||||
|
||||
When cloud storage is configured, recommend uploading snapshots after analysis
|
||||
for long-term retention, especially for compliance or post-mortem documentation.
|
||||
|
||||
## L7 API Dissection
|
||||
|
||||
Think of dissection the way a search engine thinks of indexing. A raw snapshot
|
||||
is like the raw internet — billions of packets, impossible to query efficiently.
|
||||
Dissection indexes that traffic: it reconstructs packets into structured L7 API
|
||||
calls, builds a queryable database of every request, response, header, payload,
|
||||
and timing metric. Once dissected, Kubeshark becomes a search engine for your
|
||||
network traffic — you type a query (using KFL filters), and get instant,
|
||||
precise answers from terabytes of captured data.
|
||||
|
||||
Without dissection, you have PCAPs. With dissection, you have answers.
|
||||
|
||||
### Activate Dissection
|
||||
|
||||
**Tool**: `start_snapshot_dissection`
|
||||
|
||||
Dissection takes time proportional to the snapshot size — it's parsing every
|
||||
packet, reassembling streams, and building the index. After it completes,
|
||||
the full query engine is available:
|
||||
- `list_api_calls` — Search API transactions with filters (the "Google search" for your traffic)
|
||||
- `get_api_call` — Drill into a specific call (headers, body, timing)
|
||||
- `get_api_stats` — Aggregated statistics (throughput, error rates, latency)
|
||||
|
||||
### Investigation Strategy
|
||||
|
||||
Start broad, then narrow:
|
||||
|
||||
1. `get_api_stats` — Get the overall picture: error rates, latency percentiles,
|
||||
throughput. Look for spikes or anomalies.
|
||||
2. `list_api_calls` filtered by error codes (4xx, 5xx) or high latency — find
|
||||
the problematic transactions.
|
||||
3. `get_api_call` on specific calls — inspect headers, bodies, timing to
|
||||
understand what went wrong.
|
||||
4. Use KFL filters (see below) to slice the traffic by namespace, service,
|
||||
protocol, or any combination.
|
||||
|
||||
## PCAP Extraction
|
||||
|
||||
Sometimes you need the raw packets — for Wireshark analysis, sharing with
|
||||
network teams, or compliance evidence.
|
||||
|
||||
### Export a PCAP
|
||||
|
||||
**Tool**: `export_snapshot_pcap`
|
||||
|
||||
You can export the full snapshot or filter it down using:
|
||||
- **Nodes** — specific nodes only
|
||||
- **Time** — sub-window within the snapshot
|
||||
- **BPF filter** — standard Berkeley Packet Filter syntax (e.g., `host 10.0.53.101`,
|
||||
`port 8080`, `net 10.0.0.0/16`)
|
||||
|
||||
### Resolve Workload IPs
|
||||
|
||||
When you care about specific workloads but don't have their IPs, resolve them
|
||||
from the snapshot's metadata. Snapshots preserve the pod-to-IP mappings from
|
||||
capture time, so you get accurate resolution even if pods have since been
|
||||
rescheduled.
|
||||
|
||||
**Tool**: `resolve_workload`
|
||||
|
||||
**Example**: Resolve the IP of `orders-594487879c-7ddxf` from snapshot `slim-timestamp`
|
||||
→ Returns `10.0.53.101`
|
||||
|
||||
Then use that IP in a BPF filter to extract only that workload's traffic:
|
||||
`export_snapshot_pcap` with BPF `host 10.0.53.101`
|
||||
|
||||
## KFL — Kubeshark Filter Language
|
||||
|
||||
KFL2 is the query language for slicing through dissected traffic. For the
|
||||
complete KFL2 reference (all variables, operators, protocol fields, and examples),
|
||||
see the **KFL skill** (`skills/kfl/`).
|
||||
|
||||
### RCA-Specific Filter Patterns
|
||||
|
||||
Layer filters progressively when investigating an incident:
|
||||
|
||||
```
|
||||
// Step 1: Protocol + namespace
|
||||
http && dst.pod.namespace == "production"
|
||||
|
||||
// Step 2: Add error condition
|
||||
http && dst.pod.namespace == "production" && status_code >= 500
|
||||
|
||||
// Step 3: Narrow to service
|
||||
http && dst.pod.namespace == "production" && status_code >= 500 && dst.service.name == "payment-service"
|
||||
|
||||
// Step 4: Narrow to endpoint
|
||||
http && dst.pod.namespace == "production" && status_code >= 500 && dst.service.name == "payment-service" && path.contains("/charge")
|
||||
|
||||
// Step 5: Add timing
|
||||
http && dst.pod.namespace == "production" && status_code >= 500 && dst.service.name == "payment-service" && path.contains("/charge") && elapsed_time > 2000000
|
||||
```
|
||||
|
||||
Other common RCA filters:
|
||||
|
||||
```
|
||||
dns && dns_response && status_code != 0 // Failed DNS lookups
|
||||
src.service.namespace != dst.service.namespace // Cross-namespace traffic
|
||||
http && elapsed_time > 5000000 // Slow transactions (> 5s)
|
||||
conn && conn_state == "open" && conn_local_bytes > 1000000 // High-volume connections
|
||||
```
|
||||
|
||||
## Use Cases
|
||||
|
||||
### Post-Incident RCA
|
||||
|
||||
The primary use case. Something broke, it's been resolved, and now you need
|
||||
to understand why.
|
||||
|
||||
1. Identify the incident time window from alerts, logs, or user reports
|
||||
2. Check `get_data_boundaries` — is the window still in raw capture?
|
||||
3. `create_snapshot` covering the incident window (add buffer: 15 minutes
|
||||
before and after the reported time)
|
||||
4. `start_snapshot_dissection`
|
||||
5. `get_api_stats` — look for error rate spikes, latency jumps
|
||||
6. `list_api_calls` filtered to errors — identify the failing service chain
|
||||
7. `get_api_call` on specific failures — read headers, bodies, timing
|
||||
8. Follow the dependency chain upstream until you find the originating failure
|
||||
9. Export relevant PCAPs for the post-mortem document
|
||||
|
||||
### Trend Analysis and Drift Detection
|
||||
|
||||
Take snapshots at regular intervals (daily, weekly) with consistent parameters.
|
||||
Compare them to detect:
|
||||
|
||||
- **Latency drift** — p95 latency creeping up over days
|
||||
- **API surface changes** — new endpoints appearing, old ones disappearing
|
||||
- **Error rate trends** — gradual increase in 5xx responses
|
||||
- **Traffic pattern shifts** — new service-to-service connections, volume changes
|
||||
- **Security posture regression** — unencrypted traffic appearing, new external
|
||||
connections
|
||||
|
||||
**Workflow**:
|
||||
1. `create_snapshot` with consistent parameters (same time-of-day, same duration)
|
||||
2. `start_snapshot_dissection` on each
|
||||
3. `get_api_stats` on each — compare metrics side by side
|
||||
4. `list_api_calls` with targeted KFL filters — diff the results
|
||||
5. Flag anomalies and regressions
|
||||
|
||||
This is powerful when combined with scheduled tasks — automate daily snapshot
|
||||
creation and comparison to catch drift before it becomes an incident.
|
||||
|
||||
### Forensic Evidence Preservation
|
||||
|
||||
For compliance, legal, or audit requirements:
|
||||
|
||||
1. `create_snapshot` immediately when an incident is detected
|
||||
2. `upload_snapshot_to_cloud` — immutable copy in long-term storage
|
||||
3. Document the snapshot ID, time window, and chain of custody
|
||||
4. The snapshot can be downloaded to any Kubeshark cluster for later analysis,
|
||||
even months later, even on a completely different cluster
|
||||
|
||||
### Production-to-Local Replay
|
||||
|
||||
Investigate production issues safely on a local cluster:
|
||||
|
||||
1. `create_snapshot` on the production cluster
|
||||
2. `upload_snapshot_to_cloud`
|
||||
3. On a local KinD/minikube cluster with Kubeshark: `download_snapshot_from_cloud`
|
||||
4. `start_snapshot_dissection` — full L7 analysis locally
|
||||
5. Investigate without touching production
|
||||
|
||||
## Composability
|
||||
|
||||
This skill is designed to work alongside other Kubeshark-powered skills:
|
||||
|
||||
- **API Security Skill** — Run security scans against a snapshot's dissected traffic.
|
||||
Take daily snapshots and diff security findings to detect posture drift.
|
||||
- **Incident Response Skill** — Use this skill's snapshot workflow as the evidence
|
||||
preservation and forensic analysis layer within the IR methodology.
|
||||
- **Network Engineering Skill** — Use snapshots for baseline traffic characterization
|
||||
and architecture reviews.
|
||||
|
||||
When multiple skills are loaded, they share context. A snapshot created here
|
||||
can be analyzed by the security skill's OWASP scans or the IR skill's
|
||||
7-phase methodology.
|
||||
|
||||
## Setup Reference
|
||||
|
||||
### Installing the CLI
|
||||
|
||||
**Homebrew (macOS)**:
|
||||
```bash
|
||||
brew install kubeshark
|
||||
```
|
||||
|
||||
**Linux**:
|
||||
```bash
|
||||
sh <(curl -Ls https://kubeshark.com/install)
|
||||
```
|
||||
|
||||
**From source**:
|
||||
```bash
|
||||
git clone https://github.com/kubeshark/kubeshark
|
||||
cd kubeshark && make
|
||||
```
|
||||
|
||||
### MCP Configuration
|
||||
|
||||
**Claude Desktop / Cowork** (`claude_desktop_config.json`):
|
||||
```json
|
||||
{
|
||||
"mcpServers": {
|
||||
"kubeshark": {
|
||||
"command": "kubeshark",
|
||||
"args": ["mcp"]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Claude Code (CLI)**:
|
||||
```bash
|
||||
claude mcp add kubeshark -- kubeshark mcp
|
||||
```
|
||||
|
||||
**Without kubectl access** (direct URL mode):
|
||||
```json
|
||||
{
|
||||
"mcpServers": {
|
||||
"kubeshark": {
|
||||
"command": "kubeshark",
|
||||
"args": ["mcp", "--url", "https://kubeshark.example.com"]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
```bash
|
||||
# Claude Code equivalent:
|
||||
claude mcp add kubeshark -- kubeshark mcp --url https://kubeshark.example.com
|
||||
```
|
||||
|
||||
### Verification
|
||||
|
||||
- Claude Code: `/mcp` to check connection status
|
||||
- Terminal: `kubeshark mcp --list-tools`
|
||||
- Cluster: `kubectl get pods -l app=kubeshark-hub`
|
||||
|
||||
### Troubleshooting
|
||||
|
||||
- **Binary not found** → Install via Homebrew or the install script above
|
||||
- **Connection refused** → Deploy Kubeshark first: `kubeshark tap`
|
||||
- **No L7 data** → Check `get_dissection_status` and `enable_dissection`
|
||||
- **Snapshot creation fails** → Verify raw capture is enabled in Kubeshark config
|
||||
- **Empty snapshot** → Check `get_data_boundaries` — the requested window may
|
||||
fall outside available data
|
||||
Reference in New Issue
Block a user