mirror of
https://github.com/kubeshark/kubeshark.git
synced 2026-03-18 18:42:44 +00:00
Compare commits
28 Commits
docs/add-m
...
add-kubesh
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
710345c628 | ||
|
|
963b3e4ac2 | ||
|
|
b2813e02bd | ||
|
|
707d7351b6 | ||
|
|
23c86be773 | ||
|
|
3f8a067f9b | ||
|
|
33f5310e8e | ||
|
|
5f2f34e826 | ||
|
|
f9a5fbbb78 | ||
|
|
73f8e3585d | ||
|
|
a6daefc567 | ||
|
|
e6a67cc3b7 | ||
|
|
eb7dc42b6e | ||
|
|
d266408377 | ||
|
|
40ae6c626b | ||
|
|
e3283327f9 | ||
|
|
a46f05c4aa | ||
|
|
dbfd17d901 | ||
|
|
95c18b57a4 | ||
|
|
6fd2e4b1b2 | ||
|
|
686c7eba54 | ||
|
|
1ad61798f6 | ||
|
|
318b35e785 | ||
|
|
fecf290a25 | ||
|
|
a01f7bed74 | ||
|
|
633a17a0e0 | ||
|
|
8fac9a5ad5 | ||
|
|
76c5eb6b59 |
2
.github/workflows/mcp-publish.yml
vendored
2
.github/workflows/mcp-publish.yml
vendored
@@ -168,7 +168,7 @@ jobs:
|
||||
- name: Login to MCP Registry
|
||||
if: github.event_name != 'workflow_dispatch' || github.event.inputs.dry_run != 'true'
|
||||
shell: bash
|
||||
run: mcp-publisher login github
|
||||
run: mcp-publisher login github-oidc
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
|
||||
24
.github/workflows/release-tag.yml
vendored
Normal file
24
.github/workflows/release-tag.yml
vendored
Normal file
@@ -0,0 +1,24 @@
|
||||
name: Auto-tag release
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
types: [closed]
|
||||
branches: [master]
|
||||
|
||||
jobs:
|
||||
tag:
|
||||
if: github.event.pull_request.merged == true && startsWith(github.event.pull_request.head.ref, 'release/v')
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: write
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Create and push tag
|
||||
run: |
|
||||
VERSION="${GITHUB_HEAD_REF#release/}"
|
||||
echo "Creating tag $VERSION on master"
|
||||
git tag "$VERSION"
|
||||
git push origin "$VERSION"
|
||||
51
.github/workflows/test.yml
vendored
51
.github/workflows/test.yml
vendored
@@ -15,7 +15,7 @@ jobs:
|
||||
timeout-minutes: 20
|
||||
steps:
|
||||
- name: Check out code into the Go module directory
|
||||
uses: actions/checkout@v3
|
||||
uses: actions/checkout@v5
|
||||
with:
|
||||
fetch-depth: 2
|
||||
|
||||
@@ -29,3 +29,52 @@ jobs:
|
||||
|
||||
- name: Upload coverage to Codecov
|
||||
uses: codecov/codecov-action@v3
|
||||
|
||||
helm-tests:
|
||||
name: Helm Chart Tests
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 10
|
||||
steps:
|
||||
- name: Check out code
|
||||
uses: actions/checkout@v5
|
||||
|
||||
- name: Set up Helm
|
||||
uses: azure/setup-helm@v4
|
||||
|
||||
- name: Helm lint (default values)
|
||||
run: helm lint ./helm-chart
|
||||
|
||||
- name: Helm lint (S3 values)
|
||||
run: helm lint ./helm-chart -f ./helm-chart/tests/fixtures/values-s3.yaml
|
||||
|
||||
- name: Helm lint (Azure Blob values)
|
||||
run: helm lint ./helm-chart -f ./helm-chart/tests/fixtures/values-azblob.yaml
|
||||
|
||||
- name: Helm lint (GCS values)
|
||||
run: helm lint ./helm-chart -f ./helm-chart/tests/fixtures/values-gcs.yaml
|
||||
|
||||
- name: Helm lint (cloud refs values)
|
||||
run: helm lint ./helm-chart -f ./helm-chart/tests/fixtures/values-cloud-refs.yaml
|
||||
|
||||
- name: Install helm-unittest plugin
|
||||
run: helm plugin install https://github.com/helm-unittest/helm-unittest --verify=false
|
||||
|
||||
- name: Run helm unit tests
|
||||
run: helm unittest ./helm-chart
|
||||
|
||||
- name: Install kubeconform
|
||||
run: |
|
||||
curl -sL https://github.com/yannh/kubeconform/releases/latest/download/kubeconform-linux-amd64.tar.gz | tar xz
|
||||
sudo mv kubeconform /usr/local/bin/
|
||||
|
||||
- name: Validate default template
|
||||
run: helm template kubeshark ./helm-chart | kubeconform -strict -kubernetes-version 1.35.0 -summary
|
||||
|
||||
- name: Validate S3 template
|
||||
run: helm template kubeshark ./helm-chart -f ./helm-chart/tests/fixtures/values-s3.yaml | kubeconform -strict -kubernetes-version 1.35.0 -summary
|
||||
|
||||
- name: Validate Azure Blob template
|
||||
run: helm template kubeshark ./helm-chart -f ./helm-chart/tests/fixtures/values-azblob.yaml | kubeconform -strict -kubernetes-version 1.35.0 -summary
|
||||
|
||||
- name: Validate GCS template
|
||||
run: helm template kubeshark ./helm-chart -f ./helm-chart/tests/fixtures/values-gcs.yaml | kubeconform -strict -kubernetes-version 1.35.0 -summary
|
||||
|
||||
5
.gitignore
vendored
5
.gitignore
vendored
@@ -63,4 +63,7 @@ bin
|
||||
scripts/
|
||||
|
||||
# CWD config YAML
|
||||
kubeshark.yaml
|
||||
kubeshark.yaml
|
||||
|
||||
# Claude Code
|
||||
CLAUDE.md
|
||||
102
CLAUDE.md
Normal file
102
CLAUDE.md
Normal file
@@ -0,0 +1,102 @@
|
||||
Do not include any Claude/AI attribution (Co-Authored-By lines, badges, etc.) in commit messages or pull request descriptions.
|
||||
|
||||
## Skills
|
||||
|
||||
Kubeshark is building an ecosystem of open-source AI skills that work with the Kubeshark MCP.
|
||||
Skills live in the `skills/` directory at the root of this repo.
|
||||
|
||||
### What is a skill?
|
||||
|
||||
A skill is a SKILL.md file (with optional reference docs) that teaches an AI agent a domain-specific
|
||||
methodology. The Kubeshark MCP provides the tools (snapshot creation, API call queries, PCAP export,
|
||||
etc.) — a skill tells the agent *how* to use those tools for a specific job.
|
||||
|
||||
### Skill structure
|
||||
|
||||
```
|
||||
skills/
|
||||
└── <skill-name>/
|
||||
├── SKILL.md # Required. YAML frontmatter + markdown instructions.
|
||||
└── references/ # Optional. Reference docs loaded on demand.
|
||||
└── *.md
|
||||
```
|
||||
|
||||
### SKILL.md format
|
||||
|
||||
Every SKILL.md starts with YAML frontmatter:
|
||||
|
||||
```yaml
|
||||
---
|
||||
name: skill-name
|
||||
description: >
|
||||
When to trigger this skill. Be specific about user intents, keywords, and contexts.
|
||||
The description is the primary mechanism for AI agents to decide whether to load the skill.
|
||||
---
|
||||
```
|
||||
|
||||
The body is markdown instructions that define the methodology: prerequisites, workflows,
|
||||
tool usage patterns, output guidelines, and reference pointers.
|
||||
|
||||
### Guidelines for writing skills
|
||||
|
||||
- Keep SKILL.md under 500 lines. Put detailed references in `references/` with clear pointers.
|
||||
- Use imperative tone ("Check data boundaries", "Create a snapshot").
|
||||
- Reference Kubeshark MCP tools by exact name (e.g., `create_snapshot`, `list_api_calls`).
|
||||
- Include realistic example tool responses so the agent knows what to expect.
|
||||
- Explain *why* things matter, not just *what* to do — the agent is smart and benefits from context.
|
||||
- Include a Setup Reference section with MCP configuration for Claude Code and Claude Desktop.
|
||||
- The description frontmatter should be "pushy" — include trigger keywords generously so the skill
|
||||
activates when needed. Better to over-trigger than under-trigger.
|
||||
|
||||
### Kubeshark MCP tools available to skills
|
||||
|
||||
**Cluster management**: `check_kubeshark_status`, `start_kubeshark`, `stop_kubeshark`
|
||||
**Inventory**: `list_workloads`
|
||||
**L7 API**: `list_api_calls`, `get_api_call`, `get_api_stats`
|
||||
**L4 flows**: `list_l4_flows`, `get_l4_flow_summary`
|
||||
**Snapshots**: `get_data_boundaries`, `create_snapshot`, `get_snapshot`, `list_snapshots`, `start_snapshot_dissection`
|
||||
**PCAP**: `export_snapshot_pcap`, `resolve_workload`
|
||||
**Cloud storage**: `get_cloud_storage_status`, `upload_snapshot_to_cloud`, `download_snapshot_from_cloud`
|
||||
**Dissection**: `get_dissection_status`, `enable_dissection`, `disable_dissection`
|
||||
|
||||
### KFL (Kubeshark Filter Language)
|
||||
|
||||
KFL2 is built on CEL (Common Expression Language). Skills that involve traffic filtering should
|
||||
reference KFL. Key concepts:
|
||||
|
||||
- Display filter (post-capture), not capture filter
|
||||
- Fields: `src.ip`, `dst.ip`, `src.pod.name`, `dst.pod.namespace`, `src.service.name`, etc.
|
||||
- Protocol booleans: `http`, `dns`, `redis`, `kafka`, `tls`, `grpc`, `amqp`, `ws`
|
||||
- HTTP fields: `url`, `method`, `status_code`, `path`, `request.headers`, `response.headers`,
|
||||
`request_body_size`, `response_body_size`, `elapsed_time` (microseconds)
|
||||
- DNS fields: `dns_questions`, `dns_answers`, `dns_question_types`
|
||||
- Operators: `==`, `!=`, `<`, `>`, `&&`, `||`, `in`
|
||||
- String functions: `.contains()`, `.startsWith()`, `.endsWith()`, `.matches()` (regex)
|
||||
- Collection: `size()`, `[index]`, `[key]`
|
||||
- Full reference: https://docs.kubeshark.com/en/v2/kfl2
|
||||
|
||||
### Key Kubeshark concepts for skill authors
|
||||
|
||||
- **eBPF capture**: Kernel-level, no sidecars/proxies. Decrypts TLS without private keys.
|
||||
- **Protocols**: HTTP, gRPC, GraphQL, WebSocket, Kafka, Redis, AMQP, DNS, and more.
|
||||
- **Raw capture**: FIFO buffer per node. Must be enabled for retrospective analysis.
|
||||
- **Snapshots**: Immutable freeze of traffic in a time window. Includes raw capture files,
|
||||
K8s pod events, and eBPF cgroup events.
|
||||
- **Dissection**: The "indexing" step. Reconstructs raw packets into structured L7 API calls.
|
||||
Think of it like a search engine indexing web pages — without dissection you have PCAPs,
|
||||
with dissection you have a queryable database. Kubeshark is the search engine for network traffic.
|
||||
- **Cloud storage**: Snapshots can be uploaded to S3/GCS/Azure and downloaded to any cluster.
|
||||
A production snapshot can be analyzed on a local KinD cluster.
|
||||
|
||||
### Current skills
|
||||
|
||||
- `skills/network-rca/` — Network Root Cause Analysis. Retrospective traffic analysis via
|
||||
snapshots, dissection, KFL queries, PCAP extraction, trend comparison.
|
||||
- `skills/kfl/` — KFL2 (Kubeshark Filter Language) expert. Complete reference for writing,
|
||||
debugging, and optimizing CEL-based traffic filters across all supported protocols.
|
||||
|
||||
### Planned skills (not yet created)
|
||||
|
||||
- `skills/api-security/` — OWASP API Top 10 assessment against live or snapshot traffic.
|
||||
- `skills/incident-response/` — 7-phase forensic incident investigation methodology.
|
||||
- `skills/network-engineering/` — Real-time traffic analysis, latency debugging, dependency mapping.
|
||||
78
Makefile
78
Makefile
@@ -137,6 +137,16 @@ test-integration-short: ## Run quick integration tests (skips long-running tests
|
||||
rm -f $$LOG_FILE; \
|
||||
exit $$status
|
||||
|
||||
helm-test: ## Run Helm lint and unit tests.
|
||||
helm lint ./helm-chart
|
||||
helm unittest ./helm-chart
|
||||
|
||||
helm-test-full: helm-test ## Run Helm tests with kubeconform schema validation.
|
||||
helm template kubeshark ./helm-chart | kubeconform -strict -kubernetes-version 1.35.0 -summary
|
||||
helm template kubeshark ./helm-chart -f ./helm-chart/tests/fixtures/values-s3.yaml | kubeconform -strict -kubernetes-version 1.35.0 -summary
|
||||
helm template kubeshark ./helm-chart -f ./helm-chart/tests/fixtures/values-azblob.yaml | kubeconform -strict -kubernetes-version 1.35.0 -summary
|
||||
helm template kubeshark ./helm-chart -f ./helm-chart/tests/fixtures/values-gcs.yaml | kubeconform -strict -kubernetes-version 1.35.0 -summary
|
||||
|
||||
lint: ## Lint the source code.
|
||||
golangci-lint run
|
||||
|
||||
@@ -242,31 +252,75 @@ proxy:
|
||||
port-forward:
|
||||
kubectl port-forward $$(kubectl get pods | awk '$$1 ~ /^$(POD_PREFIX)/' | awk 'END {print $$1}') $(SRC_PORT):$(DST_PORT)
|
||||
|
||||
release:
|
||||
release: ## Print release workflow instructions.
|
||||
@echo "Release workflow (2 steps):"
|
||||
@echo ""
|
||||
@echo " 1. make release-pr VERSION=x.y.z"
|
||||
@echo " Tags sibling repos, bumps version, creates PRs"
|
||||
@echo " (kubeshark + kubeshark.github.io helm chart)."
|
||||
@echo " Review and merge both PRs manually."
|
||||
@echo ""
|
||||
@echo " 2. (automatic) Tag is created when release PR merges."
|
||||
@echo " Fallback: make release-tag VERSION=x.y.z"
|
||||
|
||||
release-pr: ## Step 1: Tag sibling repos, bump version, create release PR.
|
||||
@cd ../worker && git checkout master && git pull && git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags
|
||||
@cd ../tracer && git checkout master && git pull && git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags
|
||||
@cd ../hub && git checkout master && git pull && git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags
|
||||
@cd ../front && git checkout master && git pull && git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags
|
||||
@cd ../kubeshark && git checkout master && git pull && sed -i "s/^version:.*/version: \"$(shell echo $(VERSION) | sed -E 's/^([0-9]+\.[0-9]+\.[0-9]+)\..*/\1/')\"/" helm-chart/Chart.yaml && make
|
||||
@cd ../kubeshark && git checkout master && git pull
|
||||
@sed -i "s/^version:.*/version: \"$(shell echo $(VERSION) | sed -E 's/^([0-9]+\.[0-9]+\.[0-9]+)\..*/\1/')\"/" helm-chart/Chart.yaml
|
||||
@$(MAKE) build VER=$(VERSION)
|
||||
@if [ "$(shell uname)" = "Darwin" ]; then \
|
||||
codesign --sign - --force --preserve-metadata=entitlements,requirements,flags,runtime ./bin/kubeshark__; \
|
||||
fi
|
||||
@make generate-helm-values && make generate-manifests
|
||||
@git add -A . && git commit -m ":bookmark: Bump the Helm chart version to $(VERSION)" && git push
|
||||
@git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags
|
||||
@rm -rf ../kubeshark.github.io/charts/chart && mkdir ../kubeshark.github.io/charts/chart && cp -r helm-chart/ ../kubeshark.github.io/charts/chart/
|
||||
@cd ../kubeshark.github.io/ && git add -A . && git commit -m ":sparkles: Update the Helm chart" && git push
|
||||
@$(MAKE) generate-helm-values && $(MAKE) generate-manifests
|
||||
@git checkout -b release/v$(VERSION)
|
||||
@git add -A .
|
||||
@git commit -m ":bookmark: Bump the Helm chart version to $(VERSION)"
|
||||
@git push -u origin release/v$(VERSION)
|
||||
@gh pr create --title ":bookmark: Release v$(VERSION)" \
|
||||
--body "Automated release PR for v$(VERSION)." \
|
||||
--base master \
|
||||
--reviewer corest
|
||||
@rm -rf ../kubeshark.github.io/charts/chart
|
||||
@mkdir ../kubeshark.github.io/charts/chart
|
||||
@cp -r helm-chart/ ../kubeshark.github.io/charts/chart/
|
||||
@cd ../kubeshark.github.io && git checkout master && git pull \
|
||||
&& git checkout -b helm-v$(VERSION) \
|
||||
&& git add -A . \
|
||||
&& git commit -m ":sparkles: Update the Helm chart to v$(VERSION)" \
|
||||
&& git push -u origin helm-v$(VERSION) \
|
||||
&& gh pr create --title ":sparkles: Helm chart v$(VERSION)" \
|
||||
--body "Update Helm chart for release v$(VERSION)." \
|
||||
--base master \
|
||||
--reviewer corest
|
||||
@cd ../kubeshark
|
||||
@echo ""
|
||||
@echo "Release PRs created:"
|
||||
@echo " - kubeshark: Review and merge the release PR."
|
||||
@echo " - kubeshark.github.io: Review and merge the helm chart PR."
|
||||
@echo "Tag will be created automatically, or run: make release-tag VERSION=$(VERSION)"
|
||||
|
||||
release-tag: ## Step 2 (fallback): Tag master after release PR is merged.
|
||||
@echo "Verifying release PR was merged..."
|
||||
@if ! gh pr list --state merged --head release/v$(VERSION) --json number --jq '.[0].number' | grep -q .; then \
|
||||
echo "Error: No merged PR found for release/v$(VERSION). Merge the PR first."; \
|
||||
exit 1; \
|
||||
fi
|
||||
@git checkout master && git pull
|
||||
@git tag -d v$(VERSION) 2>/dev/null; git tag v$(VERSION) && git push origin --tags
|
||||
@echo ""
|
||||
@echo "Tagged v$(VERSION) on master. GitHub Actions will build the release."
|
||||
|
||||
release-dry-run:
|
||||
@cd ../worker && git checkout master && git pull
|
||||
@cd ../tracer && git checkout master && git pull
|
||||
# @cd ../tracer && git checkout master && git pull
|
||||
@cd ../hub && git checkout master && git pull
|
||||
@cd ../front && git checkout master && git pull
|
||||
@cd ../kubeshark && sed -i "s/^version:.*/version: \"$(shell echo $(VERSION) | sed -E 's/^([0-9]+\.[0-9]+\.[0-9]+)\..*/\1/')\"/" helm-chart/Chart.yaml && make
|
||||
@if [ "$(shell uname)" = "Darwin" ]; then \
|
||||
codesign --sign - --force --preserve-metadata=entitlements,requirements,flags,runtime ./bin/kubeshark__; \
|
||||
fi
|
||||
# @if [ "$(shell uname)" = "Darwin" ]; then \
|
||||
# codesign --sign - --force --preserve-metadata=entitlements,requirements,flags,runtime ./bin/kubeshark__; \
|
||||
# fi
|
||||
@make generate-helm-values && make generate-manifests
|
||||
@rm -rf ../kubeshark.github.io/charts/chart && mkdir ../kubeshark.github.io/charts/chart && cp -r helm-chart/ ../kubeshark.github.io/charts/chart/
|
||||
@cd ../kubeshark.github.io/
|
||||
|
||||
196
README.md
196
README.md
@@ -1,120 +1,132 @@
|
||||
<p align="center">
|
||||
<img src="https://raw.githubusercontent.com/kubeshark/assets/master/svg/kubeshark-logo.svg" alt="Kubeshark: Traffic analyzer for Kubernetes." height="128px"/>
|
||||
<img src="https://raw.githubusercontent.com/kubeshark/assets/master/svg/kubeshark-logo.svg" alt="Kubeshark" height="120px"/>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://github.com/kubeshark/kubeshark/releases/latest">
|
||||
<img alt="GitHub Latest Release" src="https://img.shields.io/github/v/release/kubeshark/kubeshark?logo=GitHub&style=flat-square">
|
||||
</a>
|
||||
<a href="https://hub.docker.com/r/kubeshark/worker">
|
||||
<img alt="Docker pulls" src="https://img.shields.io/docker/pulls/kubeshark/worker?color=%23099cec&logo=Docker&style=flat-square">
|
||||
</a>
|
||||
<a href="https://hub.docker.com/r/kubeshark/worker">
|
||||
<img alt="Image size" src="https://img.shields.io/docker/image-size/kubeshark/kubeshark/latest?logo=Docker&style=flat-square">
|
||||
</a>
|
||||
<a href="https://discord.gg/WkvRGMUcx7">
|
||||
<img alt="Discord" src="https://img.shields.io/discord/1042559155224973352?logo=Discord&style=flat-square&label=discord">
|
||||
</a>
|
||||
<a href="https://join.slack.com/t/kubeshark/shared_invite/zt-3jdcdgxdv-1qNkhBh9c6CFoE7bSPkpBQ">
|
||||
<img alt="Slack" src="https://img.shields.io/badge/slack-join_chat-green?logo=Slack&style=flat-square&label=slack">
|
||||
</a>
|
||||
<a href="https://github.com/kubeshark/kubeshark/releases/latest"><img alt="Release" src="https://img.shields.io/github/v/release/kubeshark/kubeshark?logo=GitHub&style=flat-square"></a>
|
||||
<a href="https://hub.docker.com/r/kubeshark/worker"><img alt="Docker pulls" src="https://img.shields.io/docker/pulls/kubeshark/worker?color=%23099cec&logo=Docker&style=flat-square"></a>
|
||||
<a href="https://discord.gg/WkvRGMUcx7"><img alt="Discord" src="https://img.shields.io/discord/1042559155224973352?logo=Discord&style=flat-square&label=discord"></a>
|
||||
<a href="https://join.slack.com/t/kubeshark/shared_invite/zt-3jdcdgxdv-1qNkhBh9c6CFoE7bSPkpBQ"><img alt="Slack" src="https://img.shields.io/badge/slack-join_chat-green?logo=Slack&style=flat-square"></a>
|
||||
</p>
|
||||
|
||||
<p align="center"><b>Network Observability for SREs & AI Agents</b></p>
|
||||
|
||||
<p align="center">
|
||||
<b>
|
||||
Want to see Kubeshark in action right now? Visit this
|
||||
<a href="https://demo.kubeshark.com/">live demo deployment</a> of Kubeshark.
|
||||
</b>
|
||||
<a href="https://demo.kubeshark.com/">Live Demo</a> · <a href="https://docs.kubeshark.com">Docs</a>
|
||||
</p>
|
||||
|
||||
**Kubeshark** is an API traffic analyzer for Kubernetes, providing deep packet inspection with complete API and Kubernetes contexts, retaining cluster-wide L4 traffic (PCAP), and using minimal production compute resources.
|
||||
---
|
||||
|
||||

|
||||
Kubeshark captures cluster-wide network traffic at the speed and scale of Kubernetes, continuously, at the kernel level using eBPF. It consolidates a highly fragmented picture — dozens of nodes, thousands of workloads, millions of connections — into a single, queryable view with full Kubernetes and API context.
|
||||
|
||||
Think [TCPDump](https://en.wikipedia.org/wiki/Tcpdump) and [Wireshark](https://www.wireshark.org/) reimagined for Kubernetes.
|
||||
Network data is available to **AI agents via [MCP](https://docs.kubeshark.com/en/mcp)** and to **human operators via a [dashboard](https://docs.kubeshark.com/en/v2)**.
|
||||
|
||||
Access cluster-wide PCAP traffic by pressing a single button, without the need to install `tcpdump` or manually copy files. Understand the traffic context in relation to the API and Kubernetes contexts.
|
||||
**What's captured, cluster-wide:**
|
||||
|
||||
#### Service-Map w/Kubernetes Context
|
||||
- **L4 Packets & TCP Metrics** — retransmissions, RTT, window saturation, connection lifecycle, packet loss across every node-to-node path ([TCP insights →](https://docs.kubeshark.com/en/mcp/tcp_insights))
|
||||
- **L7 API Calls** — real-time request/response matching with full payload parsing: HTTP, gRPC, GraphQL, Redis, Kafka, DNS ([API dissection →](https://docs.kubeshark.com/en/v2/l7_api_dissection))
|
||||
- **Decrypted TLS** — eBPF-based TLS decryption without key management
|
||||
- **Kubernetes Context** — every packet and API call resolved to pod, service, namespace, and node
|
||||
- **PCAP Retention** — point-in-time raw packet snapshots, exportable for Wireshark ([Snapshots →](https://docs.kubeshark.com/en/v2/traffic_snapshots))
|
||||
|
||||

|
||||

|
||||
|
||||
#### Export Cluster-Wide L4 Traffic (PCAP)
|
||||
---
|
||||
|
||||
Imagine having a cluster-wide [TCPDump](https://www.tcpdump.org/)-like capability—exporting a single [PCAP](https://www.ietf.org/archive/id/draft-gharris-opsawg-pcap-01.html) file that consolidates traffic from multiple nodes, all accessible with a single click.
|
||||
## Get Started
|
||||
|
||||
1. Go to the **Snapshots** tab
|
||||
2. Create a new snapshot
|
||||
3. **Optionally** select the nodes (default: all nodes)
|
||||
4. **Optionally** select the time frame (default: last one hour)
|
||||
5. Press **Create**
|
||||
|
||||
<img width="3342" height="1206" alt="image" src="https://github.com/user-attachments/assets/e8e47996-52b7-4028-9698-f059a13ffdb7" />
|
||||
|
||||
|
||||
Once the snapshot is ready, click the PCAP file to export its contents and open it in Wireshark.
|
||||
|
||||
#### AI-Powered Network Analysis (MCP)
|
||||
|
||||
Connect your AI assistant to Kubeshark and query your cluster's network traffic using natural language. Kubeshark implements the [Model Context Protocol (MCP)](https://modelcontextprotocol.io/)—an open standard for connecting AI assistants to external data sources.
|
||||
|
||||
```shell
|
||||
# Add Kubeshark to Claude Code
|
||||
claude mcp add kubeshark -- kubeshark mcp --proxy
|
||||
|
||||
# Then ask questions like:
|
||||
# "Show me all HTTP 500 errors in the last hour"
|
||||
# "Which services communicate with payment-service?"
|
||||
# "Investigate why checkout is failing"
|
||||
```
|
||||
|
||||
**What AI can access:**
|
||||
- L7 API transactions (HTTP, gRPC, Redis, Kafka, etc.) with full request/response payloads
|
||||
- L4 TCP/UDP flows with connection metrics and TCP handshake RTT
|
||||
- Kubernetes context for every request (pod, service, namespace)
|
||||
- Snapshots and PCAP exports for forensic analysis
|
||||
|
||||
Works with Claude Code, Claude Desktop, Cursor, GitHub Copilot, and any MCP-compatible AI assistant. See the [MCP documentation](https://docs.kubeshark.com/en/mcp) for setup guides and use cases.
|
||||
|
||||
## Getting Started
|
||||
Download **Kubeshark**'s binary distribution [latest release](https://github.com/kubeshark/kubeshark/releases/latest) or use one of the following methods to deploy **Kubeshark**. The [web-based dashboard](https://docs.kubeshark.com/en/ui) should open in your browser, showing a real-time view of your cluster's traffic.
|
||||
|
||||
### Homebrew
|
||||
|
||||
[Homebrew](https://brew.sh/) :beer: users can install the Kubeshark CLI with:
|
||||
|
||||
```shell
|
||||
brew install kubeshark
|
||||
kubeshark tap
|
||||
```
|
||||
|
||||
To clean up:
|
||||
```shell
|
||||
kubeshark clean
|
||||
```
|
||||
|
||||
### Helm
|
||||
|
||||
Add the Helm repository and install the chart:
|
||||
|
||||
```shell
|
||||
```bash
|
||||
helm repo add kubeshark https://helm.kubeshark.com
|
||||
helm install kubeshark kubeshark/kubeshark
|
||||
```
|
||||
Follow the on-screen instructions how to connect to the dashboard.
|
||||
|
||||
To clean up:
|
||||
```shell
|
||||
helm uninstall kubeshark
|
||||
Dashboard opens automatically. You're capturing traffic.
|
||||
|
||||
**Connect an AI agent** via MCP:
|
||||
|
||||
```bash
|
||||
brew install kubeshark
|
||||
claude mcp add kubeshark -- kubeshark mcp
|
||||
```
|
||||
|
||||
## Building From Source
|
||||
[MCP setup guide →](https://docs.kubeshark.com/en/mcp)
|
||||
|
||||
Clone this repository and run the `make` command to build it. After the build is complete, the executable can be found at `./bin/kubeshark`.
|
||||
---
|
||||
|
||||
## Documentation
|
||||
### AI-Powered Network Analysis
|
||||
|
||||
To learn more, read the [documentation](https://docs.kubeshark.com).
|
||||
Kubeshark exposes all cluster-wide network data via MCP (Model Context Protocol). AI agents can query L4 metrics, investigate L7 API calls, analyze traffic patterns, and run root cause analysis — through natural language. Use cases include incident response, root cause analysis, troubleshooting, debugging, and reliability workflows.
|
||||
|
||||
> *"Why did checkout fail at 2:15 PM?"*
|
||||
> *"Which services have error rates above 1%?"*
|
||||
> *"Show TCP retransmission rates across all node-to-node paths"*
|
||||
> *"Trace request abc123 through all services"*
|
||||
|
||||
Works with Claude Code, Cursor, and any MCP-compatible AI.
|
||||
|
||||

|
||||
|
||||
[MCP setup guide →](https://docs.kubeshark.com/en/mcp)
|
||||
|
||||
---
|
||||
|
||||
### L7 API Dissection
|
||||
|
||||
Cluster-wide request/response matching with full payloads, parsed according to protocol specifications. HTTP, gRPC, Redis, Kafka, DNS, and more. Every API call resolved to source and destination pod, service, namespace, and node. No code instrumentation required.
|
||||
|
||||

|
||||
|
||||
[Learn more →](https://docs.kubeshark.com/en/v2/l7_api_dissection)
|
||||
|
||||
### L4/L7 Workload Map
|
||||
|
||||
Cluster-wide view of service communication: dependencies, traffic flow, and anomalies across all nodes and namespaces.
|
||||
|
||||

|
||||
|
||||
[Learn more →](https://docs.kubeshark.com/en/v2/service_map)
|
||||
|
||||
### Traffic Retention
|
||||
|
||||
Continuous raw packet capture with point-in-time snapshots. Export PCAP files for offline analysis with Wireshark or other tools.
|
||||
|
||||

|
||||
|
||||
[Snapshots guide →](https://docs.kubeshark.com/en/v2/traffic_snapshots)
|
||||
|
||||
---
|
||||
|
||||
## Features
|
||||
|
||||
| Feature | Description |
|
||||
|---------|-------------|
|
||||
| [**Raw Capture**](https://docs.kubeshark.com/en/v2/raw_capture) | Continuous cluster-wide packet capture with minimal overhead |
|
||||
| [**Traffic Snapshots**](https://docs.kubeshark.com/en/v2/traffic_snapshots) | Point-in-time snapshots, export as PCAP for Wireshark |
|
||||
| [**L7 API Dissection**](https://docs.kubeshark.com/en/v2/l7_api_dissection) | Request/response matching with full payloads and protocol parsing |
|
||||
| [**Protocol Support**](https://docs.kubeshark.com/en/protocols) | HTTP, gRPC, GraphQL, Redis, Kafka, DNS, and more |
|
||||
| [**TLS Decryption**](https://docs.kubeshark.com/en/encrypted_traffic) | eBPF-based decryption without key management |
|
||||
| [**AI-Powered Analysis**](https://docs.kubeshark.com/en/v2/ai_powered_analysis) | Query cluster-wide network data with Claude, Cursor, or any MCP-compatible AI |
|
||||
| [**Display Filters**](https://docs.kubeshark.com/en/v2/kfl2) | Wireshark-inspired display filters for precise traffic analysis |
|
||||
| [**100% On-Premises**](https://docs.kubeshark.com/en/air_gapped) | Air-gapped support, no external dependencies |
|
||||
|
||||
---
|
||||
|
||||
## Install
|
||||
|
||||
| Method | Command |
|
||||
|--------|---------|
|
||||
| Helm | `helm repo add kubeshark https://helm.kubeshark.com && helm install kubeshark kubeshark/kubeshark` |
|
||||
| Homebrew | `brew install kubeshark && kubeshark tap` |
|
||||
| Binary | [Download](https://github.com/kubeshark/kubeshark/releases/latest) |
|
||||
|
||||
[Installation guide →](https://docs.kubeshark.com/en/install)
|
||||
|
||||
---
|
||||
|
||||
## Contributing
|
||||
|
||||
We :heart: pull requests! See [CONTRIBUTING.md](CONTRIBUTING.md) for the contribution guide.
|
||||
We welcome contributions. See [CONTRIBUTING.md](CONTRIBUTING.md).
|
||||
|
||||
## License
|
||||
|
||||
[Apache-2.0](LICENSE)
|
||||
|
||||
169
cmd/mcpRunner.go
169
cmd/mcpRunner.go
@@ -10,6 +10,7 @@ import (
|
||||
"net/http"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
@@ -324,6 +325,16 @@ func (s *mcpServer) invalidateHubMCPCache() {
|
||||
s.cachedHubMCP = nil
|
||||
}
|
||||
|
||||
// getBaseURL returns the hub API base URL by stripping /mcp from hubBaseURL.
|
||||
// The hub URL is always the frontend URL + /api, and hubBaseURL is frontendURL/api/mcp.
|
||||
// Ensures backend connection is established first.
|
||||
func (s *mcpServer) getBaseURL() (string, error) {
|
||||
if errMsg := s.ensureBackendConnection(); errMsg != "" {
|
||||
return "", fmt.Errorf("%s", errMsg)
|
||||
}
|
||||
return strings.TrimSuffix(s.hubBaseURL, "/mcp"), nil
|
||||
}
|
||||
|
||||
func writeErrorToStderr(format string, args ...any) {
|
||||
fmt.Fprintf(os.Stderr, format+"\n", args...)
|
||||
}
|
||||
@@ -379,6 +390,14 @@ func (s *mcpServer) handleRequest(req *jsonRPCRequest) {
|
||||
|
||||
func (s *mcpServer) handleInitialize(req *jsonRPCRequest) {
|
||||
var instructions string
|
||||
fileDownloadInstructions := `
|
||||
|
||||
Downloading files (e.g., PCAP exports):
|
||||
When a tool like export_snapshot_pcap returns a relative file path, you MUST use the file tools to retrieve the file:
|
||||
- get_file_url: Resolves the relative path to a full download URL you can share with the user.
|
||||
- download_file: Downloads the file to the local filesystem so it can be opened or analyzed.
|
||||
Typical workflow: call export_snapshot_pcap → receive a relative path → call download_file with that path → share the local file path with the user.`
|
||||
|
||||
if s.urlMode {
|
||||
instructions = fmt.Sprintf(`Kubeshark MCP Server - Connected to: %s
|
||||
|
||||
@@ -392,7 +411,7 @@ Available tools for traffic analysis:
|
||||
- get_api_stats: Get aggregated API statistics
|
||||
- And more - use tools/list to see all available tools
|
||||
|
||||
Use the MCP tools directly - do NOT use kubectl or curl to access Kubeshark.`, s.directURL)
|
||||
Use the MCP tools directly - do NOT use kubectl or curl to access Kubeshark.`, s.directURL) + fileDownloadInstructions
|
||||
} else if s.allowDestructive {
|
||||
instructions = `Kubeshark MCP Server - Proxy Mode (Destructive Operations ENABLED)
|
||||
|
||||
@@ -410,7 +429,7 @@ Safe operations:
|
||||
Traffic analysis tools (require Kubeshark to be running):
|
||||
- list_workloads, list_api_calls, list_l4_flows, get_api_stats, and more
|
||||
|
||||
Use the MCP tools - do NOT use kubectl, helm, or curl directly.`
|
||||
Use the MCP tools - do NOT use kubectl, helm, or curl directly.` + fileDownloadInstructions
|
||||
} else {
|
||||
instructions = `Kubeshark MCP Server - Proxy Mode (Read-Only)
|
||||
|
||||
@@ -425,7 +444,7 @@ Available operations:
|
||||
Traffic analysis tools (require Kubeshark to be running):
|
||||
- list_workloads, list_api_calls, list_l4_flows, get_api_stats, and more
|
||||
|
||||
Use the MCP tools - do NOT use kubectl, helm, or curl directly.`
|
||||
Use the MCP tools - do NOT use kubectl, helm, or curl directly.` + fileDownloadInstructions
|
||||
}
|
||||
|
||||
result := mcpInitializeResult{
|
||||
@@ -456,6 +475,40 @@ func (s *mcpServer) handleListTools(req *jsonRPCRequest) {
|
||||
}`),
|
||||
})
|
||||
|
||||
// Add file URL and download tools - available in all modes
|
||||
tools = append(tools, mcpTool{
|
||||
Name: "get_file_url",
|
||||
Description: "When a tool (e.g., export_snapshot_pcap) returns a relative file path, use this tool to resolve it into a fully-qualified download URL. The URL can be shared with the user for manual download.",
|
||||
InputSchema: json.RawMessage(`{
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"path": {
|
||||
"type": "string",
|
||||
"description": "The relative file path returned by a Hub tool (e.g., '/snapshots/abc/data.pcap')"
|
||||
}
|
||||
},
|
||||
"required": ["path"]
|
||||
}`),
|
||||
})
|
||||
tools = append(tools, mcpTool{
|
||||
Name: "download_file",
|
||||
Description: "When a tool (e.g., export_snapshot_pcap) returns a relative file path, use this tool to download the file to the local filesystem. This is the preferred way to retrieve PCAP exports and other files from Kubeshark.",
|
||||
InputSchema: json.RawMessage(`{
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"path": {
|
||||
"type": "string",
|
||||
"description": "The relative file path returned by a Hub tool (e.g., '/snapshots/abc/data.pcap')"
|
||||
},
|
||||
"dest": {
|
||||
"type": "string",
|
||||
"description": "Local destination file path. If not provided, uses the filename from the path in the current directory."
|
||||
}
|
||||
},
|
||||
"required": ["path"]
|
||||
}`),
|
||||
})
|
||||
|
||||
// Add destructive tools only if --allow-destructive flag was set (and not in URL mode)
|
||||
if !s.urlMode && s.allowDestructive {
|
||||
tools = append(tools, mcpTool{
|
||||
@@ -653,6 +706,20 @@ func (s *mcpServer) handleCallTool(req *jsonRPCRequest) {
|
||||
IsError: isError,
|
||||
})
|
||||
return
|
||||
case "get_file_url":
|
||||
result, isError = s.callGetFileURL(params.Arguments)
|
||||
s.sendResult(req.ID, mcpCallToolResult{
|
||||
Content: []mcpContent{{Type: "text", Text: result}},
|
||||
IsError: isError,
|
||||
})
|
||||
return
|
||||
case "download_file":
|
||||
result, isError = s.callDownloadFile(params.Arguments)
|
||||
s.sendResult(req.ID, mcpCallToolResult{
|
||||
Content: []mcpContent{{Type: "text", Text: result}},
|
||||
IsError: isError,
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// Forward Hub tools to the API
|
||||
@@ -671,7 +738,7 @@ func (s *mcpServer) callHubTool(toolName string, args map[string]any) (string, b
|
||||
|
||||
// Build the request body
|
||||
requestBody := map[string]any{
|
||||
"tool": toolName,
|
||||
"name": toolName,
|
||||
"arguments": args,
|
||||
}
|
||||
|
||||
@@ -706,6 +773,91 @@ func (s *mcpServer) callHubTool(toolName string, args map[string]any) (string, b
|
||||
}
|
||||
|
||||
|
||||
func (s *mcpServer) callGetFileURL(args map[string]any) (string, bool) {
|
||||
filePath, _ := args["path"].(string)
|
||||
if filePath == "" {
|
||||
return "Error: 'path' parameter is required", true
|
||||
}
|
||||
|
||||
baseURL, err := s.getBaseURL()
|
||||
if err != nil {
|
||||
return fmt.Sprintf("Error: %v", err), true
|
||||
}
|
||||
|
||||
// Ensure path starts with /
|
||||
if !strings.HasPrefix(filePath, "/") {
|
||||
filePath = "/" + filePath
|
||||
}
|
||||
|
||||
fullURL := strings.TrimSuffix(baseURL, "/") + filePath
|
||||
return fullURL, false
|
||||
}
|
||||
|
||||
func (s *mcpServer) callDownloadFile(args map[string]any) (string, bool) {
|
||||
filePath, _ := args["path"].(string)
|
||||
if filePath == "" {
|
||||
return "Error: 'path' parameter is required", true
|
||||
}
|
||||
|
||||
baseURL, err := s.getBaseURL()
|
||||
if err != nil {
|
||||
return fmt.Sprintf("Error: %v", err), true
|
||||
}
|
||||
|
||||
// Ensure path starts with /
|
||||
if !strings.HasPrefix(filePath, "/") {
|
||||
filePath = "/" + filePath
|
||||
}
|
||||
|
||||
fullURL := strings.TrimSuffix(baseURL, "/") + filePath
|
||||
|
||||
// Determine destination file path
|
||||
dest, _ := args["dest"].(string)
|
||||
if dest == "" {
|
||||
dest = path.Base(filePath)
|
||||
}
|
||||
|
||||
// Use a dedicated HTTP client for file downloads.
|
||||
// The default s.httpClient has a 30s total timeout which would fail for large files (up to 10GB).
|
||||
// This client sets only connection-level timeouts and lets the body stream without a deadline.
|
||||
downloadClient := &http.Client{
|
||||
Transport: &http.Transport{
|
||||
TLSHandshakeTimeout: 10 * time.Second,
|
||||
ResponseHeaderTimeout: 30 * time.Second,
|
||||
},
|
||||
}
|
||||
|
||||
resp, err := downloadClient.Get(fullURL)
|
||||
if err != nil {
|
||||
return fmt.Sprintf("Error downloading file: %v", err), true
|
||||
}
|
||||
defer func() { _ = resp.Body.Close() }()
|
||||
|
||||
if resp.StatusCode >= 400 {
|
||||
return fmt.Sprintf("Error downloading file: HTTP %d", resp.StatusCode), true
|
||||
}
|
||||
|
||||
// Write to destination
|
||||
outFile, err := os.Create(dest)
|
||||
if err != nil {
|
||||
return fmt.Sprintf("Error creating file %s: %v", dest, err), true
|
||||
}
|
||||
defer func() { _ = outFile.Close() }()
|
||||
|
||||
written, err := io.Copy(outFile, resp.Body)
|
||||
if err != nil {
|
||||
return fmt.Sprintf("Error writing file %s: %v", dest, err), true
|
||||
}
|
||||
|
||||
result := map[string]any{
|
||||
"url": fullURL,
|
||||
"path": dest,
|
||||
"size": written,
|
||||
}
|
||||
resultBytes, _ := json.MarshalIndent(result, "", " ")
|
||||
return string(resultBytes), false
|
||||
}
|
||||
|
||||
func (s *mcpServer) callStartKubeshark(args map[string]any) (string, bool) {
|
||||
// Build the kubeshark tap command
|
||||
cmdArgs := []string{"tap"}
|
||||
@@ -913,6 +1065,11 @@ func listMCPTools(directURL string) {
|
||||
fmt.Printf("URL Mode: %s\n\n", directURL)
|
||||
fmt.Println("Cluster management tools disabled (Kubeshark managed externally)")
|
||||
fmt.Println()
|
||||
fmt.Println("Local Tools:")
|
||||
fmt.Println(" check_kubeshark_status Check if Kubeshark is running")
|
||||
fmt.Println(" get_file_url Resolve a relative path to a full download URL")
|
||||
fmt.Println(" download_file Download a file from Kubeshark to local disk")
|
||||
fmt.Println()
|
||||
|
||||
hubURL := strings.TrimSuffix(directURL, "/") + "/api/mcp"
|
||||
fetchAndDisplayTools(hubURL, 30*time.Second)
|
||||
@@ -925,6 +1082,10 @@ func listMCPTools(directURL string) {
|
||||
fmt.Println(" start_kubeshark Start Kubeshark to capture traffic")
|
||||
fmt.Println(" stop_kubeshark Stop Kubeshark and clean up resources")
|
||||
fmt.Println()
|
||||
fmt.Println("File Tools:")
|
||||
fmt.Println(" get_file_url Resolve a relative path to a full download URL")
|
||||
fmt.Println(" download_file Download a file from Kubeshark to local disk")
|
||||
fmt.Println()
|
||||
|
||||
// Establish proxy connection to Kubeshark
|
||||
fmt.Println("Connecting to Kubeshark...")
|
||||
|
||||
205
cmd/mcp_test.go
205
cmd/mcp_test.go
@@ -5,6 +5,8 @@ import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
@@ -126,8 +128,18 @@ func TestMCP_ToolsList_CLIOnly(t *testing.T) {
|
||||
t.Fatalf("Unexpected error: %v", resp.Error)
|
||||
}
|
||||
tools := resp.Result.(map[string]any)["tools"].([]any)
|
||||
if len(tools) != 1 || tools[0].(map[string]any)["name"] != "check_kubeshark_status" {
|
||||
t.Error("Expected only check_kubeshark_status tool")
|
||||
// Should have check_kubeshark_status + get_file_url + download_file = 3 tools
|
||||
if len(tools) != 3 {
|
||||
t.Errorf("Expected 3 tools, got %d", len(tools))
|
||||
}
|
||||
toolNames := make(map[string]bool)
|
||||
for _, tool := range tools {
|
||||
toolNames[tool.(map[string]any)["name"].(string)] = true
|
||||
}
|
||||
for _, expected := range []string{"check_kubeshark_status", "get_file_url", "download_file"} {
|
||||
if !toolNames[expected] {
|
||||
t.Errorf("Missing expected tool: %s", expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -163,9 +175,9 @@ func TestMCP_ToolsList_WithHubBackend(t *testing.T) {
|
||||
t.Fatalf("Unexpected error: %v", resp.Error)
|
||||
}
|
||||
tools := resp.Result.(map[string]any)["tools"].([]any)
|
||||
// Should have CLI tools (3) + Hub tools (2) = 5 tools
|
||||
if len(tools) < 5 {
|
||||
t.Errorf("Expected at least 5 tools, got %d", len(tools))
|
||||
// Should have CLI tools (3) + file tools (2) + Hub tools (2) = 7 tools
|
||||
if len(tools) < 7 {
|
||||
t.Errorf("Expected at least 7 tools, got %d", len(tools))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -218,7 +230,7 @@ func newTestMCPServerWithMockBackend(handler http.HandlerFunc) (*mcpServer, *htt
|
||||
}
|
||||
|
||||
type hubToolCallRequest struct {
|
||||
Tool string `json:"tool"`
|
||||
Tool string `json:"name"`
|
||||
Arguments map[string]any `json:"arguments"`
|
||||
}
|
||||
|
||||
@@ -463,6 +475,187 @@ func TestMCP_BackendInitialization_Concurrent(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_GetFileURL_ProxyMode(t *testing.T) {
|
||||
s := &mcpServer{
|
||||
httpClient: &http.Client{},
|
||||
stdin: &bytes.Buffer{},
|
||||
stdout: &bytes.Buffer{},
|
||||
hubBaseURL: "http://127.0.0.1:8899/api/mcp",
|
||||
backendInitialized: true,
|
||||
}
|
||||
resp := parseResponse(t, sendRequest(s, "tools/call", 1, mcpCallToolParams{
|
||||
Name: "get_file_url",
|
||||
Arguments: map[string]any{"path": "/snapshots/abc/data.pcap"},
|
||||
}))
|
||||
if resp.Error != nil {
|
||||
t.Fatalf("Unexpected error: %v", resp.Error)
|
||||
}
|
||||
text := resp.Result.(map[string]any)["content"].([]any)[0].(map[string]any)["text"].(string)
|
||||
expected := "http://127.0.0.1:8899/api/snapshots/abc/data.pcap"
|
||||
if text != expected {
|
||||
t.Errorf("Expected %q, got %q", expected, text)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_GetFileURL_URLMode(t *testing.T) {
|
||||
s := &mcpServer{
|
||||
httpClient: &http.Client{},
|
||||
stdin: &bytes.Buffer{},
|
||||
stdout: &bytes.Buffer{},
|
||||
hubBaseURL: "https://kubeshark.example.com/api/mcp",
|
||||
backendInitialized: true,
|
||||
urlMode: true,
|
||||
directURL: "https://kubeshark.example.com",
|
||||
}
|
||||
resp := parseResponse(t, sendRequest(s, "tools/call", 1, mcpCallToolParams{
|
||||
Name: "get_file_url",
|
||||
Arguments: map[string]any{"path": "/snapshots/xyz/export.pcap"},
|
||||
}))
|
||||
if resp.Error != nil {
|
||||
t.Fatalf("Unexpected error: %v", resp.Error)
|
||||
}
|
||||
text := resp.Result.(map[string]any)["content"].([]any)[0].(map[string]any)["text"].(string)
|
||||
expected := "https://kubeshark.example.com/api/snapshots/xyz/export.pcap"
|
||||
if text != expected {
|
||||
t.Errorf("Expected %q, got %q", expected, text)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_GetFileURL_MissingPath(t *testing.T) {
|
||||
s := &mcpServer{
|
||||
httpClient: &http.Client{},
|
||||
stdin: &bytes.Buffer{},
|
||||
stdout: &bytes.Buffer{},
|
||||
hubBaseURL: "http://127.0.0.1:8899/api/mcp",
|
||||
backendInitialized: true,
|
||||
}
|
||||
resp := parseResponse(t, sendRequest(s, "tools/call", 1, mcpCallToolParams{
|
||||
Name: "get_file_url",
|
||||
Arguments: map[string]any{},
|
||||
}))
|
||||
result := resp.Result.(map[string]any)
|
||||
if !result["isError"].(bool) {
|
||||
t.Error("Expected isError=true when path is missing")
|
||||
}
|
||||
text := result["content"].([]any)[0].(map[string]any)["text"].(string)
|
||||
if !strings.Contains(text, "path") {
|
||||
t.Error("Error message should mention 'path'")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_DownloadFile(t *testing.T) {
|
||||
fileContent := "test pcap data content"
|
||||
mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.URL.Path == "/api/snapshots/abc/data.pcap" {
|
||||
_, _ = w.Write([]byte(fileContent))
|
||||
} else {
|
||||
w.WriteHeader(http.StatusNotFound)
|
||||
}
|
||||
}))
|
||||
defer mockServer.Close()
|
||||
|
||||
// Use temp dir for download destination
|
||||
tmpDir := t.TempDir()
|
||||
dest := filepath.Join(tmpDir, "downloaded.pcap")
|
||||
|
||||
s := &mcpServer{
|
||||
httpClient: &http.Client{},
|
||||
stdin: &bytes.Buffer{},
|
||||
stdout: &bytes.Buffer{},
|
||||
hubBaseURL: mockServer.URL + "/api/mcp",
|
||||
backendInitialized: true,
|
||||
}
|
||||
resp := parseResponse(t, sendRequest(s, "tools/call", 1, mcpCallToolParams{
|
||||
Name: "download_file",
|
||||
Arguments: map[string]any{"path": "/snapshots/abc/data.pcap", "dest": dest},
|
||||
}))
|
||||
if resp.Error != nil {
|
||||
t.Fatalf("Unexpected error: %v", resp.Error)
|
||||
}
|
||||
result := resp.Result.(map[string]any)
|
||||
if result["isError"] != nil && result["isError"].(bool) {
|
||||
t.Fatalf("Expected no error, got: %v", result["content"])
|
||||
}
|
||||
|
||||
text := result["content"].([]any)[0].(map[string]any)["text"].(string)
|
||||
var downloadResult map[string]any
|
||||
if err := json.Unmarshal([]byte(text), &downloadResult); err != nil {
|
||||
t.Fatalf("Failed to parse download result JSON: %v", err)
|
||||
}
|
||||
if downloadResult["path"] != dest {
|
||||
t.Errorf("Expected path %q, got %q", dest, downloadResult["path"])
|
||||
}
|
||||
if downloadResult["size"].(float64) != float64(len(fileContent)) {
|
||||
t.Errorf("Expected size %d, got %v", len(fileContent), downloadResult["size"])
|
||||
}
|
||||
|
||||
// Verify the file was actually written
|
||||
content, err := os.ReadFile(dest)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read downloaded file: %v", err)
|
||||
}
|
||||
if string(content) != fileContent {
|
||||
t.Errorf("Expected file content %q, got %q", fileContent, string(content))
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_DownloadFile_CustomDest(t *testing.T) {
|
||||
mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
_, _ = w.Write([]byte("data"))
|
||||
}))
|
||||
defer mockServer.Close()
|
||||
|
||||
tmpDir := t.TempDir()
|
||||
customDest := filepath.Join(tmpDir, "custom-name.pcap")
|
||||
|
||||
s := &mcpServer{
|
||||
httpClient: &http.Client{},
|
||||
stdin: &bytes.Buffer{},
|
||||
stdout: &bytes.Buffer{},
|
||||
hubBaseURL: mockServer.URL + "/api/mcp",
|
||||
backendInitialized: true,
|
||||
}
|
||||
resp := parseResponse(t, sendRequest(s, "tools/call", 1, mcpCallToolParams{
|
||||
Name: "download_file",
|
||||
Arguments: map[string]any{"path": "/snapshots/abc/export.pcap", "dest": customDest},
|
||||
}))
|
||||
result := resp.Result.(map[string]any)
|
||||
if result["isError"] != nil && result["isError"].(bool) {
|
||||
t.Fatalf("Expected no error, got: %v", result["content"])
|
||||
}
|
||||
|
||||
text := result["content"].([]any)[0].(map[string]any)["text"].(string)
|
||||
var downloadResult map[string]any
|
||||
if err := json.Unmarshal([]byte(text), &downloadResult); err != nil {
|
||||
t.Fatalf("Failed to parse download result JSON: %v", err)
|
||||
}
|
||||
if downloadResult["path"] != customDest {
|
||||
t.Errorf("Expected path %q, got %q", customDest, downloadResult["path"])
|
||||
}
|
||||
|
||||
if _, err := os.Stat(customDest); os.IsNotExist(err) {
|
||||
t.Error("Expected file to exist at custom destination")
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_ToolsList_IncludesFileTools(t *testing.T) {
|
||||
s := newTestMCPServer()
|
||||
resp := parseResponse(t, sendRequest(s, "tools/list", 1, nil))
|
||||
if resp.Error != nil {
|
||||
t.Fatalf("Unexpected error: %v", resp.Error)
|
||||
}
|
||||
tools := resp.Result.(map[string]any)["tools"].([]any)
|
||||
toolNames := make(map[string]bool)
|
||||
for _, tool := range tools {
|
||||
toolNames[tool.(map[string]any)["name"].(string)] = true
|
||||
}
|
||||
for _, expected := range []string{"get_file_url", "download_file"} {
|
||||
if !toolNames[expected] {
|
||||
t.Errorf("Missing expected tool: %s", expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMCP_FullConversation(t *testing.T) {
|
||||
mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.URL.Path == "/" {
|
||||
|
||||
@@ -18,7 +18,6 @@ import (
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
clientk8s "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/remotecommand"
|
||||
)
|
||||
@@ -39,7 +38,7 @@ type PodFileInfo struct {
|
||||
}
|
||||
|
||||
// listWorkerPods fetches all worker pods from multiple namespaces
|
||||
func listWorkerPods(ctx context.Context, clientset *clientk8s.Clientset, namespaces []string) ([]*PodFileInfo, error) {
|
||||
func listWorkerPods(ctx context.Context, clientset *kubernetes.Clientset, namespaces []string) ([]*PodFileInfo, error) {
|
||||
var podFileInfos []*PodFileInfo
|
||||
var errs []error
|
||||
labelSelector := label
|
||||
@@ -65,7 +64,7 @@ func listWorkerPods(ctx context.Context, clientset *clientk8s.Clientset, namespa
|
||||
}
|
||||
|
||||
// listFilesInPodDir lists all files in the specified directory inside the pod across multiple namespaces
|
||||
func listFilesInPodDir(ctx context.Context, clientset *clientk8s.Clientset, config *rest.Config, pod *PodFileInfo, cutoffTime *time.Time) error {
|
||||
func listFilesInPodDir(ctx context.Context, clientset *kubernetes.Clientset, config *rest.Config, pod *PodFileInfo, cutoffTime *time.Time) error {
|
||||
nodeName := pod.Pod.Spec.NodeName
|
||||
srcFilePath := filepath.Join("data", nodeName, srcDir)
|
||||
|
||||
|
||||
@@ -62,4 +62,5 @@ func init() {
|
||||
tapCmd.Flags().Bool(configStructs.TelemetryEnabledLabel, defaultTapConfig.Telemetry.Enabled, "Enable/disable Telemetry")
|
||||
tapCmd.Flags().Bool(configStructs.ResourceGuardEnabledLabel, defaultTapConfig.ResourceGuard.Enabled, "Enable/disable resource guard")
|
||||
tapCmd.Flags().Bool(configStructs.WatchdogEnabled, defaultTapConfig.Watchdog.Enabled, "Enable/disable watchdog")
|
||||
tapCmd.Flags().String(configStructs.HelmChartPathLabel, defaultTapConfig.Release.HelmChartPath, "Path to a local Helm chart folder (overrides the remote Helm repo)")
|
||||
}
|
||||
|
||||
@@ -116,6 +116,7 @@ func CreateDefaultConfig() ConfigStruct {
|
||||
},
|
||||
CanUpdateTargetedPods: true,
|
||||
CanStopTrafficCapturing: true,
|
||||
CanControlDissection: true,
|
||||
ShowAdminConsoleLink: true,
|
||||
},
|
||||
},
|
||||
@@ -139,8 +140,8 @@ func CreateDefaultConfig() ConfigStruct {
|
||||
"diameter",
|
||||
"udp-flow",
|
||||
"tcp-flow",
|
||||
"udp-flow-full",
|
||||
"tcp-flow-full",
|
||||
"udp-conn",
|
||||
"tcp-conn",
|
||||
},
|
||||
PortMapping: configStructs.PortMapping{
|
||||
HTTP: []uint16{80, 443, 8080},
|
||||
@@ -152,10 +153,13 @@ func CreateDefaultConfig() ConfigStruct {
|
||||
},
|
||||
Dashboard: configStructs.DashboardConfig{
|
||||
CompleteStreamingEnabled: true,
|
||||
ClusterWideMapEnabled: false,
|
||||
},
|
||||
Capture: configStructs.CaptureConfig{
|
||||
Stopped: false,
|
||||
StopAfter: "5m",
|
||||
Dissection: configStructs.DissectionConfig{
|
||||
Enabled: true,
|
||||
StopAfter: "5m",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
)
|
||||
|
||||
type ScriptingConfig struct {
|
||||
Enabled bool `yaml:"enabled" json:"enabled" default:"false"`
|
||||
Env map[string]interface{} `yaml:"env" json:"env" default:"{}"`
|
||||
Source string `yaml:"source" json:"source" default:""`
|
||||
Sources []string `yaml:"sources" json:"sources" default:"[]"`
|
||||
|
||||
@@ -45,6 +45,7 @@ const (
|
||||
PcapDumpEnabled = "enabled"
|
||||
PcapTime = "time"
|
||||
WatchdogEnabled = "watchdogEnabled"
|
||||
HelmChartPathLabel = "release-helmChartPath"
|
||||
)
|
||||
|
||||
type ResourceLimitsHub struct {
|
||||
@@ -167,6 +168,7 @@ type Role struct {
|
||||
ScriptingPermissions ScriptingPermissions `yaml:"scriptingPermissions" json:"scriptingPermissions"`
|
||||
CanUpdateTargetedPods bool `yaml:"canUpdateTargetedPods" json:"canUpdateTargetedPods" default:"false"`
|
||||
CanStopTrafficCapturing bool `yaml:"canStopTrafficCapturing" json:"canStopTrafficCapturing" default:"false"`
|
||||
CanControlDissection bool `yaml:"canControlDissection" json:"canControlDissection" default:"false"`
|
||||
ShowAdminConsoleLink bool `yaml:"showAdminConsoleLink" json:"showAdminConsoleLink" default:"false"`
|
||||
}
|
||||
|
||||
@@ -200,6 +202,7 @@ type RoutingConfig struct {
|
||||
type DashboardConfig struct {
|
||||
StreamingType string `yaml:"streamingType" json:"streamingType" default:"connect-rpc"`
|
||||
CompleteStreamingEnabled bool `yaml:"completeStreamingEnabled" json:"completeStreamingEnabled" default:"true"`
|
||||
ClusterWideMapEnabled bool `yaml:"clusterWideMapEnabled" json:"clusterWideMapEnabled" default:"false"`
|
||||
}
|
||||
|
||||
type FrontRoutingConfig struct {
|
||||
@@ -207,9 +210,10 @@ type FrontRoutingConfig struct {
|
||||
}
|
||||
|
||||
type ReleaseConfig struct {
|
||||
Repo string `yaml:"repo" json:"repo" default:"https://helm.kubeshark.com"`
|
||||
Name string `yaml:"name" json:"name" default:"kubeshark"`
|
||||
Namespace string `yaml:"namespace" json:"namespace" default:"default"`
|
||||
Repo string `yaml:"repo" json:"repo" default:"https://helm.kubeshark.com"`
|
||||
Name string `yaml:"name" json:"name" default:"kubeshark"`
|
||||
Namespace string `yaml:"namespace" json:"namespace" default:"default"`
|
||||
HelmChartPath string `yaml:"helmChartPath" json:"helmChartPath" default:""`
|
||||
}
|
||||
|
||||
type TelemetryConfig struct {
|
||||
@@ -260,6 +264,8 @@ type MiscConfig struct {
|
||||
DuplicateTimeframe string `yaml:"duplicateTimeframe" json:"duplicateTimeframe" default:"200ms"`
|
||||
DetectDuplicates bool `yaml:"detectDuplicates" json:"detectDuplicates" default:"false"`
|
||||
StaleTimeoutSeconds int `yaml:"staleTimeoutSeconds" json:"staleTimeoutSeconds" default:"30"`
|
||||
TcpFlowTimeout int `yaml:"tcpFlowTimeout" json:"tcpFlowTimeout" default:"1200"`
|
||||
UdpFlowTimeout int `yaml:"udpFlowTimeout" json:"udpFlowTimeout" default:"1200"`
|
||||
}
|
||||
|
||||
type PcapDumpConfig struct {
|
||||
@@ -305,20 +311,59 @@ type RawCaptureConfig struct {
|
||||
StorageSize string `yaml:"storageSize" json:"storageSize" default:"1Gi"`
|
||||
}
|
||||
|
||||
type SnapshotsConfig struct {
|
||||
type SnapshotsLocalConfig struct {
|
||||
StorageClass string `yaml:"storageClass" json:"storageClass" default:""`
|
||||
StorageSize string `yaml:"storageSize" json:"storageSize" default:"20Gi"`
|
||||
}
|
||||
|
||||
type SnapshotsCloudS3Config struct {
|
||||
Bucket string `yaml:"bucket" json:"bucket" default:""`
|
||||
Region string `yaml:"region" json:"region" default:""`
|
||||
AccessKey string `yaml:"accessKey" json:"accessKey" default:""`
|
||||
SecretKey string `yaml:"secretKey" json:"secretKey" default:""`
|
||||
RoleArn string `yaml:"roleArn" json:"roleArn" default:""`
|
||||
ExternalId string `yaml:"externalId" json:"externalId" default:""`
|
||||
}
|
||||
|
||||
type SnapshotsCloudAzblobConfig struct {
|
||||
StorageAccount string `yaml:"storageAccount" json:"storageAccount" default:""`
|
||||
Container string `yaml:"container" json:"container" default:""`
|
||||
StorageKey string `yaml:"storageKey" json:"storageKey" default:""`
|
||||
}
|
||||
|
||||
type SnapshotsCloudGCSConfig struct {
|
||||
Bucket string `yaml:"bucket" json:"bucket" default:""`
|
||||
Project string `yaml:"project" json:"project" default:""`
|
||||
CredentialsJson string `yaml:"credentialsJson" json:"credentialsJson" default:""`
|
||||
}
|
||||
|
||||
type SnapshotsCloudConfig struct {
|
||||
Provider string `yaml:"provider" json:"provider" default:""`
|
||||
Prefix string `yaml:"prefix" json:"prefix" default:""`
|
||||
ConfigMaps []string `yaml:"configMaps" json:"configMaps" default:"[]"`
|
||||
Secrets []string `yaml:"secrets" json:"secrets" default:"[]"`
|
||||
S3 SnapshotsCloudS3Config `yaml:"s3" json:"s3"`
|
||||
Azblob SnapshotsCloudAzblobConfig `yaml:"azblob" json:"azblob"`
|
||||
GCS SnapshotsCloudGCSConfig `yaml:"gcs" json:"gcs"`
|
||||
}
|
||||
|
||||
type SnapshotsConfig struct {
|
||||
Local SnapshotsLocalConfig `yaml:"local" json:"local"`
|
||||
Cloud SnapshotsCloudConfig `yaml:"cloud" json:"cloud"`
|
||||
}
|
||||
|
||||
type DelayedDissectionConfig struct {
|
||||
Image string `yaml:"image" json:"image" default:"kubeshark/worker:master"`
|
||||
CPU string `yaml:"cpu" json:"cpu" default:"1"`
|
||||
Memory string `yaml:"memory" json:"memory" default:"4Gi"`
|
||||
}
|
||||
|
||||
type DissectionConfig struct {
|
||||
Enabled bool `yaml:"enabled" json:"enabled" default:"true"`
|
||||
StopAfter string `yaml:"stopAfter" json:"stopAfter" default:"5m"`
|
||||
}
|
||||
|
||||
type CaptureConfig struct {
|
||||
Stopped bool `yaml:"stopped" json:"stopped" default:"false"`
|
||||
StopAfter string `yaml:"stopAfter" json:"stopAfter" default:"5m"`
|
||||
Dissection DissectionConfig `yaml:"dissection" json:"dissection"`
|
||||
CaptureSelf bool `yaml:"captureSelf" json:"captureSelf" default:"false"`
|
||||
Raw RawCaptureConfig `yaml:"raw" json:"raw"`
|
||||
DbMaxSize string `yaml:"dbMaxSize" json:"dbMaxSize" default:"500Mi"`
|
||||
@@ -367,7 +412,6 @@ type TapConfig struct {
|
||||
Gitops GitopsConfig `yaml:"gitops" json:"gitops"`
|
||||
Sentry SentryConfig `yaml:"sentry" json:"sentry"`
|
||||
DefaultFilter string `yaml:"defaultFilter" json:"defaultFilter" default:""`
|
||||
LiveConfigMapChangesDisabled bool `yaml:"liveConfigMapChangesDisabled" json:"liveConfigMapChangesDisabled" default:"false"`
|
||||
GlobalFilter string `yaml:"globalFilter" json:"globalFilter" default:""`
|
||||
EnabledDissectors []string `yaml:"enabledDissectors" json:"enabledDissectors"`
|
||||
PortMapping PortMapping `yaml:"portMapping" json:"portMapping"`
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
apiVersion: v2
|
||||
name: kubeshark
|
||||
version: "52.12.0"
|
||||
version: "53.1.0"
|
||||
description: The API Traffic Analyzer for Kubernetes
|
||||
home: https://kubeshark.com
|
||||
keywords:
|
||||
|
||||
@@ -138,13 +138,32 @@ Example for overriding image names:
|
||||
| `tap.namespaces` | Target pods in namespaces | `[]` |
|
||||
| `tap.excludedNamespaces` | Exclude pods in namespaces | `[]` |
|
||||
| `tap.bpfOverride` | When using AF_PACKET as a traffic capture backend, override any existing pod targeting rules and set explicit BPF expression (e.g. `net 0.0.0.0/0`). | `[]` |
|
||||
| `tap.capture.stopped` | Set to `false` to have traffic processing start automatically. When set to `true`, traffic processing is stopped by default, resulting in almost no resource consumption (e.g. Kubeshark is dormant). This property can be dynamically control via the dashboard. | `false` |
|
||||
| `tap.capture.stopAfter` | Set to a duration (e.g. `30s`) to have traffic processing stop after no websocket activity between worker and hub. | `30s` |
|
||||
| `tap.capture.dissection.enabled` | Set to `true` to have L7 protocol dissection start automatically. When set to `false`, dissection is disabled by default. This property can be dynamically controlled via the dashboard. | `true` |
|
||||
| `tap.capture.dissection.stopAfter` | Set to a duration (e.g. `30s`) to have L7 dissection stop after no activity. | `5m` |
|
||||
| `tap.capture.raw.enabled` | Enable raw capture of packets and syscalls to disk for offline analysis | `true` |
|
||||
| `tap.capture.raw.storageSize` | Maximum storage size for raw capture files (supports K8s quantity format: `1Gi`, `500Mi`, etc.) | `1Gi` |
|
||||
| `tap.capture.dbMaxSize` | Maximum size for capture database (e.g., `4Gi`, `2000Mi`). When empty, automatically uses 80% of allocated storage (`tap.storageLimit`). | `""` |
|
||||
| `tap.snapshots.storageClass` | Storage class for snapshots volume. When empty, uses `emptyDir`. When set, creates a PVC with this storage class | `""` |
|
||||
| `tap.snapshots.storageSize` | Storage size for snapshots volume (supports K8s quantity format: `1Gi`, `500Mi`, etc.) | `10Gi` |
|
||||
| `tap.capture.captureSelf` | Include Kubeshark's own traffic in capture | `false` |
|
||||
| `tap.capture.dbMaxSize` | Maximum size for capture database (e.g., `4Gi`, `2000Mi`). | `500Mi` |
|
||||
| `tap.snapshots.local.storageClass` | Storage class for local snapshots volume. When empty, uses `emptyDir`. When set, creates a PVC with this storage class | `""` |
|
||||
| `tap.snapshots.local.storageSize` | Storage size for local snapshots volume (supports K8s quantity format: `1Gi`, `500Mi`, etc.) | `20Gi` |
|
||||
| `tap.snapshots.cloud.provider` | Cloud storage provider for snapshots: `s3`, `azblob`, or `gcs`. Empty string disables cloud storage. See [Cloud Storage docs](docs/snapshots_cloud_storage.md). | `""` |
|
||||
| `tap.snapshots.cloud.prefix` | Key prefix in the bucket/container (e.g. `snapshots/`). See [Cloud Storage docs](docs/snapshots_cloud_storage.md). | `""` |
|
||||
| `tap.snapshots.cloud.configMaps` | Names of pre-existing ConfigMaps with cloud storage env vars. Alternative to inline `s3`/`azblob`/`gcs` values below. See [Cloud Storage docs](docs/snapshots_cloud_storage.md). | `[]` |
|
||||
| `tap.snapshots.cloud.secrets` | Names of pre-existing Secrets with cloud storage credentials. Alternative to inline `s3`/`azblob`/`gcs` values below. See [Cloud Storage docs](docs/snapshots_cloud_storage.md). | `[]` |
|
||||
| `tap.snapshots.cloud.s3.bucket` | S3 bucket name. When set, the chart auto-creates a ConfigMap with `SNAPSHOT_AWS_BUCKET`. | `""` |
|
||||
| `tap.snapshots.cloud.s3.region` | AWS region for the S3 bucket. | `""` |
|
||||
| `tap.snapshots.cloud.s3.accessKey` | AWS access key ID. When set, the chart auto-creates a Secret with `SNAPSHOT_AWS_ACCESS_KEY`. | `""` |
|
||||
| `tap.snapshots.cloud.s3.secretKey` | AWS secret access key. When set, the chart auto-creates a Secret with `SNAPSHOT_AWS_SECRET_KEY`. | `""` |
|
||||
| `tap.snapshots.cloud.s3.roleArn` | IAM role ARN to assume via STS for cross-account S3 access. | `""` |
|
||||
| `tap.snapshots.cloud.s3.externalId` | External ID for the STS AssumeRole call. | `""` |
|
||||
| `tap.snapshots.cloud.azblob.storageAccount` | Azure storage account name. When set, the chart auto-creates a ConfigMap with `SNAPSHOT_AZBLOB_STORAGE_ACCOUNT`. | `""` |
|
||||
| `tap.snapshots.cloud.azblob.container` | Azure blob container name. | `""` |
|
||||
| `tap.snapshots.cloud.azblob.storageKey` | Azure storage account access key. When set, the chart auto-creates a Secret with `SNAPSHOT_AZBLOB_STORAGE_KEY`. | `""` |
|
||||
| `tap.snapshots.cloud.gcs.bucket` | GCS bucket name. When set, the chart auto-creates a ConfigMap with `SNAPSHOT_GCS_BUCKET`. | `""` |
|
||||
| `tap.snapshots.cloud.gcs.project` | GCP project ID. | `""` |
|
||||
| `tap.snapshots.cloud.gcs.credentialsJson` | Service account JSON key. When set, the chart auto-creates a Secret with `SNAPSHOT_GCS_CREDENTIALS_JSON`. | `""` |
|
||||
| `tap.delayedDissection.cpu` | CPU allocation for delayed dissection jobs | `1` |
|
||||
| `tap.delayedDissection.memory` | Memory allocation for delayed dissection jobs | `4Gi` |
|
||||
| `tap.release.repo` | URL of the Helm chart repository | `https://helm.kubeshark.com` |
|
||||
| `tap.release.name` | Helm release name | `kubeshark` |
|
||||
| `tap.release.namespace` | Helm release namespace | `default` |
|
||||
@@ -152,30 +171,30 @@ Example for overriding image names:
|
||||
| `tap.persistentStorageStatic` | Use static persistent volume provisioning (explicitly defined `PersistentVolume` ) | `false` |
|
||||
| `tap.persistentStoragePvcVolumeMode` | Set the pvc volume mode (Filesystem\|Block) | `Filesystem` |
|
||||
| `tap.efsFileSytemIdAndPath` | [EFS file system ID and, optionally, subpath and/or access point](https://github.com/kubernetes-sigs/aws-efs-csi-driver/blob/master/examples/kubernetes/access_points/README.md) `<FileSystemId>:<Path>:<AccessPointId>` | "" |
|
||||
| `tap.storageLimit` | Limit of either the `emptyDir` or `persistentVolumeClaim` | `5Gi` |
|
||||
| `tap.storageLimit` | Limit of either the `emptyDir` or `persistentVolumeClaim` | `10Gi` |
|
||||
| `tap.storageClass` | Storage class of the `PersistentVolumeClaim` | `standard` |
|
||||
| `tap.dryRun` | Preview of all pods matching the regex, without tapping them | `false` |
|
||||
| `tap.dnsConfig.nameservers` | Nameservers to use for DNS resolution | `[]` |
|
||||
| `tap.dnsConfig.searches` | Search domains to use for DNS resolution | `[]` |
|
||||
| `tap.dnsConfig.options` | DNS options to use for DNS resolution | `[]` |
|
||||
| `tap.dns.nameservers` | Nameservers to use for DNS resolution | `[]` |
|
||||
| `tap.dns.searches` | Search domains to use for DNS resolution | `[]` |
|
||||
| `tap.dns.options` | DNS options to use for DNS resolution | `[]` |
|
||||
| `tap.resources.hub.limits.cpu` | CPU limit for hub | `""` (no limit) |
|
||||
| `tap.resources.hub.limits.memory` | Memory limit for hub | `5Gi` |
|
||||
| `tap.resources.hub.requests.cpu` | CPU request for hub | `50m` |
|
||||
| `tap.resources.hub.requests.memory` | Memory request for hub | `50Mi` |
|
||||
| `tap.resources.sniffer.limits.cpu` | CPU limit for sniffer | `""` (no limit) |
|
||||
| `tap.resources.sniffer.limits.memory` | Memory limit for sniffer | `3Gi` |
|
||||
| `tap.resources.sniffer.limits.memory` | Memory limit for sniffer | `5Gi` |
|
||||
| `tap.resources.sniffer.requests.cpu` | CPU request for sniffer | `50m` |
|
||||
| `tap.resources.sniffer.requests.memory` | Memory request for sniffer | `50Mi` |
|
||||
| `tap.resources.tracer.limits.cpu` | CPU limit for tracer | `""` (no limit) |
|
||||
| `tap.resources.tracer.limits.memory` | Memory limit for tracer | `3Gi` |
|
||||
| `tap.resources.tracer.limits.memory` | Memory limit for tracer | `5Gi` |
|
||||
| `tap.resources.tracer.requests.cpu` | CPU request for tracer | `50m` |
|
||||
| `tap.resources.tracer.requests.memory` | Memory request for tracer | `50Mi` |
|
||||
| `tap.probes.hub.initialDelaySeconds` | Initial delay before probing the hub | `15` |
|
||||
| `tap.probes.hub.periodSeconds` | Period between probes for the hub | `10` |
|
||||
| `tap.probes.hub.initialDelaySeconds` | Initial delay before probing the hub | `5` |
|
||||
| `tap.probes.hub.periodSeconds` | Period between probes for the hub | `5` |
|
||||
| `tap.probes.hub.successThreshold` | Number of successful probes before considering the hub healthy | `1` |
|
||||
| `tap.probes.hub.failureThreshold` | Number of failed probes before considering the hub unhealthy | `3` |
|
||||
| `tap.probes.sniffer.initialDelaySeconds` | Initial delay before probing the sniffer | `15` |
|
||||
| `tap.probes.sniffer.periodSeconds` | Period between probes for the sniffer | `10` |
|
||||
| `tap.probes.sniffer.initialDelaySeconds` | Initial delay before probing the sniffer | `5` |
|
||||
| `tap.probes.sniffer.periodSeconds` | Period between probes for the sniffer | `5` |
|
||||
| `tap.probes.sniffer.successThreshold` | Number of successful probes before considering the sniffer healthy | `1` |
|
||||
| `tap.probes.sniffer.failureThreshold` | Number of failed probes before considering the sniffer unhealthy | `3` |
|
||||
| `tap.serviceMesh` | Capture traffic from service meshes like Istio, Linkerd, Consul, etc. | `true` |
|
||||
@@ -198,7 +217,7 @@ Example for overriding image names:
|
||||
| `tap.auth.saml.x509crt` | A self-signed X.509 `.cert` contents <br/>(effective, if `tap.auth.type = saml`) | `` |
|
||||
| `tap.auth.saml.x509key` | A self-signed X.509 `.key` contents <br/>(effective, if `tap.auth.type = saml`) | `` |
|
||||
| `tap.auth.saml.roleAttribute` | A SAML attribute name corresponding to user's authorization role <br/>(effective, if `tap.auth.type = saml`) | `role` |
|
||||
| `tap.auth.saml.roles` | A list of SAML authorization roles and their permissions <br/>(effective, if `tap.auth.type = saml`) | `{"admin":{"canDownloadPCAP":true,"canUpdateTargetedPods":true,"canUseScripting":true, "scriptingPermissions":{"canSave":true, "canActivate":true, "canDelete":true}, "canStopTrafficCapturing":true, "filter":"","showAdminConsoleLink":true}}` |
|
||||
| `tap.auth.saml.roles` | A list of SAML authorization roles and their permissions <br/>(effective, if `tap.auth.type = saml`) | `{"admin":{"canDownloadPCAP":true,"canUpdateTargetedPods":true,"canUseScripting":true, "scriptingPermissions":{"canSave":true, "canActivate":true, "canDelete":true}, "canStopTrafficCapturing":true, "canControlDissection":true, "filter":"","showAdminConsoleLink":true}}` |
|
||||
| `tap.ingress.enabled` | Enable `Ingress` | `false` |
|
||||
| `tap.ingress.className` | Ingress class name | `""` |
|
||||
| `tap.ingress.host` | Host of the `Ingress` | `ks.svc.cluster.local` |
|
||||
@@ -210,16 +229,20 @@ Example for overriding image names:
|
||||
| `tap.telemetry.enabled` | Enable anonymous usage statistics collection | `true` |
|
||||
| `tap.resourceGuard.enabled` | Enable resource guard worker process, which watches RAM/disk usage and enables/disables traffic capture based on available resources | `false` |
|
||||
| `tap.secrets` | List of secrets to be used as source for environment variables (e.g. `kubeshark-license`) | `[]` |
|
||||
| `tap.sentry.enabled` | Enable sending of error logs to Sentry | `true` (only for qualified users) |
|
||||
| `tap.sentry.enabled` | Enable sending of error logs to Sentry | `false` |
|
||||
| `tap.sentry.environment` | Sentry environment to label error logs with | `production` |
|
||||
| `tap.defaultFilter` | Sets the default dashboard KFL filter (e.g. `http`). By default, this value is set to filter out noisy protocols such as DNS, UDP, ICMP and TCP. The user can easily change this, **temporarily**, in the Dashboard. For a permanent change, you should change this value in the `values.yaml` or `config.yaml` file. | `""` |
|
||||
| `tap.liveConfigMapChangesDisabled` | If set to `true`, all user functionality (scripting, targeting settings, global & default KFL modification, traffic recording, traffic capturing on/off, protocol dissectors) involving dynamic ConfigMap changes from UI will be disabled | `false` |
|
||||
| `tap.globalFilter` | Prepends to any KFL filter and can be used to limit what is visible in the dashboard. For example, `redact("request.headers.Authorization")` will redact the appropriate field. Another example `!dns` will not show any DNS traffic. | `""` |
|
||||
| `tap.metrics.port` | Pod port used to expose Prometheus metrics | `49100` |
|
||||
| `tap.enabledDissectors` | This is an array of strings representing the list of supported protocols. Remove or comment out redundant protocols (e.g., dns). | The default list excludes: `udp` and `tcp` |
|
||||
| `tap.mountBpf` | BPF filesystem needs to be mounted for eBPF to work properly. This helm value determines whether Kubeshark will attempt to mount the filesystem. This option is not required if the filesystem is already mounted. | `true` |
|
||||
| `tap.hostNetwork` | Enable host network mode for worker DaemonSet pods. When enabled, worker pods use the host's network namespace for direct network access. | `true` |
|
||||
| `tap.packetCapture` | Packet capture backend: `best`, `af_packet`, or `pf_ring` | `best` |
|
||||
| `tap.misc.trafficSampleRate` | Percentage of traffic to process (0-100) | `100` |
|
||||
| `tap.misc.tcpStreamChannelTimeoutMs` | Timeout in milliseconds for TCP stream channel | `10000` |
|
||||
| `tap.gitops.enabled` | Enable GitOps functionality. This will allow you to use GitOps to manage your Kubeshark configuration. | `false` |
|
||||
| `tap.misc.tcpFlowTimeout` | TCP flow aggregation timeout in seconds. Controls how long the worker waits before finalizing a TCP flow. | `1200` |
|
||||
| `tap.misc.udpFlowTimeout` | UDP flow aggregation timeout in seconds. Controls how long the worker waits before finalizing a UDP flow. | `1200` |
|
||||
| `logs.file` | Logs dump path | `""` |
|
||||
| `pcapdump.enabled` | Enable recording of all traffic captured according to other parameters. Whatever Kubeshark captures, considering pod targeting rules, will be stored in pcap files ready to be viewed by tools | `false` |
|
||||
| `pcapdump.maxTime` | The time window into the past that will be stored. Older traffic will be discarded. | `2h` |
|
||||
@@ -229,6 +252,7 @@ Example for overriding image names:
|
||||
| `dumpLogs` | Enable dumping of logs | `false` |
|
||||
| `headless` | Enable running in headless mode | `false` |
|
||||
| `license` | License key for the Pro/Enterprise edition | `""` |
|
||||
| `scripting.enabled` | Enables scripting | `false` |
|
||||
| `scripting.env` | Environment variables for the scripting | `{}` |
|
||||
| `scripting.source` | Source directory of the scripts | `""` |
|
||||
| `scripting.watchScripts` | Enable watch mode for the scripts in source directory | `true` |
|
||||
@@ -236,10 +260,6 @@ Example for overriding image names:
|
||||
| `supportChatEnabled` | Enable real-time support chat channel based on Intercom | `false` |
|
||||
| `internetConnectivity` | Turns off API requests that are dependent on Internet connectivity such as `telemetry` and `online-support`. | `true` |
|
||||
|
||||
KernelMapping pairs kernel versions with a
|
||||
DriverContainer image. Kernel versions can be matched
|
||||
literally or using a regular expression
|
||||
|
||||
# Installing with SAML enabled
|
||||
|
||||
### Prerequisites:
|
||||
|
||||
583
helm-chart/docs/snapshots_cloud_storage.md
Normal file
583
helm-chart/docs/snapshots_cloud_storage.md
Normal file
@@ -0,0 +1,583 @@
|
||||
# Cloud Storage for Snapshots
|
||||
|
||||
Kubeshark can upload and download snapshots to cloud object storage, enabling cross-cluster sharing, backup/restore, and long-term retention.
|
||||
|
||||
Supported providers: **Amazon S3** (`s3`), **Azure Blob Storage** (`azblob`), and **Google Cloud Storage** (`gcs`).
|
||||
|
||||
## Helm Values
|
||||
|
||||
```yaml
|
||||
tap:
|
||||
snapshots:
|
||||
cloud:
|
||||
provider: "" # "s3", "azblob", or "gcs" (empty = disabled)
|
||||
prefix: "" # key prefix in the bucket/container (e.g. "snapshots/")
|
||||
configMaps: [] # names of pre-existing ConfigMaps with cloud config env vars
|
||||
secrets: [] # names of pre-existing Secrets with cloud credentials
|
||||
s3:
|
||||
bucket: ""
|
||||
region: ""
|
||||
accessKey: ""
|
||||
secretKey: ""
|
||||
roleArn: ""
|
||||
externalId: ""
|
||||
azblob:
|
||||
storageAccount: ""
|
||||
container: ""
|
||||
storageKey: ""
|
||||
gcs:
|
||||
bucket: ""
|
||||
project: ""
|
||||
credentialsJson: ""
|
||||
```
|
||||
|
||||
- `provider` selects which cloud backend to use. Leave empty to disable cloud storage.
|
||||
- `configMaps` and `secrets` are lists of names of existing ConfigMap/Secret resources. They are mounted as `envFrom` on the hub pod, injecting all their keys as environment variables.
|
||||
|
||||
### Inline Values (Alternative to External ConfigMaps/Secrets)
|
||||
|
||||
Instead of creating ConfigMap and Secret resources manually, you can set cloud storage configuration directly in `values.yaml` or via `--set` flags. The Helm chart will automatically create the necessary ConfigMap and Secret resources.
|
||||
|
||||
Both approaches can be used together — inline values are additive to external `configMaps`/`secrets` references.
|
||||
|
||||
---
|
||||
|
||||
## Amazon S3
|
||||
|
||||
### Environment Variables
|
||||
|
||||
| Variable | Required | Description |
|
||||
|----------|----------|-------------|
|
||||
| `SNAPSHOT_AWS_BUCKET` | Yes | S3 bucket name |
|
||||
| `SNAPSHOT_AWS_REGION` | No | AWS region (uses SDK default if empty) |
|
||||
| `SNAPSHOT_AWS_ACCESS_KEY` | No | Static access key ID (empty = use default credential chain) |
|
||||
| `SNAPSHOT_AWS_SECRET_KEY` | No | Static secret access key |
|
||||
| `SNAPSHOT_AWS_ROLE_ARN` | No | IAM role ARN to assume via STS (for cross-account access) |
|
||||
| `SNAPSHOT_AWS_EXTERNAL_ID` | No | External ID for the STS AssumeRole call |
|
||||
| `SNAPSHOT_CLOUD_PREFIX` | No | Key prefix in the bucket (e.g. `snapshots/`) |
|
||||
|
||||
### Authentication Methods
|
||||
|
||||
Credentials are resolved in this order:
|
||||
|
||||
1. **Static credentials** -- If `SNAPSHOT_AWS_ACCESS_KEY` is set, static credentials are used directly.
|
||||
2. **STS AssumeRole** -- If `SNAPSHOT_AWS_ROLE_ARN` is also set, the static (or default) credentials are used to assume the given IAM role. This is useful for cross-account S3 access.
|
||||
3. **AWS default credential chain** -- When no static credentials are provided, the SDK default chain is used:
|
||||
- **IRSA** (EKS service account token) -- recommended for production on EKS
|
||||
- EC2 instance profile
|
||||
- Standard AWS environment variables (`AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, etc.)
|
||||
- Shared credentials file (`~/.aws/credentials`)
|
||||
|
||||
The provider validates bucket access on startup via `HeadBucket`. If the bucket is inaccessible, the hub will fail to start.
|
||||
|
||||
### Example: Inline Values (simplest approach)
|
||||
|
||||
```yaml
|
||||
tap:
|
||||
snapshots:
|
||||
cloud:
|
||||
provider: "s3"
|
||||
s3:
|
||||
bucket: my-kubeshark-snapshots
|
||||
region: us-east-1
|
||||
```
|
||||
|
||||
Or with static credentials via `--set`:
|
||||
|
||||
```bash
|
||||
helm install kubeshark kubeshark/kubeshark \
|
||||
--set tap.snapshots.cloud.provider=s3 \
|
||||
--set tap.snapshots.cloud.s3.bucket=my-kubeshark-snapshots \
|
||||
--set tap.snapshots.cloud.s3.region=us-east-1 \
|
||||
--set tap.snapshots.cloud.s3.accessKey=AKIA... \
|
||||
--set tap.snapshots.cloud.s3.secretKey=wJal...
|
||||
```
|
||||
|
||||
### Example: IRSA (recommended for EKS)
|
||||
|
||||
[IAM Roles for Service Accounts (IRSA)](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html) lets EKS pods assume an IAM role without static credentials. EKS injects a short-lived token into the pod automatically.
|
||||
|
||||
**Prerequisites:**
|
||||
|
||||
1. Your EKS cluster must have an [OIDC provider](https://docs.aws.amazon.com/eks/latest/userguide/enable-iam-roles-for-service-accounts.html) associated with it.
|
||||
2. An IAM role with a trust policy that allows the Kubeshark service account to assume it.
|
||||
|
||||
**Step 1 — Create an IAM policy scoped to your bucket:**
|
||||
|
||||
```json
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": [
|
||||
"s3:GetObject",
|
||||
"s3:PutObject",
|
||||
"s3:DeleteObject",
|
||||
"s3:GetObjectVersion",
|
||||
"s3:DeleteObjectVersion",
|
||||
"s3:ListBucket",
|
||||
"s3:ListBucketVersions",
|
||||
"s3:GetBucketLocation",
|
||||
"s3:GetBucketVersioning"
|
||||
],
|
||||
"Resource": [
|
||||
"arn:aws:s3:::my-kubeshark-snapshots",
|
||||
"arn:aws:s3:::my-kubeshark-snapshots/*"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
> For read-only access, remove `s3:PutObject`, `s3:DeleteObject`, and `s3:DeleteObjectVersion`.
|
||||
|
||||
**Step 2 — Create an IAM role with IRSA trust policy:**
|
||||
|
||||
```bash
|
||||
# Get your cluster's OIDC provider URL
|
||||
OIDC_PROVIDER=$(aws eks describe-cluster --name CLUSTER_NAME \
|
||||
--query "cluster.identity.oidc.issuer" --output text | sed 's|https://||')
|
||||
|
||||
# Create a trust policy
|
||||
# The default K8s SA name is "<release-name>-service-account" (e.g. "kubeshark-service-account")
|
||||
cat > trust-policy.json <<EOF
|
||||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Principal": {
|
||||
"Federated": "arn:aws:iam::ACCOUNT_ID:oidc-provider/${OIDC_PROVIDER}"
|
||||
},
|
||||
"Action": "sts:AssumeRoleWithWebIdentity",
|
||||
"Condition": {
|
||||
"StringEquals": {
|
||||
"${OIDC_PROVIDER}:sub": "system:serviceaccount:NAMESPACE:kubeshark-service-account",
|
||||
"${OIDC_PROVIDER}:aud": "sts.amazonaws.com"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
EOF
|
||||
|
||||
# Create the role and attach your policy
|
||||
aws iam create-role \
|
||||
--role-name KubesharkS3Role \
|
||||
--assume-role-policy-document file://trust-policy.json
|
||||
|
||||
aws iam put-role-policy \
|
||||
--role-name KubesharkS3Role \
|
||||
--policy-name KubesharkSnapshotsBucketAccess \
|
||||
--policy-document file://bucket-policy.json
|
||||
```
|
||||
|
||||
**Step 3 — Create a ConfigMap with bucket configuration:**
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: kubeshark-s3-config
|
||||
data:
|
||||
SNAPSHOT_AWS_BUCKET: my-kubeshark-snapshots
|
||||
SNAPSHOT_AWS_REGION: us-east-1
|
||||
```
|
||||
|
||||
**Step 4 — Set Helm values with `tap.annotations` to annotate the service account:**
|
||||
|
||||
```yaml
|
||||
tap:
|
||||
annotations:
|
||||
eks.amazonaws.com/role-arn: arn:aws:iam::ACCOUNT_ID:role/KubesharkS3Role
|
||||
snapshots:
|
||||
cloud:
|
||||
provider: "s3"
|
||||
configMaps:
|
||||
- kubeshark-s3-config
|
||||
```
|
||||
|
||||
Or via `--set`:
|
||||
|
||||
```bash
|
||||
helm install kubeshark kubeshark/kubeshark \
|
||||
--set tap.snapshots.cloud.provider=s3 \
|
||||
--set tap.snapshots.cloud.s3.bucket=my-kubeshark-snapshots \
|
||||
--set tap.snapshots.cloud.s3.region=us-east-1 \
|
||||
--set tap.annotations."eks\.amazonaws\.com/role-arn"=arn:aws:iam::ACCOUNT_ID:role/KubesharkS3Role
|
||||
```
|
||||
|
||||
No `accessKey`/`secretKey` is needed — EKS injects credentials automatically via the IRSA token.
|
||||
|
||||
### Example: Static Credentials
|
||||
|
||||
Create a Secret with credentials:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: kubeshark-s3-creds
|
||||
type: Opaque
|
||||
stringData:
|
||||
SNAPSHOT_AWS_ACCESS_KEY: AKIA...
|
||||
SNAPSHOT_AWS_SECRET_KEY: wJal...
|
||||
```
|
||||
|
||||
Create a ConfigMap with bucket configuration:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: kubeshark-s3-config
|
||||
data:
|
||||
SNAPSHOT_AWS_BUCKET: my-kubeshark-snapshots
|
||||
SNAPSHOT_AWS_REGION: us-east-1
|
||||
```
|
||||
|
||||
Set Helm values:
|
||||
|
||||
```yaml
|
||||
tap:
|
||||
snapshots:
|
||||
cloud:
|
||||
provider: "s3"
|
||||
configMaps:
|
||||
- kubeshark-s3-config
|
||||
secrets:
|
||||
- kubeshark-s3-creds
|
||||
```
|
||||
|
||||
### Example: Cross-Account Access via AssumeRole
|
||||
|
||||
Add the role ARN to your ConfigMap:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: kubeshark-s3-config
|
||||
data:
|
||||
SNAPSHOT_AWS_BUCKET: other-account-bucket
|
||||
SNAPSHOT_AWS_REGION: eu-west-1
|
||||
SNAPSHOT_AWS_ROLE_ARN: arn:aws:iam::123456789012:role/KubesharkCrossAccountRole
|
||||
SNAPSHOT_AWS_EXTERNAL_ID: my-external-id # optional, if required by the trust policy
|
||||
```
|
||||
|
||||
The hub will first authenticate using its own credentials (IRSA, static, or default chain), then assume the specified role to access the bucket.
|
||||
|
||||
---
|
||||
|
||||
## Azure Blob Storage
|
||||
|
||||
### Environment Variables
|
||||
|
||||
| Variable | Required | Description |
|
||||
|----------|----------|-------------|
|
||||
| `SNAPSHOT_AZBLOB_STORAGE_ACCOUNT` | Yes | Azure storage account name |
|
||||
| `SNAPSHOT_AZBLOB_CONTAINER` | Yes | Blob container name |
|
||||
| `SNAPSHOT_AZBLOB_STORAGE_KEY` | No | Storage account access key (empty = use DefaultAzureCredential) |
|
||||
| `SNAPSHOT_CLOUD_PREFIX` | No | Key prefix in the container (e.g. `snapshots/`) |
|
||||
|
||||
### Authentication Methods
|
||||
|
||||
Credentials are resolved in this order:
|
||||
|
||||
1. **Shared Key** -- If `SNAPSHOT_AZBLOB_STORAGE_KEY` is set, the storage account key is used directly.
|
||||
2. **DefaultAzureCredential** -- When no storage key is provided, the Azure SDK default credential chain is used:
|
||||
- **Workload Identity** (AKS pod identity) -- recommended for production on AKS
|
||||
- Managed Identity (system or user-assigned)
|
||||
- Azure CLI credentials
|
||||
- Environment variables (`AZURE_CLIENT_ID`, `AZURE_TENANT_ID`, `AZURE_CLIENT_SECRET`)
|
||||
|
||||
The provider validates container access on startup via `GetProperties`. If the container is inaccessible, the hub will fail to start.
|
||||
|
||||
### Example: Inline Values
|
||||
|
||||
```yaml
|
||||
tap:
|
||||
snapshots:
|
||||
cloud:
|
||||
provider: "azblob"
|
||||
azblob:
|
||||
storageAccount: mykubesharksa
|
||||
container: snapshots
|
||||
storageKey: "base64-encoded-storage-key..." # optional, omit for DefaultAzureCredential
|
||||
```
|
||||
|
||||
### Example: Workload Identity (recommended for AKS)
|
||||
|
||||
Create a ConfigMap with storage configuration:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: kubeshark-azblob-config
|
||||
data:
|
||||
SNAPSHOT_AZBLOB_STORAGE_ACCOUNT: mykubesharksa
|
||||
SNAPSHOT_AZBLOB_CONTAINER: snapshots
|
||||
```
|
||||
|
||||
Set Helm values:
|
||||
|
||||
```yaml
|
||||
tap:
|
||||
snapshots:
|
||||
cloud:
|
||||
provider: "azblob"
|
||||
configMaps:
|
||||
- kubeshark-azblob-config
|
||||
```
|
||||
|
||||
The hub pod's service account must be configured for AKS Workload Identity with a managed identity that has the **Storage Blob Data Contributor** role on the container.
|
||||
|
||||
### Example: Storage Account Key
|
||||
|
||||
Create a Secret with the storage key:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: kubeshark-azblob-creds
|
||||
type: Opaque
|
||||
stringData:
|
||||
SNAPSHOT_AZBLOB_STORAGE_KEY: "base64-encoded-storage-key..."
|
||||
```
|
||||
|
||||
Create a ConfigMap with storage configuration:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: kubeshark-azblob-config
|
||||
data:
|
||||
SNAPSHOT_AZBLOB_STORAGE_ACCOUNT: mykubesharksa
|
||||
SNAPSHOT_AZBLOB_CONTAINER: snapshots
|
||||
```
|
||||
|
||||
Set Helm values:
|
||||
|
||||
```yaml
|
||||
tap:
|
||||
snapshots:
|
||||
cloud:
|
||||
provider: "azblob"
|
||||
configMaps:
|
||||
- kubeshark-azblob-config
|
||||
secrets:
|
||||
- kubeshark-azblob-creds
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Google Cloud Storage
|
||||
|
||||
### Environment Variables
|
||||
|
||||
| Variable | Required | Description |
|
||||
|----------|----------|-------------|
|
||||
| `SNAPSHOT_GCS_BUCKET` | Yes | GCS bucket name |
|
||||
| `SNAPSHOT_GCS_PROJECT` | No | GCP project ID |
|
||||
| `SNAPSHOT_GCS_CREDENTIALS_JSON` | No | Service account JSON key (empty = use Application Default Credentials) |
|
||||
| `SNAPSHOT_CLOUD_PREFIX` | No | Key prefix in the bucket (e.g. `snapshots/`) |
|
||||
|
||||
### Authentication Methods
|
||||
|
||||
Credentials are resolved in this order:
|
||||
|
||||
1. **Service Account JSON Key** -- If `SNAPSHOT_GCS_CREDENTIALS_JSON` is set, the provided JSON key is used directly.
|
||||
2. **Application Default Credentials** -- When no JSON key is provided, the GCP SDK default credential chain is used:
|
||||
- **Workload Identity** (GKE pod identity) -- recommended for production on GKE
|
||||
- GCE instance metadata (Compute Engine default service account)
|
||||
- Standard GCP environment variables (`GOOGLE_APPLICATION_CREDENTIALS`)
|
||||
- `gcloud` CLI credentials
|
||||
|
||||
The provider validates bucket access on startup via `Bucket.Attrs()`. If the bucket is inaccessible, the hub will fail to start.
|
||||
|
||||
### Required IAM Permissions
|
||||
|
||||
The service account needs different IAM roles depending on the access level:
|
||||
|
||||
**Read-only** (download, list, and sync snapshots from cloud):
|
||||
|
||||
| Role | Permissions provided | Purpose |
|
||||
|------|---------------------|---------|
|
||||
| `roles/storage.legacyBucketReader` | `storage.buckets.get`, `storage.objects.list` | Hub startup (bucket validation) + listing snapshots |
|
||||
| `roles/storage.objectViewer` | `storage.objects.get`, `storage.objects.list` | Downloading snapshots, checking existence, reading metadata |
|
||||
|
||||
```bash
|
||||
gcloud storage buckets add-iam-policy-binding gs://BUCKET_NAME \
|
||||
--member="serviceAccount:SA_EMAIL" \
|
||||
--role="roles/storage.legacyBucketReader"
|
||||
gcloud storage buckets add-iam-policy-binding gs://BUCKET_NAME \
|
||||
--member="serviceAccount:SA_EMAIL" \
|
||||
--role="roles/storage.objectViewer"
|
||||
```
|
||||
|
||||
**Read-write** (upload and delete snapshots in addition to read):
|
||||
|
||||
Add `roles/storage.objectAdmin` instead of `roles/storage.objectViewer` to also grant `storage.objects.create` and `storage.objects.delete`:
|
||||
|
||||
| Role | Permissions provided | Purpose |
|
||||
|------|---------------------|---------|
|
||||
| `roles/storage.legacyBucketReader` | `storage.buckets.get`, `storage.objects.list` | Hub startup (bucket validation) + listing snapshots |
|
||||
| `roles/storage.objectAdmin` | `storage.objects.*` | Full object CRUD (upload, download, delete, list, metadata) |
|
||||
|
||||
```bash
|
||||
gcloud storage buckets add-iam-policy-binding gs://BUCKET_NAME \
|
||||
--member="serviceAccount:SA_EMAIL" \
|
||||
--role="roles/storage.legacyBucketReader"
|
||||
gcloud storage buckets add-iam-policy-binding gs://BUCKET_NAME \
|
||||
--member="serviceAccount:SA_EMAIL" \
|
||||
--role="roles/storage.objectAdmin"
|
||||
```
|
||||
|
||||
### Example: Inline Values (simplest approach)
|
||||
|
||||
```yaml
|
||||
tap:
|
||||
snapshots:
|
||||
cloud:
|
||||
provider: "gcs"
|
||||
gcs:
|
||||
bucket: my-kubeshark-snapshots
|
||||
project: my-gcp-project
|
||||
```
|
||||
|
||||
Or with a service account key via `--set`:
|
||||
|
||||
```bash
|
||||
helm install kubeshark kubeshark/kubeshark \
|
||||
--set tap.snapshots.cloud.provider=gcs \
|
||||
--set tap.snapshots.cloud.gcs.bucket=my-kubeshark-snapshots \
|
||||
--set tap.snapshots.cloud.gcs.project=my-gcp-project \
|
||||
--set-file tap.snapshots.cloud.gcs.credentialsJson=service-account.json
|
||||
```
|
||||
|
||||
### Example: Workload Identity (recommended for GKE)
|
||||
|
||||
Create a ConfigMap with bucket configuration:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: kubeshark-gcs-config
|
||||
data:
|
||||
SNAPSHOT_GCS_BUCKET: my-kubeshark-snapshots
|
||||
SNAPSHOT_GCS_PROJECT: my-gcp-project
|
||||
```
|
||||
|
||||
Set Helm values:
|
||||
|
||||
```yaml
|
||||
tap:
|
||||
snapshots:
|
||||
cloud:
|
||||
provider: "gcs"
|
||||
configMaps:
|
||||
- kubeshark-gcs-config
|
||||
```
|
||||
|
||||
Configure GKE Workload Identity to allow the Kubernetes service account to impersonate the GCP service account:
|
||||
|
||||
```bash
|
||||
# Ensure the GKE cluster has Workload Identity enabled
|
||||
# (--workload-pool=PROJECT_ID.svc.id.goog at cluster creation)
|
||||
|
||||
# Create a GCP service account (if not already created)
|
||||
gcloud iam service-accounts create kubeshark-gcs \
|
||||
--display-name="Kubeshark GCS Snapshots"
|
||||
|
||||
# Grant bucket access (read-write — see Required IAM Permissions above)
|
||||
gcloud storage buckets add-iam-policy-binding gs://BUCKET_NAME \
|
||||
--member="serviceAccount:kubeshark-gcs@PROJECT_ID.iam.gserviceaccount.com" \
|
||||
--role="roles/storage.legacyBucketReader"
|
||||
gcloud storage buckets add-iam-policy-binding gs://BUCKET_NAME \
|
||||
--member="serviceAccount:kubeshark-gcs@PROJECT_ID.iam.gserviceaccount.com" \
|
||||
--role="roles/storage.objectAdmin"
|
||||
|
||||
# Allow the K8s service account to impersonate the GCP service account
|
||||
# Note: the K8s SA name is "<release-name>-service-account" (default: "kubeshark-service-account")
|
||||
gcloud iam service-accounts add-iam-policy-binding \
|
||||
kubeshark-gcs@PROJECT_ID.iam.gserviceaccount.com \
|
||||
--role="roles/iam.workloadIdentityUser" \
|
||||
--member="serviceAccount:PROJECT_ID.svc.id.goog[NAMESPACE/kubeshark-service-account]"
|
||||
```
|
||||
|
||||
Set Helm values — the `tap.annotations` field adds the Workload Identity annotation to the service account:
|
||||
|
||||
```yaml
|
||||
tap:
|
||||
annotations:
|
||||
iam.gke.io/gcp-service-account: kubeshark-gcs@PROJECT_ID.iam.gserviceaccount.com
|
||||
snapshots:
|
||||
cloud:
|
||||
provider: "gcs"
|
||||
configMaps:
|
||||
- kubeshark-gcs-config
|
||||
```
|
||||
|
||||
Or via `--set`:
|
||||
|
||||
```bash
|
||||
helm install kubeshark kubeshark/kubeshark \
|
||||
--set tap.snapshots.cloud.provider=gcs \
|
||||
--set tap.snapshots.cloud.gcs.bucket=BUCKET_NAME \
|
||||
--set tap.snapshots.cloud.gcs.project=PROJECT_ID \
|
||||
--set tap.annotations."iam\.gke\.io/gcp-service-account"=kubeshark-gcs@PROJECT_ID.iam.gserviceaccount.com
|
||||
```
|
||||
|
||||
No `credentialsJson` secret is needed — GKE injects credentials automatically via the Workload Identity metadata server.
|
||||
|
||||
### Example: Service Account Key
|
||||
|
||||
Create a Secret with the service account JSON key:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: kubeshark-gcs-creds
|
||||
type: Opaque
|
||||
stringData:
|
||||
SNAPSHOT_GCS_CREDENTIALS_JSON: |
|
||||
{
|
||||
"type": "service_account",
|
||||
"project_id": "my-gcp-project",
|
||||
"private_key_id": "...",
|
||||
"private_key": "-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----\n",
|
||||
"client_email": "kubeshark@my-gcp-project.iam.gserviceaccount.com",
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
Create a ConfigMap with bucket configuration:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: kubeshark-gcs-config
|
||||
data:
|
||||
SNAPSHOT_GCS_BUCKET: my-kubeshark-snapshots
|
||||
SNAPSHOT_GCS_PROJECT: my-gcp-project
|
||||
```
|
||||
|
||||
Set Helm values:
|
||||
|
||||
```yaml
|
||||
tap:
|
||||
snapshots:
|
||||
cloud:
|
||||
provider: "gcs"
|
||||
configMaps:
|
||||
- kubeshark-gcs-config
|
||||
secrets:
|
||||
- kubeshark-gcs-creds
|
||||
```
|
||||
@@ -37,13 +37,17 @@ spec:
|
||||
- -loglevel
|
||||
- '{{ .Values.logLevel | default "warning" }}'
|
||||
- -capture-stop-after
|
||||
- "{{ if hasKey .Values.tap.capture "stopAfter" }}{{ .Values.tap.capture.stopAfter }}{{ else }}5m{{ end }}"
|
||||
- "{{ if hasKey .Values.tap.capture.dissection "stopAfter" }}{{ .Values.tap.capture.dissection.stopAfter }}{{ else }}5m{{ end }}"
|
||||
- -snapshot-size-limit
|
||||
- '{{ .Values.tap.snapshots.storageSize }}'
|
||||
{{- if .Values.tap.delayedDissection.image }}
|
||||
- '{{ .Values.tap.snapshots.local.storageSize }}'
|
||||
- -dissector-image
|
||||
- '{{ .Values.tap.delayedDissection.image }}'
|
||||
{{- end }}
|
||||
{{- if .Values.tap.docker.overrideImage.worker }}
|
||||
- '{{ .Values.tap.docker.overrideImage.worker }}'
|
||||
{{- else if .Values.tap.docker.overrideTag.worker }}
|
||||
- '{{ .Values.tap.docker.registry }}/worker:{{ .Values.tap.docker.overrideTag.worker }}'
|
||||
{{- else }}
|
||||
- '{{ .Values.tap.docker.registry }}/worker:{{ not (eq .Values.tap.docker.tag "") | ternary .Values.tap.docker.tag (include "kubeshark.defaultVersion" .) }}'
|
||||
{{- end }}
|
||||
{{- if .Values.tap.delayedDissection.cpu }}
|
||||
- -dissector-cpu
|
||||
- '{{ .Values.tap.delayedDissection.cpu }}'
|
||||
@@ -57,12 +61,34 @@ spec:
|
||||
{{- end }}
|
||||
- -cloud-api-url
|
||||
- '{{ .Values.cloudApiUrl }}'
|
||||
{{- if .Values.tap.secrets }}
|
||||
{{- if .Values.tap.snapshots.cloud.provider }}
|
||||
- -cloud-storage-provider
|
||||
- '{{ .Values.tap.snapshots.cloud.provider }}'
|
||||
{{- end }}
|
||||
{{- $hasInlineConfig := or .Values.tap.snapshots.cloud.prefix .Values.tap.snapshots.cloud.s3.bucket .Values.tap.snapshots.cloud.s3.region .Values.tap.snapshots.cloud.s3.roleArn .Values.tap.snapshots.cloud.s3.externalId .Values.tap.snapshots.cloud.azblob.storageAccount .Values.tap.snapshots.cloud.azblob.container .Values.tap.snapshots.cloud.gcs.bucket .Values.tap.snapshots.cloud.gcs.project }}
|
||||
{{- $hasInlineSecrets := or .Values.tap.snapshots.cloud.s3.accessKey .Values.tap.snapshots.cloud.s3.secretKey .Values.tap.snapshots.cloud.azblob.storageKey .Values.tap.snapshots.cloud.gcs.credentialsJson }}
|
||||
{{- if or .Values.tap.secrets .Values.tap.snapshots.cloud.configMaps .Values.tap.snapshots.cloud.secrets $hasInlineConfig $hasInlineSecrets }}
|
||||
envFrom:
|
||||
{{- range .Values.tap.secrets }}
|
||||
- secretRef:
|
||||
name: {{ . }}
|
||||
{{- end }}
|
||||
{{- range .Values.tap.snapshots.cloud.configMaps }}
|
||||
- configMapRef:
|
||||
name: {{ . }}
|
||||
{{- end }}
|
||||
{{- range .Values.tap.snapshots.cloud.secrets }}
|
||||
- secretRef:
|
||||
name: {{ . }}
|
||||
{{- end }}
|
||||
{{- if $hasInlineConfig }}
|
||||
- configMapRef:
|
||||
name: {{ include "kubeshark.name" . }}-cloud-config
|
||||
{{- end }}
|
||||
{{- if $hasInlineSecrets }}
|
||||
- secretRef:
|
||||
name: {{ include "kubeshark.name" . }}-cloud-secret
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
env:
|
||||
- name: POD_NAME
|
||||
@@ -184,10 +210,10 @@ spec:
|
||||
- key: AUTH_SAML_X509_KEY
|
||||
path: kubeshark.key
|
||||
- name: snapshots-volume
|
||||
{{- if .Values.tap.snapshots.storageClass }}
|
||||
{{- if .Values.tap.snapshots.local.storageClass }}
|
||||
persistentVolumeClaim:
|
||||
claimName: {{ include "kubeshark.name" . }}-snapshots-pvc
|
||||
{{- else }}
|
||||
emptyDir:
|
||||
sizeLimit: {{ .Values.tap.snapshots.storageSize }}
|
||||
sizeLimit: {{ .Values.tap.snapshots.local.storageSize }}
|
||||
{{- end }}
|
||||
|
||||
@@ -26,15 +26,15 @@ spec:
|
||||
- env:
|
||||
- name: REACT_APP_AUTH_ENABLED
|
||||
value: '{{- if or (and .Values.cloudLicenseEnabled (not (empty .Values.license))) (not .Values.internetConnectivity) -}}
|
||||
{{ (and .Values.tap.auth.enabled (eq .Values.tap.auth.type "dex")) | ternary true false }}
|
||||
{{ (default false .Values.demoModeEnabled) | ternary true ((and .Values.tap.auth.enabled (eq .Values.tap.auth.type "dex")) | ternary true false) }}
|
||||
{{- else -}}
|
||||
{{ .Values.cloudLicenseEnabled | ternary "true" .Values.tap.auth.enabled }}
|
||||
{{ .Values.cloudLicenseEnabled | ternary "true" ((default false .Values.demoModeEnabled) | ternary "true" .Values.tap.auth.enabled) }}
|
||||
{{- end }}'
|
||||
- name: REACT_APP_AUTH_TYPE
|
||||
value: '{{- if and .Values.cloudLicenseEnabled (not (eq .Values.tap.auth.type "dex")) -}}
|
||||
default
|
||||
{{- else -}}
|
||||
{{ .Values.tap.auth.type }}
|
||||
{{ (default false .Values.demoModeEnabled) | ternary "default" .Values.tap.auth.type }}
|
||||
{{- end }}'
|
||||
- name: REACT_APP_COMPLETE_STREAMING_ENABLED
|
||||
value: '{{- if and (hasKey .Values.tap "dashboard") (hasKey .Values.tap.dashboard "completeStreamingEnabled") -}}
|
||||
@@ -48,29 +48,29 @@ spec:
|
||||
value: '{{ not (eq .Values.tap.auth.saml.idpMetadataUrl "") | ternary .Values.tap.auth.saml.idpMetadataUrl " " }}'
|
||||
- name: REACT_APP_TIMEZONE
|
||||
value: '{{ not (eq .Values.timezone "") | ternary .Values.timezone " " }}'
|
||||
- name: REACT_APP_SCRIPTING_DISABLED
|
||||
value: '{{- if .Values.tap.liveConfigMapChangesDisabled -}}
|
||||
{{- if .Values.demoModeEnabled -}}
|
||||
{{ .Values.demoModeEnabled | ternary false true }}
|
||||
{{- else -}}
|
||||
true
|
||||
{{- end }}
|
||||
- name: REACT_APP_SCRIPTING_HIDDEN
|
||||
value: '{{- if and .Values.scripting (eq (.Values.scripting.enabled | toString) "false") -}}
|
||||
true
|
||||
{{- else -}}
|
||||
false
|
||||
{{- end }}'
|
||||
- name: REACT_APP_SCRIPTING_DISABLED
|
||||
value: '{{ default false .Values.demoModeEnabled }}'
|
||||
- name: REACT_APP_TARGETED_PODS_UPDATE_DISABLED
|
||||
value: '{{ .Values.tap.liveConfigMapChangesDisabled }}'
|
||||
value: '{{ default false .Values.demoModeEnabled }}'
|
||||
- name: REACT_APP_PRESET_FILTERS_CHANGING_ENABLED
|
||||
value: '{{ .Values.tap.liveConfigMapChangesDisabled | ternary "false" "true" }}'
|
||||
value: '{{ not (default false .Values.demoModeEnabled) }}'
|
||||
- name: REACT_APP_BPF_OVERRIDE_DISABLED
|
||||
value: '{{ eq .Values.tap.packetCapture "af_packet" | ternary "false" "true" }}'
|
||||
- name: REACT_APP_RECORDING_DISABLED
|
||||
value: '{{ .Values.tap.liveConfigMapChangesDisabled }}'
|
||||
- name: REACT_APP_STOP_TRAFFIC_CAPTURING_DISABLED
|
||||
value: '{{- if and .Values.tap.liveConfigMapChangesDisabled .Values.tap.capture.stopped -}}
|
||||
false
|
||||
value: '{{ default false .Values.demoModeEnabled }}'
|
||||
- name: REACT_APP_DISSECTION_ENABLED
|
||||
value: '{{ .Values.tap.capture.dissection.enabled | ternary "true" "false" }}'
|
||||
- name: REACT_APP_DISSECTION_CONTROL_ENABLED
|
||||
value: '{{- if and (not .Values.demoModeEnabled) (not .Values.tap.capture.dissection.enabled) -}}
|
||||
true
|
||||
{{- else -}}
|
||||
{{ .Values.tap.liveConfigMapChangesDisabled | ternary "true" "false" }}
|
||||
{{ not (default false .Values.demoModeEnabled) | ternary false true }}
|
||||
{{- end -}}'
|
||||
- name: 'REACT_APP_CLOUD_LICENSE_ENABLED'
|
||||
value: '{{- if or (and .Values.cloudLicenseEnabled (not (empty .Values.license))) (not .Values.internetConnectivity) -}}
|
||||
@@ -83,7 +83,13 @@ spec:
|
||||
- name: REACT_APP_BETA_ENABLED
|
||||
value: '{{ default false .Values.betaEnabled | ternary "true" "false" }}'
|
||||
- name: REACT_APP_DISSECTORS_UPDATING_ENABLED
|
||||
value: '{{ .Values.tap.liveConfigMapChangesDisabled | ternary "false" "true" }}'
|
||||
value: '{{ not (default false .Values.demoModeEnabled) }}'
|
||||
- name: REACT_APP_SNAPSHOTS_UPDATING_ENABLED
|
||||
value: '{{ not (default false .Values.demoModeEnabled) }}'
|
||||
- name: REACT_APP_DEMO_MODE_ENABLED
|
||||
value: '{{ default false .Values.demoModeEnabled }}'
|
||||
- name: REACT_APP_CLUSTER_WIDE_MAP_ENABLED
|
||||
value: '{{ default false (((.Values).tap).dashboard).clusterWideMapEnabled }}'
|
||||
- name: REACT_APP_RAW_CAPTURE_ENABLED
|
||||
value: '{{ .Values.tap.capture.raw.enabled | ternary "true" "false" }}'
|
||||
- name: REACT_APP_SENTRY_ENABLED
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
---
|
||||
{{- if .Values.tap.snapshots.storageClass }}
|
||||
{{- if .Values.tap.snapshots.local.storageClass }}
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
@@ -16,7 +16,7 @@ spec:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: {{ .Values.tap.snapshots.storageSize }}
|
||||
storageClassName: {{ .Values.tap.snapshots.storageClass }}
|
||||
storage: {{ .Values.tap.snapshots.local.storageSize }}
|
||||
storageClassName: {{ .Values.tap.snapshots.local.storageClass }}
|
||||
status: {}
|
||||
{{- end }}
|
||||
|
||||
@@ -99,6 +99,10 @@ spec:
|
||||
- '{{ .Values.tap.misc.resolutionStrategy }}'
|
||||
- -staletimeout
|
||||
- '{{ .Values.tap.misc.staleTimeoutSeconds }}'
|
||||
- -tcp-flow-full-timeout
|
||||
- '{{ .Values.tap.misc.tcpFlowTimeout }}'
|
||||
- -udp-flow-full-timeout
|
||||
- '{{ .Values.tap.misc.udpFlowTimeout }}'
|
||||
- -storage-size
|
||||
- '{{ .Values.tap.storageLimit }}'
|
||||
- -capture-db-max-size
|
||||
|
||||
@@ -30,8 +30,10 @@ data:
|
||||
proxy_set_header Authorization $http_authorization;
|
||||
proxy_pass_header Authorization;
|
||||
proxy_connect_timeout 4s;
|
||||
proxy_read_timeout 120s;
|
||||
proxy_send_timeout 12s;
|
||||
# Disable buffering for gRPC/Connect streaming
|
||||
client_max_body_size 0;
|
||||
proxy_request_buffering off;
|
||||
proxy_buffering off;
|
||||
proxy_pass_request_headers on;
|
||||
}
|
||||
|
||||
@@ -86,4 +88,3 @@ data:
|
||||
root /usr/share/nginx/html;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -11,7 +11,7 @@ data:
|
||||
NAMESPACES: '{{ gt (len .Values.tap.namespaces) 0 | ternary (join "," .Values.tap.namespaces) "" }}'
|
||||
EXCLUDED_NAMESPACES: '{{ gt (len .Values.tap.excludedNamespaces) 0 | ternary (join "," .Values.tap.excludedNamespaces) "" }}'
|
||||
BPF_OVERRIDE: '{{ .Values.tap.bpfOverride }}'
|
||||
STOPPED: '{{ .Values.tap.capture.stopped | ternary "true" "false" }}'
|
||||
DISSECTION_ENABLED: '{{ .Values.tap.capture.dissection.enabled | ternary "true" "false" }}'
|
||||
CAPTURE_SELF: '{{ .Values.tap.capture.captureSelf | ternary "true" "false" }}'
|
||||
SCRIPTING_SCRIPTS: '{}'
|
||||
SCRIPTING_ACTIVE_SCRIPTS: '{{ gt (len .Values.scripting.active) 0 | ternary (join "," .Values.scripting.active) "" }}'
|
||||
@@ -19,14 +19,14 @@ data:
|
||||
INGRESS_HOST: '{{ .Values.tap.ingress.host }}'
|
||||
PROXY_FRONT_PORT: '{{ .Values.tap.proxy.front.port }}'
|
||||
AUTH_ENABLED: '{{- if and .Values.cloudLicenseEnabled (not (empty .Values.license)) -}}
|
||||
{{ and .Values.tap.auth.enabled (eq .Values.tap.auth.type "dex") | ternary true false }}
|
||||
{{ (default false .Values.demoModeEnabled) | ternary true ((and .Values.tap.auth.enabled (eq .Values.tap.auth.type "dex")) | ternary true false) }}
|
||||
{{- else -}}
|
||||
{{ .Values.cloudLicenseEnabled | ternary "true" (.Values.tap.auth.enabled | ternary "true" "") }}
|
||||
{{ .Values.cloudLicenseEnabled | ternary "true" ((default false .Values.demoModeEnabled) | ternary "true" .Values.tap.auth.enabled) }}
|
||||
{{- end }}'
|
||||
AUTH_TYPE: '{{- if and .Values.cloudLicenseEnabled (not (eq .Values.tap.auth.type "dex")) -}}
|
||||
default
|
||||
{{- else -}}
|
||||
{{ .Values.tap.auth.type }}
|
||||
{{ (default false .Values.demoModeEnabled) | ternary "default" .Values.tap.auth.type }}
|
||||
{{- end }}'
|
||||
AUTH_SAML_IDP_METADATA_URL: '{{ .Values.tap.auth.saml.idpMetadataUrl }}'
|
||||
AUTH_SAML_ROLE_ATTRIBUTE: '{{ .Values.tap.auth.saml.roleAttribute }}'
|
||||
@@ -44,23 +44,15 @@ data:
|
||||
false
|
||||
{{- end }}'
|
||||
TELEMETRY_DISABLED: '{{ not .Values.internetConnectivity | ternary "true" (not .Values.tap.telemetry.enabled | ternary "true" "false") }}'
|
||||
SCRIPTING_DISABLED: '{{- if .Values.tap.liveConfigMapChangesDisabled -}}
|
||||
{{- if .Values.demoModeEnabled -}}
|
||||
{{ .Values.demoModeEnabled | ternary false true }}
|
||||
{{- else -}}
|
||||
true
|
||||
{{- end }}
|
||||
{{- else -}}
|
||||
false
|
||||
{{- end }}'
|
||||
TARGETED_PODS_UPDATE_DISABLED: '{{ .Values.tap.liveConfigMapChangesDisabled | ternary "true" "" }}'
|
||||
PRESET_FILTERS_CHANGING_ENABLED: '{{ .Values.tap.liveConfigMapChangesDisabled | ternary "false" "true" }}'
|
||||
RECORDING_DISABLED: '{{ .Values.tap.liveConfigMapChangesDisabled | ternary "true" "" }}'
|
||||
STOP_TRAFFIC_CAPTURING_DISABLED: '{{- if and .Values.tap.liveConfigMapChangesDisabled .Values.tap.capture.stopped -}}
|
||||
false
|
||||
{{- else -}}
|
||||
{{ .Values.tap.liveConfigMapChangesDisabled | ternary "true" "false" }}
|
||||
{{- end }}'
|
||||
SCRIPTING_DISABLED: '{{ default false .Values.demoModeEnabled }}'
|
||||
TARGETED_PODS_UPDATE_DISABLED: '{{ default false .Values.demoModeEnabled }}'
|
||||
PRESET_FILTERS_CHANGING_ENABLED: '{{ not (default false .Values.demoModeEnabled) }}'
|
||||
RECORDING_DISABLED: '{{ (default false .Values.demoModeEnabled) | ternary true false }}'
|
||||
DISSECTION_CONTROL_ENABLED: '{{- if and (not .Values.demoModeEnabled) (not .Values.tap.capture.dissection.enabled) -}}
|
||||
true
|
||||
{{- else -}}
|
||||
{{ (default false .Values.demoModeEnabled) | ternary false true }}
|
||||
{{- end }}'
|
||||
GLOBAL_FILTER: {{ include "kubeshark.escapeDoubleQuotes" .Values.tap.globalFilter | quote }}
|
||||
DEFAULT_FILTER: {{ include "kubeshark.escapeDoubleQuotes" .Values.tap.defaultFilter | quote }}
|
||||
TRAFFIC_SAMPLE_RATE: '{{ .Values.tap.misc.trafficSampleRate }}'
|
||||
@@ -76,12 +68,14 @@ data:
|
||||
DUPLICATE_TIMEFRAME: '{{ .Values.tap.misc.duplicateTimeframe }}'
|
||||
ENABLED_DISSECTORS: '{{ gt (len .Values.tap.enabledDissectors) 0 | ternary (join "," .Values.tap.enabledDissectors) "" }}'
|
||||
CUSTOM_MACROS: '{{ toJson .Values.tap.customMacros }}'
|
||||
DISSECTORS_UPDATING_ENABLED: '{{ .Values.tap.liveConfigMapChangesDisabled | ternary "false" "true" }}'
|
||||
DISSECTORS_UPDATING_ENABLED: '{{ not (default false .Values.demoModeEnabled) }}'
|
||||
SNAPSHOTS_UPDATING_ENABLED: '{{ not (default false .Values.demoModeEnabled) }}'
|
||||
DEMO_MODE_ENABLED: '{{ default false .Values.demoModeEnabled }}'
|
||||
DETECT_DUPLICATES: '{{ .Values.tap.misc.detectDuplicates | ternary "true" "false" }}'
|
||||
PCAP_DUMP_ENABLE: '{{ .Values.pcapdump.enabled }}'
|
||||
PCAP_TIME_INTERVAL: '{{ .Values.pcapdump.timeInterval }}'
|
||||
PCAP_MAX_TIME: '{{ .Values.pcapdump.maxTime }}'
|
||||
PCAP_MAX_SIZE: '{{ .Values.pcapdump.maxSize }}'
|
||||
PORT_MAPPING: '{{ toJson .Values.tap.portMapping }}'
|
||||
RAW_CAPTURE: '{{ .Values.tap.capture.raw.enabled | ternary "true" "false" }}'
|
||||
RAW_CAPTURE_ENABLED: '{{ .Values.tap.capture.raw.enabled | ternary "true" "false" }}'
|
||||
RAW_CAPTURE_STORAGE_SIZE: '{{ .Values.tap.capture.raw.storageSize }}'
|
||||
|
||||
64
helm-chart/templates/21-cloud-storage.yaml
Normal file
64
helm-chart/templates/21-cloud-storage.yaml
Normal file
@@ -0,0 +1,64 @@
|
||||
{{- $hasConfigValues := or .Values.tap.snapshots.cloud.prefix .Values.tap.snapshots.cloud.s3.bucket .Values.tap.snapshots.cloud.s3.region .Values.tap.snapshots.cloud.s3.roleArn .Values.tap.snapshots.cloud.s3.externalId .Values.tap.snapshots.cloud.azblob.storageAccount .Values.tap.snapshots.cloud.azblob.container .Values.tap.snapshots.cloud.gcs.bucket .Values.tap.snapshots.cloud.gcs.project -}}
|
||||
{{- $hasSecretValues := or .Values.tap.snapshots.cloud.s3.accessKey .Values.tap.snapshots.cloud.s3.secretKey .Values.tap.snapshots.cloud.azblob.storageKey .Values.tap.snapshots.cloud.gcs.credentialsJson -}}
|
||||
{{- if $hasConfigValues }}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
name: {{ include "kubeshark.name" . }}-cloud-config
|
||||
namespace: {{ .Release.Namespace }}
|
||||
data:
|
||||
{{- if .Values.tap.snapshots.cloud.prefix }}
|
||||
SNAPSHOT_CLOUD_PREFIX: {{ .Values.tap.snapshots.cloud.prefix | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.tap.snapshots.cloud.s3.bucket }}
|
||||
SNAPSHOT_AWS_BUCKET: {{ .Values.tap.snapshots.cloud.s3.bucket | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.tap.snapshots.cloud.s3.region }}
|
||||
SNAPSHOT_AWS_REGION: {{ .Values.tap.snapshots.cloud.s3.region | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.tap.snapshots.cloud.s3.roleArn }}
|
||||
SNAPSHOT_AWS_ROLE_ARN: {{ .Values.tap.snapshots.cloud.s3.roleArn | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.tap.snapshots.cloud.s3.externalId }}
|
||||
SNAPSHOT_AWS_EXTERNAL_ID: {{ .Values.tap.snapshots.cloud.s3.externalId | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.tap.snapshots.cloud.azblob.storageAccount }}
|
||||
SNAPSHOT_AZBLOB_STORAGE_ACCOUNT: {{ .Values.tap.snapshots.cloud.azblob.storageAccount | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.tap.snapshots.cloud.azblob.container }}
|
||||
SNAPSHOT_AZBLOB_CONTAINER: {{ .Values.tap.snapshots.cloud.azblob.container | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.tap.snapshots.cloud.gcs.bucket }}
|
||||
SNAPSHOT_GCS_BUCKET: {{ .Values.tap.snapshots.cloud.gcs.bucket | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.tap.snapshots.cloud.gcs.project }}
|
||||
SNAPSHOT_GCS_PROJECT: {{ .Values.tap.snapshots.cloud.gcs.project | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if $hasSecretValues }}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
labels:
|
||||
{{- include "kubeshark.labels" . | nindent 4 }}
|
||||
name: {{ include "kubeshark.name" . }}-cloud-secret
|
||||
namespace: {{ .Release.Namespace }}
|
||||
type: Opaque
|
||||
stringData:
|
||||
{{- if .Values.tap.snapshots.cloud.s3.accessKey }}
|
||||
SNAPSHOT_AWS_ACCESS_KEY: {{ .Values.tap.snapshots.cloud.s3.accessKey | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.tap.snapshots.cloud.s3.secretKey }}
|
||||
SNAPSHOT_AWS_SECRET_KEY: {{ .Values.tap.snapshots.cloud.s3.secretKey | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.tap.snapshots.cloud.azblob.storageKey }}
|
||||
SNAPSHOT_AZBLOB_STORAGE_KEY: {{ .Values.tap.snapshots.cloud.azblob.storageKey | quote }}
|
||||
{{- end }}
|
||||
{{- if .Values.tap.snapshots.cloud.gcs.credentialsJson }}
|
||||
SNAPSHOT_GCS_CREDENTIALS_JSON: {{ .Values.tap.snapshots.cloud.gcs.credentialsJson | quote }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
248
helm-chart/tests/cloud_storage_test.yaml
Normal file
248
helm-chart/tests/cloud_storage_test.yaml
Normal file
@@ -0,0 +1,248 @@
|
||||
suite: cloud storage template
|
||||
templates:
|
||||
- templates/21-cloud-storage.yaml
|
||||
tests:
|
||||
- it: should render nothing with default values
|
||||
asserts:
|
||||
- hasDocuments:
|
||||
count: 0
|
||||
|
||||
- it: should render ConfigMap with S3 config only
|
||||
set:
|
||||
tap.snapshots.cloud.s3.bucket: my-bucket
|
||||
tap.snapshots.cloud.s3.region: us-east-1
|
||||
asserts:
|
||||
- hasDocuments:
|
||||
count: 1
|
||||
- isKind:
|
||||
of: ConfigMap
|
||||
documentIndex: 0
|
||||
- equal:
|
||||
path: metadata.name
|
||||
value: RELEASE-NAME-cloud-config
|
||||
documentIndex: 0
|
||||
- equal:
|
||||
path: data.SNAPSHOT_AWS_BUCKET
|
||||
value: "my-bucket"
|
||||
documentIndex: 0
|
||||
- equal:
|
||||
path: data.SNAPSHOT_AWS_REGION
|
||||
value: "us-east-1"
|
||||
documentIndex: 0
|
||||
- notExists:
|
||||
path: data.SNAPSHOT_AWS_ACCESS_KEY
|
||||
documentIndex: 0
|
||||
|
||||
- it: should render ConfigMap and Secret with S3 config and credentials
|
||||
set:
|
||||
tap.snapshots.cloud.s3.bucket: my-bucket
|
||||
tap.snapshots.cloud.s3.region: us-east-1
|
||||
tap.snapshots.cloud.s3.accessKey: AKIAIOSFODNN7EXAMPLE
|
||||
tap.snapshots.cloud.s3.secretKey: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
|
||||
asserts:
|
||||
- hasDocuments:
|
||||
count: 2
|
||||
- isKind:
|
||||
of: ConfigMap
|
||||
documentIndex: 0
|
||||
- equal:
|
||||
path: data.SNAPSHOT_AWS_BUCKET
|
||||
value: "my-bucket"
|
||||
documentIndex: 0
|
||||
- equal:
|
||||
path: data.SNAPSHOT_AWS_REGION
|
||||
value: "us-east-1"
|
||||
documentIndex: 0
|
||||
- isKind:
|
||||
of: Secret
|
||||
documentIndex: 1
|
||||
- equal:
|
||||
path: metadata.name
|
||||
value: RELEASE-NAME-cloud-secret
|
||||
documentIndex: 1
|
||||
- equal:
|
||||
path: stringData.SNAPSHOT_AWS_ACCESS_KEY
|
||||
value: "AKIAIOSFODNN7EXAMPLE"
|
||||
documentIndex: 1
|
||||
- equal:
|
||||
path: stringData.SNAPSHOT_AWS_SECRET_KEY
|
||||
value: "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
|
||||
documentIndex: 1
|
||||
|
||||
- it: should render ConfigMap with Azure Blob config only
|
||||
set:
|
||||
tap.snapshots.cloud.azblob.storageAccount: myaccount
|
||||
tap.snapshots.cloud.azblob.container: mycontainer
|
||||
asserts:
|
||||
- hasDocuments:
|
||||
count: 1
|
||||
- isKind:
|
||||
of: ConfigMap
|
||||
documentIndex: 0
|
||||
- equal:
|
||||
path: data.SNAPSHOT_AZBLOB_STORAGE_ACCOUNT
|
||||
value: "myaccount"
|
||||
documentIndex: 0
|
||||
- equal:
|
||||
path: data.SNAPSHOT_AZBLOB_CONTAINER
|
||||
value: "mycontainer"
|
||||
documentIndex: 0
|
||||
|
||||
- it: should render ConfigMap and Secret with Azure Blob config and storage key
|
||||
set:
|
||||
tap.snapshots.cloud.azblob.storageAccount: myaccount
|
||||
tap.snapshots.cloud.azblob.container: mycontainer
|
||||
tap.snapshots.cloud.azblob.storageKey: c29tZWtleQ==
|
||||
asserts:
|
||||
- hasDocuments:
|
||||
count: 2
|
||||
- isKind:
|
||||
of: ConfigMap
|
||||
documentIndex: 0
|
||||
- equal:
|
||||
path: data.SNAPSHOT_AZBLOB_STORAGE_ACCOUNT
|
||||
value: "myaccount"
|
||||
documentIndex: 0
|
||||
- isKind:
|
||||
of: Secret
|
||||
documentIndex: 1
|
||||
- equal:
|
||||
path: stringData.SNAPSHOT_AZBLOB_STORAGE_KEY
|
||||
value: "c29tZWtleQ=="
|
||||
documentIndex: 1
|
||||
|
||||
- it: should render ConfigMap with GCS config only
|
||||
set:
|
||||
tap.snapshots.cloud.gcs.bucket: my-gcs-bucket
|
||||
tap.snapshots.cloud.gcs.project: my-gcp-project
|
||||
asserts:
|
||||
- hasDocuments:
|
||||
count: 1
|
||||
- isKind:
|
||||
of: ConfigMap
|
||||
documentIndex: 0
|
||||
- equal:
|
||||
path: data.SNAPSHOT_GCS_BUCKET
|
||||
value: "my-gcs-bucket"
|
||||
documentIndex: 0
|
||||
- equal:
|
||||
path: data.SNAPSHOT_GCS_PROJECT
|
||||
value: "my-gcp-project"
|
||||
documentIndex: 0
|
||||
- notExists:
|
||||
path: data.SNAPSHOT_GCS_CREDENTIALS_JSON
|
||||
documentIndex: 0
|
||||
|
||||
- it: should render ConfigMap and Secret with GCS config and credentials
|
||||
set:
|
||||
tap.snapshots.cloud.gcs.bucket: my-gcs-bucket
|
||||
tap.snapshots.cloud.gcs.project: my-gcp-project
|
||||
tap.snapshots.cloud.gcs.credentialsJson: '{"type":"service_account"}'
|
||||
asserts:
|
||||
- hasDocuments:
|
||||
count: 2
|
||||
- isKind:
|
||||
of: ConfigMap
|
||||
documentIndex: 0
|
||||
- equal:
|
||||
path: data.SNAPSHOT_GCS_BUCKET
|
||||
value: "my-gcs-bucket"
|
||||
documentIndex: 0
|
||||
- equal:
|
||||
path: data.SNAPSHOT_GCS_PROJECT
|
||||
value: "my-gcp-project"
|
||||
documentIndex: 0
|
||||
- isKind:
|
||||
of: Secret
|
||||
documentIndex: 1
|
||||
- equal:
|
||||
path: metadata.name
|
||||
value: RELEASE-NAME-cloud-secret
|
||||
documentIndex: 1
|
||||
- equal:
|
||||
path: stringData.SNAPSHOT_GCS_CREDENTIALS_JSON
|
||||
value: '{"type":"service_account"}'
|
||||
documentIndex: 1
|
||||
|
||||
- it: should render ConfigMap with GCS bucket only (no project)
|
||||
set:
|
||||
tap.snapshots.cloud.gcs.bucket: my-gcs-bucket
|
||||
asserts:
|
||||
- hasDocuments:
|
||||
count: 1
|
||||
- isKind:
|
||||
of: ConfigMap
|
||||
documentIndex: 0
|
||||
- equal:
|
||||
path: data.SNAPSHOT_GCS_BUCKET
|
||||
value: "my-gcs-bucket"
|
||||
documentIndex: 0
|
||||
- notExists:
|
||||
path: data.SNAPSHOT_GCS_PROJECT
|
||||
documentIndex: 0
|
||||
|
||||
- it: should render ConfigMap with only prefix
|
||||
set:
|
||||
tap.snapshots.cloud.prefix: snapshots/prod
|
||||
asserts:
|
||||
- hasDocuments:
|
||||
count: 1
|
||||
- isKind:
|
||||
of: ConfigMap
|
||||
documentIndex: 0
|
||||
- equal:
|
||||
path: data.SNAPSHOT_CLOUD_PREFIX
|
||||
value: "snapshots/prod"
|
||||
documentIndex: 0
|
||||
- notExists:
|
||||
path: data.SNAPSHOT_AWS_BUCKET
|
||||
documentIndex: 0
|
||||
- notExists:
|
||||
path: data.SNAPSHOT_AZBLOB_STORAGE_ACCOUNT
|
||||
documentIndex: 0
|
||||
- notExists:
|
||||
path: data.SNAPSHOT_GCS_BUCKET
|
||||
documentIndex: 0
|
||||
|
||||
- it: should render ConfigMap with role ARN without credentials (IAM auth)
|
||||
set:
|
||||
tap.snapshots.cloud.s3.bucket: my-bucket
|
||||
tap.snapshots.cloud.s3.region: us-east-1
|
||||
tap.snapshots.cloud.s3.roleArn: arn:aws:iam::123456789012:role/my-role
|
||||
asserts:
|
||||
- hasDocuments:
|
||||
count: 1
|
||||
- isKind:
|
||||
of: ConfigMap
|
||||
documentIndex: 0
|
||||
- equal:
|
||||
path: data.SNAPSHOT_AWS_ROLE_ARN
|
||||
value: "arn:aws:iam::123456789012:role/my-role"
|
||||
documentIndex: 0
|
||||
- equal:
|
||||
path: data.SNAPSHOT_AWS_BUCKET
|
||||
value: "my-bucket"
|
||||
documentIndex: 0
|
||||
|
||||
- it: should render ConfigMap with externalId
|
||||
set:
|
||||
tap.snapshots.cloud.s3.bucket: my-bucket
|
||||
tap.snapshots.cloud.s3.externalId: ext-12345
|
||||
asserts:
|
||||
- hasDocuments:
|
||||
count: 1
|
||||
- equal:
|
||||
path: data.SNAPSHOT_AWS_EXTERNAL_ID
|
||||
value: "ext-12345"
|
||||
documentIndex: 0
|
||||
|
||||
- it: should set correct namespace
|
||||
release:
|
||||
namespace: kubeshark-ns
|
||||
set:
|
||||
tap.snapshots.cloud.s3.bucket: my-bucket
|
||||
asserts:
|
||||
- equal:
|
||||
path: metadata.namespace
|
||||
value: kubeshark-ns
|
||||
documentIndex: 0
|
||||
9
helm-chart/tests/fixtures/values-azblob.yaml
vendored
Normal file
9
helm-chart/tests/fixtures/values-azblob.yaml
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
tap:
|
||||
snapshots:
|
||||
cloud:
|
||||
provider: azblob
|
||||
prefix: snapshots/
|
||||
azblob:
|
||||
storageAccount: kubesharkstore
|
||||
container: snapshots
|
||||
storageKey: c29tZWtleWhlcmU=
|
||||
8
helm-chart/tests/fixtures/values-cloud-refs.yaml
vendored
Normal file
8
helm-chart/tests/fixtures/values-cloud-refs.yaml
vendored
Normal file
@@ -0,0 +1,8 @@
|
||||
tap:
|
||||
snapshots:
|
||||
cloud:
|
||||
provider: s3
|
||||
configMaps:
|
||||
- my-cloud-config
|
||||
secrets:
|
||||
- my-cloud-secret
|
||||
9
helm-chart/tests/fixtures/values-gcs.yaml
vendored
Normal file
9
helm-chart/tests/fixtures/values-gcs.yaml
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
tap:
|
||||
snapshots:
|
||||
cloud:
|
||||
provider: gcs
|
||||
prefix: snapshots/
|
||||
gcs:
|
||||
bucket: kubeshark-snapshots
|
||||
project: my-gcp-project
|
||||
credentialsJson: '{"type":"service_account","project_id":"my-gcp-project"}'
|
||||
10
helm-chart/tests/fixtures/values-s3.yaml
vendored
Normal file
10
helm-chart/tests/fixtures/values-s3.yaml
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
tap:
|
||||
snapshots:
|
||||
cloud:
|
||||
provider: s3
|
||||
prefix: snapshots/
|
||||
s3:
|
||||
bucket: kubeshark-snapshots
|
||||
region: us-east-1
|
||||
accessKey: AKIAIOSFODNN7EXAMPLE
|
||||
secretKey: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
|
||||
167
helm-chart/tests/hub_deployment_test.yaml
Normal file
167
helm-chart/tests/hub_deployment_test.yaml
Normal file
@@ -0,0 +1,167 @@
|
||||
suite: hub deployment cloud integration
|
||||
templates:
|
||||
- templates/04-hub-deployment.yaml
|
||||
tests:
|
||||
- it: should not render envFrom with default values
|
||||
asserts:
|
||||
- isKind:
|
||||
of: Deployment
|
||||
- notContains:
|
||||
path: spec.template.spec.containers[0].envFrom
|
||||
any: true
|
||||
content:
|
||||
configMapRef:
|
||||
name: RELEASE-NAME-cloud-config
|
||||
|
||||
- it: should render envFrom with inline S3 config
|
||||
set:
|
||||
tap.snapshots.cloud.s3.bucket: my-bucket
|
||||
tap.snapshots.cloud.s3.region: us-east-1
|
||||
asserts:
|
||||
- contains:
|
||||
path: spec.template.spec.containers[0].envFrom
|
||||
content:
|
||||
configMapRef:
|
||||
name: RELEASE-NAME-cloud-config
|
||||
|
||||
- it: should render envFrom secret ref with inline credentials
|
||||
set:
|
||||
tap.snapshots.cloud.s3.bucket: my-bucket
|
||||
tap.snapshots.cloud.s3.accessKey: AKIAIOSFODNN7EXAMPLE
|
||||
tap.snapshots.cloud.s3.secretKey: secret
|
||||
asserts:
|
||||
- contains:
|
||||
path: spec.template.spec.containers[0].envFrom
|
||||
content:
|
||||
configMapRef:
|
||||
name: RELEASE-NAME-cloud-config
|
||||
- contains:
|
||||
path: spec.template.spec.containers[0].envFrom
|
||||
content:
|
||||
secretRef:
|
||||
name: RELEASE-NAME-cloud-secret
|
||||
|
||||
- it: should render envFrom with inline GCS config
|
||||
set:
|
||||
tap.snapshots.cloud.gcs.bucket: my-gcs-bucket
|
||||
tap.snapshots.cloud.gcs.project: my-gcp-project
|
||||
asserts:
|
||||
- contains:
|
||||
path: spec.template.spec.containers[0].envFrom
|
||||
content:
|
||||
configMapRef:
|
||||
name: RELEASE-NAME-cloud-config
|
||||
|
||||
- it: should render envFrom secret ref with inline GCS credentials
|
||||
set:
|
||||
tap.snapshots.cloud.gcs.bucket: my-gcs-bucket
|
||||
tap.snapshots.cloud.gcs.credentialsJson: '{"type":"service_account"}'
|
||||
asserts:
|
||||
- contains:
|
||||
path: spec.template.spec.containers[0].envFrom
|
||||
content:
|
||||
configMapRef:
|
||||
name: RELEASE-NAME-cloud-config
|
||||
- contains:
|
||||
path: spec.template.spec.containers[0].envFrom
|
||||
content:
|
||||
secretRef:
|
||||
name: RELEASE-NAME-cloud-secret
|
||||
|
||||
- it: should render cloud-storage-provider arg when provider is gcs
|
||||
set:
|
||||
tap.snapshots.cloud.provider: gcs
|
||||
asserts:
|
||||
- contains:
|
||||
path: spec.template.spec.containers[0].command
|
||||
content: -cloud-storage-provider
|
||||
- contains:
|
||||
path: spec.template.spec.containers[0].command
|
||||
content: gcs
|
||||
|
||||
- it: should render envFrom with external configMaps
|
||||
set:
|
||||
tap.snapshots.cloud.configMaps:
|
||||
- my-cloud-config
|
||||
- my-other-config
|
||||
asserts:
|
||||
- contains:
|
||||
path: spec.template.spec.containers[0].envFrom
|
||||
content:
|
||||
configMapRef:
|
||||
name: my-cloud-config
|
||||
- contains:
|
||||
path: spec.template.spec.containers[0].envFrom
|
||||
content:
|
||||
configMapRef:
|
||||
name: my-other-config
|
||||
|
||||
- it: should render envFrom with external secrets
|
||||
set:
|
||||
tap.snapshots.cloud.secrets:
|
||||
- my-cloud-secret
|
||||
asserts:
|
||||
- contains:
|
||||
path: spec.template.spec.containers[0].envFrom
|
||||
content:
|
||||
secretRef:
|
||||
name: my-cloud-secret
|
||||
|
||||
- it: should render cloud-storage-provider arg when provider is set
|
||||
set:
|
||||
tap.snapshots.cloud.provider: s3
|
||||
asserts:
|
||||
- contains:
|
||||
path: spec.template.spec.containers[0].command
|
||||
content: -cloud-storage-provider
|
||||
- contains:
|
||||
path: spec.template.spec.containers[0].command
|
||||
content: s3
|
||||
|
||||
- it: should not render cloud-storage-provider arg with default values
|
||||
asserts:
|
||||
- notContains:
|
||||
path: spec.template.spec.containers[0].command
|
||||
content: -cloud-storage-provider
|
||||
|
||||
- it: should render envFrom with tap.secrets
|
||||
set:
|
||||
tap.secrets:
|
||||
- my-existing-secret
|
||||
asserts:
|
||||
- contains:
|
||||
path: spec.template.spec.containers[0].envFrom
|
||||
content:
|
||||
secretRef:
|
||||
name: my-existing-secret
|
||||
|
||||
- it: should render both inline and external refs together
|
||||
set:
|
||||
tap.snapshots.cloud.s3.bucket: my-bucket
|
||||
tap.snapshots.cloud.s3.accessKey: key
|
||||
tap.snapshots.cloud.s3.secretKey: secret
|
||||
tap.snapshots.cloud.configMaps:
|
||||
- ext-config
|
||||
tap.snapshots.cloud.secrets:
|
||||
- ext-secret
|
||||
asserts:
|
||||
- contains:
|
||||
path: spec.template.spec.containers[0].envFrom
|
||||
content:
|
||||
configMapRef:
|
||||
name: ext-config
|
||||
- contains:
|
||||
path: spec.template.spec.containers[0].envFrom
|
||||
content:
|
||||
secretRef:
|
||||
name: ext-secret
|
||||
- contains:
|
||||
path: spec.template.spec.containers[0].envFrom
|
||||
content:
|
||||
configMapRef:
|
||||
name: RELEASE-NAME-cloud-config
|
||||
- contains:
|
||||
path: spec.template.spec.containers[0].envFrom
|
||||
content:
|
||||
secretRef:
|
||||
name: RELEASE-NAME-cloud-secret
|
||||
@@ -26,24 +26,46 @@ tap:
|
||||
excludedNamespaces: []
|
||||
bpfOverride: ""
|
||||
capture:
|
||||
stopped: false
|
||||
stopAfter: 5m
|
||||
dissection:
|
||||
enabled: true
|
||||
stopAfter: 5m
|
||||
captureSelf: false
|
||||
raw:
|
||||
enabled: true
|
||||
storageSize: 1Gi
|
||||
dbMaxSize: 500Mi
|
||||
delayedDissection:
|
||||
image: kubeshark/worker:master
|
||||
cpu: "1"
|
||||
memory: 4Gi
|
||||
snapshots:
|
||||
storageClass: ""
|
||||
storageSize: 20Gi
|
||||
local:
|
||||
storageClass: ""
|
||||
storageSize: 20Gi
|
||||
cloud:
|
||||
provider: ""
|
||||
prefix: ""
|
||||
configMaps: []
|
||||
secrets: []
|
||||
s3:
|
||||
bucket: ""
|
||||
region: ""
|
||||
accessKey: ""
|
||||
secretKey: ""
|
||||
roleArn: ""
|
||||
externalId: ""
|
||||
azblob:
|
||||
storageAccount: ""
|
||||
container: ""
|
||||
storageKey: ""
|
||||
gcs:
|
||||
bucket: ""
|
||||
project: ""
|
||||
credentialsJson: ""
|
||||
release:
|
||||
repo: https://helm.kubeshark.com
|
||||
name: kubeshark
|
||||
namespace: default
|
||||
helmChartPath: ""
|
||||
persistentStorage: false
|
||||
persistentStorageStatic: false
|
||||
persistentStoragePvcVolumeMode: FileSystem
|
||||
@@ -145,6 +167,7 @@ tap:
|
||||
canDelete: true
|
||||
canUpdateTargetedPods: true
|
||||
canStopTrafficCapturing: true
|
||||
canControlDissection: true
|
||||
showAdminConsoleLink: true
|
||||
ingress:
|
||||
enabled: false
|
||||
@@ -162,6 +185,7 @@ tap:
|
||||
dashboard:
|
||||
streamingType: connect-rpc
|
||||
completeStreamingEnabled: true
|
||||
clusterWideMapEnabled: false
|
||||
telemetry:
|
||||
enabled: true
|
||||
resourceGuard:
|
||||
@@ -174,7 +198,6 @@ tap:
|
||||
enabled: false
|
||||
environment: production
|
||||
defaultFilter: ""
|
||||
liveConfigMapChangesDisabled: false
|
||||
globalFilter: ""
|
||||
enabledDissectors:
|
||||
- amqp
|
||||
@@ -189,8 +212,8 @@ tap:
|
||||
- diameter
|
||||
- udp-flow
|
||||
- tcp-flow
|
||||
- tcp-flow-full
|
||||
- udp-flow-full
|
||||
- udp-conn
|
||||
- tcp-conn
|
||||
portMapping:
|
||||
http:
|
||||
- 80
|
||||
@@ -226,6 +249,8 @@ tap:
|
||||
duplicateTimeframe: 200ms
|
||||
detectDuplicates: false
|
||||
staleTimeoutSeconds: 30
|
||||
tcpFlowTimeout: 1200
|
||||
udpFlowTimeout: 1200
|
||||
securityContext:
|
||||
privileged: true
|
||||
appArmorProfile:
|
||||
@@ -268,13 +293,14 @@ kube:
|
||||
dumpLogs: false
|
||||
headless: false
|
||||
license: ""
|
||||
cloudApiUrl: "https://api.kubeshark.com"
|
||||
cloudApiUrl: https://api.kubeshark.com
|
||||
cloudLicenseEnabled: true
|
||||
demoModeEnabled: false
|
||||
supportChatEnabled: false
|
||||
betaEnabled: false
|
||||
internetConnectivity: true
|
||||
scripting:
|
||||
enabled: false
|
||||
env: {}
|
||||
source: ""
|
||||
sources: []
|
||||
|
||||
@@ -67,7 +67,10 @@ func (h *Helm) Install() (rel *release.Release, err error) {
|
||||
client.Namespace = h.releaseNamespace
|
||||
client.ReleaseName = h.releaseName
|
||||
|
||||
chartPath := os.Getenv(fmt.Sprintf("%s_HELM_CHART_PATH", strings.ToUpper(misc.Program)))
|
||||
chartPath := config.Config.Tap.Release.HelmChartPath
|
||||
if chartPath == "" {
|
||||
chartPath = os.Getenv(fmt.Sprintf("%s_HELM_CHART_PATH", strings.ToUpper(misc.Program)))
|
||||
}
|
||||
if chartPath == "" {
|
||||
var chartURL string
|
||||
chartURL, err = repo.FindChartInRepoURL(h.repo, h.releaseName, "", "", "", "", getter.All(&cli.EnvSettings{}))
|
||||
|
||||
@@ -4,10 +4,10 @@ apiVersion: networking.k8s.io/v1
|
||||
kind: NetworkPolicy
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: kubeshark-52.12.0
|
||||
helm.sh/chart: kubeshark-53.1.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.12.0"
|
||||
app.kubernetes.io/version: "53.1.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
name: kubeshark-hub-network-policy
|
||||
namespace: default
|
||||
@@ -33,10 +33,10 @@ apiVersion: networking.k8s.io/v1
|
||||
kind: NetworkPolicy
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: kubeshark-52.12.0
|
||||
helm.sh/chart: kubeshark-53.1.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.12.0"
|
||||
app.kubernetes.io/version: "53.1.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
annotations:
|
||||
name: kubeshark-front-network-policy
|
||||
@@ -60,10 +60,10 @@ apiVersion: networking.k8s.io/v1
|
||||
kind: NetworkPolicy
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: kubeshark-52.12.0
|
||||
helm.sh/chart: kubeshark-53.1.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.12.0"
|
||||
app.kubernetes.io/version: "53.1.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
annotations:
|
||||
name: kubeshark-dex-network-policy
|
||||
@@ -87,10 +87,10 @@ apiVersion: networking.k8s.io/v1
|
||||
kind: NetworkPolicy
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: kubeshark-52.12.0
|
||||
helm.sh/chart: kubeshark-53.1.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.12.0"
|
||||
app.kubernetes.io/version: "53.1.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
annotations:
|
||||
name: kubeshark-worker-network-policy
|
||||
@@ -116,10 +116,10 @@ apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: kubeshark-52.12.0
|
||||
helm.sh/chart: kubeshark-53.1.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.12.0"
|
||||
app.kubernetes.io/version: "53.1.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
name: kubeshark-service-account
|
||||
namespace: default
|
||||
@@ -132,10 +132,10 @@ metadata:
|
||||
namespace: default
|
||||
labels:
|
||||
app.kubeshark.com/app: hub
|
||||
helm.sh/chart: kubeshark-52.12.0
|
||||
helm.sh/chart: kubeshark-53.1.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.12.0"
|
||||
app.kubernetes.io/version: "53.1.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
stringData:
|
||||
LICENSE: ''
|
||||
@@ -151,10 +151,10 @@ metadata:
|
||||
namespace: default
|
||||
labels:
|
||||
app.kubeshark.com/app: hub
|
||||
helm.sh/chart: kubeshark-52.12.0
|
||||
helm.sh/chart: kubeshark-53.1.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.12.0"
|
||||
app.kubernetes.io/version: "53.1.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
stringData:
|
||||
AUTH_SAML_X509_CRT: |
|
||||
@@ -167,10 +167,10 @@ metadata:
|
||||
namespace: default
|
||||
labels:
|
||||
app.kubeshark.com/app: hub
|
||||
helm.sh/chart: kubeshark-52.12.0
|
||||
helm.sh/chart: kubeshark-53.1.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.12.0"
|
||||
app.kubernetes.io/version: "53.1.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
stringData:
|
||||
AUTH_SAML_X509_KEY: |
|
||||
@@ -182,10 +182,10 @@ metadata:
|
||||
name: kubeshark-nginx-config-map
|
||||
namespace: default
|
||||
labels:
|
||||
helm.sh/chart: kubeshark-52.12.0
|
||||
helm.sh/chart: kubeshark-53.1.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.12.0"
|
||||
app.kubernetes.io/version: "53.1.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
data:
|
||||
default.conf: |
|
||||
@@ -209,8 +209,10 @@ data:
|
||||
proxy_set_header Authorization $http_authorization;
|
||||
proxy_pass_header Authorization;
|
||||
proxy_connect_timeout 4s;
|
||||
proxy_read_timeout 120s;
|
||||
proxy_send_timeout 12s;
|
||||
# Disable buffering for gRPC/Connect streaming
|
||||
client_max_body_size 0;
|
||||
proxy_request_buffering off;
|
||||
proxy_buffering off;
|
||||
proxy_pass_request_headers on;
|
||||
}
|
||||
|
||||
@@ -246,17 +248,18 @@ metadata:
|
||||
namespace: default
|
||||
labels:
|
||||
app.kubeshark.com/app: hub
|
||||
helm.sh/chart: kubeshark-52.12.0
|
||||
helm.sh/chart: kubeshark-53.1.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.12.0"
|
||||
app.kubernetes.io/version: "53.1.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
data:
|
||||
POD_REGEX: '.*'
|
||||
NAMESPACES: ''
|
||||
EXCLUDED_NAMESPACES: ''
|
||||
BPF_OVERRIDE: ''
|
||||
STOPPED: 'false'
|
||||
DISSECTION_ENABLED: 'true'
|
||||
CAPTURE_SELF: 'false'
|
||||
SCRIPTING_SCRIPTS: '{}'
|
||||
SCRIPTING_ACTIVE_SCRIPTS: ''
|
||||
INGRESS_ENABLED: 'false'
|
||||
@@ -266,7 +269,7 @@ data:
|
||||
AUTH_TYPE: 'default'
|
||||
AUTH_SAML_IDP_METADATA_URL: ''
|
||||
AUTH_SAML_ROLE_ATTRIBUTE: 'role'
|
||||
AUTH_SAML_ROLES: '{"admin":{"canDownloadPCAP":true,"canStopTrafficCapturing":true,"canUpdateTargetedPods":true,"canUseScripting":true,"filter":"","scriptingPermissions":{"canActivate":true,"canDelete":true,"canSave":true},"showAdminConsoleLink":true}}'
|
||||
AUTH_SAML_ROLES: '{"admin":{"canControlDissection":true,"canDownloadPCAP":true,"canStopTrafficCapturing":true,"canUpdateTargetedPods":true,"canUseScripting":true,"filter":"","scriptingPermissions":{"canActivate":true,"canDelete":true,"canSave":true},"showAdminConsoleLink":true}}'
|
||||
AUTH_OIDC_ISSUER: 'not set'
|
||||
AUTH_OIDC_REFRESH_TOKEN_LIFETIME: '3960h'
|
||||
AUTH_OIDC_STATE_PARAM_EXPIRY: '10m'
|
||||
@@ -276,7 +279,7 @@ data:
|
||||
TARGETED_PODS_UPDATE_DISABLED: ''
|
||||
PRESET_FILTERS_CHANGING_ENABLED: 'true'
|
||||
RECORDING_DISABLED: ''
|
||||
STOP_TRAFFIC_CAPTURING_DISABLED: 'false'
|
||||
DISSECTION_CONTROL_ENABLED: 'true'
|
||||
GLOBAL_FILTER: ""
|
||||
DEFAULT_FILTER: ""
|
||||
TRAFFIC_SAMPLE_RATE: '100'
|
||||
@@ -285,9 +288,8 @@ data:
|
||||
PCAP_ERROR_TTL: '0'
|
||||
TIMEZONE: ' '
|
||||
CLOUD_LICENSE_ENABLED: 'true'
|
||||
AI_ASSISTANT_ENABLED: 'true'
|
||||
DUPLICATE_TIMEFRAME: '200ms'
|
||||
ENABLED_DISSECTORS: 'amqp,dns,http,icmp,kafka,redis,ws,ldap,radius,diameter,udp-flow,tcp-flow'
|
||||
ENABLED_DISSECTORS: 'amqp,dns,http,icmp,kafka,redis,ws,ldap,radius,diameter,udp-flow,tcp-flow,udp-conn,tcp-conn'
|
||||
CUSTOM_MACROS: '{"https":"tls and (http or http2)"}'
|
||||
DISSECTORS_UPDATING_ENABLED: 'true'
|
||||
DETECT_DUPLICATES: 'false'
|
||||
@@ -296,7 +298,7 @@ data:
|
||||
PCAP_MAX_TIME: '1h'
|
||||
PCAP_MAX_SIZE: '500MB'
|
||||
PORT_MAPPING: '{"amqp":[5671,5672],"diameter":[3868],"http":[80,443,8080],"kafka":[9092],"ldap":[389],"redis":[6379]}'
|
||||
RAW_CAPTURE: 'true'
|
||||
RAW_CAPTURE_ENABLED: 'true'
|
||||
RAW_CAPTURE_STORAGE_SIZE: '1Gi'
|
||||
---
|
||||
# Source: kubeshark/templates/02-cluster-role.yaml
|
||||
@@ -304,10 +306,10 @@ apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: kubeshark-52.12.0
|
||||
helm.sh/chart: kubeshark-53.1.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.12.0"
|
||||
app.kubernetes.io/version: "53.1.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
name: kubeshark-cluster-role-default
|
||||
namespace: default
|
||||
@@ -351,10 +353,10 @@ apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: kubeshark-52.12.0
|
||||
helm.sh/chart: kubeshark-53.1.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.12.0"
|
||||
app.kubernetes.io/version: "53.1.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
name: kubeshark-cluster-role-binding-default
|
||||
namespace: default
|
||||
@@ -372,10 +374,10 @@ apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: kubeshark-52.12.0
|
||||
helm.sh/chart: kubeshark-53.1.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.12.0"
|
||||
app.kubernetes.io/version: "53.1.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
annotations:
|
||||
name: kubeshark-self-config-role
|
||||
@@ -422,10 +424,10 @@ apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: kubeshark-52.12.0
|
||||
helm.sh/chart: kubeshark-53.1.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.12.0"
|
||||
app.kubernetes.io/version: "53.1.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
annotations:
|
||||
name: kubeshark-self-config-role-binding
|
||||
@@ -445,10 +447,10 @@ kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
app.kubeshark.com/app: hub
|
||||
helm.sh/chart: kubeshark-52.12.0
|
||||
helm.sh/chart: kubeshark-53.1.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.12.0"
|
||||
app.kubernetes.io/version: "53.1.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
name: kubeshark-hub
|
||||
namespace: default
|
||||
@@ -466,10 +468,10 @@ apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: kubeshark-52.12.0
|
||||
helm.sh/chart: kubeshark-53.1.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.12.0"
|
||||
app.kubernetes.io/version: "53.1.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
name: kubeshark-front
|
||||
namespace: default
|
||||
@@ -487,10 +489,10 @@ kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: kubeshark-52.12.0
|
||||
helm.sh/chart: kubeshark-53.1.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.12.0"
|
||||
app.kubernetes.io/version: "53.1.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
annotations:
|
||||
prometheus.io/scrape: 'true'
|
||||
@@ -500,10 +502,10 @@ metadata:
|
||||
spec:
|
||||
selector:
|
||||
app.kubeshark.com/app: worker
|
||||
helm.sh/chart: kubeshark-52.12.0
|
||||
helm.sh/chart: kubeshark-53.1.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.12.0"
|
||||
app.kubernetes.io/version: "53.1.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
ports:
|
||||
- name: metrics
|
||||
@@ -516,10 +518,10 @@ kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
labels:
|
||||
helm.sh/chart: kubeshark-52.12.0
|
||||
helm.sh/chart: kubeshark-53.1.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.12.0"
|
||||
app.kubernetes.io/version: "53.1.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
annotations:
|
||||
prometheus.io/scrape: 'true'
|
||||
@@ -529,10 +531,10 @@ metadata:
|
||||
spec:
|
||||
selector:
|
||||
app.kubeshark.com/app: hub
|
||||
helm.sh/chart: kubeshark-52.12.0
|
||||
helm.sh/chart: kubeshark-53.1.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.12.0"
|
||||
app.kubernetes.io/version: "53.1.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
ports:
|
||||
- name: metrics
|
||||
@@ -547,10 +549,10 @@ metadata:
|
||||
labels:
|
||||
app.kubeshark.com/app: worker
|
||||
sidecar.istio.io/inject: "false"
|
||||
helm.sh/chart: kubeshark-52.12.0
|
||||
helm.sh/chart: kubeshark-53.1.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.12.0"
|
||||
app.kubernetes.io/version: "53.1.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
name: kubeshark-worker-daemon-set
|
||||
namespace: default
|
||||
@@ -564,10 +566,10 @@ spec:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubeshark.com/app: worker
|
||||
helm.sh/chart: kubeshark-52.12.0
|
||||
helm.sh/chart: kubeshark-53.1.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.12.0"
|
||||
app.kubernetes.io/version: "53.1.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
name: kubeshark-worker-daemon-set
|
||||
namespace: kubeshark
|
||||
@@ -577,7 +579,7 @@ spec:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- mkdir -p /sys/fs/bpf && mount | grep -q '/sys/fs/bpf' || mount -t bpf bpf /sys/fs/bpf
|
||||
image: 'docker.io/kubeshark/worker:v52.12'
|
||||
image: 'docker.io/kubeshark/worker:v53.1'
|
||||
imagePullPolicy: Always
|
||||
name: mount-bpf
|
||||
securityContext:
|
||||
@@ -606,11 +608,17 @@ spec:
|
||||
- 'auto'
|
||||
- -staletimeout
|
||||
- '30'
|
||||
- -tcp-flow-full-timeout
|
||||
- '1200'
|
||||
- -udp-flow-full-timeout
|
||||
- '1200'
|
||||
- -storage-size
|
||||
- '10Gi'
|
||||
- -capture-db-max-size
|
||||
- '500Mi'
|
||||
image: 'docker.io/kubeshark/worker:v52.12'
|
||||
- -cloud-api-url
|
||||
- 'https://api.kubeshark.com'
|
||||
image: 'docker.io/kubeshark/worker:v53.1'
|
||||
imagePullPolicy: Always
|
||||
name: sniffer
|
||||
ports:
|
||||
@@ -630,8 +638,6 @@ spec:
|
||||
value: '10000'
|
||||
- name: TCP_STREAM_CHANNEL_TIMEOUT_SHOW
|
||||
value: 'false'
|
||||
- name: KUBESHARK_CLOUD_API_URL
|
||||
value: 'https://api.kubeshark.com'
|
||||
- name: PROFILING_ENABLED
|
||||
value: 'false'
|
||||
- name: SENTRY_ENABLED
|
||||
@@ -684,7 +690,7 @@ spec:
|
||||
- -disable-tls-log
|
||||
- -loglevel
|
||||
- 'warning'
|
||||
image: 'docker.io/kubeshark/worker:v52.12'
|
||||
image: 'docker.io/kubeshark/worker:v53.1'
|
||||
imagePullPolicy: Always
|
||||
name: tracer
|
||||
env:
|
||||
@@ -776,10 +782,10 @@ kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app.kubeshark.com/app: hub
|
||||
helm.sh/chart: kubeshark-52.12.0
|
||||
helm.sh/chart: kubeshark-53.1.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.12.0"
|
||||
app.kubernetes.io/version: "53.1.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
name: kubeshark-hub
|
||||
namespace: default
|
||||
@@ -794,10 +800,10 @@ spec:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubeshark.com/app: hub
|
||||
helm.sh/chart: kubeshark-52.12.0
|
||||
helm.sh/chart: kubeshark-53.1.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.12.0"
|
||||
app.kubernetes.io/version: "53.1.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
spec:
|
||||
dnsPolicy: ClusterFirstWithHostNet
|
||||
@@ -813,13 +819,15 @@ spec:
|
||||
- -capture-stop-after
|
||||
- "5m"
|
||||
- -snapshot-size-limit
|
||||
- '20Gi'
|
||||
- ''
|
||||
- -dissector-image
|
||||
- 'kubeshark/worker:master'
|
||||
- 'docker.io/kubeshark/worker:v53.1'
|
||||
- -dissector-cpu
|
||||
- '1'
|
||||
- -dissector-memory
|
||||
- '4Gi'
|
||||
- -cloud-api-url
|
||||
- 'https://api.kubeshark.com'
|
||||
env:
|
||||
- name: POD_NAME
|
||||
valueFrom:
|
||||
@@ -833,11 +841,9 @@ spec:
|
||||
value: 'false'
|
||||
- name: SENTRY_ENVIRONMENT
|
||||
value: 'production'
|
||||
- name: KUBESHARK_CLOUD_API_URL
|
||||
value: 'https://api.kubeshark.com'
|
||||
- name: PROFILING_ENABLED
|
||||
value: 'false'
|
||||
image: 'docker.io/kubeshark/hub:v52.12'
|
||||
image: 'docker.io/kubeshark/hub:v53.1'
|
||||
imagePullPolicy: Always
|
||||
readinessProbe:
|
||||
periodSeconds: 5
|
||||
@@ -905,10 +911,10 @@ kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
app.kubeshark.com/app: front
|
||||
helm.sh/chart: kubeshark-52.12.0
|
||||
helm.sh/chart: kubeshark-53.1.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.12.0"
|
||||
app.kubernetes.io/version: "53.1.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
name: kubeshark-front
|
||||
namespace: default
|
||||
@@ -923,10 +929,10 @@ spec:
|
||||
metadata:
|
||||
labels:
|
||||
app.kubeshark.com/app: front
|
||||
helm.sh/chart: kubeshark-52.12.0
|
||||
helm.sh/chart: kubeshark-53.1.0
|
||||
app.kubernetes.io/name: kubeshark
|
||||
app.kubernetes.io/instance: kubeshark
|
||||
app.kubernetes.io/version: "52.12.0"
|
||||
app.kubernetes.io/version: "53.1.0"
|
||||
app.kubernetes.io/managed-by: Helm
|
||||
spec:
|
||||
containers:
|
||||
@@ -943,6 +949,8 @@ spec:
|
||||
value: ' '
|
||||
- name: REACT_APP_TIMEZONE
|
||||
value: ' '
|
||||
- name: REACT_APP_SCRIPTING_HIDDEN
|
||||
value: 'true'
|
||||
- name: REACT_APP_SCRIPTING_DISABLED
|
||||
value: 'false'
|
||||
- name: REACT_APP_TARGETED_PODS_UPDATE_DISABLED
|
||||
@@ -953,11 +961,11 @@ spec:
|
||||
value: 'true'
|
||||
- name: REACT_APP_RECORDING_DISABLED
|
||||
value: 'false'
|
||||
- name: REACT_APP_STOP_TRAFFIC_CAPTURING_DISABLED
|
||||
value: 'false'
|
||||
- name: 'REACT_APP_CLOUD_LICENSE_ENABLED'
|
||||
- name: REACT_APP_DISSECTION_ENABLED
|
||||
value: 'true'
|
||||
- name: 'REACT_APP_AI_ASSISTANT_ENABLED'
|
||||
- name: REACT_APP_DISSECTION_CONTROL_ENABLED
|
||||
value: 'true'
|
||||
- name: 'REACT_APP_CLOUD_LICENSE_ENABLED'
|
||||
value: 'true'
|
||||
- name: REACT_APP_SUPPORT_CHAT_ENABLED
|
||||
value: 'false'
|
||||
@@ -971,7 +979,7 @@ spec:
|
||||
value: 'false'
|
||||
- name: REACT_APP_SENTRY_ENVIRONMENT
|
||||
value: 'production'
|
||||
image: 'docker.io/kubeshark/front:v52.12'
|
||||
image: 'docker.io/kubeshark/front:v53.1'
|
||||
imagePullPolicy: Always
|
||||
name: kubeshark-front
|
||||
livenessProbe:
|
||||
|
||||
@@ -188,7 +188,7 @@ http and src.namespace == "default" and response.status == 500
|
||||
|
||||
## MCP Registry
|
||||
|
||||
Kubeshark is published to the [MCP Registry](https://registry.mcp.io) automatically on each release.
|
||||
Kubeshark is published to the [MCP Registry](https://registry.modelcontextprotocol.io/) automatically on each release.
|
||||
|
||||
The `server.json` in this directory is a reference file. The actual registry metadata (version, SHA256 hashes) is auto-generated during the release workflow. See [`.github/workflows/release.yml`](../.github/workflows/release.yml) for details.
|
||||
|
||||
@@ -197,7 +197,7 @@ The `server.json` in this directory is a reference file. The actual registry met
|
||||
- [Documentation](https://docs.kubeshark.com/en/mcp)
|
||||
- [GitHub](https://github.com/kubeshark/kubeshark)
|
||||
- [Website](https://kubeshark.com)
|
||||
- [MCP Registry](https://registry.mcp.io)
|
||||
- [MCP Registry](https://registry.modelcontextprotocol.io/)
|
||||
|
||||
## License
|
||||
|
||||
|
||||
331
skills/kfl/SKILL.md
Normal file
331
skills/kfl/SKILL.md
Normal file
@@ -0,0 +1,331 @@
|
||||
---
|
||||
name: kfl
|
||||
description: >
|
||||
KFL2 (Kubeshark Filter Language) expert. Use this skill whenever the user needs to
|
||||
write, debug, or optimize KFL filters for Kubeshark traffic queries. Trigger on any
|
||||
mention of KFL, CEL filters, traffic filtering, display filters, query syntax,
|
||||
filter expressions, "how do I filter", "show me only", "find traffic where",
|
||||
protocol-specific queries (HTTP status codes, DNS lookups, Redis commands, Kafka topics),
|
||||
Kubernetes-aware filtering (by namespace, pod, service, label, annotation),
|
||||
L4 connection/flow filters, capture source filters, time-based queries, or any
|
||||
request to slice/search/narrow network traffic in Kubeshark. Also trigger when other
|
||||
skills need help constructing filters — KFL is the query language for all Kubeshark
|
||||
traffic analysis.
|
||||
---
|
||||
|
||||
# KFL2 — Kubeshark Filter Language
|
||||
|
||||
You are a KFL2 expert. KFL2 is built on Google's CEL (Common Expression Language)
|
||||
and is the query language for all Kubeshark traffic analysis. It operates as a
|
||||
**display filter** — it doesn't affect what's captured, only what you see.
|
||||
|
||||
Think of KFL the way you think of SQL for databases or Google search syntax for
|
||||
the web. Kubeshark captures and indexes all cluster traffic; KFL is how you
|
||||
search it.
|
||||
|
||||
For the complete variable and field reference, see `references/kfl2-reference.md`.
|
||||
|
||||
## Core Syntax
|
||||
|
||||
KFL expressions are boolean CEL expressions. An empty filter matches everything.
|
||||
|
||||
### Operators
|
||||
|
||||
| Category | Operators |
|
||||
|----------|-----------|
|
||||
| Comparison | `==`, `!=`, `<`, `<=`, `>`, `>=` |
|
||||
| Logical | `&&`, `\|\|`, `!` |
|
||||
| Arithmetic | `+`, `-`, `*`, `/`, `%` |
|
||||
| Membership | `in` |
|
||||
| Ternary | `condition ? true_val : false_val` |
|
||||
|
||||
### String Functions
|
||||
|
||||
```
|
||||
str.contains(substring) // Substring search
|
||||
str.startsWith(prefix) // Prefix match
|
||||
str.endsWith(suffix) // Suffix match
|
||||
str.matches(regex) // Regex match
|
||||
size(str) // String length
|
||||
```
|
||||
|
||||
### Collection Functions
|
||||
|
||||
```
|
||||
size(collection) // List/map/string length
|
||||
key in map // Key existence
|
||||
map[key] // Value access
|
||||
map_get(map, key, default) // Safe access with default
|
||||
value in list // List membership
|
||||
```
|
||||
|
||||
### Time Functions
|
||||
|
||||
```
|
||||
timestamp("2026-03-14T22:00:00Z") // Parse ISO timestamp
|
||||
duration("5m") // Parse duration
|
||||
now() // Current time (snapshot at filter creation)
|
||||
```
|
||||
|
||||
## Protocol Detection
|
||||
|
||||
Boolean flags that indicate which protocol was detected. Use these as the first
|
||||
filter term — they're fast and narrow the search space immediately.
|
||||
|
||||
| Flag | Protocol | Flag | Protocol |
|
||||
|------|----------|------|----------|
|
||||
| `http` | HTTP/1.1, HTTP/2 | `redis` | Redis |
|
||||
| `dns` | DNS | `kafka` | Kafka |
|
||||
| `tls` | TLS/SSL | `amqp` | AMQP |
|
||||
| `tcp` | TCP | `ldap` | LDAP |
|
||||
| `udp` | UDP | `ws` | WebSocket |
|
||||
| `sctp` | SCTP | `gql` | GraphQL (v1+v2) |
|
||||
| `icmp` | ICMP | `gqlv1` / `gqlv2` | GraphQL version-specific |
|
||||
| `radius` | RADIUS | `conn` / `flow` | L4 connection/flow tracking |
|
||||
| `diameter` | Diameter | `tcp_conn` / `udp_conn` | Transport-specific connections |
|
||||
|
||||
## Kubernetes Context
|
||||
|
||||
The most common starting point. Filter by where traffic originates or terminates.
|
||||
|
||||
### Pod and Service Fields
|
||||
|
||||
```
|
||||
src.pod.name == "orders-594487879c-7ddxf"
|
||||
dst.pod.namespace == "production"
|
||||
src.service.name == "api-gateway"
|
||||
dst.service.namespace == "payments"
|
||||
```
|
||||
|
||||
Pod fields fall back to service data when pod info is unavailable, so
|
||||
`dst.pod.namespace` works even for service-level entries.
|
||||
|
||||
### Aggregate Collections
|
||||
|
||||
Match against any direction (src or dst):
|
||||
|
||||
```
|
||||
"production" in namespaces // Any namespace match
|
||||
"orders" in pods // Any pod name match
|
||||
"api-gateway" in services // Any service name match
|
||||
```
|
||||
|
||||
### Labels and Annotations
|
||||
|
||||
```
|
||||
local_labels["app"] == "checkout"
|
||||
remote_labels["version"] == "canary"
|
||||
"tier" in local_labels // Label existence
|
||||
map_get(local_labels, "env", "") == "prod" // Safe access
|
||||
```
|
||||
|
||||
### Node and Process
|
||||
|
||||
```
|
||||
node_name == "ip-10-0-25-170.ec2.internal"
|
||||
local_process_name == "nginx"
|
||||
remote_process_name.contains("postgres")
|
||||
```
|
||||
|
||||
### DNS Resolution
|
||||
|
||||
```
|
||||
src.dns == "api.example.com"
|
||||
dst.dns.contains("redis")
|
||||
```
|
||||
|
||||
## HTTP Filtering
|
||||
|
||||
HTTP is the most common protocol for API-level investigation.
|
||||
|
||||
### Fields
|
||||
|
||||
| Field | Type | Example |
|
||||
|-------|------|---------|
|
||||
| `method` | string | `"GET"`, `"POST"`, `"PUT"`, `"DELETE"` |
|
||||
| `url` | string | Full path + query: `"/api/users?id=123"` |
|
||||
| `path` | string | Path only: `"/api/users"` |
|
||||
| `status_code` | int | `200`, `404`, `500` |
|
||||
| `http_version` | string | `"HTTP/1.1"`, `"HTTP/2"` |
|
||||
| `request.headers` | map | `request.headers["content-type"]` |
|
||||
| `response.headers` | map | `response.headers["server"]` |
|
||||
| `request.cookies` | map | `request.cookies["session"]` |
|
||||
| `response.cookies` | map | `response.cookies["token"]` |
|
||||
| `query_string` | map | `query_string["id"]` |
|
||||
| `request_body_size` | int | Request body bytes |
|
||||
| `response_body_size` | int | Response body bytes |
|
||||
| `elapsed_time` | int | Duration in **microseconds** |
|
||||
|
||||
### Common Patterns
|
||||
|
||||
```
|
||||
// Error investigation
|
||||
http && status_code >= 500 // Server errors
|
||||
http && status_code == 429 // Rate limiting
|
||||
http && status_code >= 400 && status_code < 500 // Client errors
|
||||
|
||||
// Endpoint targeting
|
||||
http && method == "POST" && path.contains("/orders")
|
||||
http && url.matches(".*/api/v[0-9]+/users.*")
|
||||
|
||||
// Performance
|
||||
http && elapsed_time > 5000000 // > 5 seconds
|
||||
http && response_body_size > 1000000 // > 1MB responses
|
||||
|
||||
// Header inspection
|
||||
http && "authorization" in request.headers
|
||||
http && request.headers["content-type"] == "application/json"
|
||||
|
||||
// GraphQL (subset of HTTP)
|
||||
gql && method == "POST" && status_code >= 400
|
||||
```
|
||||
|
||||
## DNS Filtering
|
||||
|
||||
DNS issues are often the hidden root cause of outages.
|
||||
|
||||
| Field | Type | Description |
|
||||
|-------|------|-------------|
|
||||
| `dns_questions` | []string | Question domain names |
|
||||
| `dns_answers` | []string | Answer domain names |
|
||||
| `dns_question_types` | []string | Record types: A, AAAA, CNAME, MX, TXT, SRV, PTR |
|
||||
| `dns_request` | bool | Is request |
|
||||
| `dns_response` | bool | Is response |
|
||||
| `dns_request_length` | int | Request size |
|
||||
| `dns_response_length` | int | Response size |
|
||||
|
||||
```
|
||||
dns && "api.external-service.com" in dns_questions
|
||||
dns && dns_response && status_code != 0 // Failed lookups
|
||||
dns && "A" in dns_question_types // A record queries
|
||||
dns && size(dns_questions) > 1 // Multi-question
|
||||
```
|
||||
|
||||
## Database and Messaging Protocols
|
||||
|
||||
### Redis
|
||||
|
||||
```
|
||||
redis && redis_type == "GET" // Command type
|
||||
redis && redis_key.startsWith("session:") // Key pattern
|
||||
redis && redis_command.contains("DEL") // Command search
|
||||
redis && redis_total_size > 10000 // Large operations
|
||||
```
|
||||
|
||||
### Kafka
|
||||
|
||||
```
|
||||
kafka && kafka_api_key_name == "PRODUCE" // Produce operations
|
||||
kafka && kafka_client_id == "payment-processor" // Client filtering
|
||||
kafka && kafka_request_summary.contains("orders") // Topic filtering
|
||||
kafka && kafka_size > 10000 // Large messages
|
||||
```
|
||||
|
||||
### AMQP
|
||||
|
||||
```
|
||||
amqp && amqp_method == "basic.publish"
|
||||
amqp && amqp_summary.contains("order")
|
||||
```
|
||||
|
||||
### LDAP
|
||||
|
||||
```
|
||||
ldap && ldap_type == "bind" // Bind requests
|
||||
ldap && ldap_summary.contains("search")
|
||||
```
|
||||
|
||||
## Transport Layer (L4)
|
||||
|
||||
### TCP/UDP Fields
|
||||
|
||||
```
|
||||
tcp && tcp_error_type != "" // TCP errors
|
||||
udp && udp_length > 1000 // Large UDP packets
|
||||
```
|
||||
|
||||
### Connection Tracking
|
||||
|
||||
```
|
||||
conn && conn_state == "open" // Active connections
|
||||
conn && conn_local_bytes > 1000000 // High-volume
|
||||
conn && "HTTP" in conn_l7_detected // L7 protocol detection
|
||||
tcp_conn && conn_state == "closed" // Closed TCP connections
|
||||
```
|
||||
|
||||
### Flow Tracking (with Rate Metrics)
|
||||
|
||||
```
|
||||
flow && flow_local_pps > 1000 // High packet rate
|
||||
flow && flow_local_bps > 1000000 // High bandwidth
|
||||
flow && flow_state == "closed" && "TLS" in flow_l7_detected
|
||||
tcp_flow && flow_local_bps > 5000000 // High-throughput TCP
|
||||
```
|
||||
|
||||
## Network Layer
|
||||
|
||||
```
|
||||
src.ip == "10.0.53.101"
|
||||
dst.ip.startsWith("192.168.")
|
||||
src.port == 8080
|
||||
dst.port >= 8000 && dst.port <= 9000
|
||||
```
|
||||
|
||||
## Capture Source
|
||||
|
||||
Filter by how traffic was captured:
|
||||
|
||||
```
|
||||
capture_source == "ebpf" // eBPF captured
|
||||
capture_source == "ebpf_tls" // TLS decryption via eBPF
|
||||
capture_source == "af_packet" // AF_PACKET captured
|
||||
capture_backend == "ebpf" // eBPF backend family
|
||||
```
|
||||
|
||||
## Time-Based Filtering
|
||||
|
||||
```
|
||||
timestamp > timestamp("2026-03-14T22:00:00Z")
|
||||
timestamp >= timestamp("2026-03-14T22:00:00Z") && timestamp <= timestamp("2026-03-14T23:00:00Z")
|
||||
timestamp > now() - duration("5m") // Last 5 minutes
|
||||
elapsed_time > 2000000 // Older than 2 seconds
|
||||
```
|
||||
|
||||
## Building Filters: Progressive Narrowing
|
||||
|
||||
The most effective investigation technique — start broad, add constraints:
|
||||
|
||||
```
|
||||
// Step 1: Protocol + namespace
|
||||
http && dst.pod.namespace == "production"
|
||||
|
||||
// Step 2: Add error condition
|
||||
http && dst.pod.namespace == "production" && status_code >= 500
|
||||
|
||||
// Step 3: Narrow to service
|
||||
http && dst.pod.namespace == "production" && status_code >= 500 && dst.service.name == "payment-service"
|
||||
|
||||
// Step 4: Narrow to endpoint
|
||||
http && dst.pod.namespace == "production" && status_code >= 500 && dst.service.name == "payment-service" && path.contains("/charge")
|
||||
|
||||
// Step 5: Add timing
|
||||
http && dst.pod.namespace == "production" && status_code >= 500 && dst.service.name == "payment-service" && path.contains("/charge") && elapsed_time > 2000000
|
||||
```
|
||||
|
||||
## Performance Tips
|
||||
|
||||
1. **Protocol flags first** — `http && ...` is faster than `... && http`
|
||||
2. **`startsWith`/`endsWith` over `contains`** — prefix/suffix checks are faster
|
||||
3. **Specific ports before string ops** — `dst.port == 80` is cheaper than `url.contains(...)`
|
||||
4. **Use `map_get` for labels** — avoids errors on missing keys
|
||||
5. **Keep filters simple** — CEL short-circuits on `&&`, so put cheap checks first
|
||||
|
||||
## Type Safety
|
||||
|
||||
KFL2 is statically typed. Common gotchas:
|
||||
|
||||
- `status_code` is `int`, not string — use `status_code == 200`, not `"200"`
|
||||
- `elapsed_time` is in **microseconds** — 5 seconds = `5000000`
|
||||
- `timestamp` requires `timestamp()` function — not a raw string
|
||||
- Map access on missing keys errors — use `key in map` or `map_get()` first
|
||||
- List membership uses `value in list` — not `list.contains(value)`
|
||||
375
skills/kfl/references/kfl2-reference.md
Normal file
375
skills/kfl/references/kfl2-reference.md
Normal file
@@ -0,0 +1,375 @@
|
||||
# KFL2 Complete Variable and Field Reference
|
||||
|
||||
This is the exhaustive reference for every variable available in KFL2 filters.
|
||||
KFL2 is built on Google's CEL (Common Expression Language) and evaluates against
|
||||
Kubeshark's protobuf-based `BaseEntry` structure.
|
||||
|
||||
## Network-Level Variables
|
||||
|
||||
| Variable | Type | Description | Example |
|
||||
|----------|------|-------------|---------|
|
||||
| `src.ip` | string | Source IP address | `"10.0.53.101"` |
|
||||
| `dst.ip` | string | Destination IP address | `"192.168.1.1"` |
|
||||
| `src.port` | int | Source port number | `43210` |
|
||||
| `dst.port` | int | Destination port number | `8080` |
|
||||
| `protocol` | string | Detected protocol type | `"HTTP"`, `"DNS"` |
|
||||
|
||||
## Identity and Metadata Variables
|
||||
|
||||
| Variable | Type | Description |
|
||||
|----------|------|-------------|
|
||||
| `id` | int | BaseEntry unique identifier (assigned by sniffer) |
|
||||
| `node_id` | string | Node identifier (assigned by hub) |
|
||||
| `index` | int | Entry index for stream uniqueness |
|
||||
| `stream` | string | Stream identifier (hex string) |
|
||||
| `timestamp` | timestamp | Event time (UTC), use with `timestamp()` function |
|
||||
| `elapsed_time` | int | Age since timestamp in microseconds |
|
||||
| `worker` | string | Worker identifier |
|
||||
|
||||
## Cross-Reference Variables
|
||||
|
||||
| Variable | Type | Description |
|
||||
|----------|------|-------------|
|
||||
| `conn_id` | int | L7 to L4 connection cross-reference ID |
|
||||
| `flow_id` | int | L7 to L4 flow cross-reference ID |
|
||||
| `has_pcap` | bool | Whether PCAP data is available for this entry |
|
||||
|
||||
## Capture Source Variables
|
||||
|
||||
| Variable | Type | Description | Values |
|
||||
|----------|------|-------------|--------|
|
||||
| `capture_source` | string | Canonical capture source | `"unspecified"`, `"af_packet"`, `"ebpf"`, `"ebpf_tls"` |
|
||||
| `capture_backend` | string | Backend family | `"af_packet"`, `"ebpf"` |
|
||||
| `capture_source_code` | int | Numeric enum | 0=unspecified, 1=af_packet, 2=ebpf, 3=ebpf_tls |
|
||||
| `capture` | map | Nested map access | `capture["source"]`, `capture["backend"]` |
|
||||
|
||||
## Protocol Detection Flags
|
||||
|
||||
Boolean variables indicating detected protocol. Use as first filter term for performance.
|
||||
|
||||
| Variable | Protocol | Variable | Protocol |
|
||||
|----------|----------|----------|----------|
|
||||
| `http` | HTTP/1.1, HTTP/2 | `redis` | Redis |
|
||||
| `dns` | DNS | `kafka` | Kafka |
|
||||
| `tls` | TLS/SSL handshake | `amqp` | AMQP messaging |
|
||||
| `tcp` | TCP transport | `ldap` | LDAP directory |
|
||||
| `udp` | UDP transport | `ws` | WebSocket |
|
||||
| `sctp` | SCTP streaming | `gql` | GraphQL (v1 or v2) |
|
||||
| `icmp` | ICMP | `gqlv1` | GraphQL v1 only |
|
||||
| `radius` | RADIUS auth | `gqlv2` | GraphQL v2 only |
|
||||
| `diameter` | Diameter | `conn` | L4 connection tracking |
|
||||
| `flow` | L4 flow tracking | `tcp_conn` | TCP connection tracking |
|
||||
| `tcp_flow` | TCP flow tracking | `udp_conn` | UDP connection tracking |
|
||||
| `udp_flow` | UDP flow tracking | | |
|
||||
|
||||
## HTTP Variables
|
||||
|
||||
| Variable | Type | Description | Example |
|
||||
|----------|------|-------------|---------|
|
||||
| `method` | string | HTTP method | `"GET"`, `"POST"`, `"PUT"`, `"DELETE"`, `"PATCH"` |
|
||||
| `url` | string | Full URL path and query string | `"/api/users?id=123"` |
|
||||
| `path` | string | URL path component (no query) | `"/api/users"` |
|
||||
| `status_code` | int | HTTP response status code | `200`, `404`, `500` |
|
||||
| `http_version` | string | HTTP protocol version | `"HTTP/1.1"`, `"HTTP/2"` |
|
||||
| `query_string` | map[string]string | Parsed URL query parameters | `query_string["id"]` → `"123"` |
|
||||
| `request.headers` | map[string]string | Request HTTP headers | `request.headers["content-type"]` |
|
||||
| `response.headers` | map[string]string | Response HTTP headers | `response.headers["server"]` |
|
||||
| `request.cookies` | map[string]string | Request cookies | `request.cookies["session"]` |
|
||||
| `response.cookies` | map[string]string | Response cookies | `response.cookies["token"]` |
|
||||
| `request_headers_size` | int | Request headers size in bytes | |
|
||||
| `request_body_size` | int | Request body size in bytes | |
|
||||
| `response_headers_size` | int | Response headers size in bytes | |
|
||||
| `response_body_size` | int | Response body size in bytes | |
|
||||
|
||||
GraphQL requests have `gql` (or `gqlv1`/`gqlv2`) set to true and all HTTP
|
||||
variables available.
|
||||
|
||||
## DNS Variables
|
||||
|
||||
| Variable | Type | Description | Example |
|
||||
|----------|------|-------------|---------|
|
||||
| `dns_questions` | []string | Question domain names (request + response) | `["example.com"]` |
|
||||
| `dns_answers` | []string | Answer domain names | `["1.2.3.4"]` |
|
||||
| `dns_question_types` | []string | Record types in questions | `["A"]`, `["AAAA"]`, `["CNAME"]` |
|
||||
| `dns_request` | bool | Is DNS request message | |
|
||||
| `dns_response` | bool | Is DNS response message | |
|
||||
| `dns_request_length` | int | DNS request size in bytes (0 if absent) | |
|
||||
| `dns_response_length` | int | DNS response size in bytes (0 if absent) | |
|
||||
| `dns_total_size` | int | Sum of request + response sizes | |
|
||||
|
||||
Supported question types: A, AAAA, NS, CNAME, SOA, MX, TXT, SRV, PTR, ANY.
|
||||
|
||||
## TLS Variables
|
||||
|
||||
| Variable | Type | Description | Example |
|
||||
|----------|------|-------------|---------|
|
||||
| `tls` | bool | TLS payload detected | |
|
||||
| `tls_summary` | string | TLS handshake summary | `"ClientHello"`, `"ServerHello"` |
|
||||
| `tls_info` | string | TLS connection details | `"TLS 1.3, AES-256-GCM"` |
|
||||
| `tls_request_size` | int | TLS request size in bytes | |
|
||||
| `tls_response_size` | int | TLS response size in bytes | |
|
||||
| `tls_total_size` | int | Sum of request + response (computed if not provided) | |
|
||||
|
||||
## TCP Variables
|
||||
|
||||
| Variable | Type | Description |
|
||||
|----------|------|-------------|
|
||||
| `tcp` | bool | TCP payload detected |
|
||||
| `tcp_method` | string | TCP method information |
|
||||
| `tcp_payload` | bytes | Raw TCP payload data |
|
||||
| `tcp_error_type` | string | TCP error type (empty if none) |
|
||||
| `tcp_error_message` | string | TCP error message (empty if none) |
|
||||
|
||||
## UDP Variables
|
||||
|
||||
| Variable | Type | Description |
|
||||
|----------|------|-------------|
|
||||
| `udp` | bool | UDP payload detected |
|
||||
| `udp_length` | int | UDP packet length |
|
||||
| `udp_checksum` | int | UDP checksum value |
|
||||
| `udp_payload` | bytes | Raw UDP payload data |
|
||||
|
||||
## SCTP Variables
|
||||
|
||||
| Variable | Type | Description |
|
||||
|----------|------|-------------|
|
||||
| `sctp` | bool | SCTP payload detected |
|
||||
| `sctp_checksum` | int | SCTP checksum value |
|
||||
| `sctp_chunk_type` | string | SCTP chunk type |
|
||||
| `sctp_length` | int | SCTP chunk length |
|
||||
|
||||
## ICMP Variables
|
||||
|
||||
| Variable | Type | Description |
|
||||
|----------|------|-------------|
|
||||
| `icmp` | bool | ICMP payload detected |
|
||||
| `icmp_type` | string | ICMP type code |
|
||||
| `icmp_version` | int | ICMP version (4 or 6) |
|
||||
| `icmp_length` | int | ICMP message length |
|
||||
|
||||
## WebSocket Variables
|
||||
|
||||
| Variable | Type | Description | Values |
|
||||
|----------|------|-------------|--------|
|
||||
| `ws` | bool | WebSocket payload detected | |
|
||||
| `ws_opcode` | string | WebSocket operation code | `"text"`, `"binary"`, `"close"`, `"ping"`, `"pong"` |
|
||||
| `ws_request` | bool | Is WebSocket request | |
|
||||
| `ws_response` | bool | Is WebSocket response | |
|
||||
| `ws_request_payload_data` | string | Request payload (safely truncated) | |
|
||||
| `ws_request_payload_length` | int | Request payload length in bytes | |
|
||||
| `ws_response_payload_length` | int | Response payload length in bytes | |
|
||||
|
||||
## Redis Variables
|
||||
|
||||
| Variable | Type | Description | Example |
|
||||
|----------|------|-------------|---------|
|
||||
| `redis` | bool | Redis payload detected | |
|
||||
| `redis_type` | string | Redis command verb | `"GET"`, `"SET"`, `"DEL"`, `"HGET"` |
|
||||
| `redis_command` | string | Full Redis command line | `"GET session:1234"` |
|
||||
| `redis_key` | string | Key (truncated to 64 bytes) | `"session:1234"` |
|
||||
| `redis_request_size` | int | Request size (0 if absent) | |
|
||||
| `redis_response_size` | int | Response size (0 if absent) | |
|
||||
| `redis_total_size` | int | Sum of request + response | |
|
||||
|
||||
## Kafka Variables
|
||||
|
||||
| Variable | Type | Description | Example |
|
||||
|----------|------|-------------|---------|
|
||||
| `kafka` | bool | Kafka payload detected | |
|
||||
| `kafka_api_key` | int | Kafka API key number | 0=FETCH, 1=PRODUCE |
|
||||
| `kafka_api_key_name` | string | Human-readable API operation | `"PRODUCE"`, `"FETCH"` |
|
||||
| `kafka_client_id` | string | Kafka client identifier | `"payment-processor"` |
|
||||
| `kafka_size` | int | Message size (request preferred, else response) | |
|
||||
| `kafka_request` | bool | Is Kafka request | |
|
||||
| `kafka_response` | bool | Is Kafka response | |
|
||||
| `kafka_request_summary` | string | Request summary/topic | `"orders-topic"` |
|
||||
| `kafka_request_size` | int | Request size (0 if absent) | |
|
||||
| `kafka_response_size` | int | Response size (0 if absent) | |
|
||||
|
||||
## AMQP Variables
|
||||
|
||||
| Variable | Type | Description | Example |
|
||||
|----------|------|-------------|---------|
|
||||
| `amqp` | bool | AMQP payload detected | |
|
||||
| `amqp_method` | string | AMQP method name | `"basic.publish"`, `"channel.open"` |
|
||||
| `amqp_summary` | string | Operation summary | |
|
||||
| `amqp_request` | bool | Is AMQP request | |
|
||||
| `amqp_response` | bool | Is AMQP response | |
|
||||
| `amqp_request_length` | int | Request length (0 if absent) | |
|
||||
| `amqp_response_length` | int | Response length (0 if absent) | |
|
||||
| `amqp_total_size` | int | Sum of request + response | |
|
||||
|
||||
## LDAP Variables
|
||||
|
||||
| Variable | Type | Description |
|
||||
|----------|------|-------------|
|
||||
| `ldap` | bool | LDAP payload detected |
|
||||
| `ldap_type` | string | LDAP operation type (request preferred) |
|
||||
| `ldap_summary` | string | Operation summary |
|
||||
| `ldap_request` | bool | Is LDAP request |
|
||||
| `ldap_response` | bool | Is LDAP response |
|
||||
| `ldap_request_length` | int | Request length (0 if absent) |
|
||||
| `ldap_response_length` | int | Response length (0 if absent) |
|
||||
| `ldap_total_size` | int | Sum of request + response |
|
||||
|
||||
## RADIUS Variables
|
||||
|
||||
| Variable | Type | Description | Example |
|
||||
|----------|------|-------------|---------|
|
||||
| `radius` | bool | RADIUS payload detected | |
|
||||
| `radius_code` | int | RADIUS code (request preferred) | |
|
||||
| `radius_code_name` | string | Code name | `"Access-Request"` |
|
||||
| `radius_request` | bool | Is RADIUS request | |
|
||||
| `radius_response` | bool | Is RADIUS response | |
|
||||
| `radius_request_authenticator` | string | Request authenticator (hex) | |
|
||||
| `radius_request_length` | int | Request size (0 if absent) | |
|
||||
| `radius_response_length` | int | Response size (0 if absent) | |
|
||||
| `radius_total_size` | int | Sum of request + response | |
|
||||
|
||||
## Diameter Variables
|
||||
|
||||
| Variable | Type | Description |
|
||||
|----------|------|-------------|
|
||||
| `diameter` | bool | Diameter payload detected |
|
||||
| `diameter_method` | string | Method name (request preferred) |
|
||||
| `diameter_summary` | string | Operation summary |
|
||||
| `diameter_request` | bool | Is Diameter request |
|
||||
| `diameter_response` | bool | Is Diameter response |
|
||||
| `diameter_request_length` | int | Request size (0 if absent) |
|
||||
| `diameter_response_length` | int | Response size (0 if absent) |
|
||||
| `diameter_total_size` | int | Sum of request + response |
|
||||
|
||||
## L4 Connection Tracking Variables
|
||||
|
||||
| Variable | Type | Description | Example |
|
||||
|----------|------|-------------|---------|
|
||||
| `conn` | bool | Connection tracking entry | |
|
||||
| `conn_state` | string | Connection state | `"open"`, `"in_progress"`, `"closed"` |
|
||||
| `conn_local_pkts` | int | Packets from local peer | |
|
||||
| `conn_local_bytes` | int | Bytes from local peer | |
|
||||
| `conn_remote_pkts` | int | Packets from remote peer | |
|
||||
| `conn_remote_bytes` | int | Bytes from remote peer | |
|
||||
| `conn_l7_detected` | []string | L7 protocols detected on connection | `["HTTP", "TLS"]` |
|
||||
| `conn_group_id` | int | Connection group identifier | |
|
||||
|
||||
## L4 Flow Tracking Variables
|
||||
|
||||
Flows extend connections with rate metrics (packets/bytes per second).
|
||||
|
||||
| Variable | Type | Description |
|
||||
|----------|------|-------------|
|
||||
| `flow` | bool | Flow tracking entry |
|
||||
| `flow_state` | string | Flow state (`"open"`, `"in_progress"`, `"closed"`) |
|
||||
| `flow_local_pkts` | int | Packets from local peer |
|
||||
| `flow_local_bytes` | int | Bytes from local peer |
|
||||
| `flow_remote_pkts` | int | Packets from remote peer |
|
||||
| `flow_remote_bytes` | int | Bytes from remote peer |
|
||||
| `flow_local_pps` | int | Local packets per second |
|
||||
| `flow_local_bps` | int | Local bytes per second |
|
||||
| `flow_remote_pps` | int | Remote packets per second |
|
||||
| `flow_remote_bps` | int | Remote bytes per second |
|
||||
| `flow_l7_detected` | []string | L7 protocols detected on flow |
|
||||
| `flow_group_id` | int | Flow group identifier |
|
||||
|
||||
## Kubernetes Variables
|
||||
|
||||
### Pod and Service (Directional)
|
||||
|
||||
| Variable | Type | Description |
|
||||
|----------|------|-------------|
|
||||
| `src.pod.name` | string | Source pod name |
|
||||
| `src.pod.namespace` | string | Source pod namespace |
|
||||
| `dst.pod.name` | string | Destination pod name |
|
||||
| `dst.pod.namespace` | string | Destination pod namespace |
|
||||
| `src.service.name` | string | Source service name |
|
||||
| `src.service.namespace` | string | Source service namespace |
|
||||
| `dst.service.name` | string | Destination service name |
|
||||
| `dst.service.namespace` | string | Destination service namespace |
|
||||
|
||||
**Fallback behavior**: Pod namespace/name fields automatically fall back to
|
||||
service data when pod info is unavailable. This means `dst.pod.namespace` works
|
||||
even when only service-level resolution exists.
|
||||
|
||||
### Aggregate Collections (Non-Directional)
|
||||
|
||||
| Variable | Type | Description |
|
||||
|----------|------|-------------|
|
||||
| `namespaces` | []string | All namespaces (src + dst, pod + service) |
|
||||
| `pods` | []string | All pod names (src + dst) |
|
||||
| `services` | []string | All service names (src + dst) |
|
||||
|
||||
### Labels and Annotations
|
||||
|
||||
| Variable | Type | Description |
|
||||
|----------|------|-------------|
|
||||
| `local_labels` | map[string]string | Kubernetes labels of local peer |
|
||||
| `local_annotations` | map[string]string | Kubernetes annotations of local peer |
|
||||
| `remote_labels` | map[string]string | Kubernetes labels of remote peer |
|
||||
| `remote_annotations` | map[string]string | Kubernetes annotations of remote peer |
|
||||
|
||||
Use `map_get(local_labels, "key", "default")` for safe access that won't error
|
||||
on missing keys.
|
||||
|
||||
### Node Information
|
||||
|
||||
| Variable | Type | Description |
|
||||
|----------|------|-------------|
|
||||
| `node` | map | Nested: `node["name"]`, `node["ip"]` |
|
||||
| `node_name` | string | Node name (flat alias) |
|
||||
| `node_ip` | string | Node IP (flat alias) |
|
||||
| `local_node_name` | string | Node name of local peer |
|
||||
| `remote_node_name` | string | Node name of remote peer |
|
||||
|
||||
### Process Information
|
||||
|
||||
| Variable | Type | Description |
|
||||
|----------|------|-------------|
|
||||
| `local_process_name` | string | Process name on local peer |
|
||||
| `remote_process_name` | string | Process name on remote peer |
|
||||
|
||||
### DNS Resolution
|
||||
|
||||
| Variable | Type | Description |
|
||||
|----------|------|-------------|
|
||||
| `src.dns` | string | DNS resolution of source IP |
|
||||
| `dst.dns` | string | DNS resolution of destination IP |
|
||||
| `dns_resolutions` | []string | All DNS resolutions (deduplicated) |
|
||||
|
||||
### Resolution Status
|
||||
|
||||
| Variable | Type | Values |
|
||||
|----------|------|--------|
|
||||
| `local_resolution_status` | string | `""` (resolved), `"no_node_mapping"`, `"rpc_error"`, `"rpc_empty"`, `"cache_miss"`, `"queue_full"` |
|
||||
| `remote_resolution_status` | string | Same as above |
|
||||
|
||||
## Default Values
|
||||
|
||||
When a variable is not present in an entry, KFL2 uses these defaults:
|
||||
|
||||
| Type | Default |
|
||||
|------|---------|
|
||||
| string | `""` |
|
||||
| int | `0` |
|
||||
| bool | `false` |
|
||||
| list | `[]` |
|
||||
| map | `{}` |
|
||||
| bytes | `[]` |
|
||||
|
||||
## Protocol Variable Precedence
|
||||
|
||||
For protocols with request/response pairs (Kafka, RADIUS, Diameter), merged
|
||||
fields prefer the **request** side. If no request exists, the response value
|
||||
is used. Size totals are always computed as `request_size + response_size`.
|
||||
|
||||
## CEL Language Features
|
||||
|
||||
KFL2 supports the full CEL specification:
|
||||
|
||||
- **Short-circuit evaluation**: `&&` stops on first false, `||` stops on first true
|
||||
- **Ternary**: `condition ? value_if_true : value_if_false`
|
||||
- **Regex**: `str.matches("pattern")` uses RE2 syntax
|
||||
- **Type coercion**: Timestamps require `timestamp()`, durations require `duration()`
|
||||
- **Null safety**: Use `in` operator or `map_get()` before accessing map keys
|
||||
|
||||
For the full CEL specification, see the
|
||||
[CEL Language Definition](https://github.com/google/cel-spec/blob/master/doc/langdef.md).
|
||||
409
skills/network-rca/SKILL.md
Normal file
409
skills/network-rca/SKILL.md
Normal file
@@ -0,0 +1,409 @@
|
||||
---
|
||||
name: network-rca
|
||||
description: >
|
||||
Kubernetes network root cause analysis skill powered by Kubeshark MCP. Use this skill
|
||||
whenever the user wants to investigate past incidents, perform retrospective traffic
|
||||
analysis, take or manage traffic snapshots, extract PCAPs, dissect L7 API calls from
|
||||
historical captures, compare traffic patterns over time, detect drift or anomalies
|
||||
between snapshots, or do any kind of forensic network analysis in Kubernetes.
|
||||
Also trigger when the user mentions snapshots, raw capture, PCAP extraction,
|
||||
traffic replay, postmortem analysis, "what happened yesterday/last week",
|
||||
root cause analysis, RCA, cloud snapshot storage, snapshot dissection, or KFL filters
|
||||
for historical traffic. Even if the user just says "figure out what went wrong"
|
||||
or "compare today's traffic to yesterday" in a Kubernetes context, use this skill.
|
||||
---
|
||||
|
||||
# Network Root Cause Analysis with Kubeshark MCP
|
||||
|
||||
You are a Kubernetes network forensics specialist. Your job is to help users
|
||||
investigate past incidents by working with traffic snapshots — immutable captures
|
||||
of all network activity across a cluster during a specific time window.
|
||||
|
||||
Kubeshark is a search engine for network traffic. Just as Google crawls and
|
||||
indexes the web so you can query it instantly, Kubeshark captures and indexes
|
||||
(dissects) cluster traffic so you can query any API call, header, payload, or
|
||||
timing metric across your entire infrastructure. Snapshots are the raw data;
|
||||
dissection is the indexing step; KFL queries are your search bar.
|
||||
|
||||
Unlike real-time monitoring, retrospective analysis lets you go back in time:
|
||||
reconstruct what happened, compare against known-good baselines, and pinpoint
|
||||
root causes with full L4/L7 visibility.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
Before starting any analysis, verify the environment is ready.
|
||||
|
||||
### Kubeshark MCP Health Check
|
||||
|
||||
Confirm the Kubeshark MCP is accessible and tools are available. Look for tools
|
||||
like `list_api_calls`, `list_l4_flows`, `create_snapshot`, etc.
|
||||
|
||||
**Tool**: `check_kubeshark_status`
|
||||
|
||||
If tools like `list_api_calls` or `list_l4_flows` are missing from the response,
|
||||
something is wrong with the MCP connection. Guide the user through setup
|
||||
(see Setup Reference at the bottom).
|
||||
|
||||
### Raw Capture Must Be Enabled
|
||||
|
||||
Retrospective analysis depends on raw capture — Kubeshark's kernel-level (eBPF)
|
||||
packet recording that stores traffic at the node level. Without it, snapshots
|
||||
have nothing to work with.
|
||||
|
||||
Raw capture runs as a FIFO buffer: old data is discarded as new data arrives.
|
||||
The buffer size determines how far back you can go. Larger buffer = wider
|
||||
snapshot window.
|
||||
|
||||
```yaml
|
||||
tap:
|
||||
capture:
|
||||
raw:
|
||||
enabled: true
|
||||
storageSize: 10Gi # Per-node FIFO buffer
|
||||
```
|
||||
|
||||
If raw capture isn't enabled, inform the user that retrospective analysis
|
||||
requires it and share the configuration above.
|
||||
|
||||
### Snapshot Storage
|
||||
|
||||
Snapshots are assembled on the Hub's storage, which is ephemeral by default.
|
||||
For serious forensic work, persistent storage is recommended:
|
||||
|
||||
```yaml
|
||||
tap:
|
||||
snapshots:
|
||||
local:
|
||||
storageClass: gp2
|
||||
storageSize: 1000Gi
|
||||
```
|
||||
|
||||
## Core Workflow
|
||||
|
||||
The general flow for any RCA investigation:
|
||||
|
||||
1. **Determine time window** — When did the issue occur? Use `get_data_boundaries`
|
||||
to see what raw capture data is available.
|
||||
2. **Create or locate a snapshot** — Either take a new snapshot covering the
|
||||
incident window, or find an existing one with `list_snapshots`.
|
||||
3. **Dissect the snapshot** — Activate L7 dissection so you can query API calls,
|
||||
not just raw packets.
|
||||
4. **Investigate** — Use KFL filters to slice through the traffic. Start broad,
|
||||
narrow progressively.
|
||||
5. **Extract evidence** — Export filtered PCAPs, resolve workload IPs, pull
|
||||
specific API call details.
|
||||
6. **Compare** (optional) — Diff against a known-good snapshot to identify
|
||||
what changed.
|
||||
|
||||
## Snapshot Operations
|
||||
|
||||
### Check Data Boundaries
|
||||
|
||||
Before creating a snapshot, check what raw capture data exists across the cluster.
|
||||
|
||||
**Tool**: `get_data_boundaries`
|
||||
|
||||
This returns the time window available per node. You can only create snapshots
|
||||
within these boundaries — data outside the window has already been rotated out
|
||||
of the FIFO buffer.
|
||||
|
||||
**Example response**:
|
||||
```
|
||||
Cluster-wide:
|
||||
Oldest: 2026-03-14 16:12:34 UTC
|
||||
Newest: 2026-03-14 18:05:20 UTC
|
||||
|
||||
Per node:
|
||||
┌─────────────────────────────┬──────────┬──────────┐
|
||||
│ Node │ Oldest │ Newest │
|
||||
├─────────────────────────────┼──────────┼──────────┤
|
||||
│ ip-10-0-25-170.ec2.internal │ 16:12:34 │ 18:03:39 │
|
||||
│ ip-10-0-32-115.ec2.internal │ 16:13:45 │ 18:05:20 │
|
||||
└─────────────────────────────┴──────────┴──────────┘
|
||||
```
|
||||
|
||||
If the user's incident falls outside the available window, let them know the
|
||||
data has been rotated out. Suggest increasing `storageSize` for future coverage.
|
||||
|
||||
### Create a Snapshot
|
||||
|
||||
**Tool**: `create_snapshot`
|
||||
|
||||
Specify nodes (or cluster-wide) and a time window within the data boundaries.
|
||||
Snapshots include everything needed to reconstruct the traffic picture:
|
||||
raw capture files, Kubernetes pod events, and eBPF cgroup events.
|
||||
|
||||
Snapshots take time to build. After creating one, check its status.
|
||||
|
||||
**Tool**: `get_snapshot`
|
||||
|
||||
Wait until status is `completed` before proceeding with dissection or PCAP export.
|
||||
|
||||
### List Existing Snapshots
|
||||
|
||||
**Tool**: `list_snapshots`
|
||||
|
||||
Shows all snapshots on the local Hub, with name, size, status, and node count.
|
||||
Use this when the user wants to work with a previously captured snapshot.
|
||||
|
||||
### Cloud Storage
|
||||
|
||||
Snapshots on the Hub are ephemeral and space-limited. Cloud storage (S3, GCS,
|
||||
Azure Blob) provides long-term retention. Snapshots can be downloaded to any
|
||||
cluster with Kubeshark — not necessarily the original cluster. This means you can
|
||||
download a production snapshot to a local KinD cluster for safe analysis.
|
||||
|
||||
**Check cloud status**: `get_cloud_storage_status`
|
||||
**Upload to cloud**: `upload_snapshot_to_cloud`
|
||||
**Download from cloud**: `download_snapshot_from_cloud`
|
||||
|
||||
When cloud storage is configured, recommend uploading snapshots after analysis
|
||||
for long-term retention, especially for compliance or post-mortem documentation.
|
||||
|
||||
## L7 API Dissection
|
||||
|
||||
Think of dissection the way a search engine thinks of indexing. A raw snapshot
|
||||
is like the raw internet — billions of packets, impossible to query efficiently.
|
||||
Dissection indexes that traffic: it reconstructs packets into structured L7 API
|
||||
calls, builds a queryable database of every request, response, header, payload,
|
||||
and timing metric. Once dissected, Kubeshark becomes a search engine for your
|
||||
network traffic — you type a query (using KFL filters), and get instant,
|
||||
precise answers from terabytes of captured data.
|
||||
|
||||
Without dissection, you have PCAPs. With dissection, you have answers.
|
||||
|
||||
### Activate Dissection
|
||||
|
||||
**Tool**: `start_snapshot_dissection`
|
||||
|
||||
Dissection takes time proportional to the snapshot size — it's parsing every
|
||||
packet, reassembling streams, and building the index. After it completes,
|
||||
the full query engine is available:
|
||||
- `list_api_calls` — Search API transactions with filters (the "Google search" for your traffic)
|
||||
- `get_api_call` — Drill into a specific call (headers, body, timing)
|
||||
- `get_api_stats` — Aggregated statistics (throughput, error rates, latency)
|
||||
|
||||
### Investigation Strategy
|
||||
|
||||
Start broad, then narrow:
|
||||
|
||||
1. `get_api_stats` — Get the overall picture: error rates, latency percentiles,
|
||||
throughput. Look for spikes or anomalies.
|
||||
2. `list_api_calls` filtered by error codes (4xx, 5xx) or high latency — find
|
||||
the problematic transactions.
|
||||
3. `get_api_call` on specific calls — inspect headers, bodies, timing to
|
||||
understand what went wrong.
|
||||
4. Use KFL filters (see below) to slice the traffic by namespace, service,
|
||||
protocol, or any combination.
|
||||
|
||||
## PCAP Extraction
|
||||
|
||||
Sometimes you need the raw packets — for Wireshark analysis, sharing with
|
||||
network teams, or compliance evidence.
|
||||
|
||||
### Export a PCAP
|
||||
|
||||
**Tool**: `export_snapshot_pcap`
|
||||
|
||||
You can export the full snapshot or filter it down using:
|
||||
- **Nodes** — specific nodes only
|
||||
- **Time** — sub-window within the snapshot
|
||||
- **BPF filter** — standard Berkeley Packet Filter syntax (e.g., `host 10.0.53.101`,
|
||||
`port 8080`, `net 10.0.0.0/16`)
|
||||
|
||||
### Resolve Workload IPs
|
||||
|
||||
When you care about specific workloads but don't have their IPs, resolve them
|
||||
from the snapshot's metadata. Snapshots preserve the pod-to-IP mappings from
|
||||
capture time, so you get accurate resolution even if pods have since been
|
||||
rescheduled.
|
||||
|
||||
**Tool**: `resolve_workload`
|
||||
|
||||
**Example**: Resolve the IP of `orders-594487879c-7ddxf` from snapshot `slim-timestamp`
|
||||
→ Returns `10.0.53.101`
|
||||
|
||||
Then use that IP in a BPF filter to extract only that workload's traffic:
|
||||
`export_snapshot_pcap` with BPF `host 10.0.53.101`
|
||||
|
||||
## KFL — Kubeshark Filter Language

KFL2 is the query language for slicing through dissected traffic. For the
complete KFL2 reference (all variables, operators, protocol fields, and examples),
see the **KFL skill** (`skills/kfl/`).

### RCA-Specific Filter Patterns

Layer filters progressively when investigating an incident:

```
// Step 1: Protocol + namespace
http && dst.pod.namespace == "production"

// Step 2: Add error condition
http && dst.pod.namespace == "production" && status_code >= 500

// Step 3: Narrow to service
http && dst.pod.namespace == "production" && status_code >= 500 && dst.service.name == "payment-service"

// Step 4: Narrow to endpoint
http && dst.pod.namespace == "production" && status_code >= 500 && dst.service.name == "payment-service" && path.contains("/charge")

// Step 5: Add timing
http && dst.pod.namespace == "production" && status_code >= 500 && dst.service.name == "payment-service" && path.contains("/charge") && elapsed_time > 2000000
```

Other common RCA filters:

```
dns && dns_response && status_code != 0                     // Failed DNS lookups
src.service.namespace != dst.service.namespace              // Cross-namespace traffic
http && elapsed_time > 5000000                              // Slow transactions (> 5s)
conn && conn_state == "open" && conn_local_bytes > 1000000  // High-volume connections
```
## Use Cases

### Post-Incident RCA

The primary use case. Something broke, it's been resolved, and now you need
to understand why.

1. Identify the incident time window from alerts, logs, or user reports
2. Check `get_data_boundaries` — is the window still in raw capture?
3. `create_snapshot` covering the incident window (add buffer: 15 minutes
   before and after the reported time)
4. `start_snapshot_dissection`
5. `get_api_stats` — look for error rate spikes, latency jumps
6. `list_api_calls` filtered to errors — identify the failing service chain
7. `get_api_call` on specific failures — read headers, bodies, timing
8. Follow the dependency chain upstream until you find the originating failure
9. Export relevant PCAPs for the post-mortem document

### Trend Analysis and Drift Detection

Take snapshots at regular intervals (daily, weekly) with consistent parameters.
Compare them to detect:

- **Latency drift** — p95 latency creeping up over days
- **API surface changes** — new endpoints appearing, old ones disappearing
- **Error rate trends** — gradual increase in 5xx responses
- **Traffic pattern shifts** — new service-to-service connections, volume changes
- **Security posture regression** — unencrypted traffic appearing, new external
  connections

**Workflow**:
1. `create_snapshot` with consistent parameters (same time-of-day, same duration)
2. `start_snapshot_dissection` on each
3. `get_api_stats` on each — compare metrics side by side
4. `list_api_calls` with targeted KFL filters — diff the results
5. Flag anomalies and regressions

This is powerful when combined with scheduled tasks — automate daily snapshot
creation and comparison to catch drift before it becomes an incident.

### Forensic Evidence Preservation

For compliance, legal, or audit requirements:

1. `create_snapshot` immediately when an incident is detected
2. `upload_snapshot_to_cloud` — immutable copy in long-term storage
3. Document the snapshot ID, time window, and chain of custody
4. The snapshot can be downloaded to any Kubeshark cluster for later analysis,
   even months later, even on a completely different cluster

### Production-to-Local Replay

Investigate production issues safely on a local cluster:

1. `create_snapshot` on the production cluster
2. `upload_snapshot_to_cloud`
3. On a local KinD/minikube cluster with Kubeshark: `download_snapshot_from_cloud`
4. `start_snapshot_dissection` — full L7 analysis locally
5. Investigate without touching production
## Composability

This skill is designed to work alongside other Kubeshark-powered skills:

- **API Security Skill** — Run security scans against a snapshot's dissected traffic.
  Take daily snapshots and diff security findings to detect posture drift.
- **Incident Response Skill** — Use this skill's snapshot workflow as the evidence
  preservation and forensic analysis layer within the IR methodology.
- **Network Engineering Skill** — Use snapshots for baseline traffic characterization
  and architecture reviews.

When multiple skills are loaded, they share context. A snapshot created here
can be analyzed by the security skill's OWASP scans or the IR skill's
7-phase methodology.
## Setup Reference

### Installing the CLI

**Homebrew (macOS)**:
```bash
brew install kubeshark
```

**Linux**:
```bash
sh <(curl -Ls https://kubeshark.com/install)
```

**From source**:
```bash
git clone https://github.com/kubeshark/kubeshark
cd kubeshark && make
```

### MCP Configuration

**Claude Desktop / Cowork** (`claude_desktop_config.json`):
```json
{
  "mcpServers": {
    "kubeshark": {
      "command": "kubeshark",
      "args": ["mcp"]
    }
  }
}
```

**Claude Code (CLI)**:
```bash
claude mcp add kubeshark -- kubeshark mcp
```

**Without kubectl access** (direct URL mode):
```json
{
  "mcpServers": {
    "kubeshark": {
      "command": "kubeshark",
      "args": ["mcp", "--url", "https://kubeshark.example.com"]
    }
  }
}
```

```bash
# Claude Code equivalent:
claude mcp add kubeshark -- kubeshark mcp --url https://kubeshark.example.com
```
### Verification

- Claude Code: `/mcp` to check connection status
- Terminal: `kubeshark mcp --list-tools`
- Cluster: `kubectl get pods -l app=kubeshark-hub`

### Troubleshooting

- **Binary not found** → Install via Homebrew or the install script above
- **Connection refused** → Deploy Kubeshark first: `kubeshark tap`
- **No L7 data** → Check `get_dissection_status` and `enable_dissection`
- **Snapshot creation fails** → Verify raw capture is enabled in Kubeshark config
- **Empty snapshot** → Check `get_data_boundaries` — the requested window may
  fall outside available data
Reference in New Issue
Block a user