Compare commits

...

14 Commits

Author SHA1 Message Date
Alon Girmonsky
3d36c65bb0 Update .github/workflows/release-tag.yml
Co-authored-by: Volodymyr Stoiko <me@volodymyrstoiko.com>
2026-03-04 21:50:44 -08:00
Alon Girmonsky
76bcf4368c Fold release-helm into release-pr for a 2-step workflow 2026-03-04 13:20:30 -08:00
Alon Girmonsky
0af0c0f49f Fix reviewer username typo: corst -> corest 2026-03-04 13:17:53 -08:00
Alon Girmonsky
fd84bd8044 🔖 Bump the Helm chart version to 53.1.0 2026-03-04 13:14:50 -08:00
Alon Girmonsky
eb7dc42b6e Add get_file_url and download_file MCP tools (#1853)
* Reapply "Add get_file_url and download_file MCP tools"

This reverts commit a46f05c4aa.

* Use dedicated HTTP client for file downloads to support large files

The default httpClient has a 30s total timeout that would fail for
large PCAP downloads (up to 10GB). Use a separate client with only
connection-level timeouts (TLS handshake, response headers) so the
body can stream without a deadline.
2026-03-04 09:17:23 -08:00
Volodymyr Stoiko
d266408377 Add snapshots cloud storage (#1852)
* add testing values for helm chart

* Add readme updates for cloud storage

* fixes

* cloud-storage-docs

---------

Co-authored-by: Alon Girmonsky <1990761+alongir@users.noreply.github.com>
2026-03-04 08:50:45 -08:00
sunnyraindy
40ae6c626b chore: remove duplicate package import (#1800)
Signed-off-by: sunnyraindy <sunnyraindy@outlook.com>
Co-authored-by: Volodymyr Stoiko <me@volodymyrstoiko.com>
Co-authored-by: Alon Girmonsky <1990761+alongir@users.noreply.github.com>
2026-03-04 08:39:32 -08:00
Alon Girmonsky
e3283327f9 Add --release-helmChartPath CLI flag for local Helm chart support (#1851)
Allow users to specify a local Helm chart folder via CLI flag or config,
which takes precedence over the KUBESHARK_HELM_CHART_PATH env variable and
the remote Helm repo. Also update nginx proxy config to disable buffering
for better streaming and large snapshot support.
2026-03-04 08:29:04 -08:00
Alon Girmonsky
a46f05c4aa Revert "Add get_file_url and download_file MCP tools"
This reverts commit dbfd17d901.
2026-03-03 15:06:52 -08:00
Alon Girmonsky
dbfd17d901 Add get_file_url and download_file MCP tools
When tools like export_snapshot_pcap return a relative file path,
the MCP client needs a way to resolve it to a full URL or download
the file locally. These two new tools bridge that gap.
2026-03-03 14:54:39 -08:00
Volodymyr Stoiko
95c18b57a4 Use dissection image tag from worker (#1850) 2026-02-25 11:41:50 -08:00
Alon Girmonsky
6fd2e4b1b2 updated gitignore (#1849) 2026-02-18 11:52:13 -08:00
Volodymyr Stoiko
686c7eba54 Adjust nginx config to work with large download/upload snapshots (#1848)
* adjust-nginx

* cleanup

* improve

* streaming
2026-02-18 10:48:57 -08:00
Ilya Gavrilov
1ad61798f6 Set tcp and udp flows timeouts. Default is 20 minutes (#1847)
* Set tcp and udp flows timeouts. Default is 10 minutes

* fix make test
2026-02-17 16:50:13 -08:00
18 changed files with 826 additions and 117 deletions

24
.github/workflows/release-tag.yml vendored Normal file
View File

@@ -0,0 +1,24 @@
name: Auto-tag release
on:
pull_request:
types: [closed]
branches: [master]
jobs:
tag:
if: github.event.pull_request.merged == true && startsWith(github.event.pull_request.head.ref, 'release/v')
runs-on: ubuntu-latest
permissions:
contents: write
steps:
- uses: actions/checkout@v5
with:
fetch-depth: 0
- name: Create and push tag
run: |
VERSION="${GITHUB_HEAD_REF#release/}"
echo "Creating tag $VERSION on master"
git tag "$VERSION"
git push origin "$VERSION"

5
.gitignore vendored
View File

@@ -63,4 +63,7 @@ bin
scripts/
# CWD config YAML
kubeshark.yaml
kubeshark.yaml
# Claude Code
CLAUDE.md

View File

@@ -242,31 +242,75 @@ proxy:
port-forward:
kubectl port-forward $$(kubectl get pods | awk '$$1 ~ /^$(POD_PREFIX)/' | awk 'END {print $$1}') $(SRC_PORT):$(DST_PORT)
release:
release: ## Print release workflow instructions.
@echo "Release workflow (2 steps):"
@echo ""
@echo " 1. make release-pr VERSION=x.y.z"
@echo " Tags sibling repos, bumps version, creates PRs"
@echo " (kubeshark + kubeshark.github.io helm chart)."
@echo " Review and merge both PRs manually."
@echo ""
@echo " 2. (automatic) Tag is created when release PR merges."
@echo " Fallback: make release-tag VERSION=x.y.z"
release-pr: ## Step 1: Tag sibling repos, bump version, create release PR.
@cd ../worker && git checkout master && git pull && git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags
@cd ../tracer && git checkout master && git pull && git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags
@cd ../hub && git checkout master && git pull && git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags
@cd ../front && git checkout master && git pull && git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags
@cd ../kubeshark && git checkout master && git pull && sed -i "s/^version:.*/version: \"$(shell echo $(VERSION) | sed -E 's/^([0-9]+\.[0-9]+\.[0-9]+)\..*/\1/')\"/" helm-chart/Chart.yaml && make
@cd ../kubeshark && git checkout master && git pull
@sed -i "s/^version:.*/version: \"$(shell echo $(VERSION) | sed -E 's/^([0-9]+\.[0-9]+\.[0-9]+)\..*/\1/')\"/" helm-chart/Chart.yaml
@$(MAKE) build VER=$(VERSION)
@if [ "$(shell uname)" = "Darwin" ]; then \
codesign --sign - --force --preserve-metadata=entitlements,requirements,flags,runtime ./bin/kubeshark__; \
fi
@make generate-helm-values && make generate-manifests
@git add -A . && git commit -m ":bookmark: Bump the Helm chart version to $(VERSION)" && git push
@git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags
@rm -rf ../kubeshark.github.io/charts/chart && mkdir ../kubeshark.github.io/charts/chart && cp -r helm-chart/ ../kubeshark.github.io/charts/chart/
@cd ../kubeshark.github.io/ && git add -A . && git commit -m ":sparkles: Update the Helm chart" && git push
@$(MAKE) generate-helm-values && $(MAKE) generate-manifests
@git checkout -b release/v$(VERSION)
@git add -A .
@git commit -m ":bookmark: Bump the Helm chart version to $(VERSION)"
@git push -u origin release/v$(VERSION)
@gh pr create --title ":bookmark: Release v$(VERSION)" \
--body "Automated release PR for v$(VERSION)." \
--base master \
--reviewer corest
@rm -rf ../kubeshark.github.io/charts/chart
@mkdir ../kubeshark.github.io/charts/chart
@cp -r helm-chart/ ../kubeshark.github.io/charts/chart/
@cd ../kubeshark.github.io && git checkout master && git pull \
&& git checkout -b helm-v$(VERSION) \
&& git add -A . \
&& git commit -m ":sparkles: Update the Helm chart to v$(VERSION)" \
&& git push -u origin helm-v$(VERSION) \
&& gh pr create --title ":sparkles: Helm chart v$(VERSION)" \
--body "Update Helm chart for release v$(VERSION)." \
--base master \
--reviewer corest
@cd ../kubeshark
@echo ""
@echo "Release PRs created:"
@echo " - kubeshark: Review and merge the release PR."
@echo " - kubeshark.github.io: Review and merge the helm chart PR."
@echo "Tag will be created automatically, or run: make release-tag VERSION=$(VERSION)"
release-tag: ## Step 2 (fallback): Tag master after release PR is merged.
@echo "Verifying release PR was merged..."
@if ! gh pr list --state merged --head release/v$(VERSION) --json number --jq '.[0].number' | grep -q .; then \
echo "Error: No merged PR found for release/v$(VERSION). Merge the PR first."; \
exit 1; \
fi
@git checkout master && git pull
@git tag -d v$(VERSION) 2>/dev/null; git tag v$(VERSION) && git push origin --tags
@echo ""
@echo "Tagged v$(VERSION) on master. GitHub Actions will build the release."
release-dry-run:
@cd ../worker && git checkout master && git pull
@cd ../tracer && git checkout master && git pull
# @cd ../tracer && git checkout master && git pull
@cd ../hub && git checkout master && git pull
@cd ../front && git checkout master && git pull
@cd ../kubeshark && sed -i "s/^version:.*/version: \"$(shell echo $(VERSION) | sed -E 's/^([0-9]+\.[0-9]+\.[0-9]+)\..*/\1/')\"/" helm-chart/Chart.yaml && make
@if [ "$(shell uname)" = "Darwin" ]; then \
codesign --sign - --force --preserve-metadata=entitlements,requirements,flags,runtime ./bin/kubeshark__; \
fi
# @if [ "$(shell uname)" = "Darwin" ]; then \
# codesign --sign - --force --preserve-metadata=entitlements,requirements,flags,runtime ./bin/kubeshark__; \
# fi
@make generate-helm-values && make generate-manifests
@rm -rf ../kubeshark.github.io/charts/chart && mkdir ../kubeshark.github.io/charts/chart && cp -r helm-chart/ ../kubeshark.github.io/charts/chart/
@cd ../kubeshark.github.io/

View File

@@ -10,6 +10,7 @@ import (
"net/http"
"os"
"os/exec"
"path"
"strings"
"sync"
"time"
@@ -324,6 +325,16 @@ func (s *mcpServer) invalidateHubMCPCache() {
s.cachedHubMCP = nil
}
// getBaseURL returns the hub API base URL by stripping /mcp from hubBaseURL.
// The hub URL is always the frontend URL + /api, and hubBaseURL is frontendURL/api/mcp.
// Ensures backend connection is established first.
func (s *mcpServer) getBaseURL() (string, error) {
if errMsg := s.ensureBackendConnection(); errMsg != "" {
return "", fmt.Errorf("%s", errMsg)
}
return strings.TrimSuffix(s.hubBaseURL, "/mcp"), nil
}
func writeErrorToStderr(format string, args ...any) {
fmt.Fprintf(os.Stderr, format+"\n", args...)
}
@@ -379,6 +390,14 @@ func (s *mcpServer) handleRequest(req *jsonRPCRequest) {
func (s *mcpServer) handleInitialize(req *jsonRPCRequest) {
var instructions string
fileDownloadInstructions := `
Downloading files (e.g., PCAP exports):
When a tool like export_snapshot_pcap returns a relative file path, you MUST use the file tools to retrieve the file:
- get_file_url: Resolves the relative path to a full download URL you can share with the user.
- download_file: Downloads the file to the local filesystem so it can be opened or analyzed.
Typical workflow: call export_snapshot_pcap → receive a relative path → call download_file with that path → share the local file path with the user.`
if s.urlMode {
instructions = fmt.Sprintf(`Kubeshark MCP Server - Connected to: %s
@@ -392,7 +411,7 @@ Available tools for traffic analysis:
- get_api_stats: Get aggregated API statistics
- And more - use tools/list to see all available tools
Use the MCP tools directly - do NOT use kubectl or curl to access Kubeshark.`, s.directURL)
Use the MCP tools directly - do NOT use kubectl or curl to access Kubeshark.`, s.directURL) + fileDownloadInstructions
} else if s.allowDestructive {
instructions = `Kubeshark MCP Server - Proxy Mode (Destructive Operations ENABLED)
@@ -410,7 +429,7 @@ Safe operations:
Traffic analysis tools (require Kubeshark to be running):
- list_workloads, list_api_calls, list_l4_flows, get_api_stats, and more
Use the MCP tools - do NOT use kubectl, helm, or curl directly.`
Use the MCP tools - do NOT use kubectl, helm, or curl directly.` + fileDownloadInstructions
} else {
instructions = `Kubeshark MCP Server - Proxy Mode (Read-Only)
@@ -425,7 +444,7 @@ Available operations:
Traffic analysis tools (require Kubeshark to be running):
- list_workloads, list_api_calls, list_l4_flows, get_api_stats, and more
Use the MCP tools - do NOT use kubectl, helm, or curl directly.`
Use the MCP tools - do NOT use kubectl, helm, or curl directly.` + fileDownloadInstructions
}
result := mcpInitializeResult{
@@ -456,6 +475,40 @@ func (s *mcpServer) handleListTools(req *jsonRPCRequest) {
}`),
})
// Add file URL and download tools - available in all modes
tools = append(tools, mcpTool{
Name: "get_file_url",
Description: "When a tool (e.g., export_snapshot_pcap) returns a relative file path, use this tool to resolve it into a fully-qualified download URL. The URL can be shared with the user for manual download.",
InputSchema: json.RawMessage(`{
"type": "object",
"properties": {
"path": {
"type": "string",
"description": "The relative file path returned by a Hub tool (e.g., '/snapshots/abc/data.pcap')"
}
},
"required": ["path"]
}`),
})
tools = append(tools, mcpTool{
Name: "download_file",
Description: "When a tool (e.g., export_snapshot_pcap) returns a relative file path, use this tool to download the file to the local filesystem. This is the preferred way to retrieve PCAP exports and other files from Kubeshark.",
InputSchema: json.RawMessage(`{
"type": "object",
"properties": {
"path": {
"type": "string",
"description": "The relative file path returned by a Hub tool (e.g., '/snapshots/abc/data.pcap')"
},
"dest": {
"type": "string",
"description": "Local destination file path. If not provided, uses the filename from the path in the current directory."
}
},
"required": ["path"]
}`),
})
// Add destructive tools only if --allow-destructive flag was set (and not in URL mode)
if !s.urlMode && s.allowDestructive {
tools = append(tools, mcpTool{
@@ -653,6 +706,20 @@ func (s *mcpServer) handleCallTool(req *jsonRPCRequest) {
IsError: isError,
})
return
case "get_file_url":
result, isError = s.callGetFileURL(params.Arguments)
s.sendResult(req.ID, mcpCallToolResult{
Content: []mcpContent{{Type: "text", Text: result}},
IsError: isError,
})
return
case "download_file":
result, isError = s.callDownloadFile(params.Arguments)
s.sendResult(req.ID, mcpCallToolResult{
Content: []mcpContent{{Type: "text", Text: result}},
IsError: isError,
})
return
}
// Forward Hub tools to the API
@@ -706,6 +773,91 @@ func (s *mcpServer) callHubTool(toolName string, args map[string]any) (string, b
}
func (s *mcpServer) callGetFileURL(args map[string]any) (string, bool) {
filePath, _ := args["path"].(string)
if filePath == "" {
return "Error: 'path' parameter is required", true
}
baseURL, err := s.getBaseURL()
if err != nil {
return fmt.Sprintf("Error: %v", err), true
}
// Ensure path starts with /
if !strings.HasPrefix(filePath, "/") {
filePath = "/" + filePath
}
fullURL := strings.TrimSuffix(baseURL, "/") + filePath
return fullURL, false
}
func (s *mcpServer) callDownloadFile(args map[string]any) (string, bool) {
filePath, _ := args["path"].(string)
if filePath == "" {
return "Error: 'path' parameter is required", true
}
baseURL, err := s.getBaseURL()
if err != nil {
return fmt.Sprintf("Error: %v", err), true
}
// Ensure path starts with /
if !strings.HasPrefix(filePath, "/") {
filePath = "/" + filePath
}
fullURL := strings.TrimSuffix(baseURL, "/") + filePath
// Determine destination file path
dest, _ := args["dest"].(string)
if dest == "" {
dest = path.Base(filePath)
}
// Use a dedicated HTTP client for file downloads.
// The default s.httpClient has a 30s total timeout which would fail for large files (up to 10GB).
// This client sets only connection-level timeouts and lets the body stream without a deadline.
downloadClient := &http.Client{
Transport: &http.Transport{
TLSHandshakeTimeout: 10 * time.Second,
ResponseHeaderTimeout: 30 * time.Second,
},
}
resp, err := downloadClient.Get(fullURL)
if err != nil {
return fmt.Sprintf("Error downloading file: %v", err), true
}
defer func() { _ = resp.Body.Close() }()
if resp.StatusCode >= 400 {
return fmt.Sprintf("Error downloading file: HTTP %d", resp.StatusCode), true
}
// Write to destination
outFile, err := os.Create(dest)
if err != nil {
return fmt.Sprintf("Error creating file %s: %v", dest, err), true
}
defer func() { _ = outFile.Close() }()
written, err := io.Copy(outFile, resp.Body)
if err != nil {
return fmt.Sprintf("Error writing file %s: %v", dest, err), true
}
result := map[string]any{
"url": fullURL,
"path": dest,
"size": written,
}
resultBytes, _ := json.MarshalIndent(result, "", " ")
return string(resultBytes), false
}
func (s *mcpServer) callStartKubeshark(args map[string]any) (string, bool) {
// Build the kubeshark tap command
cmdArgs := []string{"tap"}
@@ -913,6 +1065,11 @@ func listMCPTools(directURL string) {
fmt.Printf("URL Mode: %s\n\n", directURL)
fmt.Println("Cluster management tools disabled (Kubeshark managed externally)")
fmt.Println()
fmt.Println("Local Tools:")
fmt.Println(" check_kubeshark_status Check if Kubeshark is running")
fmt.Println(" get_file_url Resolve a relative path to a full download URL")
fmt.Println(" download_file Download a file from Kubeshark to local disk")
fmt.Println()
hubURL := strings.TrimSuffix(directURL, "/") + "/api/mcp"
fetchAndDisplayTools(hubURL, 30*time.Second)
@@ -925,6 +1082,10 @@ func listMCPTools(directURL string) {
fmt.Println(" start_kubeshark Start Kubeshark to capture traffic")
fmt.Println(" stop_kubeshark Stop Kubeshark and clean up resources")
fmt.Println()
fmt.Println("File Tools:")
fmt.Println(" get_file_url Resolve a relative path to a full download URL")
fmt.Println(" download_file Download a file from Kubeshark to local disk")
fmt.Println()
// Establish proxy connection to Kubeshark
fmt.Println("Connecting to Kubeshark...")

View File

@@ -5,6 +5,8 @@ import (
"encoding/json"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"strings"
"testing"
)
@@ -126,8 +128,18 @@ func TestMCP_ToolsList_CLIOnly(t *testing.T) {
t.Fatalf("Unexpected error: %v", resp.Error)
}
tools := resp.Result.(map[string]any)["tools"].([]any)
if len(tools) != 1 || tools[0].(map[string]any)["name"] != "check_kubeshark_status" {
t.Error("Expected only check_kubeshark_status tool")
// Should have check_kubeshark_status + get_file_url + download_file = 3 tools
if len(tools) != 3 {
t.Errorf("Expected 3 tools, got %d", len(tools))
}
toolNames := make(map[string]bool)
for _, tool := range tools {
toolNames[tool.(map[string]any)["name"].(string)] = true
}
for _, expected := range []string{"check_kubeshark_status", "get_file_url", "download_file"} {
if !toolNames[expected] {
t.Errorf("Missing expected tool: %s", expected)
}
}
}
@@ -163,9 +175,9 @@ func TestMCP_ToolsList_WithHubBackend(t *testing.T) {
t.Fatalf("Unexpected error: %v", resp.Error)
}
tools := resp.Result.(map[string]any)["tools"].([]any)
// Should have CLI tools (3) + Hub tools (2) = 5 tools
if len(tools) < 5 {
t.Errorf("Expected at least 5 tools, got %d", len(tools))
// Should have CLI tools (3) + file tools (2) + Hub tools (2) = 7 tools
if len(tools) < 7 {
t.Errorf("Expected at least 7 tools, got %d", len(tools))
}
}
@@ -218,7 +230,7 @@ func newTestMCPServerWithMockBackend(handler http.HandlerFunc) (*mcpServer, *htt
}
type hubToolCallRequest struct {
Tool string `json:"tool"`
Tool string `json:"name"`
Arguments map[string]any `json:"arguments"`
}
@@ -463,6 +475,187 @@ func TestMCP_BackendInitialization_Concurrent(t *testing.T) {
}
}
func TestMCP_GetFileURL_ProxyMode(t *testing.T) {
s := &mcpServer{
httpClient: &http.Client{},
stdin: &bytes.Buffer{},
stdout: &bytes.Buffer{},
hubBaseURL: "http://127.0.0.1:8899/api/mcp",
backendInitialized: true,
}
resp := parseResponse(t, sendRequest(s, "tools/call", 1, mcpCallToolParams{
Name: "get_file_url",
Arguments: map[string]any{"path": "/snapshots/abc/data.pcap"},
}))
if resp.Error != nil {
t.Fatalf("Unexpected error: %v", resp.Error)
}
text := resp.Result.(map[string]any)["content"].([]any)[0].(map[string]any)["text"].(string)
expected := "http://127.0.0.1:8899/api/snapshots/abc/data.pcap"
if text != expected {
t.Errorf("Expected %q, got %q", expected, text)
}
}
func TestMCP_GetFileURL_URLMode(t *testing.T) {
s := &mcpServer{
httpClient: &http.Client{},
stdin: &bytes.Buffer{},
stdout: &bytes.Buffer{},
hubBaseURL: "https://kubeshark.example.com/api/mcp",
backendInitialized: true,
urlMode: true,
directURL: "https://kubeshark.example.com",
}
resp := parseResponse(t, sendRequest(s, "tools/call", 1, mcpCallToolParams{
Name: "get_file_url",
Arguments: map[string]any{"path": "/snapshots/xyz/export.pcap"},
}))
if resp.Error != nil {
t.Fatalf("Unexpected error: %v", resp.Error)
}
text := resp.Result.(map[string]any)["content"].([]any)[0].(map[string]any)["text"].(string)
expected := "https://kubeshark.example.com/api/snapshots/xyz/export.pcap"
if text != expected {
t.Errorf("Expected %q, got %q", expected, text)
}
}
func TestMCP_GetFileURL_MissingPath(t *testing.T) {
s := &mcpServer{
httpClient: &http.Client{},
stdin: &bytes.Buffer{},
stdout: &bytes.Buffer{},
hubBaseURL: "http://127.0.0.1:8899/api/mcp",
backendInitialized: true,
}
resp := parseResponse(t, sendRequest(s, "tools/call", 1, mcpCallToolParams{
Name: "get_file_url",
Arguments: map[string]any{},
}))
result := resp.Result.(map[string]any)
if !result["isError"].(bool) {
t.Error("Expected isError=true when path is missing")
}
text := result["content"].([]any)[0].(map[string]any)["text"].(string)
if !strings.Contains(text, "path") {
t.Error("Error message should mention 'path'")
}
}
func TestMCP_DownloadFile(t *testing.T) {
fileContent := "test pcap data content"
mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.URL.Path == "/api/snapshots/abc/data.pcap" {
_, _ = w.Write([]byte(fileContent))
} else {
w.WriteHeader(http.StatusNotFound)
}
}))
defer mockServer.Close()
// Use temp dir for download destination
tmpDir := t.TempDir()
dest := filepath.Join(tmpDir, "downloaded.pcap")
s := &mcpServer{
httpClient: &http.Client{},
stdin: &bytes.Buffer{},
stdout: &bytes.Buffer{},
hubBaseURL: mockServer.URL + "/api/mcp",
backendInitialized: true,
}
resp := parseResponse(t, sendRequest(s, "tools/call", 1, mcpCallToolParams{
Name: "download_file",
Arguments: map[string]any{"path": "/snapshots/abc/data.pcap", "dest": dest},
}))
if resp.Error != nil {
t.Fatalf("Unexpected error: %v", resp.Error)
}
result := resp.Result.(map[string]any)
if result["isError"] != nil && result["isError"].(bool) {
t.Fatalf("Expected no error, got: %v", result["content"])
}
text := result["content"].([]any)[0].(map[string]any)["text"].(string)
var downloadResult map[string]any
if err := json.Unmarshal([]byte(text), &downloadResult); err != nil {
t.Fatalf("Failed to parse download result JSON: %v", err)
}
if downloadResult["path"] != dest {
t.Errorf("Expected path %q, got %q", dest, downloadResult["path"])
}
if downloadResult["size"].(float64) != float64(len(fileContent)) {
t.Errorf("Expected size %d, got %v", len(fileContent), downloadResult["size"])
}
// Verify the file was actually written
content, err := os.ReadFile(dest)
if err != nil {
t.Fatalf("Failed to read downloaded file: %v", err)
}
if string(content) != fileContent {
t.Errorf("Expected file content %q, got %q", fileContent, string(content))
}
}
func TestMCP_DownloadFile_CustomDest(t *testing.T) {
mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
_, _ = w.Write([]byte("data"))
}))
defer mockServer.Close()
tmpDir := t.TempDir()
customDest := filepath.Join(tmpDir, "custom-name.pcap")
s := &mcpServer{
httpClient: &http.Client{},
stdin: &bytes.Buffer{},
stdout: &bytes.Buffer{},
hubBaseURL: mockServer.URL + "/api/mcp",
backendInitialized: true,
}
resp := parseResponse(t, sendRequest(s, "tools/call", 1, mcpCallToolParams{
Name: "download_file",
Arguments: map[string]any{"path": "/snapshots/abc/export.pcap", "dest": customDest},
}))
result := resp.Result.(map[string]any)
if result["isError"] != nil && result["isError"].(bool) {
t.Fatalf("Expected no error, got: %v", result["content"])
}
text := result["content"].([]any)[0].(map[string]any)["text"].(string)
var downloadResult map[string]any
if err := json.Unmarshal([]byte(text), &downloadResult); err != nil {
t.Fatalf("Failed to parse download result JSON: %v", err)
}
if downloadResult["path"] != customDest {
t.Errorf("Expected path %q, got %q", customDest, downloadResult["path"])
}
if _, err := os.Stat(customDest); os.IsNotExist(err) {
t.Error("Expected file to exist at custom destination")
}
}
func TestMCP_ToolsList_IncludesFileTools(t *testing.T) {
s := newTestMCPServer()
resp := parseResponse(t, sendRequest(s, "tools/list", 1, nil))
if resp.Error != nil {
t.Fatalf("Unexpected error: %v", resp.Error)
}
tools := resp.Result.(map[string]any)["tools"].([]any)
toolNames := make(map[string]bool)
for _, tool := range tools {
toolNames[tool.(map[string]any)["name"].(string)] = true
}
for _, expected := range []string{"get_file_url", "download_file"} {
if !toolNames[expected] {
t.Errorf("Missing expected tool: %s", expected)
}
}
}
func TestMCP_FullConversation(t *testing.T) {
mockServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.URL.Path == "/" {

View File

@@ -18,7 +18,6 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
clientk8s "k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/remotecommand"
)
@@ -39,7 +38,7 @@ type PodFileInfo struct {
}
// listWorkerPods fetches all worker pods from multiple namespaces
func listWorkerPods(ctx context.Context, clientset *clientk8s.Clientset, namespaces []string) ([]*PodFileInfo, error) {
func listWorkerPods(ctx context.Context, clientset *kubernetes.Clientset, namespaces []string) ([]*PodFileInfo, error) {
var podFileInfos []*PodFileInfo
var errs []error
labelSelector := label
@@ -65,7 +64,7 @@ func listWorkerPods(ctx context.Context, clientset *clientk8s.Clientset, namespa
}
// listFilesInPodDir lists all files in the specified directory inside the pod across multiple namespaces
func listFilesInPodDir(ctx context.Context, clientset *clientk8s.Clientset, config *rest.Config, pod *PodFileInfo, cutoffTime *time.Time) error {
func listFilesInPodDir(ctx context.Context, clientset *kubernetes.Clientset, config *rest.Config, pod *PodFileInfo, cutoffTime *time.Time) error {
nodeName := pod.Pod.Spec.NodeName
srcFilePath := filepath.Join("data", nodeName, srcDir)

View File

@@ -62,4 +62,5 @@ func init() {
tapCmd.Flags().Bool(configStructs.TelemetryEnabledLabel, defaultTapConfig.Telemetry.Enabled, "Enable/disable Telemetry")
tapCmd.Flags().Bool(configStructs.ResourceGuardEnabledLabel, defaultTapConfig.ResourceGuard.Enabled, "Enable/disable resource guard")
tapCmd.Flags().Bool(configStructs.WatchdogEnabled, defaultTapConfig.Watchdog.Enabled, "Enable/disable watchdog")
tapCmd.Flags().String(configStructs.HelmChartPathLabel, defaultTapConfig.Release.HelmChartPath, "Path to a local Helm chart folder (overrides the remote Helm repo)")
}

View File

@@ -45,6 +45,7 @@ const (
PcapDumpEnabled = "enabled"
PcapTime = "time"
WatchdogEnabled = "watchdogEnabled"
HelmChartPathLabel = "release-helmChartPath"
)
type ResourceLimitsHub struct {
@@ -211,6 +212,7 @@ type ReleaseConfig struct {
Repo string `yaml:"repo" json:"repo" default:"https://helm.kubeshark.com"`
Name string `yaml:"name" json:"name" default:"kubeshark"`
Namespace string `yaml:"namespace" json:"namespace" default:"default"`
HelmChartPath string `yaml:"helmChartPath" json:"helmChartPath" default:""`
}
type TelemetryConfig struct {
@@ -261,6 +263,8 @@ type MiscConfig struct {
DuplicateTimeframe string `yaml:"duplicateTimeframe" json:"duplicateTimeframe" default:"200ms"`
DetectDuplicates bool `yaml:"detectDuplicates" json:"detectDuplicates" default:"false"`
StaleTimeoutSeconds int `yaml:"staleTimeoutSeconds" json:"staleTimeoutSeconds" default:"30"`
TcpFlowTimeout int `yaml:"tcpFlowTimeout" json:"tcpFlowTimeout" default:"1200"`
UdpFlowTimeout int `yaml:"udpFlowTimeout" json:"udpFlowTimeout" default:"1200"`
}
type PcapDumpConfig struct {
@@ -306,13 +310,23 @@ type RawCaptureConfig struct {
StorageSize string `yaml:"storageSize" json:"storageSize" default:"1Gi"`
}
type SnapshotsConfig struct {
type SnapshotsLocalConfig struct {
StorageClass string `yaml:"storageClass" json:"storageClass" default:""`
StorageSize string `yaml:"storageSize" json:"storageSize" default:"20Gi"`
}
type SnapshotsCloudConfig struct {
Provider string `yaml:"provider" json:"provider" default:""`
ConfigMaps []string `yaml:"configMaps" json:"configMaps" default:"[]"`
Secrets []string `yaml:"secrets" json:"secrets" default:"[]"`
}
type SnapshotsConfig struct {
Local SnapshotsLocalConfig `yaml:"local" json:"local"`
Cloud SnapshotsCloudConfig `yaml:"cloud" json:"cloud"`
}
type DelayedDissectionConfig struct {
Image string `yaml:"image" json:"image" default:"kubeshark/worker:master"`
CPU string `yaml:"cpu" json:"cpu" default:"1"`
Memory string `yaml:"memory" json:"memory" default:"4Gi"`
}

View File

@@ -1,6 +1,6 @@
apiVersion: v2
name: kubeshark
version: "52.12.0"
version: "53.1.0"
description: The API Traffic Analyzer for Kubernetes
home: https://kubeshark.com
keywords:

View File

@@ -143,8 +143,11 @@ Example for overriding image names:
| `tap.capture.raw.enabled` | Enable raw capture of packets and syscalls to disk for offline analysis | `true` |
| `tap.capture.raw.storageSize` | Maximum storage size for raw capture files (supports K8s quantity format: `1Gi`, `500Mi`, etc.) | `1Gi` |
| `tap.capture.dbMaxSize` | Maximum size for capture database (e.g., `4Gi`, `2000Mi`). When empty, automatically uses 80% of allocated storage (`tap.storageLimit`). | `""` |
| `tap.snapshots.storageClass` | Storage class for snapshots volume. When empty, uses `emptyDir`. When set, creates a PVC with this storage class | `""` |
| `tap.snapshots.storageSize` | Storage size for snapshots volume (supports K8s quantity format: `1Gi`, `500Mi`, etc.) | `10Gi` |
| `tap.snapshots.local.storageClass` | Storage class for local snapshots volume. When empty, uses `emptyDir`. When set, creates a PVC with this storage class | `""` |
| `tap.snapshots.local.storageSize` | Storage size for local snapshots volume (supports K8s quantity format: `1Gi`, `500Mi`, etc.) | `20Gi` |
| `tap.snapshots.cloud.provider` | Cloud storage provider for snapshots: `s3` or `azblob`. Empty string disables cloud storage. See [Cloud Storage docs](docs/snapshots_cloud_storage.md). | `""` |
| `tap.snapshots.cloud.configMaps` | Names of ConfigMaps containing cloud storage environment variables. See [Cloud Storage docs](docs/snapshots_cloud_storage.md). | `[]` |
| `tap.snapshots.cloud.secrets` | Names of Secrets containing cloud storage credentials. See [Cloud Storage docs](docs/snapshots_cloud_storage.md). | `[]` |
| `tap.release.repo` | URL of the Helm chart repository | `https://helm.kubeshark.com` |
| `tap.release.name` | Helm release name | `kubeshark` |
| `tap.release.namespace` | Helm release namespace | `default` |
@@ -220,6 +223,8 @@ Example for overriding image names:
| `tap.mountBpf` | BPF filesystem needs to be mounted for eBPF to work properly. This helm value determines whether Kubeshark will attempt to mount the filesystem. This option is not required if the filesystem is already mounted. | `true` |
| `tap.hostNetwork` | Enable host network mode for worker DaemonSet pods. When enabled, worker pods use the host's network namespace for direct network access. | `true` |
| `tap.gitops.enabled` | Enable GitOps functionality. This will allow you to use GitOps to manage your Kubeshark configuration. | `false` |
| `tap.misc.tcpFlowTimeout` | TCP flow aggregation timeout in seconds. Controls how long the worker waits before finalizing a TCP flow. | `1200` |
| `tap.misc.udpFlowTimeout` | UDP flow aggregation timeout in seconds. Controls how long the worker waits before finalizing a UDP flow. | `1200` |
| `logs.file` | Logs dump path | `""` |
| `pcapdump.enabled` | Enable recording of all traffic captured according to other parameters. Whatever Kubeshark captures, considering pod targeting rules, will be stored in pcap files ready to be viewed by tools | `false` |
| `pcapdump.maxTime` | The time window into the past that will be stored. Older traffic will be discarded. | `2h` |

View File

@@ -0,0 +1,226 @@
# Cloud Storage for Snapshots
Kubeshark can upload and download snapshots to cloud object storage, enabling cross-cluster sharing, backup/restore, and long-term retention.
Supported providers: **Amazon S3** (`s3`) and **Azure Blob Storage** (`azblob`).
## Helm Values
```yaml
tap:
snapshots:
cloud:
provider: "" # "s3" or "azblob" (empty = disabled)
configMaps: [] # names of pre-existing ConfigMaps with cloud config env vars
secrets: [] # names of pre-existing Secrets with cloud credentials
```
- `provider` selects which cloud backend to use. Leave empty to disable cloud storage.
- `configMaps` and `secrets` are lists of names of existing ConfigMap/Secret resources. They are mounted as `envFrom` on the hub pod, injecting all their keys as environment variables.
---
## Amazon S3
### Environment Variables
| Variable | Required | Description |
|----------|----------|-------------|
| `SNAPSHOT_AWS_BUCKET` | Yes | S3 bucket name |
| `SNAPSHOT_AWS_REGION` | No | AWS region (uses SDK default if empty) |
| `SNAPSHOT_AWS_ACCESS_KEY` | No | Static access key ID (empty = use default credential chain) |
| `SNAPSHOT_AWS_SECRET_KEY` | No | Static secret access key |
| `SNAPSHOT_AWS_ROLE_ARN` | No | IAM role ARN to assume via STS (for cross-account access) |
| `SNAPSHOT_AWS_EXTERNAL_ID` | No | External ID for the STS AssumeRole call |
| `SNAPSHOT_CLOUD_PREFIX` | No | Key prefix in the bucket (e.g. `snapshots/`) |
### Authentication Methods
Credentials are resolved in this order:
1. **Static credentials** -- If `SNAPSHOT_AWS_ACCESS_KEY` is set, static credentials are used directly.
2. **STS AssumeRole** -- If `SNAPSHOT_AWS_ROLE_ARN` is set, the base credentials (static, IRSA, or default chain) are used to assume the given IAM role. This is useful for cross-account S3 access.
3. **AWS default credential chain** -- When no static credentials are provided, the SDK default chain is used:
- **IRSA** (EKS service account token) -- recommended for production on EKS
- EC2 instance profile
- Standard AWS environment variables (`AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, etc.)
- Shared credentials file (`~/.aws/credentials`)
The provider validates bucket access on startup via `HeadBucket`. If the bucket is inaccessible, the hub will fail to start.
### Example: IRSA (recommended for EKS)
Create a ConfigMap with bucket configuration:
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: kubeshark-s3-config
data:
SNAPSHOT_AWS_BUCKET: my-kubeshark-snapshots
SNAPSHOT_AWS_REGION: us-east-1
```
Set Helm values:
```yaml
tap:
snapshots:
cloud:
provider: "s3"
configMaps:
- kubeshark-s3-config
```
The hub pod's service account must be annotated for IRSA with an IAM role that has S3 access to the bucket.
### Example: Static Credentials
Create a Secret with credentials:
```yaml
apiVersion: v1
kind: Secret
metadata:
name: kubeshark-s3-creds
type: Opaque
stringData:
SNAPSHOT_AWS_ACCESS_KEY: AKIA...
SNAPSHOT_AWS_SECRET_KEY: wJal...
```
Create a ConfigMap with bucket configuration:
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: kubeshark-s3-config
data:
SNAPSHOT_AWS_BUCKET: my-kubeshark-snapshots
SNAPSHOT_AWS_REGION: us-east-1
```
Set Helm values:
```yaml
tap:
snapshots:
cloud:
provider: "s3"
configMaps:
- kubeshark-s3-config
secrets:
- kubeshark-s3-creds
```
### Example: Cross-Account Access via AssumeRole
Add the role ARN to your ConfigMap:
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: kubeshark-s3-config
data:
SNAPSHOT_AWS_BUCKET: other-account-bucket
SNAPSHOT_AWS_REGION: eu-west-1
SNAPSHOT_AWS_ROLE_ARN: arn:aws:iam::123456789012:role/KubesharkCrossAccountRole
SNAPSHOT_AWS_EXTERNAL_ID: my-external-id # optional, if required by the trust policy
```
The hub will first authenticate using its own credentials (IRSA, static, or default chain), then assume the specified role to access the bucket.
---
## Azure Blob Storage
### Environment Variables
| Variable | Required | Description |
|----------|----------|-------------|
| `SNAPSHOT_AZBLOB_STORAGE_ACCOUNT` | Yes | Azure storage account name |
| `SNAPSHOT_AZBLOB_CONTAINER` | Yes | Blob container name |
| `SNAPSHOT_AZBLOB_STORAGE_KEY` | No | Storage account access key (empty = use DefaultAzureCredential) |
| `SNAPSHOT_CLOUD_PREFIX` | No | Key prefix in the container (e.g. `snapshots/`) |
### Authentication Methods
Credentials are resolved in this order:
1. **Shared Key** -- If `SNAPSHOT_AZBLOB_STORAGE_KEY` is set, the storage account key is used directly.
2. **DefaultAzureCredential** -- When no storage key is provided, the Azure SDK default credential chain is used:
- **Workload Identity** (AKS pod identity) -- recommended for production on AKS
- Managed Identity (system or user-assigned)
- Azure CLI credentials
- Environment variables (`AZURE_CLIENT_ID`, `AZURE_TENANT_ID`, `AZURE_CLIENT_SECRET`)
The provider validates container access on startup via `GetProperties`. If the container is inaccessible, the hub will fail to start.
### Example: Workload Identity (recommended for AKS)
Create a ConfigMap with storage configuration:
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: kubeshark-azblob-config
data:
SNAPSHOT_AZBLOB_STORAGE_ACCOUNT: mykubesharksa
SNAPSHOT_AZBLOB_CONTAINER: snapshots
```
Set Helm values:
```yaml
tap:
snapshots:
cloud:
provider: "azblob"
configMaps:
- kubeshark-azblob-config
```
The hub pod's service account must be configured for AKS Workload Identity with a managed identity that has the **Storage Blob Data Contributor** role on the container.
### Example: Storage Account Key
Create a Secret with the storage key:
```yaml
apiVersion: v1
kind: Secret
metadata:
name: kubeshark-azblob-creds
type: Opaque
stringData:
SNAPSHOT_AZBLOB_STORAGE_KEY: "base64-encoded-storage-key..."
```
Create a ConfigMap with storage configuration:
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: kubeshark-azblob-config
data:
SNAPSHOT_AZBLOB_STORAGE_ACCOUNT: mykubesharksa
SNAPSHOT_AZBLOB_CONTAINER: snapshots
```
Set Helm values:
```yaml
tap:
snapshots:
cloud:
provider: "azblob"
configMaps:
- kubeshark-azblob-config
secrets:
- kubeshark-azblob-creds
```

View File

@@ -40,10 +40,14 @@ spec:
- "{{ if hasKey .Values.tap.capture.dissection "stopAfter" }}{{ .Values.tap.capture.dissection.stopAfter }}{{ else }}5m{{ end }}"
- -snapshot-size-limit
- '{{ .Values.tap.snapshots.storageSize }}'
{{- if .Values.tap.delayedDissection.image }}
- -dissector-image
- '{{ .Values.tap.delayedDissection.image }}'
{{- end }}
{{- if .Values.tap.docker.overrideImage.worker }}
- '{{ .Values.tap.docker.overrideImage.worker }}'
{{- else if .Values.tap.docker.overrideTag.worker }}
- '{{ .Values.tap.docker.registry }}/worker:{{ .Values.tap.docker.overrideTag.worker }}'
{{- else }}
- '{{ .Values.tap.docker.registry }}/worker:{{ not (eq .Values.tap.docker.tag "") | ternary .Values.tap.docker.tag (include "kubeshark.defaultVersion" .) }}'
{{- end }}
{{- if .Values.tap.delayedDissection.cpu }}
- -dissector-cpu
- '{{ .Values.tap.delayedDissection.cpu }}'
@@ -57,12 +61,24 @@ spec:
{{- end }}
- -cloud-api-url
- '{{ .Values.cloudApiUrl }}'
{{- if .Values.tap.secrets }}
{{- if .Values.tap.snapshots.cloud.provider }}
- -cloud-storage-provider
- '{{ .Values.tap.snapshots.cloud.provider }}'
{{- end }}
{{- if or .Values.tap.secrets .Values.tap.snapshots.cloud.configMaps .Values.tap.snapshots.cloud.secrets }}
envFrom:
{{- range .Values.tap.secrets }}
- secretRef:
name: {{ . }}
{{- end }}
{{- range .Values.tap.snapshots.cloud.configMaps }}
- configMapRef:
name: {{ . }}
{{- end }}
{{- range .Values.tap.snapshots.cloud.secrets }}
- secretRef:
name: {{ . }}
{{- end }}
{{- end }}
env:
- name: POD_NAME
@@ -184,10 +200,10 @@ spec:
- key: AUTH_SAML_X509_KEY
path: kubeshark.key
- name: snapshots-volume
{{- if .Values.tap.snapshots.storageClass }}
{{- if .Values.tap.snapshots.local.storageClass }}
persistentVolumeClaim:
claimName: {{ include "kubeshark.name" . }}-snapshots-pvc
{{- else }}
emptyDir:
sizeLimit: {{ .Values.tap.snapshots.storageSize }}
sizeLimit: {{ .Values.tap.snapshots.local.storageSize }}
{{- end }}

View File

@@ -1,5 +1,5 @@
---
{{- if .Values.tap.snapshots.storageClass }}
{{- if .Values.tap.snapshots.local.storageClass }}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
@@ -16,7 +16,7 @@ spec:
- ReadWriteOnce
resources:
requests:
storage: {{ .Values.tap.snapshots.storageSize }}
storageClassName: {{ .Values.tap.snapshots.storageClass }}
storage: {{ .Values.tap.snapshots.local.storageSize }}
storageClassName: {{ .Values.tap.snapshots.local.storageClass }}
status: {}
{{- end }}

View File

@@ -99,6 +99,10 @@ spec:
- '{{ .Values.tap.misc.resolutionStrategy }}'
- -staletimeout
- '{{ .Values.tap.misc.staleTimeoutSeconds }}'
- -tcp-flow-full-timeout
- '{{ .Values.tap.misc.tcpFlowTimeout }}'
- -udp-flow-full-timeout
- '{{ .Values.tap.misc.udpFlowTimeout }}'
- -storage-size
- '{{ .Values.tap.storageLimit }}'
- -capture-db-max-size

View File

@@ -30,8 +30,10 @@ data:
proxy_set_header Authorization $http_authorization;
proxy_pass_header Authorization;
proxy_connect_timeout 4s;
proxy_read_timeout 120s;
proxy_send_timeout 12s;
# Disable buffering for gRPC/Connect streaming
client_max_body_size 0;
proxy_request_buffering off;
proxy_buffering off;
proxy_pass_request_headers on;
}
@@ -86,4 +88,3 @@ data:
root /usr/share/nginx/html;
}
}

View File

@@ -35,16 +35,21 @@ tap:
storageSize: 1Gi
dbMaxSize: 500Mi
delayedDissection:
image: kubeshark/worker:master
cpu: "1"
memory: 4Gi
snapshots:
storageClass: ""
storageSize: 20Gi
local:
storageClass: ""
storageSize: 20Gi
cloud:
provider: ""
configMaps: []
secrets: []
release:
repo: https://helm.kubeshark.com
name: kubeshark
namespace: default
helmChartPath: ""
persistentStorage: false
persistentStorageStatic: false
persistentStoragePvcVolumeMode: FileSystem
@@ -191,8 +196,8 @@ tap:
- diameter
- udp-flow
- tcp-flow
- tcp-conn
- udp-conn
- tcp-conn
portMapping:
http:
- 80
@@ -228,6 +233,8 @@ tap:
duplicateTimeframe: 200ms
detectDuplicates: false
staleTimeoutSeconds: 30
tcpFlowTimeout: 1200
udpFlowTimeout: 1200
securityContext:
privileged: true
appArmorProfile:
@@ -270,7 +277,7 @@ kube:
dumpLogs: false
headless: false
license: ""
cloudApiUrl: "https://api.kubeshark.com"
cloudApiUrl: https://api.kubeshark.com
cloudLicenseEnabled: true
demoModeEnabled: false
supportChatEnabled: false

View File

@@ -67,7 +67,10 @@ func (h *Helm) Install() (rel *release.Release, err error) {
client.Namespace = h.releaseNamespace
client.ReleaseName = h.releaseName
chartPath := os.Getenv(fmt.Sprintf("%s_HELM_CHART_PATH", strings.ToUpper(misc.Program)))
chartPath := config.Config.Tap.Release.HelmChartPath
if chartPath == "" {
chartPath = os.Getenv(fmt.Sprintf("%s_HELM_CHART_PATH", strings.ToUpper(misc.Program)))
}
if chartPath == "" {
var chartURL string
chartURL, err = repo.FindChartInRepoURL(h.repo, h.releaseName, "", "", "", "", getter.All(&cli.EnvSettings{}))

View File

@@ -4,10 +4,10 @@ apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
labels:
helm.sh/chart: kubeshark-52.12.0
helm.sh/chart: kubeshark-53.1.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.12.0"
app.kubernetes.io/version: "53.1.0"
app.kubernetes.io/managed-by: Helm
name: kubeshark-hub-network-policy
namespace: default
@@ -33,10 +33,10 @@ apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
labels:
helm.sh/chart: kubeshark-52.12.0
helm.sh/chart: kubeshark-53.1.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.12.0"
app.kubernetes.io/version: "53.1.0"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-front-network-policy
@@ -60,10 +60,10 @@ apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
labels:
helm.sh/chart: kubeshark-52.12.0
helm.sh/chart: kubeshark-53.1.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.12.0"
app.kubernetes.io/version: "53.1.0"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-dex-network-policy
@@ -87,10 +87,10 @@ apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
labels:
helm.sh/chart: kubeshark-52.12.0
helm.sh/chart: kubeshark-53.1.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.12.0"
app.kubernetes.io/version: "53.1.0"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-worker-network-policy
@@ -116,10 +116,10 @@ apiVersion: v1
kind: ServiceAccount
metadata:
labels:
helm.sh/chart: kubeshark-52.12.0
helm.sh/chart: kubeshark-53.1.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.12.0"
app.kubernetes.io/version: "53.1.0"
app.kubernetes.io/managed-by: Helm
name: kubeshark-service-account
namespace: default
@@ -132,10 +132,10 @@ metadata:
namespace: default
labels:
app.kubeshark.com/app: hub
helm.sh/chart: kubeshark-52.12.0
helm.sh/chart: kubeshark-53.1.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.12.0"
app.kubernetes.io/version: "53.1.0"
app.kubernetes.io/managed-by: Helm
stringData:
LICENSE: ''
@@ -151,10 +151,10 @@ metadata:
namespace: default
labels:
app.kubeshark.com/app: hub
helm.sh/chart: kubeshark-52.12.0
helm.sh/chart: kubeshark-53.1.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.12.0"
app.kubernetes.io/version: "53.1.0"
app.kubernetes.io/managed-by: Helm
stringData:
AUTH_SAML_X509_CRT: |
@@ -167,10 +167,10 @@ metadata:
namespace: default
labels:
app.kubeshark.com/app: hub
helm.sh/chart: kubeshark-52.12.0
helm.sh/chart: kubeshark-53.1.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.12.0"
app.kubernetes.io/version: "53.1.0"
app.kubernetes.io/managed-by: Helm
stringData:
AUTH_SAML_X509_KEY: |
@@ -182,10 +182,10 @@ metadata:
name: kubeshark-nginx-config-map
namespace: default
labels:
helm.sh/chart: kubeshark-52.12.0
helm.sh/chart: kubeshark-53.1.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.12.0"
app.kubernetes.io/version: "53.1.0"
app.kubernetes.io/managed-by: Helm
data:
default.conf: |
@@ -209,8 +209,10 @@ data:
proxy_set_header Authorization $http_authorization;
proxy_pass_header Authorization;
proxy_connect_timeout 4s;
proxy_read_timeout 120s;
proxy_send_timeout 12s;
# Disable buffering for gRPC/Connect streaming
client_max_body_size 0;
proxy_request_buffering off;
proxy_buffering off;
proxy_pass_request_headers on;
}
@@ -246,10 +248,10 @@ metadata:
namespace: default
labels:
app.kubeshark.com/app: hub
helm.sh/chart: kubeshark-52.12.0
helm.sh/chart: kubeshark-53.1.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.12.0"
app.kubernetes.io/version: "53.1.0"
app.kubernetes.io/managed-by: Helm
data:
POD_REGEX: '.*'
@@ -257,6 +259,7 @@ data:
EXCLUDED_NAMESPACES: ''
BPF_OVERRIDE: ''
DISSECTION_ENABLED: 'true'
CAPTURE_SELF: 'false'
SCRIPTING_SCRIPTS: '{}'
SCRIPTING_ACTIVE_SCRIPTS: ''
INGRESS_ENABLED: 'false'
@@ -266,7 +269,7 @@ data:
AUTH_TYPE: 'default'
AUTH_SAML_IDP_METADATA_URL: ''
AUTH_SAML_ROLE_ATTRIBUTE: 'role'
AUTH_SAML_ROLES: '{"admin":{"canDownloadPCAP":true,"canStopTrafficCapturing":true,"canUpdateTargetedPods":true,"canUseScripting":true,"filter":"","scriptingPermissions":{"canActivate":true,"canDelete":true,"canSave":true},"showAdminConsoleLink":true}}'
AUTH_SAML_ROLES: '{"admin":{"canControlDissection":true,"canDownloadPCAP":true,"canStopTrafficCapturing":true,"canUpdateTargetedPods":true,"canUseScripting":true,"filter":"","scriptingPermissions":{"canActivate":true,"canDelete":true,"canSave":true},"showAdminConsoleLink":true}}'
AUTH_OIDC_ISSUER: 'not set'
AUTH_OIDC_REFRESH_TOKEN_LIFETIME: '3960h'
AUTH_OIDC_STATE_PARAM_EXPIRY: '10m'
@@ -285,7 +288,6 @@ data:
PCAP_ERROR_TTL: '0'
TIMEZONE: ' '
CLOUD_LICENSE_ENABLED: 'true'
AI_ASSISTANT_ENABLED: 'true'
DUPLICATE_TIMEFRAME: '200ms'
ENABLED_DISSECTORS: 'amqp,dns,http,icmp,kafka,redis,ws,ldap,radius,diameter,udp-flow,tcp-flow,udp-conn,tcp-conn'
CUSTOM_MACROS: '{"https":"tls and (http or http2)"}'
@@ -304,10 +306,10 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
helm.sh/chart: kubeshark-52.12.0
helm.sh/chart: kubeshark-53.1.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.12.0"
app.kubernetes.io/version: "53.1.0"
app.kubernetes.io/managed-by: Helm
name: kubeshark-cluster-role-default
namespace: default
@@ -351,10 +353,10 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
helm.sh/chart: kubeshark-52.12.0
helm.sh/chart: kubeshark-53.1.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.12.0"
app.kubernetes.io/version: "53.1.0"
app.kubernetes.io/managed-by: Helm
name: kubeshark-cluster-role-binding-default
namespace: default
@@ -372,10 +374,10 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
helm.sh/chart: kubeshark-52.12.0
helm.sh/chart: kubeshark-53.1.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.12.0"
app.kubernetes.io/version: "53.1.0"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-self-config-role
@@ -422,10 +424,10 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
helm.sh/chart: kubeshark-52.12.0
helm.sh/chart: kubeshark-53.1.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.12.0"
app.kubernetes.io/version: "53.1.0"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-self-config-role-binding
@@ -445,10 +447,10 @@ kind: Service
metadata:
labels:
app.kubeshark.com/app: hub
helm.sh/chart: kubeshark-52.12.0
helm.sh/chart: kubeshark-53.1.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.12.0"
app.kubernetes.io/version: "53.1.0"
app.kubernetes.io/managed-by: Helm
name: kubeshark-hub
namespace: default
@@ -466,10 +468,10 @@ apiVersion: v1
kind: Service
metadata:
labels:
helm.sh/chart: kubeshark-52.12.0
helm.sh/chart: kubeshark-53.1.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.12.0"
app.kubernetes.io/version: "53.1.0"
app.kubernetes.io/managed-by: Helm
name: kubeshark-front
namespace: default
@@ -487,10 +489,10 @@ kind: Service
apiVersion: v1
metadata:
labels:
helm.sh/chart: kubeshark-52.12.0
helm.sh/chart: kubeshark-53.1.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.12.0"
app.kubernetes.io/version: "53.1.0"
app.kubernetes.io/managed-by: Helm
annotations:
prometheus.io/scrape: 'true'
@@ -500,10 +502,10 @@ metadata:
spec:
selector:
app.kubeshark.com/app: worker
helm.sh/chart: kubeshark-52.12.0
helm.sh/chart: kubeshark-53.1.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.12.0"
app.kubernetes.io/version: "53.1.0"
app.kubernetes.io/managed-by: Helm
ports:
- name: metrics
@@ -516,10 +518,10 @@ kind: Service
apiVersion: v1
metadata:
labels:
helm.sh/chart: kubeshark-52.12.0
helm.sh/chart: kubeshark-53.1.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.12.0"
app.kubernetes.io/version: "53.1.0"
app.kubernetes.io/managed-by: Helm
annotations:
prometheus.io/scrape: 'true'
@@ -529,10 +531,10 @@ metadata:
spec:
selector:
app.kubeshark.com/app: hub
helm.sh/chart: kubeshark-52.12.0
helm.sh/chart: kubeshark-53.1.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.12.0"
app.kubernetes.io/version: "53.1.0"
app.kubernetes.io/managed-by: Helm
ports:
- name: metrics
@@ -547,10 +549,10 @@ metadata:
labels:
app.kubeshark.com/app: worker
sidecar.istio.io/inject: "false"
helm.sh/chart: kubeshark-52.12.0
helm.sh/chart: kubeshark-53.1.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.12.0"
app.kubernetes.io/version: "53.1.0"
app.kubernetes.io/managed-by: Helm
name: kubeshark-worker-daemon-set
namespace: default
@@ -564,10 +566,10 @@ spec:
metadata:
labels:
app.kubeshark.com/app: worker
helm.sh/chart: kubeshark-52.12.0
helm.sh/chart: kubeshark-53.1.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.12.0"
app.kubernetes.io/version: "53.1.0"
app.kubernetes.io/managed-by: Helm
name: kubeshark-worker-daemon-set
namespace: kubeshark
@@ -577,7 +579,7 @@ spec:
- /bin/sh
- -c
- mkdir -p /sys/fs/bpf && mount | grep -q '/sys/fs/bpf' || mount -t bpf bpf /sys/fs/bpf
image: 'docker.io/kubeshark/worker:v52.12'
image: 'docker.io/kubeshark/worker:v53.1'
imagePullPolicy: Always
name: mount-bpf
securityContext:
@@ -606,11 +608,17 @@ spec:
- 'auto'
- -staletimeout
- '30'
- -tcp-flow-full-timeout
- '1200'
- -udp-flow-full-timeout
- '1200'
- -storage-size
- '10Gi'
- -capture-db-max-size
- '500Mi'
image: 'docker.io/kubeshark/worker:v52.12'
- -cloud-api-url
- 'https://api.kubeshark.com'
image: 'docker.io/kubeshark/worker:v53.1'
imagePullPolicy: Always
name: sniffer
ports:
@@ -630,8 +638,6 @@ spec:
value: '10000'
- name: TCP_STREAM_CHANNEL_TIMEOUT_SHOW
value: 'false'
- name: KUBESHARK_CLOUD_API_URL
value: 'https://api.kubeshark.com'
- name: PROFILING_ENABLED
value: 'false'
- name: SENTRY_ENABLED
@@ -684,7 +690,7 @@ spec:
- -disable-tls-log
- -loglevel
- 'warning'
image: 'docker.io/kubeshark/worker:v52.12'
image: 'docker.io/kubeshark/worker:v53.1'
imagePullPolicy: Always
name: tracer
env:
@@ -776,10 +782,10 @@ kind: Deployment
metadata:
labels:
app.kubeshark.com/app: hub
helm.sh/chart: kubeshark-52.12.0
helm.sh/chart: kubeshark-53.1.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.12.0"
app.kubernetes.io/version: "53.1.0"
app.kubernetes.io/managed-by: Helm
name: kubeshark-hub
namespace: default
@@ -794,10 +800,10 @@ spec:
metadata:
labels:
app.kubeshark.com/app: hub
helm.sh/chart: kubeshark-52.12.0
helm.sh/chart: kubeshark-53.1.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.12.0"
app.kubernetes.io/version: "53.1.0"
app.kubernetes.io/managed-by: Helm
spec:
dnsPolicy: ClusterFirstWithHostNet
@@ -813,13 +819,15 @@ spec:
- -capture-stop-after
- "5m"
- -snapshot-size-limit
- '20Gi'
- ''
- -dissector-image
- 'kubeshark/worker:master'
- 'docker.io/kubeshark/worker:v53.1'
- -dissector-cpu
- '1'
- -dissector-memory
- '4Gi'
- -cloud-api-url
- 'https://api.kubeshark.com'
env:
- name: POD_NAME
valueFrom:
@@ -833,11 +841,9 @@ spec:
value: 'false'
- name: SENTRY_ENVIRONMENT
value: 'production'
- name: KUBESHARK_CLOUD_API_URL
value: 'https://api.kubeshark.com'
- name: PROFILING_ENABLED
value: 'false'
image: 'docker.io/kubeshark/hub:v52.12'
image: 'docker.io/kubeshark/hub:v53.1'
imagePullPolicy: Always
readinessProbe:
periodSeconds: 5
@@ -905,10 +911,10 @@ kind: Deployment
metadata:
labels:
app.kubeshark.com/app: front
helm.sh/chart: kubeshark-52.12.0
helm.sh/chart: kubeshark-53.1.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.12.0"
app.kubernetes.io/version: "53.1.0"
app.kubernetes.io/managed-by: Helm
name: kubeshark-front
namespace: default
@@ -923,10 +929,10 @@ spec:
metadata:
labels:
app.kubeshark.com/app: front
helm.sh/chart: kubeshark-52.12.0
helm.sh/chart: kubeshark-53.1.0
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.12.0"
app.kubernetes.io/version: "53.1.0"
app.kubernetes.io/managed-by: Helm
spec:
containers:
@@ -943,6 +949,8 @@ spec:
value: ' '
- name: REACT_APP_TIMEZONE
value: ' '
- name: REACT_APP_SCRIPTING_HIDDEN
value: 'true'
- name: REACT_APP_SCRIPTING_DISABLED
value: 'false'
- name: REACT_APP_TARGETED_PODS_UPDATE_DISABLED
@@ -953,12 +961,12 @@ spec:
value: 'true'
- name: REACT_APP_RECORDING_DISABLED
value: 'false'
- name: REACT_APP_DISSECTION_ENABLED
value: 'true'
- name: REACT_APP_DISSECTION_CONTROL_ENABLED
value: 'true'
- name: 'REACT_APP_CLOUD_LICENSE_ENABLED'
value: 'true'
- name: 'REACT_APP_AI_ASSISTANT_ENABLED'
value: 'true'
- name: REACT_APP_SUPPORT_CHAT_ENABLED
value: 'false'
- name: REACT_APP_BETA_ENABLED
@@ -971,7 +979,7 @@ spec:
value: 'false'
- name: REACT_APP_SENTRY_ENVIRONMENT
value: 'production'
image: 'docker.io/kubeshark/front:v52.12'
image: 'docker.io/kubeshark/front:v53.1'
imagePullPolicy: Always
name: kubeshark-front
livenessProbe: