diff --git a/.gitignore b/.gitignore index 6ec498773..0fca55afb 100644 --- a/.gitignore +++ b/.gitignore @@ -63,4 +63,4 @@ bin scripts/ # CWD config YAML -kubeshark.yaml +kubeshark.yaml \ No newline at end of file diff --git a/Makefile b/Makefile index c24b4ae0f..0823a2e96 100644 --- a/Makefile +++ b/Makefile @@ -84,7 +84,8 @@ kubectl-view-kubeshark-resources: ## This command outputs all Kubernetes resourc ./kubectl.sh view-kubeshark-resources generate-helm-values: ## Generate the Helm values from config.yaml - ./bin/kubeshark__ config > ./helm-chart/values.yaml && sed -i 's/^license:.*/license: ""/' helm-chart/values.yaml && sed -i '1i # find a detailed description here: https://github.com/kubeshark/kubeshark/blob/master/helm-chart/README.md' helm-chart/values.yaml + mv ~/.kubeshark/config.yaml ~/.kubeshark/config.yaml.old; bin/kubeshark__ config>helm-chart/values.yaml;mv ~/.kubeshark/config.yaml.old ~/.kubeshark/config.yaml + sed -i 's/^license:.*/license: ""/' helm-chart/values.yaml && sed -i '1i # find a detailed description here: https://github.com/kubeshark/kubeshark/blob/master/helm-chart/README.md' helm-chart/values.yaml generate-manifests: ## Generate the manifests from the Helm chart using default configuration helm template kubeshark -n default ./helm-chart > ./manifests/complete.yaml @@ -181,13 +182,22 @@ release: @cd ../tracer && git checkout master && git pull && git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags @cd ../hub && git checkout master && git pull && git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags @cd ../front && git checkout master && git pull && git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags - @cd ../kubeshark && git checkout master && git pull && sed -i 's/^version:.*/version: "$(VERSION)"/' helm-chart/Chart.yaml && make && make generate-helm-values && make generate-manifests + @cd ../kubeshark && git checkout master && git pull && sed -i "s/^version:.*/version: \"$(shell 
echo $(VERSION) | sed -E 's/^([0-9]+\.[0-9]+)\..*/\1/')\"/" helm-chart/Chart.yaml && make + @if [ "$(shell uname)" = "Darwin" ]; then \ + codesign --sign - --force --preserve-metadata=entitlements,requirements,flags,runtime ./bin/kubeshark__; \ + fi + @make generate-helm-values && make generate-manifests @git add -A . && git commit -m ":bookmark: Bump the Helm chart version to $(VERSION)" && git push @git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags - @cd helm-chart && cp -r . ../../kubeshark.github.io/charts/chart + @cd helm-chart && rm -r ../../kubeshark.github.io/charts/chart/* && cp -r . ../../kubeshark.github.io/charts/chart @cd ../../kubeshark.github.io/ && git add -A . && git commit -m ":sparkles: Update the Helm chart" && git push @cd ../kubeshark +release-dry-run: + @cd ../kubeshark && git checkout master && git pull && sed -i "s/^version:.*/version: \"$(shell echo $(VERSION) | sed -E 's/^([0-9]+\.[0-9]+)\..*/\1/')\"/" helm-chart/Chart.yaml && make && make generate-helm-values && make generate-manifests + @cd helm-chart && rm -r ../../kubeshark.github.io/charts/chart/* && cp -r . ../../kubeshark.github.io/charts/chart + @cd ../kubeshark + branch: @cd ../worker && git checkout master && git pull && git checkout -b $(name); git push --set-upstream origin $(name) @cd ../hub && git checkout master && git pull && git checkout -b $(name); git push --set-upstream origin $(name) diff --git a/README.md b/README.md index acc2f35e1..56ea824af 100644 --- a/README.md +++ b/README.md @@ -12,7 +12,7 @@ Image size - + Discord @@ -22,60 +22,75 @@

- Want to see Kubeshark in action, right now? Visit this - live demo deployment of Kubeshark. + Want to see Kubeshark in action right now? Visit this + live demo deployment of Kubeshark.

-**Kubeshark** is an API Traffic Analyzer for [**Kubernetes**](https://kubernetes.io/) providing real-time, protocol-level visibility into Kubernetes’ internal network, capturing and monitoring all traffic and payloads going in, out and across containers, pods, nodes and clusters. +**Kubeshark** is a network observability platform for [**Kubernetes**](https://kubernetes.io/), providing real-time, protocol-level visibility into Kubernetes’ network. It enables users to inspect all internal and external cluster connections, API calls, and data in transit. Additionally, Kubeshark detects suspicious network behaviors, triggers automated actions, and provides deep insights into the network. ![Simple UI](https://github.com/kubeshark/assets/raw/master/png/kubeshark-ui.png) -Think [TCPDump](https://en.wikipedia.org/wiki/Tcpdump) and [Wireshark](https://www.wireshark.org/) re-invented for Kubernetes +Think [TCPDump](https://en.wikipedia.org/wiki/Tcpdump) and [Wireshark](https://www.wireshark.org/) reimagined for Kubernetes. ## Getting Started - -Download **Kubeshark**'s binary distribution [latest release](https://github.com/kubeshark/kubeshark/releases/latest) and run following one of these examples: - -```shell -kubeshark tap -``` - -```shell -kubeshark tap -n sock-shop "(catalo*|front-end*)" -``` - -Running any of the :point_up: above commands will open the [Web UI](https://docs.kubeshark.co/en/ui) in your browser which streams the traffic in your Kubernetes cluster in real-time. +Download **Kubeshark**'s binary distribution [latest release](https://github.com/kubeshark/kubeshark/releases/latest) or use one of the following methods to deploy **Kubeshark**. The [web-based dashboard](https://docs.kubeshark.co/en/ui) should open in your browser, showing a real-time view of your cluster's traffic. 
### Homebrew -[Homebrew](https://brew.sh/) :beer: users install Kubeshark CLI with: +[Homebrew](https://brew.sh/) :beer: users can install the Kubeshark CLI with: ```shell brew install kubeshark +kubeshark tap +``` + +To clean up: +```shell +kubeshark clean ``` ### Helm -Add the helm repository and install the chart: +Add the Helm repository and install the chart: ```shell helm repo add kubeshark https://helm.kubeshark.co -‍helm install kubeshark kubeshark/kubeshark +helm install kubeshark kubeshark/kubeshark +``` +Follow the on-screen instructions how to connect to the dashboard. + +To clean up: +```shell +helm uninstall kubeshark ``` ## Building From Source -Clone this repository and run `make` command to build it. After the build is complete, the executable can be found at `./bin/kubeshark__`. +Clone this repository and run the `make` command to build it. After the build is complete, the executable can be found at `./bin/kubeshark`. ## Documentation To learn more, read the [documentation](https://docs.kubeshark.co). +## Additional Use Cases + +### Dump All Cluster-wide Traffic into a Single PCAP File + +Record **all** cluster traffic and consolidate it into a single PCAP file (tcpdump-style). + +Run Kubeshark to start capturing traffic: +```shell +kubeshark tap --set headless=true +``` +> You can press `^C` to stop the command. Kubeshark will continue running in the background. + +Take a snapshot of traffic (e.g., from the past 5 minutes): +```shell +kubeshark pcapdump --time 5m +``` +> Read more [here](https://docs.kubeshark.co/en/pcapdump). + ## Contributing We :heart: pull requests! See [CONTRIBUTING.md](CONTRIBUTING.md) for the contribution guide. - -## Code of Conduct - -This project is for everyone. We ask that our users and contributors take a few minutes to review our [Code of Conduct](CODE_OF_CONDUCT.md). 
diff --git a/RELEASE.md.TEMPLATE b/RELEASE.md.TEMPLATE index c785940c2..455e18f69 100644 --- a/RELEASE.md.TEMPLATE +++ b/RELEASE.md.TEMPLATE @@ -10,7 +10,7 @@ curl -Lo kubeshark https://github.com/kubeshark/kubeshark/releases/download/_VER **Mac** (AArch64/Apple M1 silicon) ``` -rm -f kubeshark && curl -Lo kubeshark https://github.com/kubeshark/kubeshark/releases/download/_VER_/kubeshark_darwin_arm64 && chmod 755 kubeshark +curl -Lo kubeshark https://github.com/kubeshark/kubeshark/releases/download/_VER_/kubeshark_darwin_arm64 && chmod 755 kubeshark ``` **Linux** (x86-64) diff --git a/cmd/export.go b/cmd/export.go deleted file mode 100644 index 1de20c3db..000000000 --- a/cmd/export.go +++ /dev/null @@ -1,62 +0,0 @@ -package cmd - -import ( - "fmt" - "net/http" - "os" - "path/filepath" - "time" - - "github.com/creasty/defaults" - "github.com/kubeshark/kubeshark/config/configStructs" - "github.com/kubeshark/kubeshark/internal/connect" - "github.com/kubeshark/kubeshark/kubernetes" - "github.com/kubeshark/kubeshark/utils" - "github.com/rs/zerolog/log" - "github.com/spf13/cobra" -) - -var exportCmd = &cobra.Command{ - Use: "export", - Short: "Exports the captured traffic into a TAR file that contains PCAP files", - RunE: func(cmd *cobra.Command, args []string) error { - runExport() - return nil - }, -} - -func init() { - rootCmd.AddCommand(exportCmd) - - defaultTapConfig := configStructs.TapConfig{} - if err := defaults.Set(&defaultTapConfig); err != nil { - log.Debug().Err(err).Send() - } - - exportCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.Port, "Provide a custom port for the Kubeshark") - exportCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the Kubeshark") - exportCmd.Flags().StringP(configStructs.ReleaseNamespaceLabel, "s", defaultTapConfig.Release.Namespace, "Release namespace of Kubeshark") -} - -func runExport() { - hubUrl := kubernetes.GetHubUrl() - response, err := 
http.Get(fmt.Sprintf("%s/echo", hubUrl)) - if err != nil || response.StatusCode != 200 { - log.Info().Msg(fmt.Sprintf(utils.Yellow, "Couldn't connect to Hub. Establishing proxy...")) - runProxy(false, true) - } - - dstPath, err := filepath.Abs(fmt.Sprintf("./%d.tar.gz", time.Now().Unix())) - if err != nil { - panic(err) - } - - out, err := os.Create(dstPath) - if err != nil { - panic(err) - } - defer out.Close() - - connector := connect.NewConnector(kubernetes.GetHubUrl(), connect.DefaultRetries, connect.DefaultTimeout) - connector.PostPcapsMerge(out) -} diff --git a/cmd/pcapDump.go b/cmd/pcapDump.go index b2d9558ea..5653642b6 100644 --- a/cmd/pcapDump.go +++ b/cmd/pcapDump.go @@ -2,10 +2,14 @@ package cmd import ( "errors" + "fmt" + "os" "path/filepath" + "time" "github.com/creasty/defaults" "github.com/kubeshark/kubeshark/config/configStructs" + "github.com/rs/zerolog" "github.com/rs/zerolog/log" "github.com/spf13/cobra" "k8s.io/client-go/kubernetes" @@ -16,7 +20,7 @@ import ( // pcapDumpCmd represents the consolidated pcapdump command var pcapDumpCmd = &cobra.Command{ Use: "pcapdump", - Short: "Manage PCAP dump operations: start, stop, or copy PCAP files", + Short: "Store all captured traffic (including decrypted TLS) in a PCAP file.", RunE: func(cmd *cobra.Command, args []string) error { // Retrieve the kubeconfig path from the flag kubeconfig, _ := cmd.Flags().GetString(configStructs.PcapKubeconfig) @@ -30,54 +34,61 @@ var pcapDumpCmd = &cobra.Command{ } } + debugEnabled, _ := cmd.Flags().GetBool("debug") + if debugEnabled { + zerolog.SetGlobalLevel(zerolog.DebugLevel) + log.Debug().Msg("Debug logging enabled") + } else { + zerolog.SetGlobalLevel(zerolog.InfoLevel) + } + // Use the current context in kubeconfig config, err := clientcmd.BuildConfigFromFlags("", kubeconfig) if err != nil { - log.Error().Err(err).Msg("Error building kubeconfig") - return err + return fmt.Errorf("Error building kubeconfig: %w", err) } clientset, err := 
kubernetes.NewForConfig(config) if err != nil { - log.Error().Err(err).Msg("Error creating Kubernetes client") - return err + return fmt.Errorf("Error creating Kubernetes client: %w", err) } - // Handle copy operation if the copy string is provided - - if !cmd.Flags().Changed(configStructs.PcapDumpEnabled) { - destDir, _ := cmd.Flags().GetString(configStructs.PcapDest) - log.Info().Msg("Copying PCAP files") - err = copyPcapFiles(clientset, config, destDir) + // Parse the `--time` flag + timeIntervalStr, _ := cmd.Flags().GetString("time") + var cutoffTime *time.Time // Use a pointer to distinguish between provided and not provided + if timeIntervalStr != "" { + duration, err := time.ParseDuration(timeIntervalStr) if err != nil { - log.Error().Err(err).Msg("Error copying PCAP files") - return err + return fmt.Errorf("Invalid format %w", err) } - } else { - // Handle start operation if the start string is provided + tempCutoffTime := time.Now().Add(-duration) + cutoffTime = &tempCutoffTime + } - enabled, err := cmd.Flags().GetBool(configStructs.PcapDumpEnabled) + // Test the dest dir if provided + destDir, _ := cmd.Flags().GetString(configStructs.PcapDest) + if destDir != "" { + info, err := os.Stat(destDir) + if os.IsNotExist(err) { + return fmt.Errorf("Directory does not exist: %s", destDir) + } if err != nil { - log.Error().Err(err).Msg("Error getting pcapdump enable flag") - return err + return fmt.Errorf("Error checking dest directory: %w", err) } - timeInterval, _ := cmd.Flags().GetString(configStructs.PcapTimeInterval) - maxTime, _ := cmd.Flags().GetString(configStructs.PcapMaxTime) - maxSize, _ := cmd.Flags().GetString(configStructs.PcapMaxSize) - err = startStopPcap(clientset, enabled, timeInterval, maxTime, maxSize) + if !info.IsDir() { + return fmt.Errorf("Dest path is not a directory: %s", destDir) + } + tempFile, err := os.CreateTemp(destDir, "write-test-*") if err != nil { - log.Error().Err(err).Msg("Error starting/stopping PCAP dump") - return err - } - 
- if enabled { - log.Info().Msg("Pcapdump started successfully") - return nil - } else { - log.Info().Msg("Pcapdump stopped successfully") - return nil + return fmt.Errorf("Directory %s is not writable", destDir) } + _ = os.Remove(tempFile.Name()) + } + log.Info().Msg("Copying PCAP files") + err = copyPcapFiles(clientset, config, destDir, cutoffTime) + if err != nil { + return err } return nil @@ -92,10 +103,8 @@ func init() { log.Debug().Err(err).Send() } - pcapDumpCmd.Flags().String(configStructs.PcapTimeInterval, defaultPcapDumpConfig.PcapTimeInterval, "Time interval for PCAP file rotation (used with --start)") - pcapDumpCmd.Flags().String(configStructs.PcapMaxTime, defaultPcapDumpConfig.PcapMaxTime, "Maximum time for retaining old PCAP files (used with --start)") - pcapDumpCmd.Flags().String(configStructs.PcapMaxSize, defaultPcapDumpConfig.PcapMaxSize, "Maximum size of PCAP files before deletion (used with --start)") + pcapDumpCmd.Flags().String(configStructs.PcapTime, "", "Time interval (e.g., 10m, 1h) in the past for which the pcaps are copied") pcapDumpCmd.Flags().String(configStructs.PcapDest, "", "Local destination path for copied PCAP files (can not be used together with --enabled)") - pcapDumpCmd.Flags().String(configStructs.PcapKubeconfig, "", "Enabled/Disable to pcap dumps (can not be used together with --dest)") - + pcapDumpCmd.Flags().String(configStructs.PcapKubeconfig, "", "Path for kubeconfig (if not provided the default location will be checked)") + pcapDumpCmd.Flags().Bool("debug", false, "Enable debug logging") } diff --git a/cmd/pcapDumpRunner.go b/cmd/pcapDumpRunner.go index 684f8a14e..5aacb7023 100644 --- a/cmd/pcapDumpRunner.go +++ b/cmd/pcapDumpRunner.go @@ -1,16 +1,18 @@ package cmd import ( + "bufio" "bytes" "context" + "errors" "fmt" + "io" "os" "path/filepath" - "strconv" "strings" + "sync" + "time" - "github.com/kubeshark/gopacket" - "github.com/kubeshark/gopacket/layers" "github.com/kubeshark/gopacket/pcapgo" 
"github.com/rs/zerolog/log" corev1 "k8s.io/api/core/v1" @@ -21,20 +23,25 @@ import ( "k8s.io/client-go/tools/remotecommand" ) -const label = "app.kubeshark.co/app=worker" -const SELF_RESOURCES_PREFIX = "kubeshark-" -const SUFFIX_CONFIG_MAP = "config-map" +const ( + label = "app.kubeshark.co/app=worker" + srcDir = "pcapdump" + maxSnaplen uint32 = 262144 + maxTimePerFile = time.Minute * 5 +) -// NamespaceFiles represents the namespace and the files found in that namespace. -type NamespaceFiles struct { - Namespace string // The namespace in which the files were found - SrcDir string // The source directory from which the files were listed - Files []string // List of files found in the namespace +// PodFileInfo represents information about a pod, its namespace, and associated files +type PodFileInfo struct { + Pod corev1.Pod + SrcDir string + Files []string + CopiedFiles []string } // listWorkerPods fetches all worker pods from multiple namespaces -func listWorkerPods(ctx context.Context, clientset *clientk8s.Clientset, namespaces []string) ([]corev1.Pod, error) { - var allPods []corev1.Pod +func listWorkerPods(ctx context.Context, clientset *clientk8s.Clientset, namespaces []string) ([]*PodFileInfo, error) { + var podFileInfos []*PodFileInfo + var errs []error labelSelector := label for _, namespace := range namespaces { @@ -43,112 +50,30 @@ func listWorkerPods(ctx context.Context, clientset *clientk8s.Clientset, namespa LabelSelector: labelSelector, }) if err != nil { - return nil, fmt.Errorf("failed to list worker pods in namespace %s: %w", namespace, err) - } - - // Accumulate the pods - allPods = append(allPods, pods.Items...) 
- } - - return allPods, nil -} - -// listFilesInPodDir lists all files in the specified directory inside the pod across multiple namespaces -func listFilesInPodDir(ctx context.Context, clientset *clientk8s.Clientset, config *rest.Config, podName string, namespaces []string, configMapName, configMapKey string) ([]NamespaceFiles, error) { - var namespaceFilesList []NamespaceFiles - - for _, namespace := range namespaces { - // Attempt to get the ConfigMap in the current namespace - configMap, err := clientset.CoreV1().ConfigMaps(namespace).Get(ctx, configMapName, metav1.GetOptions{}) - if err != nil { + errs = append(errs, fmt.Errorf("failed to list worker pods in namespace %s: %w", namespace, err)) continue } - // Check if the source directory exists in the ConfigMap - srcDir, ok := configMap.Data[configMapKey] - if !ok || srcDir == "" { - log.Error().Msgf("source directory not found in ConfigMap %s in namespace %s", configMapName, namespace) - continue - } - - // Attempt to get the pod in the current namespace - pod, err := clientset.CoreV1().Pods(namespace).Get(ctx, podName, metav1.GetOptions{}) - if err != nil { - log.Error().Err(err).Msgf("failed to get pod %s in namespace %s", podName, namespace) - continue - } - - nodeName := pod.Spec.NodeName - srcFilePath := filepath.Join("data", nodeName, srcDir) - - cmd := []string{"ls", srcFilePath} - req := clientset.CoreV1().RESTClient().Post(). - Resource("pods"). - Name(podName). - Namespace(namespace). - SubResource("exec"). - Param("container", "sniffer"). - Param("stdout", "true"). - Param("stderr", "true"). - Param("command", cmd[0]). 
- Param("command", cmd[1]) - - exec, err := remotecommand.NewSPDYExecutor(config, "POST", req.URL()) - if err != nil { - log.Error().Err(err).Msgf("failed to initialize executor for pod %s in namespace %s", podName, namespace) - continue - } - - var stdoutBuf bytes.Buffer - var stderrBuf bytes.Buffer - - // Execute the command to list files - err = exec.StreamWithContext(ctx, remotecommand.StreamOptions{ - Stdout: &stdoutBuf, - Stderr: &stderrBuf, - }) - if err != nil { - log.Error().Err(err).Msgf("error listing files in pod %s in namespace %s: %s", podName, namespace, stderrBuf.String()) - continue - } - - // Split the output (file names) into a list - files := strings.Split(strings.TrimSpace(stdoutBuf.String()), "\n") - if len(files) > 0 { - // Append the NamespaceFiles struct to the list - namespaceFilesList = append(namespaceFilesList, NamespaceFiles{ - Namespace: namespace, - SrcDir: srcDir, - Files: files, + for _, pod := range pods.Items { + podFileInfos = append(podFileInfos, &PodFileInfo{ + Pod: pod, }) } } - if len(namespaceFilesList) == 0 { - return nil, fmt.Errorf("no files found in pod %s across the provided namespaces", podName) - } - - return namespaceFilesList, nil + return podFileInfos, errors.Join(errs...) 
} -// copyFileFromPod copies a single file from a pod to a local destination -func copyFileFromPod(ctx context.Context, clientset *kubernetes.Clientset, config *rest.Config, podName, namespace, srcDir, srcFile, destFile string) error { - // Get the pod to retrieve its node name - pod, err := clientset.CoreV1().Pods(namespace).Get(ctx, podName, metav1.GetOptions{}) - if err != nil { - return fmt.Errorf("failed to get pod %s in namespace %s: %w", podName, namespace, err) - } +// listFilesInPodDir lists all files in the specified directory inside the pod across multiple namespaces +func listFilesInPodDir(ctx context.Context, clientset *clientk8s.Clientset, config *rest.Config, pod *PodFileInfo, cutoffTime *time.Time) error { + nodeName := pod.Pod.Spec.NodeName + srcFilePath := filepath.Join("data", nodeName, srcDir) - // Construct the complete path using /data, the node name, srcDir, and srcFile - nodeName := pod.Spec.NodeName - srcFilePath := filepath.Join("data", nodeName, srcDir, srcFile) - - // Execute the `cat` command to read the file at the srcFilePath - cmd := []string{"cat", srcFilePath} + cmd := []string{"ls", srcFilePath} req := clientset.CoreV1().RESTClient().Post(). Resource("pods"). - Name(podName). - Namespace(namespace). + Name(pod.Pod.Name). + Namespace(pod.Pod.Namespace). SubResource("exec"). Param("container", "sniffer"). Param("stdout", "true"). 
@@ -158,7 +83,81 @@ func copyFileFromPod(ctx context.Context, clientset *kubernetes.Clientset, confi exec, err := remotecommand.NewSPDYExecutor(config, "POST", req.URL()) if err != nil { - return fmt.Errorf("failed to initialize executor for pod %s in namespace %s: %w", podName, namespace, err) + return err + } + + var stdoutBuf bytes.Buffer + var stderrBuf bytes.Buffer + + // Execute the command to list files + err = exec.StreamWithContext(ctx, remotecommand.StreamOptions{ + Stdout: &stdoutBuf, + Stderr: &stderrBuf, + }) + if err != nil { + return err + } + + // Split the output (file names) into a list + files := strings.Split(strings.TrimSpace(stdoutBuf.String()), "\n") + if len(files) == 0 { + // No files were found in the target dir for this pod + return nil + } + + var filteredFiles []string + var fileProcessingErrs []error + // Filter files based on cutoff time if provided + for _, file := range files { + if cutoffTime != nil { + parts := strings.Split(file, "-") + if len(parts) < 2 { + continue + } + + timestampStr := parts[len(parts)-2] + parts[len(parts)-1][:6] // Extract YYYYMMDDHHMMSS + fileTime, err := time.Parse("20060102150405", timestampStr) + if err != nil { + fileProcessingErrs = append(fileProcessingErrs, fmt.Errorf("failed parse file timestamp %s: %w", file, err)) + continue + } + + if fileTime.Before(*cutoffTime) { + continue + } + } + // Add file to filtered list + filteredFiles = append(filteredFiles, file) + } + + pod.SrcDir = srcDir + pod.Files = filteredFiles + + return errors.Join(fileProcessingErrs...) 
+} + +// copyFileFromPod copies a single file from a pod to a local destination +func copyFileFromPod(ctx context.Context, clientset *kubernetes.Clientset, config *rest.Config, pod *PodFileInfo, srcFile, destFile string) error { + // Construct the complete path using /data, the node name, srcDir, and srcFile + nodeName := pod.Pod.Spec.NodeName + srcFilePath := filepath.Join("data", nodeName, srcDir, srcFile) + + // Execute the `cat` command to read the file at the srcFilePath + cmd := []string{"cat", srcFilePath} + req := clientset.CoreV1().RESTClient().Post(). + Resource("pods"). + Name(pod.Pod.Name). + Namespace(pod.Pod.Namespace). + SubResource("exec"). + Param("container", "sniffer"). + Param("stdout", "true"). + Param("stderr", "true"). + Param("command", cmd[0]). + Param("command", cmd[1]) + + exec, err := remotecommand.NewSPDYExecutor(config, "POST", req.URL()) + if err != nil { + return fmt.Errorf("failed to initialize executor for pod %s in namespace %s: %w", pod.Pod.Name, pod.Pod.Namespace, err) } // Create the local file to write the content to @@ -177,7 +176,7 @@ func copyFileFromPod(ctx context.Context, clientset *kubernetes.Clientset, confi Stderr: &stderrBuf, }) if err != nil { - return fmt.Errorf("error copying file from pod %s in namespace %s: %s", podName, namespace, stderrBuf.String()) + return err } return nil @@ -187,184 +186,186 @@ func mergePCAPs(outputFile string, inputFiles []string) error { // Create the output file f, err := os.Create(outputFile) if err != nil { - return err + return fmt.Errorf("failed to create output file: %w", err) } defer f.Close() - // Create a pcap writer for the output file - writer := pcapgo.NewWriter(f) - err = writer.WriteFileHeader(65536, layers.LinkTypeEthernet) // Snapshot length and LinkType + bufWriter := bufio.NewWriterSize(f, 4*1024*1024) + defer bufWriter.Flush() + + // Create the PCAP writer + writer := pcapgo.NewWriter(bufWriter) + err = writer.WriteFileHeader(maxSnaplen, 1) if err != nil { - return 
err + return fmt.Errorf("failed to write PCAP file header: %w", err) } + var mergingErrs []error + for _, inputFile := range inputFiles { - log.Info().Msgf("Merging %s int %s", inputFile, outputFile) - // Open each input file + // Open the input file file, err := os.Open(inputFile) if err != nil { - log.Error().Err(err).Msgf("Failed to open %v", inputFile) + mergingErrs = append(mergingErrs, fmt.Errorf("failed to open %s: %w", inputFile, err)) continue } - defer file.Close() + fileInfo, err := file.Stat() + if err != nil { + mergingErrs = append(mergingErrs, fmt.Errorf("failed to stat file %s: %w", inputFile, err)) + file.Close() + continue + } + + if fileInfo.Size() == 0 { + // Skip empty files + log.Debug().Msgf("Skipped empty file: %s", inputFile) + file.Close() + continue + } + + // Create the PCAP reader for the input file reader, err := pcapgo.NewReader(file) if err != nil { - log.Error().Err(err).Msgf("Failed to create pcapng reader for %v", file.Name()) + mergingErrs = append(mergingErrs, fmt.Errorf("failed to create pcapng reader for %v: %w", file.Name(), err)) + file.Close() continue } - // Create the packet source - packetSource := gopacket.NewPacketSource(reader, layers.LinkTypeEthernet) - - for packet := range packetSource.Packets() { - err := writer.WritePacket(packet.Metadata().CaptureInfo, packet.Data()) + for { + // Read packet data + data, ci, err := reader.ReadPacketData() if err != nil { - log.Error().Err(err).Msgf("Failed to write packet to %v", outputFile) - continue + if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) { + break + } + mergingErrs = append(mergingErrs, fmt.Errorf("error reading packet from file %s: %w", file.Name(), err)) + break + } + + // Write the packet to the output file + err = writer.WritePacket(ci, data) + if err != nil { + log.Error().Err(err).Msgf("Error writing packet to output file") + mergingErrs = append(mergingErrs, fmt.Errorf("error writing packet to output file: %w", err)) + break } } + + 
file.Close() } + log.Debug().Err(errors.Join(mergingErrs...)) + return nil } -// setPcapConfigInKubernetes sets the PCAP config for all pods across multiple namespaces -func setPcapConfigInKubernetes(ctx context.Context, clientset *clientk8s.Clientset, podName string, namespaces []string, enabledPcap bool, timeInterval, maxTime, maxSize string) error { - for _, namespace := range namespaces { - // Load the existing ConfigMap in the current namespace - configMap, err := clientset.CoreV1().ConfigMaps(namespace).Get(ctx, "kubeshark-config-map", metav1.GetOptions{}) - if err != nil { - log.Error().Err(err).Msgf("failed to get ConfigMap in namespace %s", namespace) - continue - } - - // Update the values with user-provided input - configMap.Data["PCAP_TIME_INTERVAL"] = timeInterval - configMap.Data["PCAP_MAX_SIZE"] = maxSize - configMap.Data["PCAP_MAX_TIME"] = maxTime - configMap.Data["PCAP_DUMP_ENABLE"] = strconv.FormatBool(enabledPcap) - - // Apply the updated ConfigMap back to the cluster in the current namespace - _, err = clientset.CoreV1().ConfigMaps(namespace).Update(ctx, configMap, metav1.UpdateOptions{}) - if err != nil { - log.Error().Err(err).Msgf("failed to update ConfigMap in namespace %s", namespace) - continue - } - } - - return nil -} - -// startPcap function for starting the PCAP capture -func startStopPcap(clientset *kubernetes.Clientset, pcapEnable bool, timeInterval, maxTime, maxSize string) error { - kubernetesProvider, err := getKubernetesProviderForCli(false, false) +func copyPcapFiles(clientset *kubernetes.Clientset, config *rest.Config, destDir string, cutoffTime *time.Time) error { + // List all namespaces + namespaceList, err := clientset.CoreV1().Namespaces().List(context.TODO(), metav1.ListOptions{}) if err != nil { - log.Error().Err(err).Send() return err } - targetNamespaces := kubernetesProvider.GetNamespaces() + var targetNamespaces []string + for _, ns := range namespaceList.Items { + targetNamespaces = append(targetNamespaces, ns.Name) 
+ } - // List worker pods + // List all worker pods workerPods, err := listWorkerPods(context.Background(), clientset, targetNamespaces) if err != nil { - log.Error().Err(err).Msg("Error listing worker pods") - return err - } - - // Iterate over each pod to start the PCAP capture by updating the configuration in Kubernetes - for _, pod := range workerPods { - err := setPcapConfigInKubernetes(context.Background(), clientset, pod.Name, targetNamespaces, pcapEnable, timeInterval, maxTime, maxSize) - if err != nil { - log.Error().Err(err).Msgf("Error setting PCAP config for pod %s", pod.Name) - continue + if len(workerPods) == 0 { + return err } - } - return nil -} - -// copyPcapFiles function for copying the PCAP files from the worker pods -func copyPcapFiles(clientset *kubernetes.Clientset, config *rest.Config, destDir string) error { - kubernetesProvider, err := getKubernetesProviderForCli(false, false) - if err != nil { - log.Error().Err(err).Send() - return err + log.Debug().Err(err).Msg("error while listing worker pods") } - targetNamespaces := kubernetesProvider.GetNamespaces() + var wg sync.WaitGroup - // List worker pods - workerPods, err := listWorkerPods(context.Background(), clientset, targetNamespaces) - if err != nil { - log.Error().Err(err).Msg("Error listing worker pods") - return err - } - var currentFiles []string - - // Iterate over each pod to get the PCAP directory from config and copy files + // Launch a goroutine for each pod for _, pod := range workerPods { - // Get the list of NamespaceFiles (files per namespace) and their source directories - namespaceFiles, err := listFilesInPodDir(context.Background(), clientset, config, pod.Name, targetNamespaces, SELF_RESOURCES_PREFIX+SUFFIX_CONFIG_MAP, "PCAP_SRC_DIR") - if err != nil { - log.Error().Err(err).Msgf("Error listing files in pod %s", pod.Name) - continue - } + wg.Add(1) - // Copy each file from the pod to the local destination for each namespace - for _, nsFiles := range namespaceFiles { - for 
_, file := range nsFiles.Files { + go func(pod *PodFileInfo) { + defer wg.Done() + + // List files for the current pod + err := listFilesInPodDir(context.Background(), clientset, config, pod, cutoffTime) + if err != nil { + log.Debug().Err(err).Msgf("error listing files in pod %s", pod.Pod.Name) + return + } + + // Copy files from the pod + for _, file := range pod.Files { destFile := filepath.Join(destDir, file) - // Pass the correct namespace and related details to the function - err = copyFileFromPod(context.Background(), clientset, config, pod.Name, nsFiles.Namespace, nsFiles.SrcDir, file, destFile) + // Add a timeout context for file copy + ctx, cancel := context.WithTimeout(context.Background(), maxTimePerFile) + err := copyFileFromPod(ctx, clientset, config, pod, file, destFile) + cancel() if err != nil { - log.Error().Err(err).Msgf("Error copying file from pod %s in namespace %s", pod.Name, nsFiles.Namespace) - } else { - log.Info().Msgf("Copied %s from %s to %s", file, pod.Name, destFile) + log.Debug().Err(err).Msgf("error copying file %s from pod %s in namespace %s", file, pod.Pod.Name, pod.Pod.Namespace) + continue } - currentFiles = append(currentFiles, destFile) + log.Info().Msgf("Copied file %s from pod %s to %s", file, pod.Pod.Name, destFile) + pod.CopiedFiles = append(pod.CopiedFiles, destFile) } - } - + }(pod) } - if len(currentFiles) == 0 { - log.Error().Msgf("No files to merge") + // Wait for all goroutines to complete + wg.Wait() + + var copiedFiles []string + for _, pod := range workerPods { + copiedFiles = append(copiedFiles, pod.CopiedFiles...) 
+ } + + if len(copiedFiles) == 0 { + log.Info().Msg("No pcaps available to copy on the workers") return nil - // continue } - // Generate a temporary filename based on the first file - tempMergedFile := currentFiles[0] + "_temp" + // Generate a temporary filename for the merged file + tempMergedFile := copiedFiles[0] + "_temp" - // Merge the PCAPs into the temporary file - err = mergePCAPs(tempMergedFile, currentFiles) + // Merge PCAP files + err = mergePCAPs(tempMergedFile, copiedFiles) if err != nil { - log.Error().Err(err).Msgf("Error merging files") - return err - // continue + os.Remove(tempMergedFile) + return fmt.Errorf("error merging files: %w", err) } // Remove the original files after merging - for _, file := range currentFiles { - err := os.Remove(file) - if err != nil { - log.Error().Err(err).Msgf("Error removing file %s", file) + for _, file := range copiedFiles { + if err = os.Remove(file); err != nil { + log.Debug().Err(err).Msgf("error removing file %s", file) } } - // Rename the temp file to the final name (removing "_temp") - finalMergedFile := strings.TrimSuffix(tempMergedFile, "_temp") + clusterID, err := getClusterID(clientset) + if err != nil { + return fmt.Errorf("failed to get cluster ID: %w", err) + } + timestamp := time.Now().Format("2006-01-02_15-04") + // Rename the temp file to the final name + finalMergedFile := filepath.Join(destDir, fmt.Sprintf("%s-%s.pcap", clusterID, timestamp)) err = os.Rename(tempMergedFile, finalMergedFile) if err != nil { - log.Error().Err(err).Msgf("Error renaming merged file %s", tempMergedFile) - // continue return err } log.Info().Msgf("Merged file created: %s", finalMergedFile) - return nil } + +func getClusterID(clientset *kubernetes.Clientset) (string, error) { + namespace, err := clientset.CoreV1().Namespaces().Get(context.TODO(), "kube-system", metav1.GetOptions{}) + if err != nil { + return "", fmt.Errorf("failed to get kube-system namespace UID: %w", err) + } + return string(namespace.UID), nil +} 
diff --git a/cmd/proxyRunner.go b/cmd/proxyRunner.go index 0b7804b92..2fa5e27af 100644 --- a/cmd/proxyRunner.go +++ b/cmd/proxyRunner.go @@ -92,13 +92,6 @@ func runProxy(block bool, noBrowser bool) { establishedProxy = true okToOpen("Kubeshark", frontUrl, noBrowser) } - if config.Config.Scripting.Source != "" && config.Config.Scripting.WatchScripts { - watchScripts(kubernetesProvider, false) - } - - if config.Config.Scripting.Console { - go runConsoleWithoutProxy() - } if establishedProxy && block { utils.WaitForTermination(ctx, cancel) } diff --git a/cmd/scripts.go b/cmd/scripts.go index 406b3a015..8959511fa 100644 --- a/cmd/scripts.go +++ b/cmd/scripts.go @@ -3,7 +3,12 @@ package cmd import ( "context" "encoding/json" + "errors" + "os" + "os/signal" "strings" + "sync" + "time" "github.com/creasty/defaults" "github.com/fsnotify/fsnotify" @@ -11,14 +16,16 @@ import ( "github.com/kubeshark/kubeshark/config/configStructs" "github.com/kubeshark/kubeshark/kubernetes" "github.com/kubeshark/kubeshark/misc" - "github.com/kubeshark/kubeshark/utils" "github.com/rs/zerolog/log" "github.com/spf13/cobra" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/watch" ) var scriptsCmd = &cobra.Command{ Use: "scripts", - Short: "Watch the `scripting.source` directory for changes and update the scripts", + Short: "Watch the `scripting.source` and/or `scripting.sources` folders for changes and update the scripts", RunE: func(cmd *cobra.Command, args []string) error { runScripts() return nil @@ -39,8 +46,8 @@ func init() { } func runScripts() { - if config.Config.Scripting.Source == "" { - log.Error().Msg("`scripting.source` field is empty.") + if config.Config.Scripting.Source == "" && len(config.Config.Scripting.Sources) == 0 { + log.Error().Msg("Both `scripting.source` and `scripting.sources` fields are empty.") return } @@ -50,44 +57,82 @@ func runScripts() { return } - watchScripts(kubernetesProvider, true) + 
var wg sync.WaitGroup + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + signalChan := make(chan os.Signal, 1) + signal.Notify(signalChan, os.Interrupt) + + wg.Add(1) + go func() { + defer wg.Done() + watchConfigMap(ctx, kubernetesProvider) + }() + + wg.Add(1) + go func() { + defer wg.Done() + watchScripts(ctx, kubernetesProvider, true) + }() + + go func() { + <-signalChan + log.Debug().Msg("Received interrupt, stopping watchers.") + cancel() + }() + + wg.Wait() + } func createScript(provider *kubernetes.Provider, script misc.ConfigMapScript) (index int64, err error) { + const maxRetries = 5 var scripts map[int64]misc.ConfigMapScript - scripts, err = kubernetes.ConfigGetScripts(provider) - if err != nil { - return - } - // Turn it into updateScript if there is a script with the same title - var setScript bool - if script.Title != "New Script" { - for i, v := range scripts { - if v.Title == script.Title { - scripts[i] = script - setScript = true + for i := 0; i < maxRetries; i++ { + scripts, err = kubernetes.ConfigGetScripts(provider) + if err != nil { + return + } + script.Active = kubernetes.IsActiveScript(provider, script.Title) + index = 0 + if script.Title != "New Script" { + for i, v := range scripts { + if index <= i { + index = i + 1 + } + if v.Title == script.Title { + index = int64(i) + } } } - } - - if !setScript { - index = int64(len(scripts)) scripts[index] = script + + log.Info().Str("title", script.Title).Bool("Active", script.Active).Int64("Index", index).Msg("Creating script") + var data []byte + data, err = json.Marshal(scripts) + if err != nil { + return + } + + _, err = kubernetes.SetConfig(provider, kubernetes.CONFIG_SCRIPTING_SCRIPTS, string(data)) + if err == nil { + return index, nil + } + + if k8serrors.IsConflict(err) { + log.Warn().Err(err).Msg("Conflict detected, retrying update...") + time.Sleep(500 * time.Millisecond) + continue + } + + return 0, err } - var data []byte - data, err = json.Marshal(scripts) - 
if err != nil { - return - } - - _, err = kubernetes.SetConfig(provider, kubernetes.CONFIG_SCRIPTING_SCRIPTS, string(data)) - if err != nil { - return - } - - return + log.Error().Msg("Max retries reached for creating script due to conflicts.") + return 0, errors.New("max retries reached due to conflicts while creating script") } func updateScript(provider *kubernetes.Provider, index int64, script misc.ConfigMapScript) (err error) { @@ -96,7 +141,7 @@ func updateScript(provider *kubernetes.Provider, index int64, script misc.Config if err != nil { return } - + script.Active = kubernetes.IsActiveScript(provider, script.Title) scripts[index] = script var data []byte @@ -119,7 +164,10 @@ func deleteScript(provider *kubernetes.Provider, index int64) (err error) { if err != nil { return } - + err = kubernetes.DeleteActiveScriptByTitle(provider, scripts[index].Title) + if err != nil { + return + } delete(scripts, index) var data []byte @@ -136,7 +184,7 @@ func deleteScript(provider *kubernetes.Provider, index int64) (err error) { return } -func watchScripts(provider *kubernetes.Provider, block bool) { +func watchScripts(ctx context.Context, provider *kubernetes.Provider, block bool) { files := make(map[string]int64) scripts, err := config.Config.Scripting.GetScripts() @@ -149,7 +197,7 @@ func watchScripts(provider *kubernetes.Provider, block bool) { index, err := createScript(provider, script.ConfigMap()) if err != nil { log.Error().Err(err).Send() - return + continue } files[script.Path] = index @@ -164,9 +212,31 @@ func watchScripts(provider *kubernetes.Provider, block bool) { defer watcher.Close() } + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + signalChan := make(chan os.Signal, 1) + signal.Notify(signalChan, os.Interrupt) + + go func() { + <-signalChan + log.Debug().Msg("Received interrupt, stopping script watch.") + cancel() + watcher.Close() + }() + + if err := watcher.Add(config.Config.Scripting.Source); err != nil { + 
log.Error().Err(err).Msg("Failed to add scripting source to watcher") + return + } + go func() { for { select { + case <-ctx.Done(): + log.Debug().Msg("Script watcher exiting gracefully.") + return + // watch for events case event := <-watcher.Events: if !strings.HasSuffix(event.Name, "js") { @@ -215,9 +285,12 @@ func watchScripts(provider *kubernetes.Provider, block bool) { // pass } - // watch for errors - case err := <-watcher.Errors: - log.Error().Err(err).Send() + case err, ok := <-watcher.Errors: + if !ok { + log.Info().Msg("Watcher errors channel closed.") + return + } + log.Error().Err(err).Msg("Watcher error encountered") } } }() @@ -226,11 +299,79 @@ func watchScripts(provider *kubernetes.Provider, block bool) { log.Error().Err(err).Send() } - log.Info().Str("directory", config.Config.Scripting.Source).Msg("Watching scripts against changes:") + for _, source := range config.Config.Scripting.Sources { + if err := watcher.Add(source); err != nil { + log.Error().Err(err).Send() + } + } + + log.Info().Str("folder", config.Config.Scripting.Source).Interface("folders", config.Config.Scripting.Sources).Msg("Watching scripts against changes:") if block { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - utils.WaitForTermination(ctx, cancel) + <-ctx.Done() } } + +func watchConfigMap(ctx context.Context, provider *kubernetes.Provider) { + clientset := provider.GetClientSet() + configMapName := kubernetes.SELF_RESOURCES_PREFIX + kubernetes.SUFFIX_CONFIG_MAP + + for { + select { + case <-ctx.Done(): + log.Info().Msg("ConfigMap watcher exiting gracefully.") + return + + default: + watcher, err := clientset.CoreV1().ConfigMaps(config.Config.Tap.Release.Namespace).Watch(context.TODO(), metav1.ListOptions{ + FieldSelector: "metadata.name=" + configMapName, + }) + if err != nil { + log.Warn().Err(err).Msg("ConfigMap not found, retrying in 5 seconds...") + time.Sleep(5 * time.Second) + continue + } + + for event := range watcher.ResultChan() { + 
select { + case <-ctx.Done(): + log.Info().Msg("ConfigMap watcher loop exiting gracefully.") + watcher.Stop() + return + + default: + if event.Type == watch.Added { + log.Info().Msg("ConfigMap created or modified") + runScriptsSync(provider) + } else if event.Type == watch.Deleted { + log.Warn().Msg("ConfigMap deleted, waiting for recreation...") + watcher.Stop() + break + } + } + } + + time.Sleep(5 * time.Second) + } + } +} + +func runScriptsSync(provider *kubernetes.Provider) { + files := make(map[string]int64) + + scripts, err := config.Config.Scripting.GetScripts() + if err != nil { + log.Error().Err(err).Send() + return + } + + for _, script := range scripts { + index, err := createScript(provider, script.ConfigMap()) + if err != nil { + log.Error().Err(err).Send() + continue + } + files[script.Path] = index + } + log.Info().Msg("Synchronized scripts with ConfigMap.") +} diff --git a/cmd/tap.go b/cmd/tap.go index dcc8f8662..e4a9101b5 100644 --- a/cmd/tap.go +++ b/cmd/tap.go @@ -58,7 +58,6 @@ func init() { tapCmd.Flags().Bool(configStructs.DryRunLabel, defaultTapConfig.DryRun, "Preview of all pods matching the regex, without tapping them") tapCmd.Flags().Bool(configStructs.ServiceMeshLabel, defaultTapConfig.ServiceMesh, "Capture the encrypted traffic if the cluster is configured with a service mesh and with mTLS") tapCmd.Flags().Bool(configStructs.TlsLabel, defaultTapConfig.Tls, "Capture the traffic that's encrypted with OpenSSL or Go crypto/tls libraries") - tapCmd.Flags().Bool(configStructs.IgnoreTaintedLabel, defaultTapConfig.IgnoreTainted, "Ignore tainted pods while running Worker DaemonSet") tapCmd.Flags().Bool(configStructs.IngressEnabledLabel, defaultTapConfig.Ingress.Enabled, "Enable Ingress") tapCmd.Flags().Bool(configStructs.TelemetryEnabledLabel, defaultTapConfig.Telemetry.Enabled, "Enable/disable Telemetry") tapCmd.Flags().Bool(configStructs.ResourceGuardEnabledLabel, defaultTapConfig.ResourceGuard.Enabled, "Enable/disable resource guard") diff 
--git a/cmd/tapRunner.go b/cmd/tapRunner.go index 7f2c0f684..f4f46aae9 100644 --- a/cmd/tapRunner.go +++ b/cmd/tapRunner.go @@ -424,8 +424,9 @@ func postFrontStarted(ctx context.Context, kubernetesProvider *kubernetes.Provid time.Sleep(100 * time.Millisecond) } - if config.Config.Scripting.Source != "" && config.Config.Scripting.WatchScripts { - watchScripts(kubernetesProvider, false) + + if (config.Config.Scripting.Source != "" || len(config.Config.Scripting.Sources) > 0) && config.Config.Scripting.WatchScripts { + watchScripts(ctx, kubernetesProvider, false) } if config.Config.Scripting.Console { diff --git a/config/config.go b/config/config.go index 8a37dffec..953b5e7a0 100644 --- a/config/config.go +++ b/config/config.go @@ -63,6 +63,9 @@ func InitConfig(cmd *cobra.Command) error { Config = CreateDefaultConfig() Config.Tap.Debug = DebugMode + if DebugMode { + Config.LogLevel = "debug" + } cmdName = cmd.Name() if utils.Contains([]string{ "clean", diff --git a/config/configStruct.go b/config/configStruct.go index 3993c6f9a..2ee6bf8e8 100644 --- a/config/configStruct.go +++ b/config/configStruct.go @@ -16,46 +16,78 @@ const ( func CreateDefaultConfig() ConfigStruct { return ConfigStruct{ Tap: configStructs.TapConfig{ - NodeSelectorTerms: []v1.NodeSelectorTerm{ - { - MatchExpressions: []v1.NodeSelectorRequirement{ - { - Key: "kubernetes.io/os", - Operator: v1.NodeSelectorOpIn, - Values: []string{"linux"}, + NodeSelectorTerms: configStructs.NodeSelectorTermsConfig{ + Workers: []v1.NodeSelectorTerm{ + { + MatchExpressions: []v1.NodeSelectorRequirement{ + { + Key: "kubernetes.io/os", + Operator: v1.NodeSelectorOpIn, + Values: []string{"linux"}, + }, + }, + }, + }, + Hub: []v1.NodeSelectorTerm{ + { + MatchExpressions: []v1.NodeSelectorRequirement{ + { + Key: "kubernetes.io/os", + Operator: v1.NodeSelectorOpIn, + Values: []string{"linux"}, + }, + }, + }, + }, + Front: []v1.NodeSelectorTerm{ + { + MatchExpressions: []v1.NodeSelectorRequirement{ + { + Key: 
"kubernetes.io/os", + Operator: v1.NodeSelectorOpIn, + Values: []string{"linux"}, + }, }, }, }, }, - Capabilities: configStructs.CapabilitiesConfig{ - NetworkCapture: []string{ - // NET_RAW is required to listen the network traffic - "NET_RAW", - // NET_ADMIN is required to listen the network traffic - "NET_ADMIN", + Tolerations: configStructs.TolerationsConfig{ + Workers: []v1.Toleration{ + { + Effect: v1.TaintEffect("NoExecute"), + Operator: v1.TolerationOpExists, + }, }, - ServiceMeshCapture: []string{ - // SYS_ADMIN is required to read /proc/PID/net/ns + to install eBPF programs (kernel < 5.8) - "SYS_ADMIN", - // SYS_PTRACE is required to set netns to other process + to open libssl.so of other process - "SYS_PTRACE", - // DAC_OVERRIDE is required to read /proc/PID/environ - "DAC_OVERRIDE", - }, - KernelModule: []string{ - // SYS_MODULE is required to install kernel modules - "SYS_MODULE", - }, - EBPFCapture: []string{ - // SYS_ADMIN is required to read /proc/PID/net/ns + to install eBPF programs (kernel < 5.8) - "SYS_ADMIN", - // SYS_PTRACE is required to set netns to other process + to open libssl.so of other process - "SYS_PTRACE", - // SYS_RESOURCE is required to change rlimits for eBPF - "SYS_RESOURCE", - // IPC_LOCK is required for ebpf perf buffers allocations after some amount of size buffer size: - // https://github.com/kubeshark/tracer/blob/13e24725ba8b98216dd0e553262e6d9c56dce5fa/main.go#L82) - "IPC_LOCK", + }, + SecurityContext: configStructs.SecurityContextConfig{ + Privileged: true, + // Capabilities used only when running in unprivileged mode + Capabilities: configStructs.CapabilitiesConfig{ + NetworkCapture: []string{ + // NET_RAW is required to listen the network traffic + "NET_RAW", + // NET_ADMIN is required to listen the network traffic + "NET_ADMIN", + }, + ServiceMeshCapture: []string{ + // SYS_ADMIN is required to read /proc/PID/net/ns + to install eBPF programs (kernel < 5.8) + "SYS_ADMIN", + // SYS_PTRACE is required to set netns to 
other process + to open libssl.so of other process + "SYS_PTRACE", + // DAC_OVERRIDE is required to read /proc/PID/environ + "DAC_OVERRIDE", + }, + EBPFCapture: []string{ + // SYS_ADMIN is required to read /proc/PID/net/ns + to install eBPF programs (kernel < 5.8) + "SYS_ADMIN", + // SYS_PTRACE is required to set netns to other process + to open libssl.so of other process + "SYS_PTRACE", + // SYS_RESOURCE is required to change rlimits for eBPF + "SYS_RESOURCE", + // IPC_LOCK is required for ebpf perf buffers allocations after some amount of size buffer size: + // https://github.com/kubeshark/tracer/blob/13e24725ba8b98216dd0e553262e6d9c56dce5fa/main.go#L82) + "IPC_LOCK", + }, }, }, Auth: configStructs.AuthConfig{ @@ -63,9 +95,14 @@ func CreateDefaultConfig() ConfigStruct { RoleAttribute: "role", Roles: map[string]configStructs.Role{ "admin": { - Filter: "", - CanDownloadPCAP: true, - CanUseScripting: true, + Filter: "", + CanDownloadPCAP: true, + CanUseScripting: true, + ScriptingPermissions: configStructs.ScriptingPermissions{ + CanSave: true, + CanActivate: true, + CanDelete: true, + }, CanUpdateTargetedPods: true, CanStopTrafficCapturing: true, ShowAdminConsoleLink: true, @@ -82,10 +119,21 @@ func CreateDefaultConfig() ConfigStruct { "redis", "sctp", "syscall", - "tcp", - "udp", + // "tcp", + // "udp", "ws", - "tls", + // "tlsx", + "ldap", + "radius", + "diameter", + }, + PortMapping: configStructs.PortMapping{ + HTTP: []uint16{80, 443, 8080}, + AMQP: []uint16{5671, 5672}, + KAFKA: []uint16{9092}, + REDIS: []uint16{6379}, + LDAP: []uint16{389}, + DIAMETER: []uint16{3868}, }, }, } @@ -101,22 +149,24 @@ type ManifestsConfig struct { } type ConfigStruct struct { - Tap configStructs.TapConfig `yaml:"tap" json:"tap"` - Logs configStructs.LogsConfig `yaml:"logs" json:"logs"` - Config configStructs.ConfigConfig `yaml:"config,omitempty" json:"config,omitempty"` - PcapDump configStructs.PcapDumpConfig `yaml:"pcapdump" json:"pcapdump"` - Kube KubeConfig `yaml:"kube" 
json:"kube"` - DumpLogs bool `yaml:"dumpLogs" json:"dumpLogs" default:"false"` - HeadlessMode bool `yaml:"headless" json:"headless" default:"false"` - License string `yaml:"license" json:"license" default:""` - CloudLicenseEnabled bool `yaml:"cloudLicenseEnabled" json:"cloudLicenseEnabled" default:"true"` - SupportChatEnabled bool `yaml:"supportChatEnabled" json:"supportChatEnabled" default:"true"` - ServiceMapNewUiEnabled bool `yaml:"serviceMapNewUiEnabled" json:"serviceMapNewUiEnabled" default:"false"` - InternetConnectivity bool `yaml:"internetConnectivity" json:"internetConnectivity" default:"true"` - DissectorsUpdatingEnabled bool `yaml:"dissectorsUpdatingEnabled" json:"dissectorsUpdatingEnabled" default:"true"` - Scripting configStructs.ScriptingConfig `yaml:"scripting" json:"scripting"` - Manifests ManifestsConfig `yaml:"manifests,omitempty" json:"manifests,omitempty"` - Timezone string `yaml:"timezone" json:"timezone"` + Tap configStructs.TapConfig `yaml:"tap" json:"tap"` + Logs configStructs.LogsConfig `yaml:"logs" json:"logs"` + Config configStructs.ConfigConfig `yaml:"config,omitempty" json:"config,omitempty"` + PcapDump configStructs.PcapDumpConfig `yaml:"pcapdump" json:"pcapdump"` + Kube KubeConfig `yaml:"kube" json:"kube"` + DumpLogs bool `yaml:"dumpLogs" json:"dumpLogs" default:"false"` + HeadlessMode bool `yaml:"headless" json:"headless" default:"false"` + License string `yaml:"license" json:"license" default:""` + CloudLicenseEnabled bool `yaml:"cloudLicenseEnabled" json:"cloudLicenseEnabled" default:"true"` + AiAssistantEnabled bool `yaml:"aiAssistantEnabled" json:"aiAssistantEnabled" default:"false"` + DemoModeEnabled bool `yaml:"demoModeEnabled" json:"demoModeEnabled" default:"false"` + SupportChatEnabled bool `yaml:"supportChatEnabled" json:"supportChatEnabled" default:"true"` + ServiceMapNewUiEnabled bool `yaml:"serviceMapNewUiEnabled" json:"serviceMapNewUiEnabled" default:"false"` + InternetConnectivity bool `yaml:"internetConnectivity" 
json:"internetConnectivity" default:"true"` + Scripting configStructs.ScriptingConfig `yaml:"scripting" json:"scripting"` + Manifests ManifestsConfig `yaml:"manifests,omitempty" json:"manifests,omitempty"` + Timezone string `yaml:"timezone" json:"timezone"` + LogLevel string `yaml:"logLevel" json:"logLevel" default:"warning"` } func (config *ConfigStruct) ImagePullPolicy() v1.PullPolicy { diff --git a/config/configStructs/scriptingConfig.go b/config/configStructs/scriptingConfig.go index 9dd6265d8..0cc044ec3 100644 --- a/config/configStructs/scriptingConfig.go +++ b/config/configStructs/scriptingConfig.go @@ -1,6 +1,7 @@ package configStructs import ( + "fmt" "io/fs" "os" "path/filepath" @@ -13,40 +14,79 @@ import ( type ScriptingConfig struct { Env map[string]interface{} `yaml:"env" json:"env" default:"{}"` Source string `yaml:"source" json:"source" default:""` + Sources []string `yaml:"sources" json:"sources" default:"[]"` WatchScripts bool `yaml:"watchScripts" json:"watchScripts" default:"true"` + Active []string `yaml:"active" json:"active" default:"[]"` Console bool `yaml:"console" json:"console" default:"true"` } func (config *ScriptingConfig) GetScripts() (scripts []*misc.Script, err error) { - if config.Source == "" { - return + // Check if both Source and Sources are empty + if config.Source == "" && len(config.Sources) == 0 { + return nil, nil } - var files []fs.DirEntry - files, err = os.ReadDir(config.Source) - if err != nil { - return + var allFiles []struct { + Source string + File fs.DirEntry } - for _, f := range files { - if f.IsDir() { + // Handle single Source directory + if config.Source != "" { + files, err := os.ReadDir(config.Source) + if err != nil { + return nil, fmt.Errorf("failed to read directory %s: %v", config.Source, err) + } + for _, file := range files { + allFiles = append(allFiles, struct { + Source string + File fs.DirEntry + }{Source: config.Source, File: file}) + } + } + + // Handle multiple Sources directories + if 
len(config.Sources) > 0 { + for _, source := range config.Sources { + files, err := os.ReadDir(source) + if err != nil { + return nil, fmt.Errorf("failed to read directory %s: %v", source, err) + } + for _, file := range files { + allFiles = append(allFiles, struct { + Source string + File fs.DirEntry + }{Source: source, File: file}) + } + } + } + + // Iterate over all collected files + for _, f := range allFiles { + if f.File.IsDir() { continue } - var script *misc.Script - path := filepath.Join(config.Source, f.Name()) - if !strings.HasSuffix(path, ".js") { + // Construct the full path based on the relevant source directory + path := filepath.Join(f.Source, f.File.Name()) + if !strings.HasSuffix(f.File.Name(), ".js") { // Use file name suffix for skipping non-JS files log.Info().Str("path", path).Msg("Skipping non-JS file") continue } + + // Read the script file + var script *misc.Script script, err = misc.ReadScriptFile(path) if err != nil { - return + return nil, fmt.Errorf("failed to read script file %s: %v", path, err) } + + // Append the valid script to the scripts slice scripts = append(scripts, script) log.Debug().Str("path", path).Msg("Found script:") } - return + // Return the collected scripts and nil error if successful + return scripts, nil } diff --git a/config/configStructs/tapConfig.go b/config/configStructs/tapConfig.go index 2ccf972c4..456f165a5 100644 --- a/config/configStructs/tapConfig.go +++ b/config/configStructs/tapConfig.go @@ -35,24 +35,25 @@ const ( PprofPortLabel = "pprof-port" PprofViewLabel = "pprof-view" DebugLabel = "debug" - ContainerPort = 80 - ContainerPortStr = "80" + ContainerPort = 8080 + ContainerPortStr = "8080" PcapDest = "dest" PcapMaxSize = "maxSize" PcapMaxTime = "maxTime" PcapTimeInterval = "timeInterval" PcapKubeconfig = "kubeconfig" PcapDumpEnabled = "enabled" + PcapTime = "time" ) type ResourceLimitsHub struct { - CPU string `yaml:"cpu" json:"cpu" default:"1000m"` - Memory string `yaml:"memory" json:"memory" 
default:"1500Mi"` + CPU string `yaml:"cpu" json:"cpu" default:"0"` + Memory string `yaml:"memory" json:"memory" default:"5Gi"` } type ResourceLimitsWorker struct { - CPU string `yaml:"cpu" json:"cpu" default:"1200m"` - Memory string `yaml:"memory" json:"memory" default:"2000Mi"` + CPU string `yaml:"cpu" json:"cpu" default:"0"` + Memory string `yaml:"memory" json:"memory" default:"3Gi"` } type ResourceRequests struct { @@ -71,7 +72,7 @@ type ResourceRequirementsWorker struct { } type WorkerConfig struct { - SrvPort uint16 `yaml:"srvPort" json:"srvPort" default:"30001"` + SrvPort uint16 `yaml:"srvPort" json:"srvPort" default:"48999"` } type HubConfig struct { @@ -89,6 +90,11 @@ type ProxyConfig struct { Host string `yaml:"host" json:"host" default:"127.0.0.1"` } +type OverrideImageConfig struct { + Worker string `yaml:"worker" json:"worker"` + Hub string `yaml:"hub" json:"hub"` + Front string `yaml:"front" json:"front"` +} type OverrideTagConfig struct { Worker string `yaml:"worker" json:"worker"` Hub string `yaml:"hub" json:"hub"` @@ -96,12 +102,24 @@ type OverrideTagConfig struct { } type DockerConfig struct { - Registry string `yaml:"registry" json:"registry" default:"docker.io/kubeshark"` - Tag string `yaml:"tag" json:"tag" default:""` - TagLocked bool `yaml:"tagLocked" json:"tagLocked" default:"true"` - ImagePullPolicy string `yaml:"imagePullPolicy" json:"imagePullPolicy" default:"Always"` - ImagePullSecrets []string `yaml:"imagePullSecrets" json:"imagePullSecrets"` - OverrideTag OverrideTagConfig `yaml:"overrideTag" json:"overrideTag"` + Registry string `yaml:"registry" json:"registry" default:"docker.io/kubeshark"` + Tag string `yaml:"tag" json:"tag" default:""` + TagLocked bool `yaml:"tagLocked" json:"tagLocked" default:"true"` + ImagePullPolicy string `yaml:"imagePullPolicy" json:"imagePullPolicy" default:"Always"` + ImagePullSecrets []string `yaml:"imagePullSecrets" json:"imagePullSecrets"` + OverrideImage OverrideImageConfig `yaml:"overrideImage" 
json:"overrideImage"` + OverrideTag OverrideTagConfig `yaml:"overrideTag" json:"overrideTag"` +} + +type DnsConfig struct { + Nameservers []string `yaml:"nameservers" json:"nameservers" default:"[]"` + Searches []string `yaml:"searches" json:"searches" default:"[]"` + Options []DnsConfigOption `yaml:"options" json:"options" default:"[]"` +} + +type DnsConfigOption struct { + Name string `yaml:"name" json:"name"` + Value string `yaml:"value" json:"value"` } type ResourcesConfig struct { @@ -110,13 +128,44 @@ type ResourcesConfig struct { Tracer ResourceRequirementsWorker `yaml:"tracer" json:"tracer"` } +type ProbesConfig struct { + Hub ProbeConfig `yaml:"hub" json:"hub"` + Sniffer ProbeConfig `yaml:"sniffer" json:"sniffer"` +} + +type NodeSelectorTermsConfig struct { + Hub []v1.NodeSelectorTerm `yaml:"hub" json:"hub" default:"[]"` + Workers []v1.NodeSelectorTerm `yaml:"workers" json:"workers" default:"[]"` + Front []v1.NodeSelectorTerm `yaml:"front" json:"front" default:"[]"` +} + +type TolerationsConfig struct { + Hub []v1.Toleration `yaml:"hub" json:"hub" default:"[]"` + Workers []v1.Toleration `yaml:"workers" json:"workers" default:"[]"` + Front []v1.Toleration `yaml:"front" json:"front" default:"[]"` +} + +type ProbeConfig struct { + InitialDelaySeconds int `yaml:"initialDelaySeconds" json:"initialDelaySeconds" default:"15"` + PeriodSeconds int `yaml:"periodSeconds" json:"periodSeconds" default:"10"` + SuccessThreshold int `yaml:"successThreshold" json:"successThreshold" default:"1"` + FailureThreshold int `yaml:"failureThreshold" json:"failureThreshold" default:"3"` +} + +type ScriptingPermissions struct { + CanSave bool `yaml:"canSave" json:"canSave" default:"true"` + CanActivate bool `yaml:"canActivate" json:"canActivate" default:"true"` + CanDelete bool `yaml:"canDelete" json:"canDelete" default:"true"` +} + type Role struct { - Filter string `yaml:"filter" json:"filter" default:""` - CanDownloadPCAP bool `yaml:"canDownloadPCAP" json:"canDownloadPCAP" 
default:"false"` - CanUseScripting bool `yaml:"canUseScripting" json:"canUseScripting" default:"false"` - CanUpdateTargetedPods bool `yaml:"canUpdateTargetedPods" json:"canUpdateTargetedPods" default:"false"` - CanStopTrafficCapturing bool `yaml:"canStopTrafficCapturing" json:"canStopTrafficCapturing" default:"false"` - ShowAdminConsoleLink bool `yaml:"showAdminConsoleLink" json:"showAdminConsoleLink" default:"false"` + Filter string `yaml:"filter" json:"filter" default:""` + CanDownloadPCAP bool `yaml:"canDownloadPCAP" json:"canDownloadPCAP" default:"false"` + CanUseScripting bool `yaml:"canUseScripting" json:"canUseScripting" default:"false"` + ScriptingPermissions ScriptingPermissions `yaml:"scriptingPermissions" json:"scriptingPermissions"` + CanUpdateTargetedPods bool `yaml:"canUpdateTargetedPods" json:"canUpdateTargetedPods" default:"false"` + CanStopTrafficCapturing bool `yaml:"canStopTrafficCapturing" json:"canStopTrafficCapturing" default:"false"` + ShowAdminConsoleLink bool `yaml:"showAdminConsoleLink" json:"showAdminConsoleLink" default:"false"` } type SamlConfig struct { @@ -163,16 +212,9 @@ type SentryConfig struct { type CapabilitiesConfig struct { NetworkCapture []string `yaml:"networkCapture" json:"networkCapture" default:"[]"` ServiceMeshCapture []string `yaml:"serviceMeshCapture" json:"serviceMeshCapture" default:"[]"` - KernelModule []string `yaml:"kernelModule" json:"kernelModule" default:"[]"` EBPFCapture []string `yaml:"ebpfCapture" json:"ebpfCapture" default:"[]"` } -type KernelModuleConfig struct { - Enabled bool `yaml:"enabled" json:"enabled" default:"false"` - Image string `yaml:"image" json:"image" default:"kubeshark/pf-ring-module:all"` - UnloadOnDestroy bool `yaml:"unloadOnDestroy" json:"unloadOnDestroy" default:"false"` -} - type MetricsConfig struct { Port uint16 `yaml:"port" json:"port" default:"49100"` } @@ -201,53 +243,83 @@ type PcapDumpConfig struct { PcapTimeInterval string `yaml:"timeInterval" json:"timeInterval" default:"1m"` 
PcapMaxTime string `yaml:"maxTime" json:"maxTime" default:"1h"` PcapMaxSize string `yaml:"maxSize" json:"maxSize" default:"500MB"` - PcapSrcDir string `yaml:"pcapSrcDir" json:"pcapSrcDir" default:"pcapdump"` + PcapTime string `yaml:"time" json:"time" default:"time"` + PcapDebug bool `yaml:"debug" json:"debug" default:"false"` + PcapDest string `yaml:"dest" json:"dest" default:""` +} + +type PortMapping struct { + HTTP []uint16 `yaml:"http" json:"http"` + AMQP []uint16 `yaml:"amqp" json:"amqp"` + KAFKA []uint16 `yaml:"kafka" json:"kafka"` + REDIS []uint16 `yaml:"redis" json:"redis"` + LDAP []uint16 `yaml:"ldap" json:"ldap"` + DIAMETER []uint16 `yaml:"diameter" json:"diameter"` +} + +type SecurityContextConfig struct { + Privileged bool `yaml:"privileged" json:"privileged" default:"true"` + AppArmorProfile AppArmorProfileConfig `yaml:"appArmorProfile" json:"appArmorProfile"` + SeLinuxOptions SeLinuxOptionsConfig `yaml:"seLinuxOptions" json:"seLinuxOptions"` + Capabilities CapabilitiesConfig `yaml:"capabilities" json:"capabilities"` +} + +type AppArmorProfileConfig struct { + Type string `yaml:"type" json:"type"` + LocalhostProfile string `yaml:"localhostProfile" json:"localhostProfile"` +} + +type SeLinuxOptionsConfig struct { + Level string `yaml:"level" json:"level"` + Role string `yaml:"role" json:"role"` + Type string `yaml:"type" json:"type"` + User string `yaml:"user" json:"user"` } type TapConfig struct { - Docker DockerConfig `yaml:"docker" json:"docker"` - Proxy ProxyConfig `yaml:"proxy" json:"proxy"` - PodRegexStr string `yaml:"regex" json:"regex" default:".*"` - Namespaces []string `yaml:"namespaces" json:"namespaces" default:"[]"` - ExcludedNamespaces []string `yaml:"excludedNamespaces" json:"excludedNamespaces" default:"[]"` - BpfOverride string `yaml:"bpfOverride" json:"bpfOverride" default:""` - Stopped bool `yaml:"stopped" json:"stopped" default:"true"` - Release ReleaseConfig `yaml:"release" json:"release"` - PersistentStorage bool 
`yaml:"persistentStorage" json:"persistentStorage" default:"false"` - PersistentStorageStatic bool `yaml:"persistentStorageStatic" json:"persistentStorageStatic" default:"false"` - EfsFileSytemIdAndPath string `yaml:"efsFileSytemIdAndPath" json:"efsFileSytemIdAndPath" default:""` - StorageLimit string `yaml:"storageLimit" json:"storageLimit" default:"5000Mi"` - StorageClass string `yaml:"storageClass" json:"storageClass" default:"standard"` - DryRun bool `yaml:"dryRun" json:"dryRun" default:"false"` - Resources ResourcesConfig `yaml:"resources" json:"resources"` - ServiceMesh bool `yaml:"serviceMesh" json:"serviceMesh" default:"true"` - Tls bool `yaml:"tls" json:"tls" default:"true"` - DisableTlsLog bool `yaml:"disableTlsLog" json:"disableTlsLog" default:"false"` - PacketCapture string `yaml:"packetCapture" json:"packetCapture" default:"best"` - IgnoreTainted bool `yaml:"ignoreTainted" json:"ignoreTainted" default:"false"` - Labels map[string]string `yaml:"labels" json:"labels" default:"{}"` - Annotations map[string]string `yaml:"annotations" json:"annotations" default:"{}"` - NodeSelectorTerms []v1.NodeSelectorTerm `yaml:"nodeSelectorTerms" json:"nodeSelectorTerms" default:"[]"` - Auth AuthConfig `yaml:"auth" json:"auth"` - Ingress IngressConfig `yaml:"ingress" json:"ingress"` - IPv6 bool `yaml:"ipv6" json:"ipv6" default:"true"` - Debug bool `yaml:"debug" json:"debug" default:"false"` - KernelModule KernelModuleConfig `yaml:"kernelModule" json:"kernelModule"` - Telemetry TelemetryConfig `yaml:"telemetry" json:"telemetry"` - ResourceGuard ResourceGuardConfig `yaml:"resourceGuard" json:"resourceGuard"` - Sentry SentryConfig `yaml:"sentry" json:"sentry"` - DefaultFilter string `yaml:"defaultFilter" json:"defaultFilter" default:"!dns and !tcp and !udp and !icmp"` - ScriptingDisabled bool `yaml:"scriptingDisabled" json:"scriptingDisabled" default:"false"` - TargetedPodsUpdateDisabled bool `yaml:"targetedPodsUpdateDisabled" json:"targetedPodsUpdateDisabled" 
default:"false"` - PresetFiltersChangingEnabled bool `yaml:"presetFiltersChangingEnabled" json:"presetFiltersChangingEnabled" default:"false"` - RecordingDisabled bool `yaml:"recordingDisabled" json:"recordingDisabled" default:"false"` - StopTrafficCapturingDisabled bool `yaml:"stopTrafficCapturingDisabled" json:"stopTrafficCapturingDisabled" default:"false"` - Capabilities CapabilitiesConfig `yaml:"capabilities" json:"capabilities"` - GlobalFilter string `yaml:"globalFilter" json:"globalFilter"` - EnabledDissectors []string `yaml:"enabledDissectors" json:"enabledDissectors"` - Metrics MetricsConfig `yaml:"metrics" json:"metrics"` - Pprof PprofConfig `yaml:"pprof" json:"pprof"` - Misc MiscConfig `yaml:"misc" json:"misc"` + Docker DockerConfig `yaml:"docker" json:"docker"` + Proxy ProxyConfig `yaml:"proxy" json:"proxy"` + PodRegexStr string `yaml:"regex" json:"regex" default:".*"` + Namespaces []string `yaml:"namespaces" json:"namespaces" default:"[]"` + ExcludedNamespaces []string `yaml:"excludedNamespaces" json:"excludedNamespaces" default:"[]"` + BpfOverride string `yaml:"bpfOverride" json:"bpfOverride" default:""` + Stopped bool `yaml:"stopped" json:"stopped" default:"false"` + Release ReleaseConfig `yaml:"release" json:"release"` + PersistentStorage bool `yaml:"persistentStorage" json:"persistentStorage" default:"false"` + PersistentStorageStatic bool `yaml:"persistentStorageStatic" json:"persistentStorageStatic" default:"false"` + EfsFileSytemIdAndPath string `yaml:"efsFileSytemIdAndPath" json:"efsFileSytemIdAndPath" default:""` + StorageLimit string `yaml:"storageLimit" json:"storageLimit" default:"5000Mi"` + StorageClass string `yaml:"storageClass" json:"storageClass" default:"standard"` + DryRun bool `yaml:"dryRun" json:"dryRun" default:"false"` + DnsConfig DnsConfig `yaml:"dns" json:"dns"` + Resources ResourcesConfig `yaml:"resources" json:"resources"` + Probes ProbesConfig `yaml:"probes" json:"probes"` + ServiceMesh bool `yaml:"serviceMesh" 
json:"serviceMesh" default:"true"` + Tls bool `yaml:"tls" json:"tls" default:"true"` + DisableTlsLog bool `yaml:"disableTlsLog" json:"disableTlsLog" default:"true"` + PacketCapture string `yaml:"packetCapture" json:"packetCapture" default:"best"` + Labels map[string]string `yaml:"labels" json:"labels" default:"{}"` + Annotations map[string]string `yaml:"annotations" json:"annotations" default:"{}"` + NodeSelectorTerms NodeSelectorTermsConfig `yaml:"nodeSelectorTerms" json:"nodeSelectorTerms" default:"{}"` + Tolerations TolerationsConfig `yaml:"tolerations" json:"tolerations" default:"{}"` + Auth AuthConfig `yaml:"auth" json:"auth"` + Ingress IngressConfig `yaml:"ingress" json:"ingress"` + IPv6 bool `yaml:"ipv6" json:"ipv6" default:"true"` + Debug bool `yaml:"debug" json:"debug" default:"false"` + Telemetry TelemetryConfig `yaml:"telemetry" json:"telemetry"` + ResourceGuard ResourceGuardConfig `yaml:"resourceGuard" json:"resourceGuard"` + Sentry SentryConfig `yaml:"sentry" json:"sentry"` + DefaultFilter string `yaml:"defaultFilter" json:"defaultFilter" default:"!dns and !error"` + LiveConfigMapChangesDisabled bool `yaml:"liveConfigMapChangesDisabled" json:"liveConfigMapChangesDisabled" default:"false"` + GlobalFilter string `yaml:"globalFilter" json:"globalFilter" default:""` + EnabledDissectors []string `yaml:"enabledDissectors" json:"enabledDissectors"` + PortMapping PortMapping `yaml:"portMapping" json:"portMapping"` + CustomMacros map[string]string `yaml:"customMacros" json:"customMacros" default:"{\"https\":\"tls and (http or http2)\"}"` + Metrics MetricsConfig `yaml:"metrics" json:"metrics"` + Pprof PprofConfig `yaml:"pprof" json:"pprof"` + Misc MiscConfig `yaml:"misc" json:"misc"` + SecurityContext SecurityContextConfig `yaml:"securityContext" json:"securityContext"` + MountBpf bool `yaml:"mountBpf" json:"mountBpf" default:"true"` } func (config *TapConfig) PodRegex() *regexp.Regexp { diff --git a/go.mod b/go.mod index 26deed0fa..57e6c2c8f 100644 --- a/go.mod 
+++ b/go.mod @@ -1,6 +1,6 @@ module github.com/kubeshark/kubeshark -go 1.20 +go 1.21.1 require ( github.com/creasty/defaults v1.5.2 @@ -9,6 +9,7 @@ require ( github.com/goccy/go-yaml v1.11.2 github.com/google/go-github/v37 v37.0.0 github.com/gorilla/websocket v1.4.2 + github.com/kubeshark/gopacket v1.1.39 github.com/pkg/errors v0.9.1 github.com/rivo/tview v0.0.0-20240818110301-fd649dbf1223 github.com/robertkrimen/otto v0.2.1 @@ -82,7 +83,6 @@ require ( github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/compress v1.16.0 // indirect - github.com/kubeshark/gopacket v1.1.39 // indirect github.com/kubeshark/tracerproto v1.0.0 // indirect github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect diff --git a/go.sum b/go.sum index ffdc205d9..62ed95588 100644 --- a/go.sum +++ b/go.sum @@ -46,6 +46,7 @@ github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= +github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= @@ -59,9 +60,12 @@ github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBa github.com/Masterminds/squirrel v1.5.3 h1:YPpoceAcxuzIljlr5iWpNKaql7hLeG1KLSrhvdHpkZc= github.com/Masterminds/squirrel v1.5.3/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10= github.com/Microsoft/go-winio v0.6.0 
h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg= +github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE= github.com/Microsoft/hcsshim v0.10.0-rc.7 h1:HBytQPxcv8Oy4244zbQbe6hnOnx544eL5QPUqhJldz8= +github.com/Microsoft/hcsshim v0.10.0-rc.7/go.mod h1:ILuwjA+kNW+MrN/w5un7n3mTqkwsFu4Bp05/okFUZlE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs= +github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= github.com/a8m/expect v1.0.0/go.mod h1:4IwSCMumY49ScypDnjNbYEjgVeqy1/U2cEs3Lat96eA= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -72,6 +76,7 @@ github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmV github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 h1:4daAzAu0S6Vi7/lbWECcX0j45yZReDZ56BQsrVBOEEY= github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -81,9 +86,13 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r github.com/bgentry/speakeasy 
v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70= +github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd h1:rFt+Y/IK1aEZkEHchZRSq9OQbsSzIT/OrI8YFFmRIng= +github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b h1:otBG+dV+YK+Soembjv71DPz3uX/V/6MMlSyD9JBQ6kQ= +github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0 h1:nvj0OLI3YqYXer/kZD8Ri1aaunCxIEsOst1BVJswV0o= +github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= @@ -98,9 +107,11 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= +github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= github.com/containerd/containerd v1.7.0 h1:G/ZQr3gMZs6ZT0qPUZ15znx5QSdQdASW11nXTLTM2Pg= github.com/containerd/containerd v1.7.0/go.mod 
h1:QfR7Efgb/6X2BDpTPJRvPTYDE9rsF0FsXX9J8sIs/sc= github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg= +github.com/containerd/continuity v0.3.0/go.mod h1:wJEAIwKOm/pBZuBd0JmeTvnLquTB1Ag8espWhkykbPM= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -113,6 +124,7 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsr github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= +github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/creasty/defaults v1.5.2 h1:/VfB6uxpyp6h0fr7SPp7n8WJBoV8jfxQXPCnkVSjyls= github.com/creasty/defaults v1.5.2/go.mod h1:FPZ+Y0WNrbqOVw+c6av63eyHUAl6pMHZwqLPvXUZGfY= github.com/cyphar/filepath-securejoin v0.2.3 h1:YX6ebbZCZP7VkM3scTTokDgBL2TY741X51MTk3ycuNI= @@ -124,6 +136,7 @@ github.com/denisenkom/go-mssqldb v0.9.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27N github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/distribution/distribution/v3 v3.0.0-20221208165359-362910506bc2 h1:aBfCb7iqHmDEIp6fBvC/hQUddQfg+3qdYjwzaiP9Hnc= +github.com/distribution/distribution/v3 v3.0.0-20221208165359-362910506bc2/go.mod h1:WHNsWjnIn2V1LYOrME7e8KxSeKunYHsxEm4am0BUtcI= github.com/docker/cli v20.10.21+incompatible h1:qVkgyYUnOLQ98LtXBrwd/duVqPT2X4SHndOuGsfwyhU= github.com/docker/cli v20.10.21+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= 
github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8= @@ -135,11 +148,13 @@ github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNk github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= +github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1 h1:ZClxb8laGDf5arXfYcAtECDFgAgHklGI8CxgjHnXKJ4= +github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= github.com/emicklei/go-restful/v3 v3.10.1 h1:rc42Y5YTp7Am7CS630D7JmhRjq4UlEUuEKfrDac4bSQ= github.com/emicklei/go-restful/v3 v3.10.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -158,7 +173,9 @@ github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= +github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/foxcpp/go-mockdns v1.0.0 
h1:7jBqxd3WDWwi/6WhDvacvH1XsN3rOLXyHM1uhvIx6FI= +github.com/foxcpp/go-mockdns v1.0.0/go.mod h1:lgRN6+KxQBawyIghpnl5CezHFGS9VLzvtVlwxvzXTQ4= github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= @@ -166,6 +183,7 @@ github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4 github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU= +github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA= github.com/gdamore/encoding v1.0.0 h1:+7OoQ1Bc6eTm5niUzBa0Ctsh6JbMW6Ra+YNuAtDBdko= github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg= github.com/gdamore/tcell/v2 v2.7.1 h1:TiCcmpWHiAU7F0rA2I3S2Y4mmLmO9KHxJ7E1QhYzQbc= @@ -197,14 +215,18 @@ github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= +github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= +github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= github.com/go-playground/validator/v10 v10.14.0 h1:vgvQWe3XCz3gIeFDm/HnTIbj6UGmg/+t63MyGU2n5js= github.com/go-playground/validator/v10 v10.14.0/go.mod 
h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU= github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg= +github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= github.com/gobuffalo/logger v1.0.6 h1:nnZNpxYo0zx+Aj9RfMPBm+x9zAU2OayFh/xrAWi34HU= github.com/gobuffalo/logger v1.0.6/go.mod h1:J31TBEHR1QLV2683OXTAItYIg8pv2JMHnF/quuAbMjs= github.com/gobuffalo/packd v1.0.1 h1:U2wXfRr4E9DH8IdsDLlRFwTZTK7hLfq9qT/QHXGVe/0= @@ -228,6 +250,7 @@ github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4er github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -256,6 +279,7 @@ github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiu github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= 
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/gomodule/redigo v1.8.2 h1:H5XSIre1MB5NbPYFp+i1NBbb5qN1W8Y8YAQoAYbkm8k= +github.com/gomodule/redigo v1.8.2/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= @@ -300,6 +324,7 @@ github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= @@ -311,6 +336,7 @@ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= +github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux 
v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= @@ -344,6 +370,7 @@ github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= @@ -405,6 +432,7 @@ github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtB github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk= github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q= +github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw= github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= @@ -449,6 +477,7 @@ github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zk github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.25 h1:dFwPR6SfLtrSwgDcIq2bcU/gVutB4sNApq2HBdqcakg= 
+github.com/miekg/dns v1.1.25/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli v1.1.5/go.mod h1:v8+iFts2sPIKUV1ltktPXMCC8fumSKFItNcD2cLtRR4= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= @@ -472,6 +501,7 @@ github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQ github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78= +github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI= github.com/moby/term v0.0.0-20221205130635-1aeaba878587 h1:HfkjXDfhgVaN5rmueG8cL8KKeFNecRCXFhaJ2qZ5SKA= github.com/moby/term v0.0.0-20221205130635-1aeaba878587/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -495,7 +525,9 @@ github.com/nelsam/hel/v2 v2.3.3/go.mod h1:1ZTGfU2PFTOd5mx22i5O0Lc2GY933lQ2wb/ggy github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE= +github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM= github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= +github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec 
v1.1.0-rc2.0.20221005185240-3a7f492d3f1b h1:YWuSjZCQAPM8UUBLkYUk1e+rZcvWHJmFb6i6rM44Xs8= @@ -506,6 +538,7 @@ github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCko github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI= +github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -558,6 +591,7 @@ github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTE github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rs/zerolog v1.28.0 h1:MirSo27VyNi7RJYP3078AA1+Cyzd2GB66qy3aUHvsWY= github.com/rs/zerolog v1.28.0/go.mod h1:NILgTygv/Uej1ra5XxGf82ZFSLk58MFGAUS2o6usyD0= @@ -569,6 +603,7 @@ github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQD github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sergi/go-diff v1.1.0 
h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= +github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= @@ -641,8 +676,11 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1 github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43 h1:+lm10QQTNSBd8DVTNGHx7o/IKu9HYDvLMffDhbyLccI= +github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50 h1:hlE8//ciYMztlGpl/VA+Zm1AcTPHYkHJPbHqE6WJUXE= +github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f h1:ERexzlUfuTvpE74urLSbIQW0Z/6hF9t8U4NsJLaioAY= +github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= @@ -655,6 +693,7 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 
h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/otel v1.14.0 h1:/79Huy8wbf5DnIPhemGB+zEPVwnN6fuQybr/SRXa6hM= go.opentelemetry.io/otel v1.14.0/go.mod h1:o4buv+dJzx8rohcUeRmWUZhqupFvzWis188WlggnNeU= go.opentelemetry.io/otel/trace v1.14.0 h1:wp2Mmvj41tDsyAJXiWDWpfNsOiIyd38fy85pyKcFq/M= @@ -950,6 +989,7 @@ golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y= +golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1092,6 +1132,7 @@ gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= +gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= helm.sh/helm/v3 v3.12.0 h1:rOq2TPVzg5jt4q5ermAZGZFxNW2uQhKjRhBneAutMEM= helm.sh/helm/v3 v3.12.0/go.mod h1:8K/469yxjUMu6BaD2EagCitkPjELUL/l2AgCO142G94= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/helm-chart/Chart.yaml b/helm-chart/Chart.yaml index a5921e338..9d79fcace 100644 --- a/helm-chart/Chart.yaml +++ b/helm-chart/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v2 name: kubeshark 
-version: "52.3.83" +version: "52.5" description: The API Traffic Analyzer for Kubernetes home: https://kubeshark.co keywords: diff --git a/helm-chart/PF_RING.md b/helm-chart/PF_RING.md deleted file mode 100644 index 9d60db79b..000000000 --- a/helm-chart/PF_RING.md +++ /dev/null @@ -1,152 +0,0 @@ -# PF_RING - - - -- [PF\_RING](#pf_ring) - - [Overview](#overview) - - [Loading PF\_RING module on Kubernetes nodes](#loading-pf_ring-module-on-kubernetes-nodes) - - [Pre-built kernel module exists and external egress allowed](#pre-built-kernel-module-exists-and-external-egress-allowed) - - [Pre-built kernel module doesn't exist or external egress isn't allowed](#pre-built-kernel-module-doesnt-exist-or-external-egress-isnt-allowed) - - [Appendix A: PF\_RING kernel module compilation](#appendix-a-pf_ring-kernel-module-compilation) - - [Automated complilation](#automated-complilation) - - [Manual compilation](#manual-compilation) - - - -## Overview - -PF_RING™ is an advanced Linux kernel module and user-space framework designed for high-speed packet processing. It offers a uniform API for packet processing applications, enabling efficient handling of large volumes of network data. - -For comprehensive information on PF_RING™, please visit the [User's Guide]((https://www.ntop.org/guides/pf_ring) and access detailed [API Documentation](http://www.ntop.org/guides/pf_ring_api/files.html). - -## Loading PF_RING module on Kubernetes nodes - -PF_RING kernel module loading is performed via of the `worker` component pod. -The target container `tap.kernelModule.image` must contain `pf_ring.ko` file under path `/opt/lib/modules//pf_ring.ko`. -Kubeshark provides ready to use containers with kernel modules for the most popular kernel versions running in different managed clouds. - -Prior to deploying `kubeshark` with PF_RING enabled, it is essential to verify if a PF_RING kernel module is already built for your kernel version. 
-Kubeshark provides additional CLI tool for this purpose - [pf-ring-compiler](https://github.com/kubeshark/pf-ring-compiler). - -Compatibility verification can be done by running: - -```bash -pfring-compiler compatibility -``` - -This command checks for the availability of kernel modules for the kernel versions running across all nodes in the Kubernetes cluster. - -Example output for a compatible cluster: - -```bash -Node Kernel Version Supported -ip-192-168-77-230.us-west-2.compute.internal 5.10.199-190.747.amzn2.x86_64 true -ip-192-168-34-216.us-west-2.compute.internal 5.10.199-190.747.amzn2.x86_64 true - -Cluster is compatible -``` - -Another option to verify availability of kernel modules is just inspecting available kernel module versions via: - -```bash -curl https://api.kubeshark.co/kernel-modules/meta/versions.jso -``` - -Based on Kubernetes cluster compatibility and external connection capabilities, user has two options: - -1. Use Kubeshark provided container `kubeshark/pf-ring-module` -2. Build custom container with required kernel module version. - -### Pre-built kernel module exists and external egress allowed - -In this case no additional configuration required. -Kubeshark will load PF_RING kernel module from the default `kubeshark/pf-ring-module:all` container. - -### Pre-built kernel module doesn't exist or external egress isn't allowed - -In this case building custom Docker image is required. - -1. Compile PF_RING kernel module for target version - -Skip if you have `pf_ring.ko` for the target kernel version. -Otherwise, follow [Appendix A](#appendix-a-pf_ring-kernel-module-compilation) for details. - -2. Build container - -The same build process Kubeshark has can be reused (follow [pfring-compilier](https://github.com/kubeshark/pf-ring-compiler/tree/main/modules) for details). - -3. 
Configure Helm values - -```yaml -tap: - kernelModule: - image: -``` - - -## Appendix A: PF_RING kernel module compilation - -PF_RING kernel module compilation can be completed automatically or manually. - -### Automated complilation - -In case your Kubernetes workers run supported Linux distribution, `kubeshark` CLI can be used to build PF_RING module: - -```bash -pfring-compiler compile --target -``` - -This command requires: - -- kubectl to be installed and configured with a proper context -- egress connection to Internet available - -This command: - -1. Runs Kubernetes job with build container -2. Waits for job to be completed -3. Downloads `pf-ring-.ko` file into the current folder. -4. Cleans up created job. - -Currently supported distros: - -- Ubuntu -- RHEL 9 -- Amazon Linux 2 - -### Manual compilation - -The process description is based on Ubuntu 22.04 distribution. - -1. Get terminal access to the node with target kernel version -This can be done either via SSH directly to node or with debug container running on the target node: - -```bash -kubectl debug node/ -it --attach=true --image=ubuntu:22.04 -``` - -2. Install build tools and kernel headers - -```bash -apt update -apt install -y gcc build-essential make git wget tar gzip -apt install -y linux-headers-$(uname -r) -``` - -3. Download PF_RING source code - -```bash -wget https://github.com/ntop/PF_RING/archive/refs/tags/8.4.0.tar.gz -tar -xf 8.4.0.tar.gz -cd PF_RING-8.4.0/kernel -``` - -4. Compile the kernel module - -```bash -make KERNEL_SRC=/usr/src/linux-headers-$(uname -r) -``` - -5. Copy `pf_ring.ko` to the local file system. - -Use `scp` or `kubectl cp` depending on type of access(SSH or debug pod). diff --git a/helm-chart/README.md b/helm-chart/README.md index 89df18b06..7a66ce24b 100644 --- a/helm-chart/README.md +++ b/helm-chart/README.md @@ -104,6 +104,20 @@ helm install kubeshark kubeshark/kubeshark \ Please refer to [metrics](./metrics.md) documentation for details. 
+## Override Tag, Tags, Images + +In addition to using a private registry, you can further override the images' tag, specific image tags and specific image names. + +Example for overriding image names: + +```yaml + docker: + overrideImage: + worker: docker.io/kubeshark/worker:v52.3.87 + front: docker.io/kubeshark/front:v52.3.87 + hub: docker.io/kubeshark/hub:v52.3.87 +``` + ## Configuration | Parameter | Description | Default | @@ -114,16 +128,17 @@ Please refer to [metrics](./metrics.md) documentation for details. | `tap.docker.tagLocked` | If `false` - use latest minor tag | `true` | | `tap.docker.imagePullPolicy` | Kubernetes image pull policy | `Always` | | `tap.docker.imagePullSecrets` | Kubernetes secrets to pull the images | `[]` | -| `tap.docker.overrideTag` | DANGER: Used to override specific images, when testing custom features from the Kubeshark team | `""` | +| `tap.docker.overrideImage` | Can be used to directly override image names | `""` | +| `tap.docker.overrideTag` | Can be used to override image tags | `""` | | `tap.proxy.hub.srvPort` | Hub server port. Change if already occupied. | `8898` | -| `tap.proxy.worker.srvPort` | Worker server port. Change if already occupied.| `30001` | +| `tap.proxy.worker.srvPort` | Worker server port. Change if already occupied.| `48999` | | `tap.proxy.front.port` | Front service port. Change if already occupied.| `8899` | | `tap.proxy.host` | Change to 0.0.0.0 top open up to the world. | `127.0.0.1` | | `tap.regex` | Target (process traffic from) pods that match regex | `.*` | | `tap.namespaces` | Target pods in namespaces | `[]` | | `tap.excludedNamespaces` | Exclude pods in namespaces | `[]` | | `tap.bpfOverride` | When using AF_PACKET as a traffic capture backend, override any existing pod targeting rules and set explicit BPF expression (e.g. `net 0.0.0.0/0`). | `[]` | -| `tap.stopped` | Set to `false` to have traffic processing start automatically. 
When set to `true`, traffic processing is stopped by default, resulting in almost no resource consumption (e.g. Kubeshark is dormant). This property can be dynamically control via the dashboard. | `true` |
+| `tap.stopped` | Set to `false` to have traffic processing start automatically. When set to `true`, traffic processing is stopped by default, resulting in almost no resource consumption (e.g. Kubeshark is dormant). This property can be dynamically controlled via the dashboard. | `false` |
 | `tap.release.repo` | URL of the Helm chart repository | `https://helm.kubeshark.co` |
 | `tap.release.name` | Helm release name | `kubeshark` |
 | `tap.release.namespace` | Helm release namespace | `default` |
@@ -133,25 +148,40 @@ Please refer to [metrics](./metrics.md) documentation for details.
 | `tap.storageLimit` | Limit of either the `emptyDir` or `persistentVolumeClaim` | `500Mi` |
 | `tap.storageClass` | Storage class of the `PersistentVolumeClaim` | `standard` |
 | `tap.dryRun` | Preview of all pods matching the regex, without tapping them | `false` |
-| `tap.resources.hub.limits.cpu` | CPU limit for hub | `1000m` |
-| `tap.resources.hub.limits.memory` | Memory limit for hub | `1500Mi` |
+| `tap.dnsConfig.nameservers` | Nameservers to use for DNS resolution | `[]` |
+| `tap.dnsConfig.searches` | Search domains to use for DNS resolution | `[]` |
+| `tap.dnsConfig.options` | DNS options to use for DNS resolution | `[]` |
+| `tap.resources.hub.limits.cpu` | CPU limit for hub | `""` (no limit) |
+| `tap.resources.hub.limits.memory` | Memory limit for hub | `5Gi` |
 | `tap.resources.hub.requests.cpu` | CPU request for hub | `50m` |
 | `tap.resources.hub.requests.memory` | Memory request for hub | `50Mi` |
-| `tap.resources.sniffer.limits.cpu` | CPU limit for sniffer | `1000m` |
-| `tap.resources.sniffer.limits.memory` | Memory limit for sniffer | `1500Mi` |
+| `tap.resources.sniffer.limits.cpu` | CPU limit for sniffer | `""` (no limit) |
+| `tap.resources.sniffer.limits.memory` | 
Memory limit for sniffer | `3Gi` | | `tap.resources.sniffer.requests.cpu` | CPU request for sniffer | `50m` | | `tap.resources.sniffer.requests.memory` | Memory request for sniffer | `50Mi` | -| `tap.resources.tracer.limits.cpu` | CPU limit for tracer | `1000m` | -| `tap.resources.tracer.limits.memory` | Memory limit for tracer | `1500Mi` | +| `tap.resources.tracer.limits.cpu` | CPU limit for tracer | `""` (no limit) | +| `tap.resources.tracer.limits.memory` | Memory limit for tracer | `3Gi` | | `tap.resources.tracer.requests.cpu` | CPU request for tracer | `50m` | | `tap.resources.tracer.requests.memory` | Memory request for tracer | `50Mi` | +| `tap.probes.hub.initialDelaySeconds` | Initial delay before probing the hub | `15` | +| `tap.probes.hub.periodSeconds` | Period between probes for the hub | `10` | +| `tap.probes.hub.successThreshold` | Number of successful probes before considering the hub healthy | `1` | +| `tap.probes.hub.failureThreshold` | Number of failed probes before considering the hub unhealthy | `3` | +| `tap.probes.sniffer.initialDelaySeconds` | Initial delay before probing the sniffer | `15` | +| `tap.probes.sniffer.periodSeconds` | Period between probes for the sniffer | `10` | +| `tap.probes.sniffer.successThreshold` | Number of successful probes before considering the sniffer healthy | `1` | +| `tap.probes.sniffer.failureThreshold` | Number of failed probes before considering the sniffer unhealthy | `3` | | `tap.serviceMesh` | Capture traffic from service meshes like Istio, Linkerd, Consul, etc. 
| `true` | | `tap.tls` | Capture the encrypted/TLS traffic from cryptography libraries like OpenSSL | `true` | -| `tap.disableTlsLog` | Suppress logging for TLS/eBPF | `false` | -| `tap.ignoreTainted` | Whether to ignore tainted nodes | `false` | +| `tap.disableTlsLog` | Suppress logging for TLS/eBPF | `true` | | `tap.labels` | Kubernetes labels to apply to all Kubeshark resources | `{}` | | `tap.annotations` | Kubernetes annotations to apply to all Kubeshark resources | `{}` | -| `tap.nodeSelectorTerms` | Node selector terms | `[{"matchExpressions":[{"key":"kubernetes.io/os","operator":"In","values":["linux"]}]}]` | +| `tap.nodeSelectorTerms.workers` | Node selector terms for workers components | `[{"matchExpressions":[{"key":"kubernetes.io/os","operator":"In","values":["linux"]}]}]` | +| `tap.nodeSelectorTerms.hub` | Node selector terms for hub component | `[{"matchExpressions":[{"key":"kubernetes.io/os","operator":"In","values":["linux"]}]}]` | +| `tap.nodeSelectorTerms.front` | Node selector terms for front-end component | `[{"matchExpressions":[{"key":"kubernetes.io/os","operator":"In","values":["linux"]}]}]` | +| `tap.tolerations.workers` | Tolerations for workers components | `[ {"operator": "Exists", "effect": "NoExecute"}` | +| `tap.tolerations.hub` | Tolerations for hub component | `[]` | +| `tap.tolerations.front` | Tolerations for front-end component | `[]` | | `tap.auth.enabled` | Enable authentication | `false` | | `tap.auth.type` | Authentication type (1 option available: `saml`) | `saml` | | `tap.auth.approvedEmails` | List of approved email addresses for authentication | `[]` | @@ -160,7 +190,7 @@ Please refer to [metrics](./metrics.md) documentation for details. | `tap.auth.saml.x509crt` | A self-signed X.509 `.cert` contents
(effective, if `tap.auth.type = saml`) | `` | | `tap.auth.saml.x509key` | A self-signed X.509 `.key` contents
(effective, if `tap.auth.type = saml`) | `` | | `tap.auth.saml.roleAttribute` | A SAML attribute name corresponding to user's authorization role
(effective, if `tap.auth.type = saml`) | `role` | -| `tap.auth.saml.roles` | A list of SAML authorization roles and their permissions
(effective, if `tap.auth.type = saml`) | `{"admin":{"canDownloadPCAP":true,"canUpdateTargetedPods":true,"canUseScripting":true, "canStopTrafficCapturing":true, "filter":"","showAdminConsoleLink":true}}` | +| `tap.auth.saml.roles` | A list of SAML authorization roles and their permissions
(effective, if `tap.auth.type = saml`) | `{"admin":{"canDownloadPCAP":true,"canUpdateTargetedPods":true,"canUseScripting":true, "scriptingPermissions":{"canSave":true, "canActivate":true, "canDelete":true}, "canStopTrafficCapturing":true, "filter":"","showAdminConsoleLink":true}}` | | `tap.ingress.enabled` | Enable `Ingress` | `false` | | `tap.ingress.className` | Ingress class name | `""` | | `tap.ingress.host` | Host of the `Ingress` | `ks.svc.cluster.local` | @@ -168,17 +198,16 @@ Please refer to [metrics](./metrics.md) documentation for details. | `tap.ingress.annotations` | `Ingress` annotations | `{}` | | `tap.ipv6` | Enable IPv6 support for the front-end | `true` | | `tap.debug` | Enable debug mode | `false` | -| `tap.kernelModule.enabled` | Use PF_RING kernel module([details](PF_RING.md)) | `false` | -| `tap.kernelModule.image` | Container image containing PF_RING kernel module with supported kernel version([details](PF_RING.md)) | "kubeshark/pf-ring-module:all" | -| `tap.kernelModule.unloadOnDestroy` | Create additional container which watches for pod termination and unloads PF_RING kernel module. | `false`| | `tap.telemetry.enabled` | Enable anonymous usage statistics collection | `true` | | `tap.resourceGuard.enabled` | Enable resource guard worker process, which watches RAM/disk usage and enables/disables traffic capture based on available resources | `false` | | `tap.sentry.enabled` | Enable sending of error logs to Sentry | `false` | | `tap.sentry.environment` | Sentry environment to label error logs with | `production` | -| `tap.defaultFilter` | Sets the default dashboard KFL filter (e.g. `http`). By default, this value is set to filter out noisy protocols such as DNS, UDP, ICMP and TCP. The user can easily change this in the Dashboard. You can also change this value to change this behavior. | `"!dns and !tcp and !udp and !icmp"` | +| `tap.defaultFilter` | Sets the default dashboard KFL filter (e.g. `http`). 
By default, this value is set to filter out noisy protocols such as DNS, UDP, ICMP and TCP. The user can easily change this, **temporarily**, in the Dashboard. For a permanent change, you should change this value in the `values.yaml` or `config.yaml` file. | `"!dns and !error"` |
+| `tap.liveConfigMapChangesDisabled` | If set to `true`, all user functionality (scripting, targeting settings, global & default KFL modification, traffic recording, traffic capturing on/off, protocol dissectors) involving dynamic ConfigMap changes from UI will be disabled | `false` |
 | `tap.globalFilter` | Prepends to any KFL filter and can be used to limit what is visible in the dashboard. For example, `redact("request.headers.Authorization")` will redact the appropriate field. Another example `!dns` will not show any DNS traffic. | `""` |
 | `tap.metrics.port` | Pod port used to expose Prometheus metrics | `49100` |
-| `tap.enabledDissectors` | This is an array of strings representing the list of supported protocols. Remove or comment out redundant protocols (e.g., dns).| The default list includes: amqp, dns , http, icmp, kafka, redis,sctp, syscall, tcp, ws. |
+| `tap.enabledDissectors` | This is an array of strings representing the list of supported protocols. Remove or comment out redundant protocols (e.g., dns).| The default list excludes: `udp` and `tcp` |
+| `tap.mountBpf` | BPF filesystem needs to be mounted for eBPF to work properly. This helm value determines whether Kubeshark will attempt to mount the filesystem. This option is not required if the filesystem is already mounted. | `true` |
 | `logs.file` | Logs dump path | `""` |
 | `pcapdump.enabled` | Enable recording of all traffic captured according to other parameters. Whatever Kubeshark captures, considering pod targeting rules, will be stored in pcap files ready to be viewed by tools | `true` |
 | `pcapdump.maxTime` | The time window into the past that will be stored. Older traffic will be discarded. 
| `2h` | @@ -195,7 +224,6 @@ Please refer to [metrics](./metrics.md) documentation for details. | `supportChatEnabled` | Enable real-time support chat channel based on Intercom | `true` | | `serviceMapNewUiEnabled` | Enable new experimental version of Service Map UI | `false` | | `internetConnectivity` | Turns off API requests that are dependant on Internet connectivity such as `telemetry` and `online-support`. | `true` | -| `dissectorsUpdatingEnabled` | Turns off UI for enabling/disabling dissectors | `true` | KernelMapping pairs kernel versions with a DriverContainer image. Kernel versions can be matched diff --git a/helm-chart/templates/02-cluster-role.yaml b/helm-chart/templates/02-cluster-role.yaml index 75a1dd195..7191853c9 100644 --- a/helm-chart/templates/02-cluster-role.yaml +++ b/helm-chart/templates/02-cluster-role.yaml @@ -31,8 +31,19 @@ rules: - namespaces verbs: - get - resourceNames: - - kube-system + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - networkpolicies + verbs: + - get + - list + - watch + - create + - update + - delete --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role diff --git a/helm-chart/templates/04-hub-deployment.yaml b/helm-chart/templates/04-hub-deployment.yaml index 8cf8ea39c..f2755eb04 100644 --- a/helm-chart/templates/04-hub-deployment.yaml +++ b/helm-chart/templates/04-hub-deployment.yaml @@ -31,9 +31,8 @@ spec: - ./hub - -port - "8080" - {{- if .Values.tap.debug }} - - -debug - {{- end }} + - -loglevel + - '{{ .Values.logLevel | default "warning" }}' env: - name: POD_NAME valueFrom: @@ -51,43 +50,100 @@ spec: value: 'https://api.kubeshark.co' - name: PROFILING_ENABLED value: '{{ .Values.tap.pprof.enabled }}' - {{- if .Values.tap.docker.overrideTag.hub }} + {{- if .Values.tap.docker.overrideImage.hub }} + image: '{{ .Values.tap.docker.overrideImage.hub }}' + {{- else if .Values.tap.docker.overrideTag.hub }} image: '{{ .Values.tap.docker.registry }}/hub:{{ .Values.tap.docker.overrideTag.hub }}' {{ 
else }}
 image: '{{ .Values.tap.docker.registry }}/hub:{{ not (eq .Values.tap.docker.tag "") | ternary .Values.tap.docker.tag (include "kubeshark.defaultVersion" .) }}'
 {{- end }}
 imagePullPolicy: {{ .Values.tap.docker.imagePullPolicy }}
- {{- if .Values.tap.docker.imagePullSecrets }}
- imagePullSecrets:
- {{- range .Values.tap.docker.imagePullSecrets }}
- - name: {{ . }}
- {{- end }}
- {{- end }}
 readinessProbe:
- periodSeconds: 1
- failureThreshold: 3
- successThreshold: 1
- initialDelaySeconds: 3
+ periodSeconds: {{ .Values.tap.probes.hub.periodSeconds }}
+ failureThreshold: {{ .Values.tap.probes.hub.failureThreshold }}
+ successThreshold: {{ .Values.tap.probes.hub.successThreshold }}
+ initialDelaySeconds: {{ .Values.tap.probes.hub.initialDelaySeconds }}
 tcpSocket:
 port: 8080
 livenessProbe:
- periodSeconds: 1
- failureThreshold: 3
- successThreshold: 1
- initialDelaySeconds: 3
+ periodSeconds: {{ .Values.tap.probes.hub.periodSeconds }}
+ failureThreshold: {{ .Values.tap.probes.hub.failureThreshold }}
+ successThreshold: {{ .Values.tap.probes.hub.successThreshold }}
+ initialDelaySeconds: {{ .Values.tap.probes.hub.initialDelaySeconds }}
 tcpSocket:
 port: 8080
 resources:
 limits:
+ {{ if ne (toString .Values.tap.resources.hub.limits.cpu) "0" }}
 cpu: {{ .Values.tap.resources.hub.limits.cpu }}
+ {{ end }}
+ {{ if ne (toString .Values.tap.resources.hub.limits.memory) "0" }}
 memory: {{ .Values.tap.resources.hub.limits.memory }}
+ {{ end }}
 requests:
+ {{ if ne (toString .Values.tap.resources.hub.requests.cpu) "0" }}
 cpu: {{ .Values.tap.resources.hub.requests.cpu }}
+ {{ end }}
+ {{ if ne (toString .Values.tap.resources.hub.requests.memory) "0" }}
 memory: {{ .Values.tap.resources.hub.requests.memory }}
+ {{ end }}
 volumeMounts:
 - name: saml-x509-volume
 mountPath: "/etc/saml/x509"
 readOnly: true
+ {{- if .Values.tap.docker.imagePullSecrets }}
+ imagePullSecrets:
+ {{- range .Values.tap.docker.imagePullSecrets }}
+ - name: {{ . 
}} + {{- end }} + {{- end }} +{{- if gt (len .Values.tap.nodeSelectorTerms.hub) 0}} + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + {{- toYaml .Values.tap.nodeSelectorTerms.hub | nindent 12 }} +{{- end }} + {{- if or .Values.tap.dns.nameservers .Values.tap.dns.searches .Values.tap.dns.options }} + dnsConfig: + {{- if .Values.tap.dns.nameservers }} + nameservers: + {{- range .Values.tap.dns.nameservers }} + - {{ . | quote }} + {{- end }} + {{- end }} + {{- if .Values.tap.dns.searches }} + searches: + {{- range .Values.tap.dns.searches }} + - {{ . | quote }} + {{- end }} + {{- end }} + {{- if .Values.tap.dns.options }} + options: + {{- range .Values.tap.dns.options }} + - name: {{ .name | quote }} + {{- if .value }} + value: {{ .value | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.tap.tolerations.hub }} + tolerations: + {{- range .Values.tap.tolerations.hub }} + - key: {{ .key | quote }} + operator: {{ .operator | quote }} + {{- if .value }} + value: {{ .value | quote }} + {{- end }} + {{- if .effect }} + effect: {{ .effect | quote }} + {{- end }} + {{- if .tolerationSeconds }} + tolerationSeconds: {{ .tolerationSeconds }} + {{- end }} + {{- end }} + {{- end }} volumes: - name: saml-x509-volume projected: diff --git a/helm-chart/templates/06-front-deployment.yaml b/helm-chart/templates/06-front-deployment.yaml index 9827fa9ee..f259fcbeb 100644 --- a/helm-chart/templates/06-front-deployment.yaml +++ b/helm-chart/templates/06-front-deployment.yaml @@ -37,20 +37,28 @@ spec: - name: REACT_APP_TIMEZONE value: '{{ not (eq .Values.timezone "") | ternary .Values.timezone " " }}' - name: REACT_APP_SCRIPTING_DISABLED - value: '{{ .Values.tap.scriptingDisabled }}' + value: '{{- if .Values.tap.liveConfigMapChangesDisabled -}} + {{- if .Values.demoModeEnabled -}} + {{ .Values.demoModeEnabled | ternary false true }} + {{- else -}} + true + {{- end }} + {{- else -}} + false + {{- end }}' - name: 
REACT_APP_TARGETED_PODS_UPDATE_DISABLED - value: '{{ .Values.tap.targetedPodsUpdateDisabled }}' + value: '{{ .Values.tap.liveConfigMapChangesDisabled }}' - name: REACT_APP_PRESET_FILTERS_CHANGING_ENABLED - value: '{{ .Values.tap.presetFiltersChangingEnabled }}' + value: '{{ .Values.tap.liveConfigMapChangesDisabled | ternary "false" "true" }}' - name: REACT_APP_BPF_OVERRIDE_DISABLED - value: '{{ eq .Values.tap.packetCapture "ebpf" | ternary "true" "false" }}' + value: '{{ eq .Values.tap.packetCapture "af_packet" | ternary "false" "true" }}' - name: REACT_APP_RECORDING_DISABLED - value: '{{ .Values.tap.recordingDisabled }}' + value: '{{ .Values.tap.liveConfigMapChangesDisabled }}' - name: REACT_APP_STOP_TRAFFIC_CAPTURING_DISABLED - value: '{{- if and .Values.tap.stopTrafficCapturingDisabled .Values.tap.stopped -}} + value: '{{- if and .Values.tap.liveConfigMapChangesDisabled .Values.tap.stopped -}} false {{- else -}} - {{ .Values.tap.stopTrafficCapturingDisabled | ternary "true" "false" }} + {{ .Values.tap.liveConfigMapChangesDisabled | ternary "true" "false" }} {{- end -}}' - name: 'REACT_APP_CLOUD_LICENSE_ENABLED' value: '{{- if or (and .Values.cloudLicenseEnabled (not (empty .Values.license))) (not .Values.internetConnectivity) -}} @@ -58,28 +66,26 @@ spec: {{- else -}} {{ .Values.cloudLicenseEnabled }} {{- end }}' + - name: 'REACT_APP_AI_ASSISTANT_ENABLED' + value: '{{ .Values.aiAssistantEnabled | ternary "true" "false" }}' - name: REACT_APP_SUPPORT_CHAT_ENABLED value: '{{ and .Values.supportChatEnabled .Values.internetConnectivity | ternary "true" "false" }}' - name: REACT_APP_DISSECTORS_UPDATING_ENABLED - value: '{{ .Values.dissectorsUpdatingEnabled | ternary "true" "false" }}' + value: '{{ .Values.tap.liveConfigMapChangesDisabled | ternary "false" "true" }}' - name: REACT_APP_SERVICE_MAP_NEW_UI_ENABLED value: '{{ .Values.serviceMapNewUiEnabled | ternary "true" "false" }}' - name: REACT_APP_SENTRY_ENABLED value: '{{ (include "sentry.enabled" .) 
}}' - name: REACT_APP_SENTRY_ENVIRONMENT value: '{{ .Values.tap.sentry.environment }}' - {{- if .Values.tap.docker.overrideTag.front }} + {{- if .Values.tap.docker.overrideImage.front }} + image: '{{ .Values.tap.docker.overrideImage.front }}' + {{- else if .Values.tap.docker.overrideTag.front }} image: '{{ .Values.tap.docker.registry }}/front:{{ .Values.tap.docker.overrideTag.front }}' {{ else }} image: '{{ .Values.tap.docker.registry }}/front:{{ not (eq .Values.tap.docker.tag "") | ternary .Values.tap.docker.tag (include "kubeshark.defaultVersion" .) }}' {{- end }} imagePullPolicy: {{ .Values.tap.docker.imagePullPolicy }} - {{- if .Values.tap.docker.imagePullSecrets }} - imagePullSecrets: - {{- range .Values.tap.docker.imagePullSecrets }} - - name: {{ . }} - {{- end }} - {{- end }} name: kubeshark-front livenessProbe: periodSeconds: 1 @@ -108,6 +114,59 @@ spec: mountPath: /etc/nginx/conf.d/default.conf subPath: default.conf readOnly: true + {{- if .Values.tap.docker.imagePullSecrets }} + imagePullSecrets: + {{- range .Values.tap.docker.imagePullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- if gt (len .Values.tap.nodeSelectorTerms.front) 0}} + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + {{- toYaml .Values.tap.nodeSelectorTerms.front | nindent 12 }} +{{- end }} + {{- if or .Values.tap.dns.nameservers .Values.tap.dns.searches .Values.tap.dns.options }} + dnsConfig: + {{- if .Values.tap.dns.nameservers }} + nameservers: + {{- range .Values.tap.dns.nameservers }} + - {{ . | quote }} + {{- end }} + {{- end }} + {{- if .Values.tap.dns.searches }} + searches: + {{- range .Values.tap.dns.searches }} + - {{ . 
| quote }} + {{- end }} + {{- end }} + {{- if .Values.tap.dns.options }} + options: + {{- range .Values.tap.dns.options }} + - name: {{ .name | quote }} + {{- if .value }} + value: {{ .value | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.tap.tolerations.front }} + tolerations: + {{- range .Values.tap.tolerations.front }} + - key: {{ .key | quote }} + operator: {{ .operator | quote }} + {{- if .value }} + value: {{ .value | quote }} + {{- end }} + {{- if .effect }} + effect: {{ .effect | quote }} + {{- end }} + {{- if .tolerationSeconds }} + tolerationSeconds: {{ .tolerationSeconds }} + {{- end }} + {{- end }} + {{- end }} volumes: - name: nginx-config configMap: diff --git a/helm-chart/templates/09-worker-daemon-set.yaml b/helm-chart/templates/09-worker-daemon-set.yaml index 298ef5d6f..b7cc221c2 100644 --- a/helm-chart/templates/09-worker-daemon-set.yaml +++ b/helm-chart/templates/09-worker-daemon-set.yaml @@ -25,29 +25,26 @@ spec: name: kubeshark-worker-daemon-set namespace: kubeshark spec: - {{- if .Values.tap.kernelModule.enabled }} + {{- if .Values.tap.mountBpf }} initContainers: - - name: load-pf-ring - image: {{ .Values.tap.kernelModule.image }} - imagePullPolicy: {{ .Values.tap.docker.imagePullPolicy }} - {{- if .Values.tap.docker.imagePullSecrets }} - imagePullSecrets: - {{- range .Values.tap.docker.imagePullSecrets }} - - name: {{ . }} - {{- end }} + - command: + - /bin/sh + - -c + - mkdir -p /sys/fs/bpf && mount | grep -q '/sys/fs/bpf' || mount -t bpf bpf /sys/fs/bpf + {{- if .Values.tap.docker.overrideTag.worker }} + image: '{{ .Values.tap.docker.registry }}/worker:{{ .Values.tap.docker.overrideTag.worker }}{{ include "kubeshark.dockerTagDebugVersion" . }}' + {{ else }} + image: '{{ .Values.tap.docker.registry }}/worker:{{ not (eq .Values.tap.docker.tag "") | ternary .Values.tap.docker.tag (include "kubeshark.defaultVersion" .) }}{{ include "kubeshark.dockerTagDebugVersion" . 
}}' + {{- end }} + imagePullPolicy: {{ .Values.tap.docker.imagePullPolicy }} + name: mount-bpf + securityContext: + privileged: true + volumeMounts: + - mountPath: /sys + name: sys + mountPropagation: Bidirectional {{- end }} - securityContext: - capabilities: - add: - {{- range .Values.tap.capabilities.kernelModule }} - {{ print "- " . }} - {{- end }} - drop: - - ALL - volumeMounts: - - name: lib-modules - mountPath: /lib/modules - {{- end }} containers: - command: - ./worker @@ -59,20 +56,16 @@ spec: - '{{ .Values.tap.metrics.port }}' - -packet-capture - '{{ .Values.tap.packetCapture }}' - {{- if .Values.tap.tls }} - - -unixsocket + - -loglevel + - '{{ .Values.logLevel | default "warning" }}' + {{- if not .Values.tap.tls }} + - -disable-tracer {{- end }} {{- if .Values.tap.serviceMesh }} - -servicemesh {{- end }} - -procfs - /hostproc - {{- if .Values.tap.kernelModule.enabled }} - - -kernel-module - {{- end }} - {{- if ne .Values.tap.packetCapture "ebpf" }} - - -disable-ebpf - {{- end }} {{- if .Values.tap.resourceGuard.enabled }} - -enable-resource-guard {{- end }} @@ -80,10 +73,9 @@ spec: - '{{ .Values.tap.misc.resolutionStrategy }}' - -staletimeout - '{{ .Values.tap.misc.staleTimeoutSeconds }}' - {{- if .Values.tap.debug }} - - -debug - {{- end }} - {{- if .Values.tap.docker.overrideTag.worker }} + {{- if .Values.tap.docker.overrideImage.worker }} + image: '{{ .Values.tap.docker.overrideImage.worker }}' + {{- else if .Values.tap.docker.overrideTag.worker }} image: '{{ .Values.tap.docker.registry }}/worker:{{ .Values.tap.docker.overrideTag.worker }}{{ include "kubeshark.dockerTagDebugVersion" . }}' {{ else }} image: '{{ .Values.tap.docker.registry }}/worker:{{ not (eq .Values.tap.docker.tag "") | ternary .Values.tap.docker.tag (include "kubeshark.defaultVersion" .) }}{{ include "kubeshark.dockerTagDebugVersion" . 
}}' @@ -123,36 +115,78 @@ spec: value: '{{ .Values.tap.sentry.environment }}' resources: limits: + {{ if ne (toString .Values.tap.resources.sniffer.limits.cpu) "0" }} cpu: {{ .Values.tap.resources.sniffer.limits.cpu }} + {{ end }} + {{ if ne (toString .Values.tap.resources.sniffer.limits.memory) "0" }} memory: {{ .Values.tap.resources.sniffer.limits.memory }} + {{ end }} requests: + {{ if ne (toString .Values.tap.resources.sniffer.requests.cpu) "0" }} cpu: {{ .Values.tap.resources.sniffer.requests.cpu }} + {{ end }} + {{ if ne (toString .Values.tap.resources.sniffer.requests.memory) "0" }} memory: {{ .Values.tap.resources.sniffer.requests.memory }} + {{ end }} securityContext: + privileged: {{ .Values.tap.securityContext.privileged }} + {{- if not .Values.tap.securityContext.privileged }} + {{- $aaProfile := .Values.tap.securityContext.appArmorProfile }} + {{- $selinuxOpts := .Values.tap.securityContext.seLinuxOptions }} + {{- if or (ne $aaProfile.type "") (ne $aaProfile.localhostProfile "") }} + appArmorProfile: + {{- if ne $aaProfile.type "" }} + type: {{ $aaProfile.type }} + {{- end }} + {{- if ne $aaProfile.localhostProfile "" }} + localhostProfile: {{ $aaProfile.localhostProfile }} + {{- end }} + {{- end }} + {{- if or (ne $selinuxOpts.level "") (ne $selinuxOpts.role "") (ne $selinuxOpts.type "") (ne $selinuxOpts.user "") }} + seLinuxOptions: + {{- if ne $selinuxOpts.level "" }} + level: {{ $selinuxOpts.level }} + {{- end }} + {{- if ne $selinuxOpts.role "" }} + role: {{ $selinuxOpts.role }} + {{- end }} + {{- if ne $selinuxOpts.type "" }} + type: {{ $selinuxOpts.type }} + {{- end }} + {{- if ne $selinuxOpts.user "" }} + user: {{ $selinuxOpts.user }} + {{- end }} + {{- end }} capabilities: add: - {{- range .Values.tap.capabilities.networkCapture }} + {{- range .Values.tap.securityContext.capabilities.networkCapture }} {{ print "- " . 
}} {{- end }} {{- if .Values.tap.serviceMesh }} - {{- range .Values.tap.capabilities.serviceMeshCapture }} + {{- range .Values.tap.securityContext.capabilities.serviceMeshCapture }} + {{ print "- " . }} + {{- end }} + {{- end }} + {{- if .Values.tap.securityContext.capabilities.ebpfCapture }} + {{- range .Values.tap.securityContext.capabilities.ebpfCapture }} {{ print "- " . }} {{- end }} {{- end }} drop: - ALL + {{- end }} readinessProbe: - periodSeconds: 1 - failureThreshold: 3 - successThreshold: 1 - initialDelaySeconds: 5 + periodSeconds: {{ .Values.tap.probes.sniffer.periodSeconds }} + failureThreshold: {{ .Values.tap.probes.sniffer.failureThreshold }} + successThreshold: {{ .Values.tap.probes.sniffer.successThreshold }} + initialDelaySeconds: {{ .Values.tap.probes.sniffer.initialDelaySeconds }} tcpSocket: port: {{ .Values.tap.proxy.worker.srvPort }} livenessProbe: - periodSeconds: 1 - failureThreshold: 3 - successThreshold: 1 - initialDelaySeconds: 5 + periodSeconds: {{ .Values.tap.probes.sniffer.periodSeconds }} + failureThreshold: {{ .Values.tap.probes.sniffer.failureThreshold }} + successThreshold: {{ .Values.tap.probes.sniffer.successThreshold }} + initialDelaySeconds: {{ .Values.tap.probes.sniffer.initialDelaySeconds }} tcpSocket: port: {{ .Values.tap.proxy.worker.srvPort }} volumeMounts: @@ -162,33 +196,14 @@ spec: - mountPath: /sys name: sys readOnly: true + mountPropagation: HostToContainer - mountPath: /app/data name: data - {{- if and (eq .Values.tap.kernelModule.enabled true) (eq .Values.tap.kernelModule.unloadOnDestroy true) }} - - name: unload-pf-ring - image: {{ .Values.tap.kernelModule.image }} - command: ["/bin/sh"] - args: ["-c", "trap 'rmmod pf_ring && sleep 3' SIGTERM; while true; do sleep 1; done"] - securityContext: - capabilities: - add: - {{- range .Values.tap.capabilities.kernelModule }} - {{ print "- " . 
}} - {{- end }} - drop: - - ALL - {{- end }} {{- if .Values.tap.tls }} - command: - ./tracer - -procfs - /hostproc - {{- if ne .Values.tap.packetCapture "ebpf" }} - - -disable-ebpf - {{- end }} - {{- if .Values.tap.debug }} - - -debug - {{- end }} {{- if .Values.tap.disableTlsLog }} - -disable-tls-log {{- end }} @@ -196,18 +211,14 @@ spec: - -port - '{{ add .Values.tap.proxy.worker.srvPort 1 }}' {{- end }} + - -loglevel + - '{{ .Values.logLevel | default "warning" }}' {{- if .Values.tap.docker.overrideTag.worker }} image: '{{ .Values.tap.docker.registry }}/worker:{{ .Values.tap.docker.overrideTag.worker }}{{ include "kubeshark.dockerTagDebugVersion" . }}' {{ else }} image: '{{ .Values.tap.docker.registry }}/worker:{{ not (eq .Values.tap.docker.tag "") | ternary .Values.tap.docker.tag (include "kubeshark.defaultVersion" .) }}{{ include "kubeshark.dockerTagDebugVersion" . }}' {{- end }} imagePullPolicy: {{ .Values.tap.docker.imagePullPolicy }} - {{- if .Values.tap.docker.imagePullSecrets }} - imagePullSecrets: - {{- range .Values.tap.docker.imagePullSecrets }} - - name: {{ . 
}} - {{- end }} - {{- end }} name: tracer env: - name: POD_NAME @@ -226,22 +237,59 @@ spec: value: '{{ .Values.tap.sentry.environment }}' resources: limits: + {{ if ne (toString .Values.tap.resources.tracer.limits.cpu) "0" }} cpu: {{ .Values.tap.resources.tracer.limits.cpu }} + {{ end }} + {{ if ne (toString .Values.tap.resources.tracer.limits.memory) "0" }} memory: {{ .Values.tap.resources.tracer.limits.memory }} + {{ end }} requests: + {{ if ne (toString .Values.tap.resources.tracer.requests.cpu) "0" }} cpu: {{ .Values.tap.resources.tracer.requests.cpu }} + {{ end }} + {{ if ne (toString .Values.tap.resources.tracer.requests.memory) "0" }} memory: {{ .Values.tap.resources.tracer.requests.memory }} + {{ end }} securityContext: + privileged: {{ .Values.tap.securityContext.privileged }} + {{- if not .Values.tap.securityContext.privileged }} + {{- $aaProfile := .Values.tap.securityContext.appArmorProfile }} + {{- $selinuxOpts := .Values.tap.securityContext.seLinuxOptions }} + {{- if or (ne $aaProfile.type "") (ne $aaProfile.localhostProfile "") }} + appArmorProfile: + {{- if ne $aaProfile.type "" }} + type: {{ $aaProfile.type }} + {{- end }} + {{- if ne $aaProfile.localhostProfile "" }} + localhostProfile: {{ $aaProfile.localhostProfile }} + {{- end }} + {{- end }} + {{- if or (ne $selinuxOpts.level "") (ne $selinuxOpts.role "") (ne $selinuxOpts.type "") (ne $selinuxOpts.user "") }} + seLinuxOptions: + {{- if ne $selinuxOpts.level "" }} + level: {{ $selinuxOpts.level }} + {{- end }} + {{- if ne $selinuxOpts.role "" }} + role: {{ $selinuxOpts.role }} + {{- end }} + {{- if ne $selinuxOpts.type "" }} + type: {{ $selinuxOpts.type }} + {{- end }} + {{- if ne $selinuxOpts.user "" }} + user: {{ $selinuxOpts.user }} + {{- end }} + {{- end }} capabilities: add: - {{- range .Values.tap.capabilities.ebpfCapture }} + {{- range .Values.tap.securityContext.capabilities.ebpfCapture }} {{ print "- " . 
}} {{- end }} - {{- range .Values.tap.capabilities.networkCapture }} + {{- range .Values.tap.securityContext.capabilities.networkCapture }} {{ print "- " . }} {{- end }} drop: - ALL + {{- end }} volumeMounts: - mountPath: /hostproc name: proc @@ -249,6 +297,7 @@ spec: - mountPath: /sys name: sys readOnly: true + mountPropagation: HostToContainer - mountPath: /app/data name: data - mountPath: /etc/os-release @@ -262,21 +311,59 @@ spec: dnsPolicy: ClusterFirstWithHostNet hostNetwork: true serviceAccountName: {{ include "kubeshark.serviceAccountName" . }} - terminationGracePeriodSeconds: 0 + {{- if .Values.tap.tolerations.workers }} tolerations: - - effect: NoExecute - operator: Exists -{{- if not .Values.tap.ignoreTainted }} - - effect: NoSchedule - operator: Exists -{{- end }} -{{- if gt (len .Values.tap.nodeSelectorTerms) 0}} + {{- range .Values.tap.tolerations.workers }} + - key: {{ .key | quote }} + operator: {{ .operator | quote }} + {{- if .value }} + value: {{ .value | quote }} + {{- end }} + {{- if .effect }} + effect: {{ .effect | quote }} + {{- end }} + {{- if .tolerationSeconds }} + tolerationSeconds: {{ .tolerationSeconds }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.tap.docker.imagePullSecrets }} + imagePullSecrets: + {{- range .Values.tap.docker.imagePullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- if gt (len .Values.tap.nodeSelectorTerms.workers) 0}} affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: nodeSelectorTerms: - {{- toYaml .Values.tap.nodeSelectorTerms | nindent 12 }} + {{- toYaml .Values.tap.nodeSelectorTerms.workers | nindent 12 }} {{- end }} + {{- if or .Values.tap.dns.nameservers .Values.tap.dns.searches .Values.tap.dns.options }} + dnsConfig: + {{- if .Values.tap.dns.nameservers }} + nameservers: + {{- range .Values.tap.dns.nameservers }} + - {{ . | quote }} + {{- end }} + {{- end }} + {{- if .Values.tap.dns.searches }} + searches: + {{- range .Values.tap.dns.searches }} + - {{ . 
| quote }} + {{- end }} + {{- end }} + {{- if .Values.tap.dns.options }} + options: + {{- range .Values.tap.dns.options }} + - name: {{ .name | quote }} + {{- if .value }} + value: {{ .value | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} volumes: - hostPath: path: /proc diff --git a/helm-chart/templates/12-config-map.yaml b/helm-chart/templates/12-config-map.yaml index 07aeff8e4..175a1b03b 100644 --- a/helm-chart/templates/12-config-map.yaml +++ b/helm-chart/templates/12-config-map.yaml @@ -13,6 +13,7 @@ data: BPF_OVERRIDE: '{{ .Values.tap.bpfOverride }}' STOPPED: '{{ .Values.tap.stopped | ternary "true" "false" }}' SCRIPTING_SCRIPTS: '{}' + SCRIPTING_ACTIVE_SCRIPTS: '{{ gt (len .Values.scripting.active) 0 | ternary (join "," .Values.scripting.active) "" }}' INGRESS_ENABLED: '{{ .Values.tap.ingress.enabled }}' INGRESS_HOST: '{{ .Values.tap.ingress.host }}' PROXY_FRONT_PORT: '{{ .Values.tap.proxy.front.port }}' @@ -26,14 +27,22 @@ data: AUTH_SAML_ROLE_ATTRIBUTE: '{{ .Values.tap.auth.saml.roleAttribute }}' AUTH_SAML_ROLES: '{{ .Values.tap.auth.saml.roles | toJson }}' TELEMETRY_DISABLED: '{{ not .Values.internetConnectivity | ternary "true" (not .Values.tap.telemetry.enabled | ternary "true" "false") }}' - SCRIPTING_DISABLED: '{{ .Values.tap.scriptingDisabled | ternary "true" "" }}' - TARGETED_PODS_UPDATE_DISABLED: '{{ .Values.tap.targetedPodsUpdateDisabled | ternary "true" "" }}' - PRESET_FILTERS_CHANGING_ENABLED: '{{ .Values.tap.presetFiltersChangingEnabled | ternary "true" "" }}' - RECORDING_DISABLED: '{{ .Values.tap.recordingDisabled | ternary "true" "" }}' - STOP_TRAFFIC_CAPTURING_DISABLED: '{{- if and .Values.tap.stopTrafficCapturingDisabled .Values.tap.stopped -}} + SCRIPTING_DISABLED: '{{- if .Values.tap.liveConfigMapChangesDisabled -}} + {{- if .Values.demoModeEnabled -}} + {{ .Values.demoModeEnabled | ternary false true }} + {{- else -}} + true + {{- end }} + {{- else -}} + false + {{- end }}' + TARGETED_PODS_UPDATE_DISABLED: '{{ 
.Values.tap.liveConfigMapChangesDisabled | ternary "true" "" }}' + PRESET_FILTERS_CHANGING_ENABLED: '{{ .Values.tap.liveConfigMapChangesDisabled | ternary "false" "true" }}' + RECORDING_DISABLED: '{{ .Values.tap.liveConfigMapChangesDisabled | ternary "true" "" }}' + STOP_TRAFFIC_CAPTURING_DISABLED: '{{- if and .Values.tap.liveConfigMapChangesDisabled .Values.tap.stopped -}} false {{- else -}} - {{ .Values.tap.stopTrafficCapturingDisabled | ternary "true" "false" }} + {{ .Values.tap.liveConfigMapChangesDisabled | ternary "true" "false" }} {{- end }}' GLOBAL_FILTER: {{ include "kubeshark.escapeDoubleQuotes" .Values.tap.globalFilter | quote }} DEFAULT_FILTER: {{ include "kubeshark.escapeDoubleQuotes" .Values.tap.defaultFilter | quote }} @@ -47,12 +56,14 @@ data: {{- else -}} {{ .Values.cloudLicenseEnabled }} {{- end }}' + AI_ASSISTANT_ENABLED: '{{ .Values.aiAssistantEnabled | ternary "true" "false" }}' DUPLICATE_TIMEFRAME: '{{ .Values.tap.misc.duplicateTimeframe }}' ENABLED_DISSECTORS: '{{ gt (len .Values.tap.enabledDissectors) 0 | ternary (join "," .Values.tap.enabledDissectors) "" }}' - DISSECTORS_UPDATING_ENABLED: '{{ .Values.dissectorsUpdatingEnabled | ternary "true" "false" }}' + CUSTOM_MACROS: '{{ toJson .Values.tap.customMacros }}' + DISSECTORS_UPDATING_ENABLED: '{{ .Values.tap.liveConfigMapChangesDisabled | ternary "false" "true" }}' DETECT_DUPLICATES: '{{ .Values.tap.misc.detectDuplicates | ternary "true" "false" }}' PCAP_DUMP_ENABLE: '{{ .Values.pcapdump.enabled }}' PCAP_TIME_INTERVAL: '{{ .Values.pcapdump.timeInterval }}' PCAP_MAX_TIME: '{{ .Values.pcapdump.maxTime }}' PCAP_MAX_SIZE: '{{ .Values.pcapdump.maxSize }}' - PCAP_SRC_DIR: '{{ .Values.pcapdump.pcapSrcDir }}' + PORT_MAPPING: '{{ toJson .Values.tap.portMapping }}' diff --git a/helm-chart/templates/16-hub-service-metrics.yaml b/helm-chart/templates/16-hub-service-metrics.yaml new file mode 100644 index 000000000..cd26ba712 --- /dev/null +++ b/helm-chart/templates/16-hub-service-metrics.yaml @@ -0,0 
+1,23 @@ +--- +kind: Service +apiVersion: v1 +metadata: + labels: + {{- include "kubeshark.labels" . | nindent 4 }} + annotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '9100' + {{- if .Values.tap.annotations }} + {{- toYaml .Values.tap.annotations | nindent 4 }} + {{- end }} + name: kubeshark-hub-metrics + namespace: {{ .Release.Namespace }} +spec: + selector: + app.kubeshark.co/app: hub + {{- include "kubeshark.labels" . | nindent 4 }} + ports: + - name: metrics + protocol: TCP + port: 9100 + targetPort: 9100 diff --git a/helm-chart/templates/16-network-policies.yaml b/helm-chart/templates/17-network-policies.yaml similarity index 96% rename from helm-chart/templates/16-network-policies.yaml rename to helm-chart/templates/17-network-policies.yaml index a412cde2a..276acd2db 100644 --- a/helm-chart/templates/16-network-policies.yaml +++ b/helm-chart/templates/17-network-policies.yaml @@ -20,6 +20,9 @@ spec: - ports: - protocol: TCP port: 8080 + - ports: + - protocol: TCP + port: 9100 egress: - {} --- diff --git a/helm-chart/templates/NOTES.txt b/helm-chart/templates/NOTES.txt index 1dfbf0960..8b91df42d 100644 --- a/helm-chart/templates/NOTES.txt +++ b/helm-chart/templates/NOTES.txt @@ -2,26 +2,36 @@ Thank you for installing {{ title .Chart.Name }}. 
Registry: {{ .Values.tap.docker.registry }} Tag: {{ not (eq .Values.tap.docker.tag "") | ternary .Values.tap.docker.tag (printf "v%s" .Chart.Version) }} - {{- if .Values.tap.docker.overrideTag.worker }} Overridden worker tag: {{ .Values.tap.docker.overrideTag.worker }} -{{ end }} - +{{- end }} {{- if .Values.tap.docker.overrideTag.hub }} Overridden hub tag: {{ .Values.tap.docker.overrideTag.hub }} -{{ end }} - +{{- end }} {{- if .Values.tap.docker.overrideTag.front }} Overridden front tag: {{ .Values.tap.docker.overrideTag.front }} -{{ end }} +{{- end }} +{{- if .Values.tap.docker.overrideImage.worker }} +Overridden worker image: {{ .Values.tap.docker.overrideImage.worker }} +{{- end }} +{{- if .Values.tap.docker.overrideImage.hub }} +Overridden hub image: {{ .Values.tap.docker.overrideImage.hub }} +{{- end }} +{{- if .Values.tap.docker.overrideImage.front }} +Overridden front image: {{ .Values.tap.docker.overrideImage.front }} +{{- end }} Your deployment has been successful. The release is named `{{ .Release.Name }}` and it has been deployed in the `{{ .Release.Namespace }}` namespace. -{{- if .Values.tap.telemetry.enabled }} -Notice: Telemetry is enabled. Kubeshark will collect anonymous usage statistics. -{{ end }} +Notices: +{{- if .Values.supportChatEnabled}} +- Support chat using Intercom is enabled. It can be disabled using `--set supportChatEnabled=false` +{{- end }} +{{- if eq .Values.license ""}} +- No license key was detected. You can get your license key from https://console.kubeshark.co/. +{{- end }} -{{- if .Values.tap.ingress.enabled }} +{{ if .Values.tap.ingress.enabled }} You can now access the application through the following URL: http{{ if .Values.tap.ingress.tls }}s{{ end }}://{{ .Values.tap.ingress.host }} @@ -36,4 +46,4 @@ To access the application, follow these steps: 2. 
Once port forwarding is done, you can access the application by visiting the following URL in your web browser: http://0.0.0.0:8899 -{{ end }} +{{- end }} diff --git a/helm-chart/values.yaml b/helm-chart/values.yaml index 522ec4b91..b5646ba16 100644 --- a/helm-chart/values.yaml +++ b/helm-chart/values.yaml @@ -6,13 +6,17 @@ tap: tagLocked: true imagePullPolicy: Always imagePullSecrets: [] + overrideImage: + worker: "" + hub: "" + front: "" overrideTag: worker: "" hub: "" front: "" proxy: worker: - srvPort: 30001 + srvPort: 48999 hub: srvPort: 8898 front: @@ -22,7 +26,7 @@ tap: namespaces: [] excludedNamespaces: [] bpfOverride: "" - stopped: true + stopped: false release: repo: https://helm.kubeshark.co name: kubeshark @@ -33,41 +37,74 @@ tap: storageLimit: 5000Mi storageClass: standard dryRun: false + dns: + nameservers: [] + searches: [] + options: [] resources: hub: limits: - cpu: 1000m - memory: 1500Mi + cpu: "0" + memory: 5Gi requests: cpu: 50m memory: 50Mi sniffer: limits: - cpu: 1000m - memory: 1500Mi + cpu: "0" + memory: 5Gi requests: cpu: 50m memory: 50Mi tracer: limits: - cpu: 1000m - memory: 1500Mi + cpu: "0" + memory: 5Gi requests: cpu: 50m memory: 50Mi + probes: + hub: + initialDelaySeconds: 15 + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 3 + sniffer: + initialDelaySeconds: 15 + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 3 serviceMesh: true tls: true - disableTlsLog: false + disableTlsLog: true packetCapture: best - ignoreTainted: false labels: {} annotations: {} nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/os - operator: In - values: - - linux + hub: + - matchExpressions: + - key: kubernetes.io/os + operator: In + values: + - linux + workers: + - matchExpressions: + - key: kubernetes.io/os + operator: In + values: + - linux + front: + - matchExpressions: + - key: kubernetes.io/os + operator: In + values: + - linux + tolerations: + hub: [] + workers: + - operator: Exists + effect: NoExecute + front: 
[] auth: enabled: false type: saml @@ -81,6 +118,10 @@ tap: filter: "" canDownloadPCAP: true canUseScripting: true + scriptingPermissions: + canSave: true + canActivate: true + canDelete: true canUpdateTargetedPods: true canStopTrafficCapturing: true showAdminConsoleLink: true @@ -92,10 +133,6 @@ tap: annotations: {} ipv6: true debug: false - kernelModule: - enabled: false - image: kubeshark/pf-ring-module:all - unloadOnDestroy: false telemetry: enabled: true resourceGuard: @@ -103,27 +140,8 @@ tap: sentry: enabled: false environment: production - defaultFilter: "!dns and !tcp and !udp and !icmp" - scriptingDisabled: false - targetedPodsUpdateDisabled: false - presetFiltersChangingEnabled: false - recordingDisabled: false - stopTrafficCapturingDisabled: false - capabilities: - networkCapture: - - NET_RAW - - NET_ADMIN - serviceMeshCapture: - - SYS_ADMIN - - SYS_PTRACE - - DAC_OVERRIDE - kernelModule: - - SYS_MODULE - ebpfCapture: - - SYS_ADMIN - - SYS_PTRACE - - SYS_RESOURCE - - IPC_LOCK + defaultFilter: "!dns and !error" + liveConfigMapChangesDisabled: false globalFilter: "" enabledDissectors: - amqp @@ -134,10 +152,28 @@ tap: - redis - sctp - syscall - - tcp - - udp - ws - - tls + - ldap + - radius + - diameter + portMapping: + http: + - 80 + - 443 + - 8080 + amqp: + - 5671 + - 5672 + kafka: + - 9092 + redis: + - 6379 + ldap: + - 389 + diameter: + - 3868 + customMacros: + https: tls and (http or http2) metrics: port: 49100 pprof: @@ -155,6 +191,30 @@ tap: duplicateTimeframe: 200ms detectDuplicates: false staleTimeoutSeconds: 30 + securityContext: + privileged: true + appArmorProfile: + type: "" + localhostProfile: "" + seLinuxOptions: + level: "" + role: "" + type: "" + user: "" + capabilities: + networkCapture: + - NET_RAW + - NET_ADMIN + serviceMeshCapture: + - SYS_ADMIN + - SYS_PTRACE + - DAC_OVERRIDE + ebpfCapture: + - SYS_ADMIN + - SYS_PTRACE + - SYS_RESOURCE + - IPC_LOCK + mountBpf: true logs: file: "" grep: "" @@ -163,7 +223,9 @@ pcapdump: timeInterval: 1m 
maxTime: 1h maxSize: 500MB - pcapSrcDir: pcapdump + time: time + debug: false + dest: "" kube: configPath: "" context: "" @@ -171,12 +233,17 @@ dumpLogs: false headless: false license: "" cloudLicenseEnabled: true +aiAssistantEnabled: false +demoModeEnabled: false supportChatEnabled: true serviceMapNewUiEnabled: false internetConnectivity: true -dissectorsUpdatingEnabled: true scripting: env: {} source: "" + sources: [] watchScripts: true + active: [] + console: true timezone: "" +logLevel: warning diff --git a/kubernetes/config.go b/kubernetes/config.go index b8f1e1db1..14d8f0c6c 100644 --- a/kubernetes/config.go +++ b/kubernetes/config.go @@ -3,6 +3,8 @@ package kubernetes import ( "context" "encoding/json" + "slices" + "strings" "github.com/kubeshark/kubeshark/config" "github.com/kubeshark/kubeshark/misc" @@ -26,6 +28,7 @@ const ( CONFIG_AUTH_TYPE = "AUTH_TYPE" CONFIG_AUTH_SAML_IDP_METADATA_URL = "AUTH_SAML_IDP_METADATA_URL" CONFIG_SCRIPTING_SCRIPTS = "SCRIPTING_SCRIPTS" + CONFIG_SCRIPTING_ACTIVE_SCRIPTS = "SCRIPTING_ACTIVE_SCRIPTS" CONFIG_PCAP_DUMP_ENABLE = "PCAP_DUMP_ENABLE" CONFIG_TIME_INTERVAL = "TIME_INTERVAL" CONFIG_MAX_TIME = "MAX_TIME" @@ -81,7 +84,16 @@ func SetConfig(provider *Provider, key string, value string) (updated bool, err _, err = provider.clientSet.CoreV1().ConfigMaps(config.Config.Tap.Release.Namespace).Update(context.TODO(), configMap, metav1.UpdateOptions{}) if err == nil { if updated { - log.Info().Str("config", key).Str("value", value).Msg("Updated:") + log.Info(). + Str("config", key). + Str("value", func() string { + if len(value) > 10 { + return value[:10] + } + return value + }()). + Int("length", len(value)). + Msg("Updated. 
Printing only 10 first characters of value:") } } else { log.Error().Str("config", key).Err(err).Send() @@ -99,3 +111,29 @@ func ConfigGetScripts(provider *Provider) (scripts map[int64]misc.ConfigMapScrip err = json.Unmarshal([]byte(data), &scripts) return } + +func IsActiveScript(provider *Provider, title string) bool { + configActiveScripts, err := GetConfig(provider, CONFIG_SCRIPTING_ACTIVE_SCRIPTS) + if err != nil { + return false + } + return strings.Contains(configActiveScripts, title) +} + +func DeleteActiveScriptByTitle(provider *Provider, title string) (err error) { + configActiveScripts, err := GetConfig(provider, CONFIG_SCRIPTING_ACTIVE_SCRIPTS) + if err != nil { + return err + } + activeScripts := strings.Split(configActiveScripts, ",") + + idx := slices.Index(activeScripts, title) + if idx != -1 { + activeScripts = slices.Delete(activeScripts, idx, idx+1) + _, err = SetConfig(provider, CONFIG_SCRIPTING_ACTIVE_SCRIPTS, strings.Join(activeScripts, ",")) + if err != nil { + return err + } + } + return nil +} diff --git a/kubernetes/provider.go b/kubernetes/provider.go index a4f66d50c..721ff5f42 100644 --- a/kubernetes/provider.go +++ b/kubernetes/provider.go @@ -247,6 +247,10 @@ func (provider *Provider) GetNamespaces() (namespaces []string) { return } +func (provider *Provider) GetClientSet() *kubernetes.Clientset { + return provider.clientSet +} + func getClientSet(config *rest.Config) (*kubernetes.Clientset, error) { clientSet, err := kubernetes.NewForConfig(config) if err != nil { diff --git a/manifests/complete.yaml b/manifests/complete.yaml index 6f2fb6040..6ea91458f 100644 --- a/manifests/complete.yaml +++ b/manifests/complete.yaml @@ -1,13 +1,13 @@ --- -# Source: kubeshark/templates/16-network-policies.yaml +# Source: kubeshark/templates/17-network-policies.yaml apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: labels: - helm.sh/chart: kubeshark-52.3.83 + helm.sh/chart: kubeshark-52.5 app.kubernetes.io/name: kubeshark 
app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "52.3.83" + app.kubernetes.io/version: "52.5" app.kubernetes.io/managed-by: Helm annotations: name: kubeshark-hub-network-policy @@ -23,18 +23,21 @@ spec: - ports: - protocol: TCP port: 8080 + - ports: + - protocol: TCP + port: 9100 egress: - {} --- -# Source: kubeshark/templates/16-network-policies.yaml +# Source: kubeshark/templates/17-network-policies.yaml apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: labels: - helm.sh/chart: kubeshark-52.3.83 + helm.sh/chart: kubeshark-52.5 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "52.3.83" + app.kubernetes.io/version: "52.5" app.kubernetes.io/managed-by: Helm annotations: name: kubeshark-front-network-policy @@ -53,15 +56,15 @@ spec: egress: - {} --- -# Source: kubeshark/templates/16-network-policies.yaml +# Source: kubeshark/templates/17-network-policies.yaml apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: labels: - helm.sh/chart: kubeshark-52.3.83 + helm.sh/chart: kubeshark-52.5 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "52.3.83" + app.kubernetes.io/version: "52.5" app.kubernetes.io/managed-by: Helm annotations: name: kubeshark-worker-network-policy @@ -76,7 +79,7 @@ spec: ingress: - ports: - protocol: TCP - port: 30001 + port: 48999 - protocol: TCP port: 49100 egress: @@ -87,10 +90,10 @@ apiVersion: v1 kind: ServiceAccount metadata: labels: - helm.sh/chart: kubeshark-52.3.83 + helm.sh/chart: kubeshark-52.5 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "52.3.83" + app.kubernetes.io/version: "52.5" app.kubernetes.io/managed-by: Helm annotations: name: kubeshark-service-account @@ -104,10 +107,10 @@ metadata: namespace: default labels: app.kubeshark.co/app: hub - helm.sh/chart: kubeshark-52.3.83 + helm.sh/chart: kubeshark-52.5 app.kubernetes.io/name: 
kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "52.3.83" + app.kubernetes.io/version: "52.5" app.kubernetes.io/managed-by: Helm stringData: LICENSE: '' @@ -121,10 +124,10 @@ metadata: namespace: default labels: app.kubeshark.co/app: hub - helm.sh/chart: kubeshark-52.3.83 + helm.sh/chart: kubeshark-52.5 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "52.3.83" + app.kubernetes.io/version: "52.5" app.kubernetes.io/managed-by: Helm stringData: AUTH_SAML_X509_CRT: | @@ -137,10 +140,10 @@ metadata: namespace: default labels: app.kubeshark.co/app: hub - helm.sh/chart: kubeshark-52.3.83 + helm.sh/chart: kubeshark-52.5 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "52.3.83" + app.kubernetes.io/version: "52.5" app.kubernetes.io/managed-by: Helm stringData: AUTH_SAML_X509_KEY: | @@ -152,10 +155,10 @@ metadata: name: kubeshark-nginx-config-map namespace: default labels: - helm.sh/chart: kubeshark-52.3.83 + helm.sh/chart: kubeshark-52.5 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "52.3.83" + app.kubernetes.io/version: "52.5" app.kubernetes.io/managed-by: Helm data: default.conf: | @@ -216,18 +219,19 @@ metadata: namespace: default labels: app.kubeshark.co/app: hub - helm.sh/chart: kubeshark-52.3.83 + helm.sh/chart: kubeshark-52.5 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "52.3.83" + app.kubernetes.io/version: "52.5" app.kubernetes.io/managed-by: Helm data: POD_REGEX: '.*' NAMESPACES: '' EXCLUDED_NAMESPACES: '' BPF_OVERRIDE: '' - STOPPED: 'true' + STOPPED: 'false' SCRIPTING_SCRIPTS: '{}' + SCRIPTING_ACTIVE_SCRIPTS: '' INGRESS_ENABLED: 'false' INGRESS_HOST: 'ks.svc.cluster.local' PROXY_FRONT_PORT: '8899' @@ -235,40 +239,42 @@ data: AUTH_TYPE: 'oidc' AUTH_SAML_IDP_METADATA_URL: '' AUTH_SAML_ROLE_ATTRIBUTE: 'role' - 
AUTH_SAML_ROLES: '{"admin":{"canDownloadPCAP":true,"canStopTrafficCapturing":true,"canUpdateTargetedPods":true,"canUseScripting":true,"filter":"","showAdminConsoleLink":true}}' + AUTH_SAML_ROLES: '{"admin":{"canDownloadPCAP":true,"canStopTrafficCapturing":true,"canUpdateTargetedPods":true,"canUseScripting":true,"filter":"","scriptingPermissions":{"canActivate":true,"canDelete":true,"canSave":true},"showAdminConsoleLink":true}}' TELEMETRY_DISABLED: 'false' - SCRIPTING_DISABLED: '' + SCRIPTING_DISABLED: 'false' TARGETED_PODS_UPDATE_DISABLED: '' - PRESET_FILTERS_CHANGING_ENABLED: '' + PRESET_FILTERS_CHANGING_ENABLED: 'true' RECORDING_DISABLED: '' STOP_TRAFFIC_CAPTURING_DISABLED: 'false' GLOBAL_FILTER: "" - DEFAULT_FILTER: "!dns and !tcp and !udp and !icmp" + DEFAULT_FILTER: "!dns and !error" TRAFFIC_SAMPLE_RATE: '100' JSON_TTL: '5m' PCAP_TTL: '10s' PCAP_ERROR_TTL: '60s' TIMEZONE: ' ' CLOUD_LICENSE_ENABLED: 'true' + AI_ASSISTANT_ENABLED: 'false' DUPLICATE_TIMEFRAME: '200ms' - ENABLED_DISSECTORS: 'amqp,dns,http,icmp,kafka,redis,sctp,syscall,tcp,udp,ws,tls' + ENABLED_DISSECTORS: 'amqp,dns,http,icmp,kafka,redis,sctp,syscall,ws,ldap,radius,diameter' + CUSTOM_MACROS: '{"https":"tls and (http or http2)"}' DISSECTORS_UPDATING_ENABLED: 'true' DETECT_DUPLICATES: 'false' PCAP_DUMP_ENABLE: 'true' PCAP_TIME_INTERVAL: '1m' PCAP_MAX_TIME: '1h' PCAP_MAX_SIZE: '500MB' - PCAP_SRC_DIR: 'pcapdump' + PORT_MAPPING: '{"amqp":[5671,5672],"diameter":[3868],"http":[80,443,8080],"kafka":[9092],"ldap":[389],"redis":[6379]}' --- # Source: kubeshark/templates/02-cluster-role.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: - helm.sh/chart: kubeshark-52.3.83 + helm.sh/chart: kubeshark-52.5 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "52.3.83" + app.kubernetes.io/version: "52.5" app.kubernetes.io/managed-by: Helm annotations: name: kubeshark-cluster-role-default @@ -294,18 +300,29 @@ rules: - namespaces verbs: 
- get - resourceNames: - - kube-system + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - networkpolicies + verbs: + - get + - list + - watch + - create + - update + - delete --- # Source: kubeshark/templates/03-cluster-role-binding.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: labels: - helm.sh/chart: kubeshark-52.3.83 + helm.sh/chart: kubeshark-52.5 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "52.3.83" + app.kubernetes.io/version: "52.5" app.kubernetes.io/managed-by: Helm annotations: name: kubeshark-cluster-role-binding-default @@ -324,10 +341,10 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: labels: - helm.sh/chart: kubeshark-52.3.83 + helm.sh/chart: kubeshark-52.5 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "52.3.83" + app.kubernetes.io/version: "52.5" app.kubernetes.io/managed-by: Helm annotations: name: kubeshark-self-config-role @@ -354,10 +371,10 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: labels: - helm.sh/chart: kubeshark-52.3.83 + helm.sh/chart: kubeshark-52.5 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "52.3.83" + app.kubernetes.io/version: "52.5" app.kubernetes.io/managed-by: Helm annotations: name: kubeshark-self-config-role-binding @@ -377,10 +394,10 @@ kind: Service metadata: labels: app.kubeshark.co/app: hub - helm.sh/chart: kubeshark-52.3.83 + helm.sh/chart: kubeshark-52.5 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "52.3.83" + app.kubernetes.io/version: "52.5" app.kubernetes.io/managed-by: Helm annotations: name: kubeshark-hub @@ -399,10 +416,10 @@ apiVersion: v1 kind: Service metadata: labels: - helm.sh/chart: kubeshark-52.3.83 + helm.sh/chart: kubeshark-52.5 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: 
kubeshark - app.kubernetes.io/version: "52.3.83" + app.kubernetes.io/version: "52.5" app.kubernetes.io/managed-by: Helm annotations: name: kubeshark-front @@ -421,10 +438,10 @@ kind: Service apiVersion: v1 metadata: labels: - helm.sh/chart: kubeshark-52.3.83 + helm.sh/chart: kubeshark-52.5 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "52.3.83" + app.kubernetes.io/version: "52.5" app.kubernetes.io/managed-by: Helm annotations: prometheus.io/scrape: 'true' @@ -434,10 +451,10 @@ metadata: spec: selector: app.kubeshark.co/app: worker - helm.sh/chart: kubeshark-52.3.83 + helm.sh/chart: kubeshark-52.5 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "52.3.83" + app.kubernetes.io/version: "52.5" app.kubernetes.io/managed-by: Helm ports: - name: metrics @@ -445,6 +462,35 @@ spec: port: 49100 targetPort: 49100 --- +# Source: kubeshark/templates/16-hub-service-metrics.yaml +kind: Service +apiVersion: v1 +metadata: + labels: + helm.sh/chart: kubeshark-52.5 + app.kubernetes.io/name: kubeshark + app.kubernetes.io/instance: kubeshark + app.kubernetes.io/version: "52.5" + app.kubernetes.io/managed-by: Helm + annotations: + prometheus.io/scrape: 'true' + prometheus.io/port: '9100' + name: kubeshark-hub-metrics + namespace: default +spec: + selector: + app.kubeshark.co/app: hub + helm.sh/chart: kubeshark-52.5 + app.kubernetes.io/name: kubeshark + app.kubernetes.io/instance: kubeshark + app.kubernetes.io/version: "52.5" + app.kubernetes.io/managed-by: Helm + ports: + - name: metrics + protocol: TCP + port: 9100 + targetPort: 9100 +--- # Source: kubeshark/templates/09-worker-daemon-set.yaml apiVersion: apps/v1 kind: DaemonSet @@ -452,10 +498,10 @@ metadata: labels: app.kubeshark.co/app: worker sidecar.istio.io/inject: "false" - helm.sh/chart: kubeshark-52.3.83 + helm.sh/chart: kubeshark-52.5 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - 
app.kubernetes.io/version: "52.3.83" + app.kubernetes.io/version: "52.5" app.kubernetes.io/managed-by: Helm annotations: name: kubeshark-worker-daemon-set @@ -470,35 +516,49 @@ spec: metadata: labels: app.kubeshark.co/app: worker - helm.sh/chart: kubeshark-52.3.83 + helm.sh/chart: kubeshark-52.5 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "52.3.83" + app.kubernetes.io/version: "52.5" app.kubernetes.io/managed-by: Helm name: kubeshark-worker-daemon-set namespace: kubeshark spec: + initContainers: + - command: + - /bin/sh + - -c + - mkdir -p /sys/fs/bpf && mount | grep -q '/sys/fs/bpf' || mount -t bpf bpf /sys/fs/bpf + image: 'docker.io/kubeshark/worker:v52.5' + imagePullPolicy: Always + name: mount-bpf + securityContext: + privileged: true + volumeMounts: + - mountPath: /sys + name: sys + mountPropagation: Bidirectional containers: - command: - ./worker - -i - any - -port - - '30001' + - '48999' - -metrics-port - '49100' - -packet-capture - 'best' - - -unixsocket + - -loglevel + - 'warning' - -servicemesh - -procfs - /hostproc - - -disable-ebpf - -resolution-strategy - 'auto' - -staletimeout - '30' - image: 'docker.io/kubeshark/worker:v52.3.83' + image: 'docker.io/kubeshark/worker:v52.5' imagePullPolicy: Always name: sniffer ports: @@ -528,35 +588,33 @@ spec: value: 'production' resources: limits: - cpu: 1000m - memory: 1500Mi + + + memory: 5Gi + requests: + cpu: 50m + + memory: 50Mi + securityContext: - capabilities: - add: - - NET_RAW - - NET_ADMIN - - SYS_ADMIN - - SYS_PTRACE - - DAC_OVERRIDE - drop: - - ALL + privileged: true readinessProbe: - periodSeconds: 1 + periodSeconds: 10 failureThreshold: 3 successThreshold: 1 - initialDelaySeconds: 5 + initialDelaySeconds: 15 tcpSocket: - port: 30001 + port: 48999 livenessProbe: - periodSeconds: 1 + periodSeconds: 10 failureThreshold: 3 successThreshold: 1 - initialDelaySeconds: 5 + initialDelaySeconds: 15 tcpSocket: - port: 30001 + port: 48999 volumeMounts: - 
mountPath: /hostproc name: proc @@ -564,14 +622,17 @@ spec: - mountPath: /sys name: sys readOnly: true + mountPropagation: HostToContainer - mountPath: /app/data name: data - command: - ./tracer - -procfs - /hostproc - - -disable-ebpf - image: 'docker.io/kubeshark/worker:v52.3.83' + - -disable-tls-log + - -loglevel + - 'warning' + image: 'docker.io/kubeshark/worker:v52.5' imagePullPolicy: Always name: tracer env: @@ -591,22 +652,19 @@ spec: value: 'production' resources: limits: - cpu: 1000m - memory: 1500Mi + + + memory: 5Gi + requests: + cpu: 50m + + memory: 50Mi + securityContext: - capabilities: - add: - - SYS_ADMIN - - SYS_PTRACE - - SYS_RESOURCE - - IPC_LOCK - - NET_RAW - - NET_ADMIN - drop: - - ALL + privileged: true volumeMounts: - mountPath: /hostproc name: proc @@ -614,6 +672,7 @@ spec: - mountPath: /sys name: sys readOnly: true + mountPropagation: HostToContainer - mountPath: /app/data name: data - mountPath: /etc/os-release @@ -626,12 +685,10 @@ spec: dnsPolicy: ClusterFirstWithHostNet hostNetwork: true serviceAccountName: kubeshark-service-account - terminationGracePeriodSeconds: 0 tolerations: - - effect: NoExecute - operator: Exists - - effect: NoSchedule - operator: Exists + - key: + operator: "Exists" + effect: "NoExecute" affinity: nodeAffinity: requiredDuringSchedulingIgnoredDuringExecution: @@ -667,10 +724,10 @@ kind: Deployment metadata: labels: app.kubeshark.co/app: hub - helm.sh/chart: kubeshark-52.3.83 + helm.sh/chart: kubeshark-52.5 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "52.3.83" + app.kubernetes.io/version: "52.5" app.kubernetes.io/managed-by: Helm annotations: name: kubeshark-hub @@ -686,10 +743,10 @@ spec: metadata: labels: app.kubeshark.co/app: hub - helm.sh/chart: kubeshark-52.3.83 + helm.sh/chart: kubeshark-52.5 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "52.3.83" + app.kubernetes.io/version: "52.5" 
app.kubernetes.io/managed-by: Helm spec: dnsPolicy: ClusterFirstWithHostNet @@ -700,6 +757,8 @@ spec: - ./hub - -port - "8080" + - -loglevel + - 'warning' env: - name: POD_NAME valueFrom: @@ -717,33 +776,48 @@ spec: value: 'https://api.kubeshark.co' - name: PROFILING_ENABLED value: 'false' - image: 'docker.io/kubeshark/hub:v52.3.83' + image: 'docker.io/kubeshark/hub:v52.5' imagePullPolicy: Always readinessProbe: - periodSeconds: 1 + periodSeconds: 10 failureThreshold: 3 successThreshold: 1 - initialDelaySeconds: 3 + initialDelaySeconds: 15 tcpSocket: port: 8080 livenessProbe: - periodSeconds: 1 + periodSeconds: 10 failureThreshold: 3 successThreshold: 1 - initialDelaySeconds: 3 + initialDelaySeconds: 15 tcpSocket: port: 8080 resources: limits: - cpu: 1000m - memory: 1500Mi + + + memory: 5Gi + requests: + cpu: 50m + + memory: 50Mi + volumeMounts: - name: saml-x509-volume mountPath: "/etc/saml/x509" readOnly: true + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/os + operator: In + values: + - linux volumes: - name: saml-x509-volume projected: @@ -765,10 +839,10 @@ kind: Deployment metadata: labels: app.kubeshark.co/app: front - helm.sh/chart: kubeshark-52.3.83 + helm.sh/chart: kubeshark-52.5 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "52.3.83" + app.kubernetes.io/version: "52.5" app.kubernetes.io/managed-by: Helm annotations: name: kubeshark-front @@ -784,10 +858,10 @@ spec: metadata: labels: app.kubeshark.co/app: front - helm.sh/chart: kubeshark-52.3.83 + helm.sh/chart: kubeshark-52.5 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "52.3.83" + app.kubernetes.io/version: "52.5" app.kubernetes.io/managed-by: Helm spec: containers: @@ -805,15 +879,17 @@ spec: - name: REACT_APP_TARGETED_PODS_UPDATE_DISABLED value: 'false' - name: 
REACT_APP_PRESET_FILTERS_CHANGING_ENABLED - value: 'false' + value: 'true' - name: REACT_APP_BPF_OVERRIDE_DISABLED - value: 'false' + value: 'true' - name: REACT_APP_RECORDING_DISABLED value: 'false' - name: REACT_APP_STOP_TRAFFIC_CAPTURING_DISABLED value: 'false' - name: 'REACT_APP_CLOUD_LICENSE_ENABLED' value: 'true' + - name: 'REACT_APP_AI_ASSISTANT_ENABLED' + value: 'false' - name: REACT_APP_SUPPORT_CHAT_ENABLED value: 'true' - name: REACT_APP_DISSECTORS_UPDATING_ENABLED @@ -822,7 +898,7 @@ spec: value: 'false' - name: REACT_APP_SENTRY_ENVIRONMENT value: 'production' - image: 'docker.io/kubeshark/front:v52.3.83' + image: 'docker.io/kubeshark/front:v52.5' imagePullPolicy: Always name: kubeshark-front livenessProbe: @@ -852,6 +928,15 @@ spec: mountPath: /etc/nginx/conf.d/default.conf subPath: default.conf readOnly: true + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/os + operator: In + values: + - linux volumes: - name: nginx-config configMap: diff --git a/misc/scripting.go b/misc/scripting.go index 968f89bd0..4b3b9e074 100644 --- a/misc/scripting.go +++ b/misc/scripting.go @@ -10,20 +10,23 @@ import ( ) type Script struct { - Path string `json:"path"` - Title string `json:"title"` - Code string `json:"code"` + Path string `json:"path"` + Title string `json:"title"` + Code string `json:"code"` + Active bool `json:"active"` } type ConfigMapScript struct { - Title string `json:"title"` - Code string `json:"code"` + Title string `json:"title"` + Code string `json:"code"` + Active bool `json:"active"` } func (s *Script) ConfigMap() ConfigMapScript { return ConfigMapScript{ - Title: s.Title, - Code: s.Code, + Title: s.Title, + Code: s.Code, + Active: s.Active, } } @@ -58,9 +61,10 @@ func ReadScriptFile(path string) (script *Script, err error) { } script = &Script{ - Path: path, - Title: title, - Code: code, + Path: path, + Title: title, + Code: code, + Active: false, } 
return