mirror of https://github.com/kubeshark/kubeshark.git synced 2025-05-09 17:19:03 +00:00

Feat pcapsaver ()

* Add cmd to copy pcaps from worker

* Update commands to merge pcaps

* Remove test img

* Remove usage of http endpoint in copy

* Unify commands

* Add copy flag

* Address review comments

* Update k8s config path processing

* Remove debug prints

* Set the pcapSrcDir to the name of the command

* Update values.yaml

* Remove the start, stop and copy flags

* Clean up the code a bit
Changed the logic so it's either copy or start/stop.
Works well for a first version.

* Improved the logic

* Changed pcapdump enable flag to boolean

* Added helm value documentation

* Minor default configuration changes

* Fix default val for enabled

* Final changes
Cleaned up the helm worker template
Improved the logic a bit

* Code cleanup
Changed instances of `enable` to `enabled` for consistency
Removed unused helm environment variables

* Enable merging all node files into a single file.
Previously the output was one merged file per node; now a single merged file covers all nodes.

* Committed for testing purposes

* Reduced the initial disk footprint to 10MB per node

---------

Co-authored-by: bogdan.balan1 <bogdanvalentin.balan@1nce.com>
Co-authored-by: Alon Girmonsky <1990761+alongir@users.noreply.github.com>
bogdanvbalan committed 2024-10-07 18:39:52 +03:00
parent 68da6a819a
commit 783aa03b6a
10 changed files with 505 additions and 2 deletions

cmd/pcapDump.go (new file, 101 lines)

@@ -0,0 +1,101 @@
package cmd
import (
"errors"
"path/filepath"
"github.com/creasty/defaults"
"github.com/kubeshark/kubeshark/config/configStructs"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/util/homedir"
)
// pcapDumpCmd represents the consolidated pcapdump command
var pcapDumpCmd = &cobra.Command{
Use: "pcapdump",
Short: "Manage PCAP dump operations: start, stop, or copy PCAP files",
RunE: func(cmd *cobra.Command, args []string) error {
// Retrieve the kubeconfig path from the flag
kubeconfig, _ := cmd.Flags().GetString(configStructs.PcapKubeconfig)
// If kubeconfig is not provided, use the default location
if kubeconfig == "" {
if home := homedir.HomeDir(); home != "" {
kubeconfig = filepath.Join(home, ".kube", "config")
} else {
return errors.New("kubeconfig flag not provided and no home directory available for default config location")
}
}
// Use the current context in kubeconfig
config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
if err != nil {
log.Error().Err(err).Msg("Error building kubeconfig")
return err
}
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
log.Error().Err(err).Msg("Error creating Kubernetes client")
return err
}
// If the --enabled flag was not explicitly set, treat the invocation as a copy operation
if !cmd.Flags().Changed(configStructs.PcapDumpEnabled) {
destDir, _ := cmd.Flags().GetString(configStructs.PcapDest)
log.Info().Msg("Copying PCAP files")
err = copyPcapFiles(clientset, config, destDir)
if err != nil {
log.Error().Err(err).Msg("Error copying PCAP files")
return err
}
} else {
// Otherwise, start or stop the PCAP dump based on the --enabled flag
enabled, err := cmd.Flags().GetBool(configStructs.PcapDumpEnabled)
if err != nil {
log.Error().Err(err).Msg("Error getting pcapdump enabled flag")
return err
}
timeInterval, _ := cmd.Flags().GetString(configStructs.PcapTimeInterval)
maxTime, _ := cmd.Flags().GetString(configStructs.PcapMaxTime)
maxSize, _ := cmd.Flags().GetString(configStructs.PcapMaxSize)
err = startStopPcap(clientset, enabled, timeInterval, maxTime, maxSize)
if err != nil {
log.Error().Err(err).Msg("Error starting/stopping PCAP dump")
return err
}
if enabled {
log.Info().Msg("Pcapdump started successfully")
return nil
} else {
log.Info().Msg("Pcapdump stopped successfully")
return nil
}
}
return nil
},
}
func init() {
rootCmd.AddCommand(pcapDumpCmd)
defaultPcapDumpConfig := configStructs.PcapDumpConfig{}
if err := defaults.Set(&defaultPcapDumpConfig); err != nil {
log.Debug().Err(err).Send()
}
pcapDumpCmd.Flags().String(configStructs.PcapTimeInterval, defaultPcapDumpConfig.PcapTimeInterval, "Time interval for PCAP file rotation (applies while pcapdump is enabled)")
pcapDumpCmd.Flags().String(configStructs.PcapMaxTime, defaultPcapDumpConfig.PcapMaxTime, "Maximum time for retaining old PCAP files (applies while pcapdump is enabled)")
pcapDumpCmd.Flags().String(configStructs.PcapMaxSize, defaultPcapDumpConfig.PcapMaxSize, "Maximum size of PCAP files before deletion (applies while pcapdump is enabled)")
pcapDumpCmd.Flags().String(configStructs.PcapDest, "", "Local destination path for copied PCAP files (cannot be used together with --enabled)")
pcapDumpCmd.Flags().String(configStructs.PcapKubeconfig, "", "Path to the kubeconfig file (defaults to $HOME/.kube/config)")
pcapDumpCmd.Flags().Bool(configStructs.PcapDumpEnabled, defaultPcapDumpConfig.PcapDumpEnabled, "Enable or disable the PCAP dump on the workers (cannot be used together with --dest)")
}
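In practice the command has two modes. If `--dest` is supplied (and `--enabled` is not), the CLI copies and merges the workers' PCAP files locally; if `--enabled` is set, it toggles capture on the workers. For example, an invocation such as `kubeshark pcapdump --dest ./pcaps` would pull and merge the dump files into `./pcaps`, while `kubeshark pcapdump --enabled=false` would stop the rolling capture (illustrative invocations; flag names follow the constants registered above).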

cmd/pcapDumpRunner.go (new file, 359 lines)

@@ -0,0 +1,359 @@
package cmd
import (
"bytes"
"context"
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/kubeshark/gopacket"
"github.com/kubeshark/gopacket/layers"
"github.com/kubeshark/gopacket/pcapgo"
"github.com/rs/zerolog/log"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
clientk8s "k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/remotecommand"
)
const label = "app.kubeshark.co/app=worker"
const SELF_RESOURCES_PREFIX = "kubeshark-"
const SUFFIX_CONFIG_MAP = "config-map"
// NamespaceFiles represents the namespace and the files found in that namespace.
type NamespaceFiles struct {
Namespace string // The namespace in which the files were found
SrcDir string // The source directory from which the files were listed
Files []string // List of files found in the namespace
}
// listWorkerPods fetches all worker pods from multiple namespaces
func listWorkerPods(ctx context.Context, clientset *clientk8s.Clientset, namespaces []string) ([]corev1.Pod, error) {
var allPods []corev1.Pod
labelSelector := label
for _, namespace := range namespaces {
// List all pods matching the label in the current namespace
pods, err := clientset.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
LabelSelector: labelSelector,
})
if err != nil {
return nil, fmt.Errorf("failed to list worker pods in namespace %s: %w", namespace, err)
}
// Accumulate the pods
allPods = append(allPods, pods.Items...)
}
return allPods, nil
}
// listFilesInPodDir lists all files in the specified directory inside the pod across multiple namespaces
func listFilesInPodDir(ctx context.Context, clientset *clientk8s.Clientset, config *rest.Config, podName string, namespaces []string, configMapName, configMapKey string) ([]NamespaceFiles, error) {
var namespaceFilesList []NamespaceFiles
for _, namespace := range namespaces {
// Attempt to get the ConfigMap in the current namespace
configMap, err := clientset.CoreV1().ConfigMaps(namespace).Get(ctx, configMapName, metav1.GetOptions{})
if err != nil {
continue
}
// Check if the source directory exists in the ConfigMap
srcDir, ok := configMap.Data[configMapKey]
if !ok || srcDir == "" {
continue
}
// Attempt to get the pod in the current namespace
pod, err := clientset.CoreV1().Pods(namespace).Get(ctx, podName, metav1.GetOptions{})
if err != nil {
continue
}
nodeName := pod.Spec.NodeName
srcFilePath := filepath.Join("data", nodeName, srcDir)
cmd := []string{"ls", srcFilePath}
req := clientset.CoreV1().RESTClient().Post().
Resource("pods").
Name(podName).
Namespace(namespace).
SubResource("exec").
Param("container", "sniffer").
Param("stdout", "true").
Param("stderr", "true").
Param("command", cmd[0]).
Param("command", cmd[1])
exec, err := remotecommand.NewSPDYExecutor(config, "POST", req.URL())
if err != nil {
continue
}
var stdoutBuf bytes.Buffer
var stderrBuf bytes.Buffer
// Execute the command to list files
err = exec.StreamWithContext(ctx, remotecommand.StreamOptions{
Stdout: &stdoutBuf,
Stderr: &stderrBuf,
})
if err != nil {
continue
}
// Split the output (file names) into a list
files := strings.Split(strings.TrimSpace(stdoutBuf.String()), "\n")
if len(files) > 0 {
// Append the NamespaceFiles struct to the list
namespaceFilesList = append(namespaceFilesList, NamespaceFiles{
Namespace: namespace,
SrcDir: srcDir,
Files: files,
})
}
}
if len(namespaceFilesList) == 0 {
return nil, fmt.Errorf("no files found in pod %s across the provided namespaces", podName)
}
return namespaceFilesList, nil
}
// copyFileFromPod copies a single file from a pod to a local destination
func copyFileFromPod(ctx context.Context, clientset *kubernetes.Clientset, config *rest.Config, podName, namespace, srcDir, srcFile, destFile string) error {
// Get the pod to retrieve its node name
pod, err := clientset.CoreV1().Pods(namespace).Get(ctx, podName, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("failed to get pod %s in namespace %s: %w", podName, namespace, err)
}
// Construct the complete path using /data, the node name, srcDir, and srcFile
nodeName := pod.Spec.NodeName
srcFilePath := filepath.Join("data", nodeName, srcDir, srcFile)
// Execute the `cat` command to read the file at the srcFilePath
cmd := []string{"cat", srcFilePath}
req := clientset.CoreV1().RESTClient().Post().
Resource("pods").
Name(podName).
Namespace(namespace).
SubResource("exec").
Param("container", "sniffer").
Param("stdout", "true").
Param("stderr", "true").
Param("command", cmd[0]).
Param("command", cmd[1])
exec, err := remotecommand.NewSPDYExecutor(config, "POST", req.URL())
if err != nil {
return fmt.Errorf("failed to initialize executor for pod %s in namespace %s: %w", podName, namespace, err)
}
// Create the local file to write the content to
outFile, err := os.Create(destFile)
if err != nil {
return fmt.Errorf("failed to create destination file: %w", err)
}
defer outFile.Close()
// Capture stderr for error logging
var stderrBuf bytes.Buffer
// Stream the file content from the pod to the local file
err = exec.StreamWithContext(ctx, remotecommand.StreamOptions{
Stdout: outFile,
Stderr: &stderrBuf,
})
if err != nil {
return fmt.Errorf("error copying file from pod %s in namespace %s: %s", podName, namespace, stderrBuf.String())
}
return nil
}
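// mergePCAPs merges the given input PCAP files into a single output PCAP file.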
func mergePCAPs(outputFile string, inputFiles []string) error {
// Create the output file
f, err := os.Create(outputFile)
if err != nil {
return err
}
defer f.Close()
// Create a pcap writer for the output file
writer := pcapgo.NewWriter(f)
err = writer.WriteFileHeader(65536, layers.LinkTypeEthernet) // Snapshot length and LinkType
if err != nil {
return err
}
for _, inputFile := range inputFiles {
// Open each input file
file, err := os.Open(inputFile)
if err != nil {
return err
}
defer file.Close()
reader, err := pcapgo.NewReader(file)
if err != nil {
log.Error().Err(err).Msgf("Failed to create pcap reader for %v", file.Name())
return err
}
// Create the packet source
packetSource := gopacket.NewPacketSource(reader, layers.LinkTypeEthernet)
for packet := range packetSource.Packets() {
err := writer.WritePacket(packet.Metadata().CaptureInfo, packet.Data())
if err != nil {
return err
}
}
}
return nil
}
// setPcapConfigInKubernetes sets the PCAP config for all pods across multiple namespaces
func setPcapConfigInKubernetes(ctx context.Context, clientset *clientk8s.Clientset, podName string, namespaces []string, enabledPcap bool, timeInterval, maxTime, maxSize string) error {
for _, namespace := range namespaces {
// Load the existing ConfigMap in the current namespace
configMap, err := clientset.CoreV1().ConfigMaps(namespace).Get(ctx, "kubeshark-config-map", metav1.GetOptions{})
if err != nil {
continue
}
// Update the values with user-provided input
configMap.Data["PCAP_TIME_INTERVAL"] = timeInterval
configMap.Data["PCAP_MAX_SIZE"] = maxSize
configMap.Data["PCAP_MAX_TIME"] = maxTime
configMap.Data["PCAP_DUMP_ENABLE"] = strconv.FormatBool(enabledPcap)
// Apply the updated ConfigMap back to the cluster in the current namespace
_, err = clientset.CoreV1().ConfigMaps(namespace).Update(ctx, configMap, metav1.UpdateOptions{})
if err != nil {
continue
}
}
return nil
}
// startStopPcap enables or disables the PCAP capture on the worker pods
func startStopPcap(clientset *kubernetes.Clientset, pcapEnable bool, timeInterval, maxTime, maxSize string) error {
kubernetesProvider, err := getKubernetesProviderForCli(false, false)
if err != nil {
log.Error().Err(err).Send()
return err
}
targetNamespaces := kubernetesProvider.GetNamespaces()
// List worker pods
workerPods, err := listWorkerPods(context.Background(), clientset, targetNamespaces)
if err != nil {
log.Error().Err(err).Msg("Error listing worker pods")
return err
}
// Iterate over each pod to start the PCAP capture by updating the configuration in Kubernetes
for _, pod := range workerPods {
err := setPcapConfigInKubernetes(context.Background(), clientset, pod.Name, targetNamespaces, pcapEnable, timeInterval, maxTime, maxSize)
if err != nil {
log.Error().Err(err).Msgf("Error setting PCAP config for pod %s", pod.Name)
continue
}
}
return nil
}
// copyPcapFiles copies the PCAP files from the worker pods to the local destination and merges them into a single file
func copyPcapFiles(clientset *kubernetes.Clientset, config *rest.Config, destDir string) error {
kubernetesProvider, err := getKubernetesProviderForCli(false, false)
if err != nil {
log.Error().Err(err).Send()
return err
}
targetNamespaces := kubernetesProvider.GetNamespaces()
// List worker pods
workerPods, err := listWorkerPods(context.Background(), clientset, targetNamespaces)
if err != nil {
log.Error().Err(err).Msg("Error listing worker pods")
return err
}
var currentFiles []string
// Iterate over each pod to get the PCAP directory from config and copy files
for _, pod := range workerPods {
// Get the list of NamespaceFiles (files per namespace) and their source directories
namespaceFiles, err := listFilesInPodDir(context.Background(), clientset, config, pod.Name, targetNamespaces, SELF_RESOURCES_PREFIX+SUFFIX_CONFIG_MAP, "PCAP_SRC_DIR")
if err != nil {
log.Error().Err(err).Msgf("Error listing files in pod %s", pod.Name)
continue
}
// Copy each file from the pod to the local destination for each namespace
for _, nsFiles := range namespaceFiles {
for _, file := range nsFiles.Files {
destFile := filepath.Join(destDir, file)
// Pass the correct namespace and related details to the function
err = copyFileFromPod(context.Background(), clientset, config, pod.Name, nsFiles.Namespace, nsFiles.SrcDir, file, destFile)
if err != nil {
log.Error().Err(err).Msgf("Error copying file from pod %s in namespace %s", pod.Name, nsFiles.Namespace)
}
currentFiles = append(currentFiles, destFile)
}
}
}
if len(currentFiles) == 0 {
log.Error().Msg("No files to merge")
return nil
}
// Generate a temporary filename based on the first file
tempMergedFile := currentFiles[0] + "_temp"
// Merge the PCAPs into the temporary file
err = mergePCAPs(tempMergedFile, currentFiles)
if err != nil {
log.Error().Err(err).Msg("Error merging files")
return err
}
// Remove the original files after merging
for _, file := range currentFiles {
err := os.Remove(file)
if err != nil {
log.Error().Err(err).Msgf("Error removing file %s", file)
}
}
// Rename the temp file to the final name (removing "_temp")
finalMergedFile := strings.TrimSuffix(tempMergedFile, "_temp")
err = os.Rename(tempMergedFile, finalMergedFile)
if err != nil {
log.Error().Err(err).Msgf("Error renaming merged file %s", tempMergedFile)
return err
}
log.Info().Msgf("Merged file created: %s", finalMergedFile)
return nil
}
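To sanity-check the merge logic outside the cluster, a minimal standalone sketch of the same approach as `mergePCAPs` could look like this (file names are placeholders; it assumes the `kubeshark/gopacket` packages already listed in go.mod):

```go
package main

import (
	"log"
	"os"

	"github.com/kubeshark/gopacket"
	"github.com/kubeshark/gopacket/layers"
	"github.com/kubeshark/gopacket/pcapgo"
)

func main() {
	// Placeholder inputs and output; mirrors the loop in mergePCAPs.
	inputs := []string{"node-a.pcap", "node-b.pcap"}

	out, err := os.Create("merged.pcap")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	// Write one global file header, then append packets from every input.
	w := pcapgo.NewWriter(out)
	if err := w.WriteFileHeader(65536, layers.LinkTypeEthernet); err != nil {
		log.Fatal(err)
	}

	for _, in := range inputs {
		f, err := os.Open(in)
		if err != nil {
			log.Fatal(err)
		}
		r, err := pcapgo.NewReader(f)
		if err != nil {
			f.Close()
			log.Fatal(err)
		}
		src := gopacket.NewPacketSource(r, layers.LinkTypeEthernet)
		for pkt := range src.Packets() {
			if err := w.WritePacket(pkt.Metadata().CaptureInfo, pkt.Data()); err != nil {
				f.Close()
				log.Fatal(err)
			}
		}
		f.Close() // close per iteration rather than deferring inside the loop
	}
}
```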

@@ -104,6 +104,7 @@ type ConfigStruct struct {
Tap configStructs.TapConfig `yaml:"tap" json:"tap"`
Logs configStructs.LogsConfig `yaml:"logs" json:"logs"`
Config configStructs.ConfigConfig `yaml:"config,omitempty" json:"config,omitempty"`
PcapDump configStructs.PcapDumpConfig `yaml:"pcapdump" json:"pcapdump"`
Kube KubeConfig `yaml:"kube" json:"kube"`
DumpLogs bool `yaml:"dumpLogs" json:"dumpLogs" default:"false"`
HeadlessMode bool `yaml:"headless" json:"headless" default:"false"`

@@ -37,6 +37,12 @@ const (
DebugLabel = "debug"
ContainerPort = 80
ContainerPortStr = "80"
PcapDest = "dest"
PcapMaxSize = "maxSize"
PcapMaxTime = "maxTime"
PcapTimeInterval = "timeInterval"
PcapKubeconfig = "kubeconfig"
PcapDumpEnabled = "enabled"
)
type ResourceLimitsHub struct {
@@ -190,6 +196,14 @@ type MiscConfig struct {
StaleTimeoutSeconds int `yaml:"staleTimeoutSeconds" json:"staleTimeoutSeconds" default:"30"`
}
type PcapDumpConfig struct {
PcapDumpEnabled bool `yaml:"enabled" json:"enabled" default:"true"`
PcapTimeInterval string `yaml:"timeInterval" json:"timeInterval" default:"10m"`
PcapMaxTime string `yaml:"maxTime" json:"maxTime" default:"1h"`
PcapMaxSize string `yaml:"maxSize" json:"maxSize" default:"10MB"`
PcapSrcDir string `yaml:"pcapSrcDir" json:"pcapSrcDir" default:"pcapdump"`
}
type TapConfig struct {
Docker DockerConfig `yaml:"docker" json:"docker"`
Proxy ProxyConfig `yaml:"proxy" json:"proxy"`
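For reference, a small sketch of how these `default` tags get applied by the `creasty/defaults` package that the `pcapdump` command already uses (the struct is copied here only for illustration):

```go
package main

import (
	"fmt"

	"github.com/creasty/defaults"
)

// Illustrative copy of the PcapDumpConfig fields and their default tags.
type PcapDumpConfig struct {
	PcapDumpEnabled  bool   `default:"true"`
	PcapTimeInterval string `default:"10m"`
	PcapMaxTime      string `default:"1h"`
	PcapMaxSize      string `default:"10MB"`
	PcapSrcDir       string `default:"pcapdump"`
}

func main() {
	cfg := PcapDumpConfig{}
	// defaults.Set fills zero-valued fields from the struct tags,
	// which is what the pcapdump command's init() relies on for flag defaults.
	if err := defaults.Set(&cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg)
	// Roughly: {PcapDumpEnabled:true PcapTimeInterval:10m PcapMaxTime:1h PcapMaxSize:10MB PcapSrcDir:pcapdump}
}
```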

go.mod (2 lines changed)

@@ -82,6 +82,8 @@ require (
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.16.0 // indirect
github.com/kubeshark/gopacket v1.1.39 // indirect
github.com/kubeshark/tracerproto v1.0.0 // indirect
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
github.com/lib/pq v1.10.7 // indirect

go.sum (9 lines changed)

@@ -396,6 +396,10 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kubeshark/gopacket v1.1.39 h1:NNiMTPO8v2+5FVlJTulT0Z+O0TLEAzavJBto10AY7js=
github.com/kubeshark/gopacket v1.1.39/go.mod h1:Qo8/i/tdT74CCT7/pjO0L55Pktv5dQfj7M/Arv8MKm8=
github.com/kubeshark/tracerproto v1.0.0 h1:/euPX9KMrKDS92hSMrLuhncYAX22dYlsnM2aD4AYhhE=
github.com/kubeshark/tracerproto v1.0.0/go.mod h1:+efDYkwXxwakmHRpxHVEekyXNtg/aFx0uSo/I0lGV9k=
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw=
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o=
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk=
@@ -615,6 +619,9 @@ github.com/tanqiangyes/grep-go v0.0.0-20220515134556-b36bff9c3d8e h1:+qDZ81UqxfZ
github.com/tanqiangyes/grep-go v0.0.0-20220515134556-b36bff9c3d8e/go.mod h1:ANZlXE3vfRYCYnkojePl2hJODYmOeCVD+XahuhDdTbI=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
@@ -804,6 +811,7 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -817,6 +825,7 @@ golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=

@@ -180,6 +180,9 @@ Please refer to [metrics](./metrics.md) documentation for details.
| `tap.metrics.port` | Pod port used to expose Prometheus metrics | `49100` |
| `tap.enabledDissectors` | An array of strings listing the enabled protocol dissectors. Remove or comment out redundant protocols (e.g., dns). | The default list includes: amqp, dns, http, icmp, kafka, redis, sctp, syscall, tcp, ws. |
| `logs.file` | Logs dump path | `""` |
| `pcapdump.enabled` | Enable continuous recording of captured traffic. Everything Kubeshark captures, subject to pod targeting rules, is stored in PCAP files that standard PCAP tools can read. | `true` |
| `pcapdump.maxTime` | The time window of traffic to retain; older traffic is discarded. | `2h` |
| `pcapdump.maxSize` | The maximum storage the PCAP files may consume; the oldest files are discarded once the limit is exceeded. | `500MB` |
| `kube.configPath` | Path to the `kubeconfig` file (`$HOME/.kube/config`) | `""` |
| `kube.context` | Kubernetes context to use for the deployment | `""` |
| `dumpLogs` | Enable dumping of logs | `false` |
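As with any other chart value, these can be overridden at install or upgrade time, for example (illustrative command, assuming the standard `kubeshark/kubeshark` chart): `helm install kubeshark kubeshark/kubeshark --set pcapdump.enabled=true --set pcapdump.maxSize=500MB`, or by placing a `pcapdump:` block in a custom values file.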

@@ -49,4 +49,8 @@ data:
ENABLED_DISSECTORS: '{{ gt (len .Values.tap.enabledDissectors) 0 | ternary (join "," .Values.tap.enabledDissectors) "" }}'
DISSECTORS_UPDATING_ENABLED: '{{ .Values.dissectorsUpdatingEnabled | ternary "true" "false" }}'
DETECT_DUPLICATES: '{{ .Values.tap.misc.detectDuplicates | ternary "true" "false" }}'
PCAP_DUMP_ENABLE: '{{ .Values.pcapdump.enabled }}'
PCAP_TIME_INTERVAL: '{{ .Values.pcapdump.timeInterval }}'
PCAP_MAX_TIME: '{{ .Values.pcapdump.maxTime }}'
PCAP_MAX_SIZE: '{{ .Values.pcapdump.maxSize }}'
PCAP_SRC_DIR: '{{ .Values.pcapdump.pcapSrcDir }}'
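These are the same ConfigMap keys that `setPcapConfigInKubernetes` rewrites at runtime (`PCAP_DUMP_ENABLE`, `PCAP_TIME_INTERVAL`, `PCAP_MAX_TIME`, `PCAP_MAX_SIZE`), so the Helm values provide the initial settings and the `pcapdump` CLI command can adjust them on a running deployment.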

@@ -1,4 +1,3 @@
# find a detailed description here: https://github.com/kubeshark/kubeshark/blob/master/helm-chart/README.md
tap:
docker:
registry: docker.io/kubeshark
@@ -136,6 +135,7 @@ tap:
- tcp
- udp
- ws
- tls
metrics:
port: 49100
pprof:
@@ -156,6 +156,12 @@ tap:
logs:
file: ""
grep: ""
pcapdump:
enabled: true
timeInterval: 10m
maxTime: 1h
maxSize: 50MB
pcapSrcDir: pcapdump
kube:
configPath: ""
context: ""

@@ -26,6 +26,10 @@ const (
CONFIG_AUTH_TYPE = "AUTH_TYPE"
CONFIG_AUTH_SAML_IDP_METADATA_URL = "AUTH_SAML_IDP_METADATA_URL"
CONFIG_SCRIPTING_SCRIPTS = "SCRIPTING_SCRIPTS"
CONFIG_PCAP_DUMP_ENABLE = "PCAP_DUMP_ENABLE"
CONFIG_TIME_INTERVAL = "TIME_INTERVAL"
CONFIG_MAX_TIME = "MAX_TIME"
CONFIG_MAX_SIZE = "MAX_SIZE"
)
func SetSecret(provider *Provider, key string, value string) (updated bool, err error) {