Update commands to merge pcaps

This commit is contained in:
bogdan.balan1 2024-09-30 08:00:16 +03:00
parent 1720f19b45
commit 4a6076c8d1
12 changed files with 196 additions and 224 deletions

View File

@ -2,7 +2,9 @@ package cmd
import (
"context"
"os"
"path/filepath"
"strings"
"github.com/creasty/defaults"
"github.com/kubeshark/kubeshark/config/configStructs"
@ -14,8 +16,7 @@ import (
)
const (
configPath = "/app/config/pcap_config.txt"
namespace = "default"
namespace = "default"
)
// pcapCopyCmd represents the pcapcopy command
@ -49,18 +50,9 @@ var pcapCopyCmd = &cobra.Command{
// Iterate over each pod to get the PCAP directory from config and copy files
for _, pod := range workerPods.Items {
// Read the config file from the pod to get the PCAP_DIR value
configMap, err := readConfigFileFromPod(context.Background(), clientset, config, pod.Name, namespace, configPath)
srcDir, err := getWorkerSource(clientset, pod.Name, pod.Namespace)
if err != nil {
log.Error().Err(err).Msgf("Error reading config file from pod %s", pod.Name)
continue
}
// Use the PCAP_DIR value from the config file
srcDir := configMap["PCAP_DIR"]
if srcDir == "" {
log.Error().Msgf("PCAP_DIR not found in config for pod %s", pod.Name)
continue
log.Error().Err(err).Msgf("Failed to read the worker source dir for %s", pod.Name)
}
// List files in the PCAP directory on the pod
@ -70,6 +62,8 @@ var pcapCopyCmd = &cobra.Command{
continue
}
var currentFiles []string
// Copy each file from the pod to the local destination
for _, file := range files {
srcFile := filepath.Join(srcDir, file)
@ -79,7 +73,41 @@ var pcapCopyCmd = &cobra.Command{
if err != nil {
log.Error().Err(err).Msgf("Error copying file from pod %s", pod.Name)
}
currentFiles = append(currentFiles, destFile)
}
if len(currentFiles) == 0 {
log.Error().Msgf("No files to merge for pod %s", pod.Name)
continue
}
// Generate a temporary filename based on the first file
tempMergedFile := currentFiles[0] + "_temp"
// Merge the PCAPs into the temporary file
err = mergePCAPs(tempMergedFile, currentFiles)
if err != nil {
log.Error().Err(err).Msgf("Error merging file from pod %s", pod.Name)
continue
}
// Remove the original files after merging
for _, file := range currentFiles {
err := os.Remove(file)
if err != nil {
log.Error().Err(err).Msgf("Error removing file %s", file)
}
}
// Rename the temp file to the final name (removing "_temp")
finalMergedFile := strings.TrimSuffix(tempMergedFile, "_temp")
err = os.Rename(tempMergedFile, finalMergedFile)
if err != nil {
log.Error().Err(err).Msgf("Error renaming merged file %s", tempMergedFile)
continue
}
log.Info().Msgf("Merged file created: %s", finalMergedFile)
}
return nil
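
The copy loop above no longer parses pcap_config.txt; it asks each worker for its capture directory through getWorkerSource (defined in the next file), which calls the worker's /pcaps/worker-src endpoint. As a hedged illustration only, a worker-side handler for that endpoint could look roughly like the sketch below; the actual worker implementation is not part of this commit, and the directory value is an assumption.

package main

import (
	"encoding/json"
	"net/http"
)

// pcapSrcDir is an assumed placeholder; the real worker resolves this itself.
const pcapSrcDir = "/app/data"

func main() {
	// Serves the JSON shape that getWorkerSource expects: {"workerSrcDir": "..."}.
	http.HandleFunc("/pcaps/worker-src", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		_ = json.NewEncoder(w).Encode(map[string]string{"workerSrcDir": pcapSrcDir})
	})
	// Port 30001 matches the URL built in getWorkerSource.
	_ = http.ListenAndServe(":30001", nil)
}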

View File

@ -3,13 +3,21 @@ package cmd
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"strings"
"github.com/kubeshark/gopacket"
"github.com/kubeshark/gopacket/layers"
"github.com/kubeshark/gopacket/pcapgo"
"github.com/rs/zerolog/log"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
clientk8s "k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/remotecommand"
)
@ -17,7 +25,7 @@ import (
const label = "app.kubeshark.co/app=worker"
// listWorkerPods fetches all the worker pods using the Kubernetes client
func listWorkerPods(ctx context.Context, clientset *kubernetes.Clientset, namespace string) (*corev1.PodList, error) {
func listWorkerPods(ctx context.Context, clientset *clientk8s.Clientset, namespace string) (*corev1.PodList, error) {
labelSelector := label
// List all pods matching the label
@ -32,7 +40,7 @@ func listWorkerPods(ctx context.Context, clientset *kubernetes.Clientset, namesp
}
// listFilesInPodDir lists all files in the specified directory inside the pod
func listFilesInPodDir(ctx context.Context, clientset *kubernetes.Clientset, config *rest.Config, podName, namespace, srcDir string) ([]string, error) {
func listFilesInPodDir(ctx context.Context, clientset *clientk8s.Clientset, config *rest.Config, podName, namespace, srcDir string) ([]string, error) {
cmd := []string{"ls", srcDir}
req := clientset.CoreV1().RESTClient().Post().
Resource("pods").
@ -69,7 +77,7 @@ func listFilesInPodDir(ctx context.Context, clientset *kubernetes.Clientset, con
}
// copyFileFromPod copies a single file from a pod to a local destination
func copyFileFromPod(ctx context.Context, clientset *kubernetes.Clientset, config *rest.Config, podName, namespace, srcFile, destFile string) error {
func copyFileFromPod(ctx context.Context, clientset *clientk8s.Clientset, config *rest.Config, podName, namespace, srcFile, destFile string) error {
cmd := []string{"cat", srcFile}
req := clientset.CoreV1().RESTClient().Post().
Resource("pods").
@ -106,154 +114,126 @@ func copyFileFromPod(ctx context.Context, clientset *kubernetes.Clientset, confi
return fmt.Errorf("error copying file from pod: %w. Stderr: %s", err, stderrBuf.String())
}
fmt.Printf("File from pod %s copied to local destination: %s\n", podName, destFile)
return nil
}
// updatePodEnvVars updates the configuration file inside the worker pod
func updatePodEnvVars(ctx context.Context, clientset *kubernetes.Clientset, config *rest.Config, podName, namespace string, stop bool, timeInterval, maxTime, maxSize string) error {
var envVars []string
if stop {
envVars = append(envVars, "PCAP_DUMP_ENABLE=false")
} else {
envVars = append(envVars, "PCAP_DUMP_ENABLE=true")
func mergePCAPs(outputFile string, inputFiles []string) error {
// Create the output file
f, err := os.Create(outputFile)
if err != nil {
return err
}
defer f.Close()
if timeInterval != "" {
envVars = append(envVars, fmt.Sprintf("TIME_INTERVAL=%s", timeInterval))
}
if maxTime != "" {
envVars = append(envVars, fmt.Sprintf("MAX_TIME=%s", maxTime))
}
if maxSize != "" {
envVars = append(envVars, fmt.Sprintf("MAX_SIZE=%s", maxSize))
}
// Create a pcap writer for the output file
writer := pcapgo.NewWriter(f)
err = writer.WriteFileHeader(65536, layers.LinkTypeEthernet) // Snapshot length and LinkType
if err != nil {
return err
}
// Create a command that sets the environment variables directly in the pod
for _, envVar := range envVars {
cmd := []string{"sh", "-c", fmt.Sprintf("export %s", envVar)}
req := clientset.CoreV1().RESTClient().Post().
Resource("pods").
Name(podName).
Namespace(namespace).
SubResource("exec").
Param("container", "sniffer"). // Assuming container is called 'sniffer'
Param("stdout", "true").
Param("stderr", "true").
Param("command", cmd[0]).
Param("command", cmd[1]).
Param("command", cmd[2])
exec, err := remotecommand.NewSPDYExecutor(config, "POST", req.URL())
for _, inputFile := range inputFiles {
// Open each input file
file, err := os.Open(inputFile)
if err != nil {
return fmt.Errorf("failed to initialize executor for pod %s: %w", podName, err)
return err
}
defer file.Close()
var stdoutBuf, stderrBuf bytes.Buffer
err = exec.StreamWithContext(ctx, remotecommand.StreamOptions{
Stdout: &stdoutBuf,
Stderr: &stderrBuf,
})
reader, err := pcapgo.NewReader(file)
if err != nil {
return fmt.Errorf("failed to update env vars in pod %s: %w. Stderr: %s", podName, err, stderrBuf.String())
log.Error().Err(err).Msgf("Failed to create pcapng reader for %v", file.Name())
return err
}
}
fmt.Printf("Updated env vars for pod %s\n", podName)
return nil
}
// Create the packet source
packetSource := gopacket.NewPacketSource(reader, layers.LinkTypeEthernet)
// readConfigFileFromPod reads the configuration file from the pod
func readConfigFileFromPod(ctx context.Context, clientset *kubernetes.Clientset, config *rest.Config, podName, namespace, configFilePath string) (map[string]string, error) {
cmd := []string{"cat", configFilePath}
req := clientset.CoreV1().RESTClient().Post().
Resource("pods").
Name(podName).
Namespace(namespace).
SubResource("exec").
Param("container", "sniffer").
Param("stdout", "true").
Param("stderr", "true").
Param("command", cmd[0]).
Param("command", cmd[1])
exec, err := remotecommand.NewSPDYExecutor(config, "POST", req.URL())
if err != nil {
return nil, fmt.Errorf("failed to initialize executor for pod %s: %w", podName, err)
}
var stdoutBuf, stderrBuf bytes.Buffer
err = exec.StreamWithContext(ctx, remotecommand.StreamOptions{
Stdout: &stdoutBuf,
Stderr: &stderrBuf,
})
if err != nil {
return nil, fmt.Errorf("failed to read config file from pod %s: %w. Stderr: %s", podName, err, stderrBuf.String())
}
// Parse the config content into a map of key-value pairs
configMap := parseConfigContent(stdoutBuf.String())
return configMap, nil
}
// writeConfigFileToPod writes the updated configuration map to the file in the pod
func writeConfigFileToPod(ctx context.Context, clientset *kubernetes.Clientset, config *rest.Config, podName, namespace, configFilePath string, configMap map[string]string) error {
// Convert the config map back to a string format for writing
configContent := formatConfigMapToString(configMap)
// Escape any single quotes in the config content to avoid issues in the shell command
escapedConfigContent := strings.ReplaceAll(configContent, "'", "'\\''")
// Prepare the command to write the configuration to the file
cmd := []string{"sh", "-c", fmt.Sprintf("echo '%s' > %s", escapedConfigContent, configFilePath)}
req := clientset.CoreV1().RESTClient().Post().
Resource("pods").
Name(podName).
Namespace(namespace).
SubResource("exec").
Param("container", "sniffer").
Param("stdout", "true").
Param("stderr", "true").
Param("command", cmd[0]).
Param("command", cmd[1]).
Param("command", cmd[2])
exec, err := remotecommand.NewSPDYExecutor(config, "POST", req.URL())
if err != nil {
return fmt.Errorf("failed to initialize executor for pod %s: %w", podName, err)
}
var stdoutBuf, stderrBuf bytes.Buffer
err = exec.StreamWithContext(ctx, remotecommand.StreamOptions{
Stdout: &stdoutBuf,
Stderr: &stderrBuf,
})
if err != nil {
return fmt.Errorf("failed to write config file to pod %s: %w. Stderr: %s", podName, err, stderrBuf.String())
for packet := range packetSource.Packets() {
err := writer.WritePacket(packet.Metadata().CaptureInfo, packet.Data())
if err != nil {
return err
}
}
}
return nil
}
// parseConfigContent parses the content of the config file into a map of key-value pairs
func parseConfigContent(content string) map[string]string {
configMap := make(map[string]string)
lines := strings.Split(content, "\n")
for _, line := range lines {
if strings.TrimSpace(line) == "" || !strings.Contains(line, "=") {
continue
}
parts := strings.SplitN(line, "=", 2)
configMap[strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1])
func setPcapConfigInKubernetes(clientset *clientk8s.Clientset, podName, namespace, enabledPcap, timeInterval, maxTime, maxSize string) error {
// Load the existing ConfigMap
configMap, err := clientset.CoreV1().ConfigMaps(namespace).Get(context.TODO(), "kubeshark-config-map", metav1.GetOptions{})
if err != nil {
return fmt.Errorf("failed to load ConfigMap: %w", err)
}
return configMap
// Update the values with user-provided input
if len(configMap.Data["PCAP_TIME_INTERVAL"]) > 0 {
configMap.Data["PCAP_TIME_INTERVAL"] = timeInterval
}
if len(configMap.Data["PCAP_MAX_SIZE"]) > 0 {
configMap.Data["PCAP_MAX_SIZE"] = maxSize
}
if len(configMap.Data["PCAP_MAX_TIME"]) > 0 {
configMap.Data["PCAP_MAX_TIME"] = maxTime
}
if len(configMap.Data["PCAP_DUMP_ENABLE"]) > 0 {
configMap.Data["PCAP_DUMP_ENABLE"] = enabledPcap
}
// Apply the updated ConfigMap back to the cluster
_, err = clientset.CoreV1().ConfigMaps(namespace).Update(context.TODO(), configMap, metav1.UpdateOptions{})
if err != nil {
return fmt.Errorf("failed to update ConfigMap: %w", err)
}
return nil
}
// formatConfigMapToString converts the config map back to string format
func formatConfigMapToString(configMap map[string]string) string {
var sb strings.Builder
for key, value := range configMap {
sb.WriteString(fmt.Sprintf("%s=%s\n", key, value))
}
return sb.String()
// WorkerSrcResponse represents the response structure from the worker's /pcaps/worker-src endpoint.
type WorkerSrcResponse struct {
WorkerSrcDir string `json:"workerSrcDir"`
}
// getWorkerSource fetches the worker source directory from the worker pod via the /pcaps/worker-src endpoint.
func getWorkerSource(clientset *kubernetes.Clientset, podName, namespace string) (string, error) {
// Get the worker pod IP or service address (you can also use the cluster DNS name)
pod, err := clientset.CoreV1().Pods(namespace).Get(context.TODO(), podName, metav1.GetOptions{})
if err != nil {
log.Error().Err(err).Msgf("Failed to get pod %s", podName)
return "", err
}
// Construct the URL to access the worker's /pcaps/worker-src endpoint
workerURL := fmt.Sprintf("http://%s:30001/pcaps/worker-src", pod.Status.PodIP)
// Make an HTTP request to the worker pod's endpoint
resp, err := http.Get(workerURL)
if err != nil {
log.Error().Err(err).Msgf("Failed to request worker src dir from %s", workerURL)
return "", err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return "", fmt.Errorf("failed to get worker src dir, status code: %d", resp.StatusCode)
}
// Read the response body
body, err := io.ReadAll(resp.Body)
if err != nil {
return "", err
}
// Parse the JSON response
var workerSrcResp WorkerSrcResponse
err = json.Unmarshal(body, &workerSrcResp)
if err != nil {
return "", fmt.Errorf("failed to parse worker src dir response: %v", err)
}
return workerSrcResp.WorkerSrcDir, nil
}
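
For reference, a minimal usage sketch of mergePCAPs as called by pcapCopyCmd: merge into a temporary file, then rename only once the merge succeeds. It assumes it sits in the same cmd package as the code above, and the input file names are purely illustrative.

package cmd

import (
	"os"

	"github.com/rs/zerolog/log"
)

// mergeExample mirrors the pcapcopy flow: merge first, rename only on success.
func mergeExample() error {
	inputs := []string{"worker-0.pcap", "worker-1.pcap"} // illustrative names
	tmp := "merged.pcap_temp"
	if err := mergePCAPs(tmp, inputs); err != nil {
		log.Error().Err(err).Msg("merge failed")
		return err
	}
	return os.Rename(tmp, "merged.pcap")
}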

View File

@ -2,7 +2,6 @@ package cmd
import (
"context"
"fmt"
"path/filepath"
"github.com/creasty/defaults"
@ -10,7 +9,6 @@ import (
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/util/homedir"
)
@ -46,14 +44,13 @@ var pcapStartCmd = &cobra.Command{
maxTime, _ := cmd.Flags().GetString("max-time")
maxSize, _ := cmd.Flags().GetString("max-size")
// Iterate over each pod to start the PCAP capture by updating the config file
// Iterate over each pod to start the PCAP capture by updating the configuration in Kubernetes
for _, pod := range workerPods.Items {
err := writeStartConfigToFileInPod(context.Background(), clientset, config, pod.Name, namespace, timeInterval, maxTime, maxSize)
err := setPcapConfigInKubernetes(clientset, pod.Name, namespace, "true", timeInterval, maxTime, maxSize)
if err != nil {
log.Error().Err(err).Msgf("Error updating config file for pod %s", pod.Name)
log.Error().Err(err).Msgf("Error setting PCAP config for pod %s", pod.Name)
continue
}
fmt.Printf("PCAP capture started for pod %s\n", pod.Name)
}
return nil
@ -69,33 +66,7 @@ func init() {
}
// Use full flag name without shorthand
pcapStartCmd.Flags().String("time-interval", defaultTapConfig.Misc.TimeInterval, "Time interval for PCAP file rotation (e.g., 1m, 2h)")
pcapStartCmd.Flags().String("max-time", defaultTapConfig.Misc.MaxTime, "Maximum time for retaining old PCAP files (e.g., 24h)")
pcapStartCmd.Flags().String("max-size", defaultTapConfig.Misc.MaxSize, "Maximum size of PCAP files before deletion (e.g., 500MB, 10GB)")
}
// writeStartConfigToFileInPod writes config to start pcap in the worker pods
func writeStartConfigToFileInPod(ctx context.Context, clientset *kubernetes.Clientset, config *rest.Config, podName, namespace, timeInterval, maxTime, maxSize string) error {
existingConfig, err := readConfigFileFromPod(ctx, clientset, config, podName, namespace, configPath)
if err != nil {
return fmt.Errorf("failed to read config file from pod %s: %w", podName, err)
}
existingConfig["PCAP_DUMP_ENABLE"] = "true"
if timeInterval != "" {
existingConfig["TIME_INTERVAL"] = timeInterval
}
if maxTime != "" {
existingConfig["MAX_TIME"] = maxTime
}
if maxSize != "" {
existingConfig["MAX_SIZE"] = maxSize
}
err = writeConfigFileToPod(ctx, clientset, config, podName, namespace, configPath, existingConfig)
if err != nil {
return fmt.Errorf("failed to write config file to pod %s: %w", podName, err)
}
return nil
pcapStartCmd.Flags().String("time-interval", defaultTapConfig.Misc.PcapTimeInterval, "Time interval for PCAP file rotation (e.g., 1m, 2h)")
pcapStartCmd.Flags().String("max-time", defaultTapConfig.Misc.PcapMaxTime, "Maximum time for retaining old PCAP files (e.g., 24h)")
pcapStartCmd.Flags().String("max-size", defaultTapConfig.Misc.PcapMaxSize, "Maximum size of PCAP files before deletion (e.g., 500MB, 10GB)")
}

View File

@ -2,13 +2,11 @@ package cmd
import (
"context"
"fmt"
"path/filepath"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/util/homedir"
)
@ -28,8 +26,6 @@ func init() {
}
func stopPcap(cmd *cobra.Command) error {
fmt.Println("Stopping PCAP capture.")
// Load Kubernetes configuration
kubeconfig := filepath.Join(homedir.HomeDir(), ".kube", "config")
config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
@ -53,30 +49,11 @@ func stopPcap(cmd *cobra.Command) error {
// Iterate over the worker pods and set config to stop pcap
for _, pod := range workerPods.Items {
err := writeStopConfigToFileInPod(context.Background(), clientset, config, pod.Name, namespace)
err := setPcapConfigInKubernetes(clientset, pod.Name, namespace, "false", "", "", "")
if err != nil {
log.Error().Err(err).Msgf("Error updating config file for pod %s", pod.Name)
log.Error().Err(err).Msgf("Error setting PCAP config for pod %s", pod.Name)
continue
}
fmt.Printf("PCAP capture stopped for pod %s\n", pod.Name)
}
fmt.Println("PCAP capture stopped successfully.")
return nil
}
// writeStopConfigToFileInPod reads the existing config, updates the PCAP_DUMP_ENABLE value, and writes it back to the file
func writeStopConfigToFileInPod(ctx context.Context, clientset *kubernetes.Clientset, config *rest.Config, podName, namespace string) error {
existingConfig, err := readConfigFileFromPod(ctx, clientset, config, podName, namespace, configPath)
if err != nil {
return fmt.Errorf("failed to read config file from pod %s: %w", podName, err)
}
existingConfig["PCAP_DUMP_ENABLE"] = "false"
err = writeConfigFileToPod(ctx, clientset, config, podName, namespace, configPath, existingConfig)
if err != nil {
return fmt.Errorf("failed to write config file to pod %s: %w", podName, err)
}
return nil

View File

@ -171,9 +171,9 @@ type MiscConfig struct {
DuplicateTimeframe string `yaml:"duplicateTimeframe" json:"duplicateTimeframe" default:"200ms"`
DetectDuplicates bool `yaml:"detectDuplicates" json:"detectDuplicates" default:"false"`
PcapDumpEnable bool `yaml:"pcapDumpEnable" json:"pcapDumpEnable" default:"false"`
TimeInterval string `yaml:"timeInterval" json:"timeInterval" default:"1m"`
MaxTime string `yaml:"maxTime" json:"maxTime" default:"24h"`
MaxSize string `yaml:"maxSize" json:"maxSize" default:"500MB"`
PcapTimeInterval string `yaml:"timeInterval" json:"timeInterval" default:"1m"`
PcapMaxTime string `yaml:"maxTime" json:"maxTime" default:"24h"`
PcapMaxSize string `yaml:"maxSize" json:"maxSize" default:"500MB"`
PcapDest string `yaml:"pcapDest" json:"pcapDest" default:"."`
}
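
Note that only the Go field names change here (TimeInterval becomes PcapTimeInterval, and so on); the yaml and json tags are unchanged, so existing config files and Helm values keep deserializing as before. A small sketch of that, assuming gopkg.in/yaml.v3 and a copy of the struct trimmed to the pcap fields:

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

// miscPcap is a trimmed-down stand-in for MiscConfig with only the pcap fields.
type miscPcap struct {
	PcapTimeInterval string `yaml:"timeInterval"`
	PcapMaxTime      string `yaml:"maxTime"`
	PcapMaxSize      string `yaml:"maxSize"`
}

func main() {
	in := []byte("timeInterval: 1m\nmaxTime: 24h\nmaxSize: 500MB\n")
	var m miscPcap
	if err := yaml.Unmarshal(in, &m); err != nil {
		panic(err)
	}
	// The renamed Go fields still pick up the same YAML keys.
	fmt.Printf("%+v\n", m)
}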

2
go.mod
View File

@ -82,6 +82,8 @@ require (
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.16.0 // indirect
github.com/kubeshark/gopacket v1.1.39 // indirect
github.com/kubeshark/tracerproto v1.0.0 // indirect
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
github.com/lib/pq v1.10.7 // indirect

9
go.sum
View File

@ -396,6 +396,10 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kubeshark/gopacket v1.1.39 h1:NNiMTPO8v2+5FVlJTulT0Z+O0TLEAzavJBto10AY7js=
github.com/kubeshark/gopacket v1.1.39/go.mod h1:Qo8/i/tdT74CCT7/pjO0L55Pktv5dQfj7M/Arv8MKm8=
github.com/kubeshark/tracerproto v1.0.0 h1:/euPX9KMrKDS92hSMrLuhncYAX22dYlsnM2aD4AYhhE=
github.com/kubeshark/tracerproto v1.0.0/go.mod h1:+efDYkwXxwakmHRpxHVEekyXNtg/aFx0uSo/I0lGV9k=
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw=
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o=
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk=
@ -615,6 +619,9 @@ github.com/tanqiangyes/grep-go v0.0.0-20220515134556-b36bff9c3d8e h1:+qDZ81UqxfZ
github.com/tanqiangyes/grep-go v0.0.0-20220515134556-b36bff9c3d8e/go.mod h1:ANZlXE3vfRYCYnkojePl2hJODYmOeCVD+XahuhDdTbI=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
@ -804,6 +811,7 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -817,6 +825,7 @@ golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=

View File

@ -99,13 +99,13 @@ spec:
- name: PROFILING_ENABLED
value: '{{ .Values.tap.misc.profile }}'
- name: PCAP_DUMP_ENABLE
value: '{{ .Values.tap.worker.env.pcapDumpEnable }}'
- name: TIME_INTERVAL
value: '{{ .Values.tap.worker.env.timeInterval }}'
- name: MAX_TIME
value: '{{ .Values.tap.worker.env.maxTime }}'
- name: MAX_SIZE
value: '{{ .Values.tap.worker.env.maxSize }}'
value: '{{ .Values.tap.misc.pcapDumpEnable }}'
- name: PCAP_TIME_INTERVAL
value: '{{ .Values.tap.misc.pcapTimeInterval }}'
- name: PCAP_MAX_TIME
value: '{{ .Values.tap.misc.pcapMaxTime }}'
- name: PCAP_MAX_SIZE
value: '{{ .Values.tap.misc.pcapMaxSize }}'
resources:
limits:
cpu: {{ .Values.tap.resources.sniffer.limits.cpu }}

View File

@ -49,4 +49,7 @@ data:
ENABLED_DISSECTORS: '{{ gt (len .Values.tap.enabledDissectors) 0 | ternary (join "," .Values.tap.enabledDissectors) "" }}'
DISSECTORS_UPDATING_ENABLED: '{{ .Values.dissectorsUpdatingEnabled | ternary "true" "false" }}'
DETECT_DUPLICATES: '{{ .Values.tap.misc.detectDuplicates | ternary "true" "false" }}'
PCAP_DUMP_ENABLE: '{{ .Values.tap.misc.pcapDumpEnable }}'
PCAP_TIME_INTERVAL: '{{ .Values.tap.misc.pcapTimeInterval }}'
PCAP_MAX_TIME: '{{ .Values.tap.misc.pcapMaxTime }}'
PCAP_MAX_SIZE: '{{ .Values.tap.misc.pcapMaxSize }}'

View File

@ -140,12 +140,10 @@ tap:
profile: false
duplicateTimeframe: 200ms
detectDuplicates: false
worker:
env:
pcapDumpEnable: true
timeInterval: 1m
maxTime: 2h
maxSize: 10GB
pcapDumpEnable: true
pcapTimeInterval: 1m
pcapMaxTime: 2h
pcapMaxSize: 1MB
logs:
file: ""
grep: ""

View File

@ -23,6 +23,10 @@ const (
CONFIG_AUTH_ENABLED = "AUTH_ENABLED"
CONFIG_AUTH_TYPE = "AUTH_TYPE"
CONFIG_AUTH_SAML_IDP_METADATA_URL = "AUTH_SAML_IDP_METADATA_URL"
CONFIG_PCAP_DUMP_ENABLE = "PCAP_DUMP_ENABLE"
CONFIG_TIME_INTERVAL = "TIME_INTERVAL"
CONFIG_MAX_TIME = "MAX_TIME"
CONFIG_MAX_SIZE = "MAX_SIZE"
)
func SetSecret(provider *Provider, key string, value string) (updated bool, err error) {

View File

@ -487,7 +487,7 @@ spec:
- -disable-ebpf
- -resolution-strategy
- 'auto'
image: 'docker.io/kubeshark/worker:v52.3.79'
image: '192.168.49.2:5000/kubeshark/worker:dev'
imagePullPolicy: Always
name: sniffer
ports:
@ -556,7 +556,7 @@ spec:
- -procfs
- /hostproc
- -disable-ebpf
image: 'docker.io/kubeshark/worker:v52.3.79'
image: '192.168.49.2:5000/kubeshark/worker:dev'
imagePullPolicy: Always
name: tracer
env: