From 3fcc51c5c31a9a78762a76f5914596f2c8ac3320 Mon Sep 17 00:00:00 2001 From: Alon Girmonsky <1990761+alongir@users.noreply.github.com> Date: Tue, 8 Oct 2024 10:04:46 -0700 Subject: [PATCH 01/10] Ensure `scripting` command watched only JS files --- cmd/scripts.go | 5 +++++ config/configStructs/scriptingConfig.go | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/cmd/scripts.go b/cmd/scripts.go index 73ecadc4c..406b3a015 100644 --- a/cmd/scripts.go +++ b/cmd/scripts.go @@ -3,6 +3,7 @@ package cmd import ( "context" "encoding/json" + "strings" "github.com/creasty/defaults" "github.com/fsnotify/fsnotify" @@ -168,6 +169,10 @@ func watchScripts(provider *kubernetes.Provider, block bool) { select { // watch for events case event := <-watcher.Events: + if !strings.HasSuffix(event.Name, "js") { + log.Info().Str("file", event.Name).Msg("Ignoring file") + continue + } switch event.Op { case fsnotify.Create: script, err := misc.ReadScriptFile(event.Name) diff --git a/config/configStructs/scriptingConfig.go b/config/configStructs/scriptingConfig.go index 6cf58c5a2..fbeeed763 100644 --- a/config/configStructs/scriptingConfig.go +++ b/config/configStructs/scriptingConfig.go @@ -4,6 +4,7 @@ import ( "io/fs" "os" "path/filepath" + "strings" "github.com/kubeshark/kubeshark/misc" "github.com/rs/zerolog/log" @@ -33,6 +34,10 @@ func (config *ScriptingConfig) GetScripts() (scripts []*misc.Script, err error) var script *misc.Script path := filepath.Join(config.Source, f.Name()) + if !strings.HasSuffix(path, ".js") { + log.Info().Str("path", path).Msg("Skipping non-JS file") + continue + } script, err = misc.ReadScriptFile(path) if err != nil { return From b86f80ebd777fded011d122b43ee0576e7b7e64c Mon Sep 17 00:00:00 2001 From: Alon Girmonsky <1990761+alongir@users.noreply.github.com> Date: Tue, 8 Oct 2024 10:07:11 -0700 Subject: [PATCH 02/10] Tag Tracer as well as the other components upon a new release --- Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/Makefile b/Makefile index 4cfdb3b53..73d7b8f48 100644 --- a/Makefile +++ b/Makefile @@ -178,6 +178,7 @@ port-forward: release: @cd ../worker && git checkout master && git pull && git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags + @cd ../tracer && git checkout master && git pull && git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags @cd ../hub && git checkout master && git pull && git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags @cd ../front && git checkout master && git pull && git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags @cd ../kubeshark && git checkout master && git pull && sed -i 's/^version:.*/version: "$(VERSION)"/' helm-chart/Chart.yaml && make && make generate-helm-values && make generate-manifests From 1bd8f9b8c55a8299985b49e01c98a7846d52f7cd Mon Sep 17 00:00:00 2001 From: Alon Girmonsky <1990761+alongir@users.noreply.github.com> Date: Tue, 8 Oct 2024 10:43:42 -0700 Subject: [PATCH 03/10] Set reasonable `pcapdump` defaults. Storage is now at 10% of the Worker's allocated storage. 
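For quick reference, the resulting defaults expressed in Helm values form (mirroring the `pcapdump` block of the `values.yaml` change later in this series) look roughly like the sketch below; the inline comments reflect the previous defaults visible in the diffs, and operators can override any of these keys via a custom values file.

```yaml
pcapdump:
  enabled: true
  timeInterval: 1m   # rotation interval, down from 10m
  maxTime: 1h
  maxSize: 500MB     # per-Worker pcap storage cap, up from the earlier 10MB/50MB defaults
  pcapSrcDir: pcapdump
```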
--- config/configStructs/tapConfig.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/config/configStructs/tapConfig.go b/config/configStructs/tapConfig.go index b67ea3188..093a5d257 100644 --- a/config/configStructs/tapConfig.go +++ b/config/configStructs/tapConfig.go @@ -198,9 +198,9 @@ type MiscConfig struct { type PcapDumpConfig struct { PcapDumpEnabled bool `yaml:"enabled" json:"enabled" default:"true"` - PcapTimeInterval string `yaml:"timeInterval" json:"timeInterval" default:"10m"` + PcapTimeInterval string `yaml:"timeInterval" json:"timeInterval" default:"1m"` PcapMaxTime string `yaml:"maxTime" json:"maxTime" default:"1h"` - PcapMaxSize string `yaml:"maxSize" json:"maxSize" default:"10MB"` + PcapMaxSize string `yaml:"maxSize" json:"maxSize" default:"500MB"` PcapSrcDir string `yaml:"pcapSrcDir" json:"pcapSrcDir" default:"pcapdump"` } From 49755671f5c1e452a4a52fa1c7bc9c93cc895bbc Mon Sep 17 00:00:00 2001 From: Alon Girmonsky <1990761+alongir@users.noreply.github.com> Date: Tue, 8 Oct 2024 18:37:29 -0700 Subject: [PATCH 04/10] Added some error and info log lines --- cmd/pcapDumpRunner.go | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/cmd/pcapDumpRunner.go b/cmd/pcapDumpRunner.go index 619b8c916..684f8a14e 100644 --- a/cmd/pcapDumpRunner.go +++ b/cmd/pcapDumpRunner.go @@ -67,12 +67,14 @@ func listFilesInPodDir(ctx context.Context, clientset *clientk8s.Clientset, conf // Check if the source directory exists in the ConfigMap srcDir, ok := configMap.Data[configMapKey] if !ok || srcDir == "" { + log.Error().Msgf("source directory not found in ConfigMap %s in namespace %s", configMapName, namespace) continue } // Attempt to get the pod in the current namespace pod, err := clientset.CoreV1().Pods(namespace).Get(ctx, podName, metav1.GetOptions{}) if err != nil { + log.Error().Err(err).Msgf("failed to get pod %s in namespace %s", podName, namespace) continue } @@ -93,6 +95,7 @@ func listFilesInPodDir(ctx context.Context, clientset *clientk8s.Clientset, conf exec, err := remotecommand.NewSPDYExecutor(config, "POST", req.URL()) if err != nil { + log.Error().Err(err).Msgf("failed to initialize executor for pod %s in namespace %s", podName, namespace) continue } @@ -105,6 +108,7 @@ func listFilesInPodDir(ctx context.Context, clientset *clientk8s.Clientset, conf Stderr: &stderrBuf, }) if err != nil { + log.Error().Err(err).Msgf("error listing files in pod %s in namespace %s: %s", podName, namespace, stderrBuf.String()) continue } @@ -195,17 +199,19 @@ func mergePCAPs(outputFile string, inputFiles []string) error { } for _, inputFile := range inputFiles { + log.Info().Msgf("Merging %s int %s", inputFile, outputFile) // Open each input file file, err := os.Open(inputFile) if err != nil { - return err + log.Error().Err(err).Msgf("Failed to open %v", inputFile) + continue } defer file.Close() reader, err := pcapgo.NewReader(file) if err != nil { log.Error().Err(err).Msgf("Failed to create pcapng reader for %v", file.Name()) - return err + continue } // Create the packet source @@ -214,7 +220,8 @@ func mergePCAPs(outputFile string, inputFiles []string) error { for packet := range packetSource.Packets() { err := writer.WritePacket(packet.Metadata().CaptureInfo, packet.Data()) if err != nil { - return err + log.Error().Err(err).Msgf("Failed to write packet to %v", outputFile) + continue } } } @@ -228,6 +235,7 @@ func setPcapConfigInKubernetes(ctx context.Context, clientset *clientk8s.Clients // Load the existing ConfigMap in the current namespace 
configMap, err := clientset.CoreV1().ConfigMaps(namespace).Get(ctx, "kubeshark-config-map", metav1.GetOptions{}) if err != nil { + log.Error().Err(err).Msgf("failed to get ConfigMap in namespace %s", namespace) continue } @@ -240,6 +248,7 @@ func setPcapConfigInKubernetes(ctx context.Context, clientset *clientk8s.Clients // Apply the updated ConfigMap back to the cluster in the current namespace _, err = clientset.CoreV1().ConfigMaps(namespace).Update(ctx, configMap, metav1.UpdateOptions{}) if err != nil { + log.Error().Err(err).Msgf("failed to update ConfigMap in namespace %s", namespace) continue } } @@ -311,6 +320,8 @@ func copyPcapFiles(clientset *kubernetes.Clientset, config *rest.Config, destDir err = copyFileFromPod(context.Background(), clientset, config, pod.Name, nsFiles.Namespace, nsFiles.SrcDir, file, destFile) if err != nil { log.Error().Err(err).Msgf("Error copying file from pod %s in namespace %s", pod.Name, nsFiles.Namespace) + } else { + log.Info().Msgf("Copied %s from %s to %s", file, pod.Name, destFile) } currentFiles = append(currentFiles, destFile) From 1926067bd928c2acfc875542d6ce4e418e7e95d8 Mon Sep 17 00:00:00 2001 From: Alon Girmonsky <1990761+alongir@users.noreply.github.com> Date: Wed, 9 Oct 2024 21:46:06 -0700 Subject: [PATCH 05/10] :bookmark: Bump the Helm chart version to 72.3.83 --- Makefile | 2 +- helm-chart/Chart.yaml | 2 +- helm-chart/values.yaml | 5 +- manifests/complete.yaml | 113 +++++++++++++++++++++------------------- 4 files changed, 65 insertions(+), 57 deletions(-) diff --git a/Makefile b/Makefile index 73d7b8f48..c24b4ae0f 100644 --- a/Makefile +++ b/Makefile @@ -183,7 +183,7 @@ release: @cd ../front && git checkout master && git pull && git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags @cd ../kubeshark && git checkout master && git pull && sed -i 's/^version:.*/version: "$(VERSION)"/' helm-chart/Chart.yaml && make && make generate-helm-values && make generate-manifests @git add -A . && git commit -m ":bookmark: Bump the Helm chart version to $(VERSION)" && git push - @git tag v$(VERSION) && git push origin --tags + @git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags @cd helm-chart && cp -r . ../../kubeshark.github.io/charts/chart @cd ../../kubeshark.github.io/ && git add -A . 
&& git commit -m ":sparkles: Update the Helm chart" && git push @cd ../kubeshark diff --git a/helm-chart/Chart.yaml b/helm-chart/Chart.yaml index 90dc1b0a7..d1e433b69 100644 --- a/helm-chart/Chart.yaml +++ b/helm-chart/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v2 name: kubeshark -version: "52.3.82" +version: "72.3.83" description: The API Traffic Analyzer for Kubernetes home: https://kubeshark.co keywords: diff --git a/helm-chart/values.yaml b/helm-chart/values.yaml index c4629c05a..31496e748 100644 --- a/helm-chart/values.yaml +++ b/helm-chart/values.yaml @@ -1,3 +1,4 @@ +# find a detailed description here: https://github.com/kubeshark/kubeshark/blob/master/helm-chart/README.md tap: docker: registry: docker.io/kubeshark @@ -159,9 +160,9 @@ logs: grep: "" pcapdump: enabled: true - timeInterval: 10m + timeInterval: 1m maxTime: 1h - maxSize: 50MB + maxSize: 500MB pcapSrcDir: pcapdump kube: configPath: "" diff --git a/manifests/complete.yaml b/manifests/complete.yaml index 4cf7ee175..8311ba10f 100644 --- a/manifests/complete.yaml +++ b/manifests/complete.yaml @@ -4,10 +4,10 @@ apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: labels: - helm.sh/chart: kubeshark-52.3.82 + helm.sh/chart: kubeshark-72.3.83 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "52.3.82" + app.kubernetes.io/version: "72.3.83" app.kubernetes.io/managed-by: Helm annotations: name: kubeshark-hub-network-policy @@ -31,10 +31,10 @@ apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: labels: - helm.sh/chart: kubeshark-52.3.82 + helm.sh/chart: kubeshark-72.3.83 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "52.3.82" + app.kubernetes.io/version: "72.3.83" app.kubernetes.io/managed-by: Helm annotations: name: kubeshark-front-network-policy @@ -58,10 +58,10 @@ apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: labels: - helm.sh/chart: kubeshark-52.3.82 + helm.sh/chart: kubeshark-72.3.83 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "52.3.82" + app.kubernetes.io/version: "72.3.83" app.kubernetes.io/managed-by: Helm annotations: name: kubeshark-worker-network-policy @@ -87,10 +87,10 @@ apiVersion: v1 kind: ServiceAccount metadata: labels: - helm.sh/chart: kubeshark-52.3.82 + helm.sh/chart: kubeshark-72.3.83 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "52.3.82" + app.kubernetes.io/version: "72.3.83" app.kubernetes.io/managed-by: Helm annotations: name: kubeshark-service-account @@ -104,10 +104,10 @@ metadata: namespace: default labels: app.kubeshark.co/app: hub - helm.sh/chart: kubeshark-52.3.82 + helm.sh/chart: kubeshark-72.3.83 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "52.3.82" + app.kubernetes.io/version: "72.3.83" app.kubernetes.io/managed-by: Helm stringData: LICENSE: '' @@ -121,10 +121,10 @@ metadata: namespace: default labels: app.kubeshark.co/app: hub - helm.sh/chart: kubeshark-52.3.82 + helm.sh/chart: kubeshark-72.3.83 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "52.3.82" + app.kubernetes.io/version: "72.3.83" app.kubernetes.io/managed-by: Helm stringData: AUTH_SAML_X509_CRT: | @@ -137,10 +137,10 @@ metadata: namespace: default labels: app.kubeshark.co/app: hub - helm.sh/chart: kubeshark-52.3.82 + helm.sh/chart: kubeshark-72.3.83 app.kubernetes.io/name: 
kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "52.3.82" + app.kubernetes.io/version: "72.3.83" app.kubernetes.io/managed-by: Helm stringData: AUTH_SAML_X509_KEY: | @@ -152,10 +152,10 @@ metadata: name: kubeshark-nginx-config-map namespace: default labels: - helm.sh/chart: kubeshark-52.3.82 + helm.sh/chart: kubeshark-72.3.83 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "52.3.82" + app.kubernetes.io/version: "72.3.83" app.kubernetes.io/managed-by: Helm data: default.conf: | @@ -216,10 +216,10 @@ metadata: namespace: default labels: app.kubeshark.co/app: hub - helm.sh/chart: kubeshark-52.3.82 + helm.sh/chart: kubeshark-72.3.83 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "52.3.82" + app.kubernetes.io/version: "72.3.83" app.kubernetes.io/managed-by: Helm data: POD_REGEX: '.*' @@ -239,9 +239,11 @@ data: TELEMETRY_DISABLED: 'false' SCRIPTING_DISABLED: '' TARGETED_PODS_UPDATE_DISABLED: '' + PRESET_FILTERS_CHANGING_ENABLED: '' RECORDING_DISABLED: '' STOP_TRAFFIC_CAPTURING_DISABLED: 'false' GLOBAL_FILTER: "" + DEFAULT_FILTER: "!dns and !tcp and !udp and !icmp" TRAFFIC_SAMPLE_RATE: '100' JSON_TTL: '5m' PCAP_TTL: '10s' @@ -249,19 +251,24 @@ data: TIMEZONE: ' ' CLOUD_LICENSE_ENABLED: 'true' DUPLICATE_TIMEFRAME: '200ms' - ENABLED_DISSECTORS: 'amqp,dns,http,icmp,kafka,redis,sctp,syscall,tcp,udp,ws' + ENABLED_DISSECTORS: 'amqp,dns,http,icmp,kafka,redis,sctp,syscall,tcp,udp,ws,tls' DISSECTORS_UPDATING_ENABLED: 'true' DETECT_DUPLICATES: 'false' + PCAP_DUMP_ENABLE: 'true' + PCAP_TIME_INTERVAL: '1m' + PCAP_MAX_TIME: '1h' + PCAP_MAX_SIZE: '500MB' + PCAP_SRC_DIR: 'pcapdump' --- # Source: kubeshark/templates/02-cluster-role.yaml apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: - helm.sh/chart: kubeshark-52.3.82 + helm.sh/chart: kubeshark-72.3.83 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "52.3.82" + app.kubernetes.io/version: "72.3.83" app.kubernetes.io/managed-by: Helm annotations: name: kubeshark-cluster-role-default @@ -295,10 +302,10 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: labels: - helm.sh/chart: kubeshark-52.3.82 + helm.sh/chart: kubeshark-72.3.83 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "52.3.82" + app.kubernetes.io/version: "72.3.83" app.kubernetes.io/managed-by: Helm annotations: name: kubeshark-cluster-role-binding-default @@ -317,10 +324,10 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: labels: - helm.sh/chart: kubeshark-52.3.82 + helm.sh/chart: kubeshark-72.3.83 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "52.3.82" + app.kubernetes.io/version: "72.3.83" app.kubernetes.io/managed-by: Helm annotations: name: kubeshark-self-config-role @@ -347,10 +354,10 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: labels: - helm.sh/chart: kubeshark-52.3.82 + helm.sh/chart: kubeshark-72.3.83 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "52.3.82" + app.kubernetes.io/version: "72.3.83" app.kubernetes.io/managed-by: Helm annotations: name: kubeshark-self-config-role-binding @@ -370,10 +377,10 @@ kind: Service metadata: labels: app.kubeshark.co/app: hub - helm.sh/chart: kubeshark-52.3.82 + helm.sh/chart: kubeshark-72.3.83 app.kubernetes.io/name: 
kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "52.3.82" + app.kubernetes.io/version: "72.3.83" app.kubernetes.io/managed-by: Helm annotations: name: kubeshark-hub @@ -392,10 +399,10 @@ apiVersion: v1 kind: Service metadata: labels: - helm.sh/chart: kubeshark-52.3.82 + helm.sh/chart: kubeshark-72.3.83 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "52.3.82" + app.kubernetes.io/version: "72.3.83" app.kubernetes.io/managed-by: Helm annotations: name: kubeshark-front @@ -414,10 +421,10 @@ kind: Service apiVersion: v1 metadata: labels: - helm.sh/chart: kubeshark-52.3.82 + helm.sh/chart: kubeshark-72.3.83 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "52.3.82" + app.kubernetes.io/version: "72.3.83" app.kubernetes.io/managed-by: Helm annotations: prometheus.io/scrape: 'true' @@ -427,10 +434,10 @@ metadata: spec: selector: app.kubeshark.co/app: worker - helm.sh/chart: kubeshark-52.3.82 + helm.sh/chart: kubeshark-72.3.83 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "52.3.82" + app.kubernetes.io/version: "72.3.83" app.kubernetes.io/managed-by: Helm ports: - name: metrics @@ -445,10 +452,10 @@ metadata: labels: app.kubeshark.co/app: worker sidecar.istio.io/inject: "false" - helm.sh/chart: kubeshark-52.3.82 + helm.sh/chart: kubeshark-72.3.83 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "52.3.82" + app.kubernetes.io/version: "72.3.83" app.kubernetes.io/managed-by: Helm annotations: name: kubeshark-worker-daemon-set @@ -463,10 +470,10 @@ spec: metadata: labels: app.kubeshark.co/app: worker - helm.sh/chart: kubeshark-52.3.82 + helm.sh/chart: kubeshark-72.3.83 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "52.3.82" + app.kubernetes.io/version: "72.3.83" app.kubernetes.io/managed-by: Helm name: kubeshark-worker-daemon-set namespace: kubeshark @@ -491,7 +498,7 @@ spec: - 'auto' - -staletimeout - '30' - image: 'docker.io/kubeshark/worker:v52.3.82' + image: 'docker.io/kubeshark/worker:v72.3.83' imagePullPolicy: Always name: sniffer ports: @@ -564,7 +571,7 @@ spec: - -procfs - /hostproc - -disable-ebpf - image: 'docker.io/kubeshark/worker:v52.3.82' + image: 'docker.io/kubeshark/worker:v72.3.83' imagePullPolicy: Always name: tracer env: @@ -660,10 +667,10 @@ kind: Deployment metadata: labels: app.kubeshark.co/app: hub - helm.sh/chart: kubeshark-52.3.82 + helm.sh/chart: kubeshark-72.3.83 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "52.3.82" + app.kubernetes.io/version: "72.3.83" app.kubernetes.io/managed-by: Helm annotations: name: kubeshark-hub @@ -679,10 +686,10 @@ spec: metadata: labels: app.kubeshark.co/app: hub - helm.sh/chart: kubeshark-52.3.82 + helm.sh/chart: kubeshark-72.3.83 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "52.3.82" + app.kubernetes.io/version: "72.3.83" app.kubernetes.io/managed-by: Helm spec: dnsPolicy: ClusterFirstWithHostNet @@ -710,7 +717,7 @@ spec: value: 'https://api.kubeshark.co' - name: PROFILING_ENABLED value: 'false' - image: 'docker.io/kubeshark/hub:v52.3.82' + image: 'docker.io/kubeshark/hub:v72.3.83' imagePullPolicy: Always readinessProbe: periodSeconds: 1 @@ -758,10 +765,10 @@ kind: Deployment metadata: labels: app.kubeshark.co/app: front - helm.sh/chart: 
kubeshark-52.3.82 + helm.sh/chart: kubeshark-72.3.83 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "52.3.82" + app.kubernetes.io/version: "72.3.83" app.kubernetes.io/managed-by: Helm annotations: name: kubeshark-front @@ -777,16 +784,14 @@ spec: metadata: labels: app.kubeshark.co/app: front - helm.sh/chart: kubeshark-52.3.82 + helm.sh/chart: kubeshark-72.3.83 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "52.3.82" + app.kubernetes.io/version: "72.3.83" app.kubernetes.io/managed-by: Helm spec: containers: - env: - - name: REACT_APP_DEFAULT_FILTER - value: '!dns and !tcp and !udp and !icmp' - name: REACT_APP_AUTH_ENABLED value: 'true' - name: REACT_APP_AUTH_TYPE @@ -799,6 +804,8 @@ spec: value: 'false' - name: REACT_APP_TARGETED_PODS_UPDATE_DISABLED value: 'false' + - name: REACT_APP_PRESET_FILTERS_CHANGING_ENABLED + value: 'false' - name: REACT_APP_BPF_OVERRIDE_DISABLED value: 'false' - name: REACT_APP_RECORDING_DISABLED @@ -815,7 +822,7 @@ spec: value: 'false' - name: REACT_APP_SENTRY_ENVIRONMENT value: 'production' - image: 'docker.io/kubeshark/front:v52.3.82' + image: 'docker.io/kubeshark/front:v72.3.83' imagePullPolicy: Always name: kubeshark-front livenessProbe: From b7b0e3dcee1c298b6b930d5736384a6940266748 Mon Sep 17 00:00:00 2001 From: Alon Girmonsky <1990761+alongir@users.noreply.github.com> Date: Wed, 9 Oct 2024 21:57:05 -0700 Subject: [PATCH 06/10] :bookmark: Bump the Helm chart version to 52.3.83 --- helm-chart/Chart.yaml | 2 +- manifests/complete.yaml | 100 ++++++++++++++++++++-------------------- 2 files changed, 51 insertions(+), 51 deletions(-) diff --git a/helm-chart/Chart.yaml b/helm-chart/Chart.yaml index d1e433b69..a5921e338 100644 --- a/helm-chart/Chart.yaml +++ b/helm-chart/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v2 name: kubeshark -version: "72.3.83" +version: "52.3.83" description: The API Traffic Analyzer for Kubernetes home: https://kubeshark.co keywords: diff --git a/manifests/complete.yaml b/manifests/complete.yaml index 8311ba10f..6f2fb6040 100644 --- a/manifests/complete.yaml +++ b/manifests/complete.yaml @@ -4,10 +4,10 @@ apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: labels: - helm.sh/chart: kubeshark-72.3.83 + helm.sh/chart: kubeshark-52.3.83 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "72.3.83" + app.kubernetes.io/version: "52.3.83" app.kubernetes.io/managed-by: Helm annotations: name: kubeshark-hub-network-policy @@ -31,10 +31,10 @@ apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: labels: - helm.sh/chart: kubeshark-72.3.83 + helm.sh/chart: kubeshark-52.3.83 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "72.3.83" + app.kubernetes.io/version: "52.3.83" app.kubernetes.io/managed-by: Helm annotations: name: kubeshark-front-network-policy @@ -58,10 +58,10 @@ apiVersion: networking.k8s.io/v1 kind: NetworkPolicy metadata: labels: - helm.sh/chart: kubeshark-72.3.83 + helm.sh/chart: kubeshark-52.3.83 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "72.3.83" + app.kubernetes.io/version: "52.3.83" app.kubernetes.io/managed-by: Helm annotations: name: kubeshark-worker-network-policy @@ -87,10 +87,10 @@ apiVersion: v1 kind: ServiceAccount metadata: labels: - helm.sh/chart: kubeshark-72.3.83 + helm.sh/chart: kubeshark-52.3.83 app.kubernetes.io/name: kubeshark 
app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "72.3.83" + app.kubernetes.io/version: "52.3.83" app.kubernetes.io/managed-by: Helm annotations: name: kubeshark-service-account @@ -104,10 +104,10 @@ metadata: namespace: default labels: app.kubeshark.co/app: hub - helm.sh/chart: kubeshark-72.3.83 + helm.sh/chart: kubeshark-52.3.83 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "72.3.83" + app.kubernetes.io/version: "52.3.83" app.kubernetes.io/managed-by: Helm stringData: LICENSE: '' @@ -121,10 +121,10 @@ metadata: namespace: default labels: app.kubeshark.co/app: hub - helm.sh/chart: kubeshark-72.3.83 + helm.sh/chart: kubeshark-52.3.83 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "72.3.83" + app.kubernetes.io/version: "52.3.83" app.kubernetes.io/managed-by: Helm stringData: AUTH_SAML_X509_CRT: | @@ -137,10 +137,10 @@ metadata: namespace: default labels: app.kubeshark.co/app: hub - helm.sh/chart: kubeshark-72.3.83 + helm.sh/chart: kubeshark-52.3.83 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "72.3.83" + app.kubernetes.io/version: "52.3.83" app.kubernetes.io/managed-by: Helm stringData: AUTH_SAML_X509_KEY: | @@ -152,10 +152,10 @@ metadata: name: kubeshark-nginx-config-map namespace: default labels: - helm.sh/chart: kubeshark-72.3.83 + helm.sh/chart: kubeshark-52.3.83 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "72.3.83" + app.kubernetes.io/version: "52.3.83" app.kubernetes.io/managed-by: Helm data: default.conf: | @@ -216,10 +216,10 @@ metadata: namespace: default labels: app.kubeshark.co/app: hub - helm.sh/chart: kubeshark-72.3.83 + helm.sh/chart: kubeshark-52.3.83 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "72.3.83" + app.kubernetes.io/version: "52.3.83" app.kubernetes.io/managed-by: Helm data: POD_REGEX: '.*' @@ -265,10 +265,10 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: - helm.sh/chart: kubeshark-72.3.83 + helm.sh/chart: kubeshark-52.3.83 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "72.3.83" + app.kubernetes.io/version: "52.3.83" app.kubernetes.io/managed-by: Helm annotations: name: kubeshark-cluster-role-default @@ -302,10 +302,10 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: labels: - helm.sh/chart: kubeshark-72.3.83 + helm.sh/chart: kubeshark-52.3.83 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "72.3.83" + app.kubernetes.io/version: "52.3.83" app.kubernetes.io/managed-by: Helm annotations: name: kubeshark-cluster-role-binding-default @@ -324,10 +324,10 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: labels: - helm.sh/chart: kubeshark-72.3.83 + helm.sh/chart: kubeshark-52.3.83 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "72.3.83" + app.kubernetes.io/version: "52.3.83" app.kubernetes.io/managed-by: Helm annotations: name: kubeshark-self-config-role @@ -354,10 +354,10 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: labels: - helm.sh/chart: kubeshark-72.3.83 + helm.sh/chart: kubeshark-52.3.83 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "72.3.83" + 
app.kubernetes.io/version: "52.3.83" app.kubernetes.io/managed-by: Helm annotations: name: kubeshark-self-config-role-binding @@ -377,10 +377,10 @@ kind: Service metadata: labels: app.kubeshark.co/app: hub - helm.sh/chart: kubeshark-72.3.83 + helm.sh/chart: kubeshark-52.3.83 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "72.3.83" + app.kubernetes.io/version: "52.3.83" app.kubernetes.io/managed-by: Helm annotations: name: kubeshark-hub @@ -399,10 +399,10 @@ apiVersion: v1 kind: Service metadata: labels: - helm.sh/chart: kubeshark-72.3.83 + helm.sh/chart: kubeshark-52.3.83 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "72.3.83" + app.kubernetes.io/version: "52.3.83" app.kubernetes.io/managed-by: Helm annotations: name: kubeshark-front @@ -421,10 +421,10 @@ kind: Service apiVersion: v1 metadata: labels: - helm.sh/chart: kubeshark-72.3.83 + helm.sh/chart: kubeshark-52.3.83 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "72.3.83" + app.kubernetes.io/version: "52.3.83" app.kubernetes.io/managed-by: Helm annotations: prometheus.io/scrape: 'true' @@ -434,10 +434,10 @@ metadata: spec: selector: app.kubeshark.co/app: worker - helm.sh/chart: kubeshark-72.3.83 + helm.sh/chart: kubeshark-52.3.83 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "72.3.83" + app.kubernetes.io/version: "52.3.83" app.kubernetes.io/managed-by: Helm ports: - name: metrics @@ -452,10 +452,10 @@ metadata: labels: app.kubeshark.co/app: worker sidecar.istio.io/inject: "false" - helm.sh/chart: kubeshark-72.3.83 + helm.sh/chart: kubeshark-52.3.83 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "72.3.83" + app.kubernetes.io/version: "52.3.83" app.kubernetes.io/managed-by: Helm annotations: name: kubeshark-worker-daemon-set @@ -470,10 +470,10 @@ spec: metadata: labels: app.kubeshark.co/app: worker - helm.sh/chart: kubeshark-72.3.83 + helm.sh/chart: kubeshark-52.3.83 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "72.3.83" + app.kubernetes.io/version: "52.3.83" app.kubernetes.io/managed-by: Helm name: kubeshark-worker-daemon-set namespace: kubeshark @@ -498,7 +498,7 @@ spec: - 'auto' - -staletimeout - '30' - image: 'docker.io/kubeshark/worker:v72.3.83' + image: 'docker.io/kubeshark/worker:v52.3.83' imagePullPolicy: Always name: sniffer ports: @@ -571,7 +571,7 @@ spec: - -procfs - /hostproc - -disable-ebpf - image: 'docker.io/kubeshark/worker:v72.3.83' + image: 'docker.io/kubeshark/worker:v52.3.83' imagePullPolicy: Always name: tracer env: @@ -667,10 +667,10 @@ kind: Deployment metadata: labels: app.kubeshark.co/app: hub - helm.sh/chart: kubeshark-72.3.83 + helm.sh/chart: kubeshark-52.3.83 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "72.3.83" + app.kubernetes.io/version: "52.3.83" app.kubernetes.io/managed-by: Helm annotations: name: kubeshark-hub @@ -686,10 +686,10 @@ spec: metadata: labels: app.kubeshark.co/app: hub - helm.sh/chart: kubeshark-72.3.83 + helm.sh/chart: kubeshark-52.3.83 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "72.3.83" + app.kubernetes.io/version: "52.3.83" app.kubernetes.io/managed-by: Helm spec: dnsPolicy: ClusterFirstWithHostNet @@ -717,7 +717,7 @@ spec: value: 
'https://api.kubeshark.co' - name: PROFILING_ENABLED value: 'false' - image: 'docker.io/kubeshark/hub:v72.3.83' + image: 'docker.io/kubeshark/hub:v52.3.83' imagePullPolicy: Always readinessProbe: periodSeconds: 1 @@ -765,10 +765,10 @@ kind: Deployment metadata: labels: app.kubeshark.co/app: front - helm.sh/chart: kubeshark-72.3.83 + helm.sh/chart: kubeshark-52.3.83 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "72.3.83" + app.kubernetes.io/version: "52.3.83" app.kubernetes.io/managed-by: Helm annotations: name: kubeshark-front @@ -784,10 +784,10 @@ spec: metadata: labels: app.kubeshark.co/app: front - helm.sh/chart: kubeshark-72.3.83 + helm.sh/chart: kubeshark-52.3.83 app.kubernetes.io/name: kubeshark app.kubernetes.io/instance: kubeshark - app.kubernetes.io/version: "72.3.83" + app.kubernetes.io/version: "52.3.83" app.kubernetes.io/managed-by: Helm spec: containers: @@ -822,7 +822,7 @@ spec: value: 'false' - name: REACT_APP_SENTRY_ENVIRONMENT value: 'production' - image: 'docker.io/kubeshark/front:v72.3.83' + image: 'docker.io/kubeshark/front:v52.3.83' imagePullPolicy: Always name: kubeshark-front livenessProbe: From ecc577ccc8abb91ca387dd941d305eb981edf9f7 Mon Sep 17 00:00:00 2001 From: Alon Girmonsky <1990761+alongir@users.noreply.github.com> Date: Thu, 10 Oct 2024 22:15:00 -0700 Subject: [PATCH 07/10] Improved the `console` command made it resilient to Websocket breaks and redeployment. --- cmd/console.go | 109 +++++++++++++++++++++++++------------------------ 1 file changed, 56 insertions(+), 53 deletions(-) diff --git a/cmd/console.go b/cmd/console.go index 16f4bb427..13d56e5d7 100644 --- a/cmd/console.go +++ b/cmd/console.go @@ -42,68 +42,71 @@ func init() { } func runConsole() { - hubUrl := kubernetes.GetHubUrl() - response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl)) - if err != nil || response.StatusCode != 200 { - log.Info().Msg(fmt.Sprintf(utils.Yellow, "Couldn't connect to Hub. Establishing proxy...")) - runProxy(false, true) - } - - interrupt := make(chan os.Signal, 1) - signal.Notify(interrupt, os.Interrupt) - - log.Info().Str("host", config.Config.Tap.Proxy.Host).Str("url", hubUrl).Msg("Connecting to:") - u := url.URL{ - Scheme: "ws", - Host: fmt.Sprintf("%s:%d", config.Config.Tap.Proxy.Host, config.Config.Tap.Proxy.Front.Port), - Path: "/api/scripts/logs", - } - headers := http.Header{} - headers.Set(utils.X_KUBESHARK_CAPTURE_HEADER_KEY, utils.X_KUBESHARK_CAPTURE_HEADER_IGNORE_VALUE) - headers.Set("License-Key", config.Config.License) - - c, _, err := websocket.DefaultDialer.Dial(u.String(), headers) - if err != nil { - log.Error().Err(err).Send() - return - } - defer c.Close() - - done := make(chan struct{}) - - go func() { - defer close(done) - for { - _, message, err := c.ReadMessage() - if err != nil { - log.Error().Err(err).Send() - return - } - - msg := string(message) - if strings.Contains(msg, ":ERROR]") { - msg = fmt.Sprintf(utils.Red, msg) - fmt.Fprintln(os.Stderr, msg) - } else { - fmt.Fprintln(os.Stdout, msg) - } - } - }() - - ticker := time.NewTicker(time.Second) - defer ticker.Stop() - for { + hubUrl := kubernetes.GetHubUrl() + response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl)) + if err != nil || response.StatusCode != 200 { + log.Info().Msg(fmt.Sprintf(utils.Yellow, "Couldn't connect to Hub. 
Establishing proxy...")) + runProxy(false, true) + } + + interrupt := make(chan os.Signal, 1) + signal.Notify(interrupt, os.Interrupt) + + log.Info().Str("host", config.Config.Tap.Proxy.Host).Str("url", hubUrl).Msg("Connecting to:") + u := url.URL{ + Scheme: "ws", + Host: fmt.Sprintf("%s:%d", config.Config.Tap.Proxy.Host, config.Config.Tap.Proxy.Front.Port), + Path: "/api/scripts/logs", + } + headers := http.Header{} + headers.Set(utils.X_KUBESHARK_CAPTURE_HEADER_KEY, utils.X_KUBESHARK_CAPTURE_HEADER_IGNORE_VALUE) + headers.Set("License-Key", config.Config.License) + + c, _, err := websocket.DefaultDialer.Dial(u.String(), headers) + if err != nil { + log.Error().Err(err).Msg("Websocket dial error, retrying in 5 seconds...") + time.Sleep(5 * time.Second) // Delay before retrying + continue + } + defer c.Close() + + done := make(chan struct{}) + + go func() { + defer close(done) + for { + _, message, err := c.ReadMessage() + if err != nil { + log.Error().Err(err).Msg("Error reading websocket message, reconnecting...") + break // Break to reconnect + } + + msg := string(message) + if strings.Contains(msg, ":ERROR]") { + msg = fmt.Sprintf(utils.Red, msg) + fmt.Fprintln(os.Stderr, msg) + } else { + fmt.Fprintln(os.Stdout, msg) + } + } + }() + + ticker := time.NewTicker(time.Second) + defer ticker.Stop() + select { case <-done: - return + log.Warn().Msg(fmt.Sprintf(utils.Yellow, "Connection closed, reconnecting...")) + time.Sleep(5 * time.Second) // Delay before reconnecting + continue // Reconnect after error case <-interrupt: log.Warn().Msg(fmt.Sprintf(utils.Yellow, "Received interrupt, exiting...")) err := c.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")) if err != nil { log.Error().Err(err).Send() - return + continue } select { From 41f36ba9c29b8764469a85264448e545de3930eb Mon Sep 17 00:00:00 2001 From: Alon Girmonsky <1990761+alongir@users.noreply.github.com> Date: Fri, 11 Oct 2024 13:06:02 -0700 Subject: [PATCH 08/10] Added the scripting `console` command functionality to the `tap` command Added both the `scripting` and `console` commands to the `proxy` command Added a `scripting.console`, a boolean value indicating whether the `console` functionality should be part of the `tap` and `proxy` commands --- cmd/console.go | 39 +++++++++++++++++++++---- cmd/proxyRunner.go | 7 +++++ cmd/tapRunner.go | 4 +++ config/configStructs/scriptingConfig.go | 1 + 4 files changed, 46 insertions(+), 5 deletions(-) diff --git a/cmd/console.go b/cmd/console.go index 13d56e5d7..b27ea0814 100644 --- a/cmd/console.go +++ b/cmd/console.go @@ -41,13 +41,15 @@ func init() { consoleCmd.Flags().StringP(configStructs.ReleaseNamespaceLabel, "s", defaultTapConfig.Release.Namespace, "Release namespace of Kubeshark") } -func runConsole() { +func runConsoleWithoutProxy() { + time.Sleep(5 * time.Second) for { hubUrl := kubernetes.GetHubUrl() response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl)) if err != nil || response.StatusCode != 200 { - log.Info().Msg(fmt.Sprintf(utils.Yellow, "Couldn't connect to Hub. 
Establishing proxy...")) - runProxy(false, true) + log.Info().Msg(fmt.Sprintf(utils.Yellow, "Couldn't connect to Hub.")) + time.Sleep(5 * time.Second) + continue } interrupt := make(chan os.Signal, 1) @@ -101,8 +103,6 @@ func runConsole() { time.Sleep(5 * time.Second) // Delay before reconnecting continue // Reconnect after error case <-interrupt: - log.Warn().Msg(fmt.Sprintf(utils.Yellow, "Received interrupt, exiting...")) - err := c.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")) if err != nil { log.Error().Err(err).Send() @@ -117,3 +117,32 @@ func runConsole() { } } } + +func runConsole() { + go runConsoleWithoutProxy() + for { + hubUrl := kubernetes.GetHubUrl() + response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl)) + if err != nil || response.StatusCode != 200 { + log.Info().Msg(fmt.Sprintf(utils.Yellow, "Couldn't connect to Hub. Establishing proxy...")) + runProxy(false, true) + } + + interrupt := make(chan os.Signal, 1) + signal.Notify(interrupt, os.Interrupt) + done := make(chan struct{}) + + ticker := time.NewTicker(time.Second) + defer ticker.Stop() + + select { // Reconnect after error + case <-interrupt: + log.Warn().Msg(fmt.Sprintf(utils.Yellow, "Received interrupt, exiting...")) + select { + case <-done: + case <-time.After(time.Second): + } + return + } + } +} diff --git a/cmd/proxyRunner.go b/cmd/proxyRunner.go index 8e685c69b..0b7804b92 100644 --- a/cmd/proxyRunner.go +++ b/cmd/proxyRunner.go @@ -92,10 +92,17 @@ func runProxy(block bool, noBrowser bool) { establishedProxy = true okToOpen("Kubeshark", frontUrl, noBrowser) } + if config.Config.Scripting.Source != "" && config.Config.Scripting.WatchScripts { + watchScripts(kubernetesProvider, false) + } + if config.Config.Scripting.Console { + go runConsoleWithoutProxy() + } if establishedProxy && block { utils.WaitForTermination(ctx, cancel) } + } func okToOpen(name string, url string, noBrowser bool) { diff --git a/cmd/tapRunner.go b/cmd/tapRunner.go index 7c6dd5313..7f2c0f684 100644 --- a/cmd/tapRunner.go +++ b/cmd/tapRunner.go @@ -427,6 +427,10 @@ func postFrontStarted(ctx context.Context, kubernetesProvider *kubernetes.Provid if config.Config.Scripting.Source != "" && config.Config.Scripting.WatchScripts { watchScripts(kubernetesProvider, false) } + + if config.Config.Scripting.Console { + go runConsoleWithoutProxy() + } } func updateConfig(kubernetesProvider *kubernetes.Provider) { diff --git a/config/configStructs/scriptingConfig.go b/config/configStructs/scriptingConfig.go index fbeeed763..9dd6265d8 100644 --- a/config/configStructs/scriptingConfig.go +++ b/config/configStructs/scriptingConfig.go @@ -14,6 +14,7 @@ type ScriptingConfig struct { Env map[string]interface{} `yaml:"env" json:"env" default:"{}"` Source string `yaml:"source" json:"source" default:""` WatchScripts bool `yaml:"watchScripts" json:"watchScripts" default:"true"` + Console bool `yaml:"console" json:"console" default:"true"` } func (config *ScriptingConfig) GetScripts() (scripts []*misc.Script, err error) { From a2e0e013e5c30e2b80686027e17c8401f0cd596f Mon Sep 17 00:00:00 2001 From: Alon Girmonsky <1990761+alongir@users.noreply.github.com> Date: Mon, 14 Oct 2024 08:28:44 -0700 Subject: [PATCH 09/10] Added log lines for verbosity --- cmd/console.go | 1 + 1 file changed, 1 insertion(+) diff --git a/cmd/console.go b/cmd/console.go index b27ea0814..4a8052f38 100644 --- a/cmd/console.go +++ b/cmd/console.go @@ -42,6 +42,7 @@ func init() { } func runConsoleWithoutProxy() { + 
log.Info().Msg("Starting scripting console ...") time.Sleep(5 * time.Second) for { hubUrl := kubernetes.GetHubUrl() From 99aff8d5131b5fbb9eb5acd2206b470f036e845a Mon Sep 17 00:00:00 2001 From: Alon Girmonsky <1990761+alongir@users.noreply.github.com> Date: Mon, 14 Oct 2024 14:00:34 -0700 Subject: [PATCH 10/10] fix lint issue --- cmd/console.go | 39 +++++++++++++++++++++++---------------- 1 file changed, 23 insertions(+), 16 deletions(-) diff --git a/cmd/console.go b/cmd/console.go index 4a8052f38..6bdee9a20 100644 --- a/cmd/console.go +++ b/cmd/console.go @@ -44,8 +44,10 @@ func init() { func runConsoleWithoutProxy() { log.Info().Msg("Starting scripting console ...") time.Sleep(5 * time.Second) + hubUrl := kubernetes.GetHubUrl() for { - hubUrl := kubernetes.GetHubUrl() + + // Attempt to connect to the Hub every second response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl)) if err != nil || response.StatusCode != 200 { log.Info().Msg(fmt.Sprintf(utils.Yellow, "Couldn't connect to Hub.")) @@ -121,29 +123,34 @@ func runConsoleWithoutProxy() { func runConsole() { go runConsoleWithoutProxy() + + // Create interrupt channel and setup signal handling once + interrupt := make(chan os.Signal, 1) + signal.Notify(interrupt, os.Interrupt) + done := make(chan struct{}) + + ticker := time.NewTicker(5 * time.Second) + defer ticker.Stop() + for { - hubUrl := kubernetes.GetHubUrl() - response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl)) - if err != nil || response.StatusCode != 200 { - log.Info().Msg(fmt.Sprintf(utils.Yellow, "Couldn't connect to Hub. Establishing proxy...")) - runProxy(false, true) - } - - interrupt := make(chan os.Signal, 1) - signal.Notify(interrupt, os.Interrupt) - done := make(chan struct{}) - - ticker := time.NewTicker(time.Second) - defer ticker.Stop() - - select { // Reconnect after error + select { case <-interrupt: + // Handle interrupt and exit gracefully log.Warn().Msg(fmt.Sprintf(utils.Yellow, "Received interrupt, exiting...")) select { case <-done: case <-time.After(time.Second): } return + + case <-ticker.C: + // Attempt to connect to the Hub every second + hubUrl := kubernetes.GetHubUrl() + response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl)) + if err != nil || response.StatusCode != 200 { + log.Info().Msg(fmt.Sprintf(utils.Yellow, "Couldn't connect to Hub. Establishing proxy...")) + runProxy(false, true) + } } } }