Merge branch 'master' into service-map-new-ui-enabled-flag

Alon Girmonsky 2024-10-15 09:38:48 -07:00 committed by GitHub
commit ac4d8ce0a2
11 changed files with 199 additions and 117 deletions

View File

@ -178,11 +178,12 @@ port-forward:
release:
@cd ../worker && git checkout master && git pull && git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags
@cd ../tracer && git checkout master && git pull && git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags
@cd ../hub && git checkout master && git pull && git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags
@cd ../front && git checkout master && git pull && git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags
@cd ../kubeshark && git checkout master && git pull && sed -i 's/^version:.*/version: "$(VERSION)"/' helm-chart/Chart.yaml && make && make generate-helm-values && make generate-manifests
@git add -A . && git commit -m ":bookmark: Bump the Helm chart version to $(VERSION)" && git push
@git tag v$(VERSION) && git push origin --tags
@git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags
@cd helm-chart && cp -r . ../../kubeshark.github.io/charts/chart
@cd ../../kubeshark.github.io/ && git add -A . && git commit -m ":sparkles: Update the Helm chart" && git push
@cd ../kubeshark
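
Note on the re-tagging idiom above: `git tag -d v$(VERSION); git tag v$(VERSION) && git push origin --tags` deletes any stale local tag before re-creating it, so re-running the release target for the same VERSION moves the tag rather than failing on a duplicate. The `;` (instead of `&&`) after the delete deliberately ignores its exit status for the first run, when the tag does not exist yet.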

View File

@ -41,12 +41,18 @@ func init() {
consoleCmd.Flags().StringP(configStructs.ReleaseNamespaceLabel, "s", defaultTapConfig.Release.Namespace, "Release namespace of Kubeshark")
}
func runConsole() {
func runConsoleWithoutProxy() {
log.Info().Msg("Starting scripting console ...")
time.Sleep(5 * time.Second)
hubUrl := kubernetes.GetHubUrl()
for {
// Attempt to connect to the Hub, retrying every 5 seconds on failure
response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl))
if err != nil || response.StatusCode != 200 {
log.Info().Msg(fmt.Sprintf(utils.Yellow, "Couldn't connect to Hub. Establishing proxy..."))
runProxy(false, true)
log.Info().Msg(fmt.Sprintf(utils.Yellow, "Couldn't connect to Hub."))
time.Sleep(5 * time.Second)
continue
}
interrupt := make(chan os.Signal, 1)
@ -64,8 +70,9 @@ func runConsole() {
c, _, err := websocket.DefaultDialer.Dial(u.String(), headers)
if err != nil {
log.Error().Err(err).Send()
return
log.Error().Err(err).Msg("Websocket dial error, retrying in 5 seconds...")
time.Sleep(5 * time.Second) // Delay before retrying
continue
}
defer c.Close()
@ -76,8 +83,8 @@ func runConsole() {
for {
_, message, err := c.ReadMessage()
if err != nil {
log.Error().Err(err).Send()
return
log.Error().Err(err).Msg("Error reading websocket message, reconnecting...")
break // Break to reconnect
}
msg := string(message)
@ -93,17 +100,16 @@ func runConsole() {
ticker := time.NewTicker(time.Second)
defer ticker.Stop()
for {
select {
case <-done:
return
log.Warn().Msg(fmt.Sprintf(utils.Yellow, "Connection closed, reconnecting..."))
time.Sleep(5 * time.Second) // Delay before reconnecting
continue // Reconnect after error
case <-interrupt:
log.Warn().Msg(fmt.Sprintf(utils.Yellow, "Received interrupt, exiting..."))
err := c.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""))
if err != nil {
log.Error().Err(err).Send()
return
continue
}
select {
@ -114,3 +120,37 @@ func runConsole() {
}
}
}
func runConsole() {
go runConsoleWithoutProxy()
// Create the interrupt channel and set up signal handling once
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
done := make(chan struct{})
ticker := time.NewTicker(5 * time.Second)
defer ticker.Stop()
for {
select {
case <-interrupt:
// Handle interrupt and exit gracefully
log.Warn().Msg(fmt.Sprintf(utils.Yellow, "Received interrupt, exiting..."))
select {
case <-done:
case <-time.After(time.Second):
}
return
case <-ticker.C:
// Attempt to connect to the Hub every 5 seconds
hubUrl := kubernetes.GetHubUrl()
response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl))
if err != nil || response.StatusCode != 200 {
log.Info().Msg(fmt.Sprintf(utils.Yellow, "Couldn't connect to Hub. Establishing proxy..."))
runProxy(false, true)
}
}
}
}
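
Taken together, the console changes replace the old fail-fast returns with retry loops: probe the Hub's /echo endpoint, dial the websocket, read until the connection drops, then reconnect after a delay. Below is a minimal sketch of that pattern, assuming github.com/gorilla/websocket and zerolog as the diff appears to use; the URLs are placeholders (the real code resolves them via kubernetes.GetHubUrl()), and the interrupt/signal handling is omitted to keep the retry behavior in focus:

package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/gorilla/websocket"
	"github.com/rs/zerolog/log"
)

// consoleLoop mirrors the reconnect structure of runConsoleWithoutProxy:
// every failure path logs, sleeps, and loops instead of returning.
func consoleLoop(hubURL, wsURL string) {
	for {
		// Probe the Hub; on failure, back off 5 seconds and retry.
		response, err := http.Get(fmt.Sprintf("%s/echo", hubURL))
		if err != nil || response.StatusCode != 200 {
			log.Info().Msg("Couldn't connect to Hub.")
			time.Sleep(5 * time.Second)
			continue
		}
		response.Body.Close()

		c, _, err := websocket.DefaultDialer.Dial(wsURL, nil)
		if err != nil {
			log.Error().Err(err).Msg("Websocket dial error, retrying in 5 seconds...")
			time.Sleep(5 * time.Second)
			continue
		}

		// Read until an error, then break out to reconnect.
		for {
			_, message, err := c.ReadMessage()
			if err != nil {
				log.Error().Err(err).Msg("Error reading websocket message, reconnecting...")
				break
			}
			fmt.Println(string(message))
		}
		c.Close()
	}
}

func main() {
	// Hypothetical local addresses, standing in for the proxy-resolved ones.
	consoleLoop("http://localhost:8899", "ws://localhost:8899/scripts/logs")
}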

View File

@ -67,12 +67,14 @@ func listFilesInPodDir(ctx context.Context, clientset *clientk8s.Clientset, conf
// Check if the source directory exists in the ConfigMap
srcDir, ok := configMap.Data[configMapKey]
if !ok || srcDir == "" {
log.Error().Msgf("source directory not found in ConfigMap %s in namespace %s", configMapName, namespace)
continue
}
// Attempt to get the pod in the current namespace
pod, err := clientset.CoreV1().Pods(namespace).Get(ctx, podName, metav1.GetOptions{})
if err != nil {
log.Error().Err(err).Msgf("failed to get pod %s in namespace %s", podName, namespace)
continue
}
@ -93,6 +95,7 @@ func listFilesInPodDir(ctx context.Context, clientset *clientk8s.Clientset, conf
exec, err := remotecommand.NewSPDYExecutor(config, "POST", req.URL())
if err != nil {
log.Error().Err(err).Msgf("failed to initialize executor for pod %s in namespace %s", podName, namespace)
continue
}
@ -105,6 +108,7 @@ func listFilesInPodDir(ctx context.Context, clientset *clientk8s.Clientset, conf
Stderr: &stderrBuf,
})
if err != nil {
log.Error().Err(err).Msgf("error listing files in pod %s in namespace %s: %s", podName, namespace, stderrBuf.String())
continue
}
@ -195,17 +199,19 @@ func mergePCAPs(outputFile string, inputFiles []string) error {
}
for _, inputFile := range inputFiles {
log.Info().Msgf("Merging %s int %s", inputFile, outputFile)
// Open each input file
file, err := os.Open(inputFile)
if err != nil {
return err
log.Error().Err(err).Msgf("Failed to open %v", inputFile)
continue
}
defer file.Close()
reader, err := pcapgo.NewReader(file)
if err != nil {
log.Error().Err(err).Msgf("Failed to create pcapng reader for %v", file.Name())
return err
continue
}
// Create the packet source
@ -214,7 +220,8 @@ func mergePCAPs(outputFile string, inputFiles []string) error {
for packet := range packetSource.Packets() {
err := writer.WritePacket(packet.Metadata().CaptureInfo, packet.Data())
if err != nil {
return err
log.Error().Err(err).Msgf("Failed to write packet to %v", outputFile)
continue
}
}
}
@ -228,6 +235,7 @@ func setPcapConfigInKubernetes(ctx context.Context, clientset *clientk8s.Clients
// Load the existing ConfigMap in the current namespace
configMap, err := clientset.CoreV1().ConfigMaps(namespace).Get(ctx, "kubeshark-config-map", metav1.GetOptions{})
if err != nil {
log.Error().Err(err).Msgf("failed to get ConfigMap in namespace %s", namespace)
continue
}
@ -240,6 +248,7 @@ func setPcapConfigInKubernetes(ctx context.Context, clientset *clientk8s.Clients
// Apply the updated ConfigMap back to the cluster in the current namespace
_, err = clientset.CoreV1().ConfigMaps(namespace).Update(ctx, configMap, metav1.UpdateOptions{})
if err != nil {
log.Error().Err(err).Msgf("failed to update ConfigMap in namespace %s", namespace)
continue
}
}
@ -311,6 +320,8 @@ func copyPcapFiles(clientset *kubernetes.Clientset, config *rest.Config, destDir
err = copyFileFromPod(context.Background(), clientset, config, pod.Name, nsFiles.Namespace, nsFiles.SrcDir, file, destFile)
if err != nil {
log.Error().Err(err).Msgf("Error copying file from pod %s in namespace %s", pod.Name, nsFiles.Namespace)
} else {
log.Info().Msgf("Copied %s from %s to %s", file, pod.Name, destFile)
}
currentFiles = append(currentFiles, destFile)
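
The mergePCAPs changes swap hard `return err`s for log-and-continue, so a single unreadable capture or packet no longer aborts the whole merge. Here is a sketch of that skip-on-error merge using the same gopacket/pcapgo calls the diff uses; the snap length and link type passed to WriteFileHeader are assumptions, since that setup is outside the hunk:

package main

import (
	"os"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
	"github.com/google/gopacket/pcapgo"
	"github.com/rs/zerolog/log"
)

// mergePCAPs appends packets from each input into outputFile, logging and
// skipping inputs or packets that fail instead of returning early.
func mergePCAPs(outputFile string, inputFiles []string) error {
	out, err := os.Create(outputFile)
	if err != nil {
		return err
	}
	defer out.Close()

	writer := pcapgo.NewWriter(out)
	// Assumed header values; the real setup is not shown in the hunk.
	if err := writer.WriteFileHeader(65536, layers.LinkTypeEthernet); err != nil {
		return err
	}

	for _, inputFile := range inputFiles {
		file, err := os.Open(inputFile)
		if err != nil {
			log.Error().Err(err).Msgf("Failed to open %v", inputFile)
			continue
		}
		reader, err := pcapgo.NewReader(file)
		if err != nil {
			log.Error().Err(err).Msgf("Failed to create pcap reader for %v", file.Name())
			file.Close()
			continue
		}
		packetSource := gopacket.NewPacketSource(reader, reader.LinkType())
		for packet := range packetSource.Packets() {
			if err := writer.WritePacket(packet.Metadata().CaptureInfo, packet.Data()); err != nil {
				log.Error().Err(err).Msgf("Failed to write packet to %v", outputFile)
			}
		}
		// Close per iteration rather than defer-in-loop, so inputs are not
		// all held open until the function returns.
		file.Close()
	}
	return nil
}

func main() {
	// Illustrative file names.
	if err := mergePCAPs("merged.pcap", []string{"a.pcap", "b.pcap"}); err != nil {
		log.Fatal().Err(err).Send()
	}
}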

View File

@ -92,10 +92,17 @@ func runProxy(block bool, noBrowser bool) {
establishedProxy = true
okToOpen("Kubeshark", frontUrl, noBrowser)
}
if config.Config.Scripting.Source != "" && config.Config.Scripting.WatchScripts {
watchScripts(kubernetesProvider, false)
}
if config.Config.Scripting.Console {
go runConsoleWithoutProxy()
}
if establishedProxy && block {
utils.WaitForTermination(ctx, cancel)
}
}
func okToOpen(name string, url string, noBrowser bool) {

View File

@ -3,6 +3,7 @@ package cmd
import (
"context"
"encoding/json"
"strings"
"github.com/creasty/defaults"
"github.com/fsnotify/fsnotify"
@ -168,6 +169,10 @@ func watchScripts(provider *kubernetes.Provider, block bool) {
select {
// watch for events
case event := <-watcher.Events:
if !strings.HasSuffix(event.Name, ".js") {
log.Info().Str("file", event.Name).Msg("Ignoring file")
continue
}
switch event.Op {
case fsnotify.Create:
script, err := misc.ReadScriptFile(event.Name)
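
The watcher now filters events by filename before dispatching on the operation, so editor temp files and other non-JavaScript artifacts in the scripts directory are ignored. A standalone sketch of that filter with fsnotify follows (the directory argument is illustrative, and the suffix check uses the stricter ".js"):

package main

import (
	"strings"

	"github.com/fsnotify/fsnotify"
	"github.com/rs/zerolog/log"
)

// watchJSScripts reacts only to events on .js files under dir.
func watchJSScripts(dir string) error {
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		return err
	}
	defer watcher.Close()

	if err := watcher.Add(dir); err != nil {
		return err
	}

	for {
		select {
		case event, ok := <-watcher.Events:
			if !ok {
				return nil
			}
			// Skip anything that is not a JavaScript source file.
			if !strings.HasSuffix(event.Name, ".js") {
				log.Info().Str("file", event.Name).Msg("Ignoring file")
				continue
			}
			if event.Op&fsnotify.Create == fsnotify.Create {
				log.Info().Str("file", event.Name).Msg("New script detected")
			}
		case err, ok := <-watcher.Errors:
			if !ok {
				return nil
			}
			log.Error().Err(err).Send()
		}
	}
}

func main() {
	// Illustrative path; Kubeshark uses config.Config.Scripting.Source.
	if err := watchJSScripts("./scripts"); err != nil {
		log.Fatal().Err(err).Send()
	}
}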

View File

@ -427,6 +427,10 @@ func postFrontStarted(ctx context.Context, kubernetesProvider *kubernetes.Provid
if config.Config.Scripting.Source != "" && config.Config.Scripting.WatchScripts {
watchScripts(kubernetesProvider, false)
}
if config.Config.Scripting.Console {
go runConsoleWithoutProxy()
}
}
func updateConfig(kubernetesProvider *kubernetes.Provider) {
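
With the new Scripting.Console flag (default true, added to ScriptingConfig below), both runProxy and postFrontStarted now start the scripting console in a goroutine via runConsoleWithoutProxy(), so the console attaches to the already-established proxy instead of spawning its own.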

View File

@ -4,6 +4,7 @@ import (
"io/fs"
"os"
"path/filepath"
"strings"
"github.com/kubeshark/kubeshark/misc"
"github.com/rs/zerolog/log"
@ -13,6 +14,7 @@ type ScriptingConfig struct {
Env map[string]interface{} `yaml:"env" json:"env" default:"{}"`
Source string `yaml:"source" json:"source" default:""`
WatchScripts bool `yaml:"watchScripts" json:"watchScripts" default:"true"`
Console bool `yaml:"console" json:"console" default:"true"`
}
func (config *ScriptingConfig) GetScripts() (scripts []*misc.Script, err error) {
@ -33,6 +35,10 @@ func (config *ScriptingConfig) GetScripts() (scripts []*misc.Script, err error)
var script *misc.Script
path := filepath.Join(config.Source, f.Name())
if !strings.HasSuffix(path, ".js") {
log.Info().Str("path", path).Msg("Skipping non-JS file")
continue
}
script, err = misc.ReadScriptFile(path)
if err != nil {
return

View File

@ -198,9 +198,9 @@ type MiscConfig struct {
type PcapDumpConfig struct {
PcapDumpEnabled bool `yaml:"enabled" json:"enabled" default:"true"`
PcapTimeInterval string `yaml:"timeInterval" json:"timeInterval" default:"10m"`
PcapTimeInterval string `yaml:"timeInterval" json:"timeInterval" default:"1m"`
PcapMaxTime string `yaml:"maxTime" json:"maxTime" default:"1h"`
PcapMaxSize string `yaml:"maxSize" json:"maxSize" default:"10MB"`
PcapMaxSize string `yaml:"maxSize" json:"maxSize" default:"500MB"`
PcapSrcDir string `yaml:"pcapSrcDir" json:"pcapSrcDir" default:"pcapdump"`
}

View File

@ -1,6 +1,6 @@
apiVersion: v2
name: kubeshark
version: "52.3.82"
version: "52.3.83"
description: The API Traffic Analyzer for Kubernetes
home: https://kubeshark.co
keywords:

View File

@ -1,3 +1,4 @@
# find a detailed description here: https://github.com/kubeshark/kubeshark/blob/master/helm-chart/README.md
tap:
docker:
registry: docker.io/kubeshark
@ -159,9 +160,9 @@ logs:
grep: ""
pcapdump:
enabled: true
timeInterval: 10m
timeInterval: 1m
maxTime: 1h
maxSize: 50MB
maxSize: 500MB
pcapSrcDir: pcapdump
kube:
configPath: ""

View File

@ -4,10 +4,10 @@ apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
labels:
helm.sh/chart: kubeshark-52.3.82
helm.sh/chart: kubeshark-52.3.83
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.82"
app.kubernetes.io/version: "52.3.83"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-hub-network-policy
@ -31,10 +31,10 @@ apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
labels:
helm.sh/chart: kubeshark-52.3.82
helm.sh/chart: kubeshark-52.3.83
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.82"
app.kubernetes.io/version: "52.3.83"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-front-network-policy
@ -58,10 +58,10 @@ apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
labels:
helm.sh/chart: kubeshark-52.3.82
helm.sh/chart: kubeshark-52.3.83
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.82"
app.kubernetes.io/version: "52.3.83"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-worker-network-policy
@ -87,10 +87,10 @@ apiVersion: v1
kind: ServiceAccount
metadata:
labels:
helm.sh/chart: kubeshark-52.3.82
helm.sh/chart: kubeshark-52.3.83
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.82"
app.kubernetes.io/version: "52.3.83"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-service-account
@ -104,10 +104,10 @@ metadata:
namespace: default
labels:
app.kubeshark.co/app: hub
helm.sh/chart: kubeshark-52.3.82
helm.sh/chart: kubeshark-52.3.83
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.82"
app.kubernetes.io/version: "52.3.83"
app.kubernetes.io/managed-by: Helm
stringData:
LICENSE: ''
@ -121,10 +121,10 @@ metadata:
namespace: default
labels:
app.kubeshark.co/app: hub
helm.sh/chart: kubeshark-52.3.82
helm.sh/chart: kubeshark-52.3.83
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.82"
app.kubernetes.io/version: "52.3.83"
app.kubernetes.io/managed-by: Helm
stringData:
AUTH_SAML_X509_CRT: |
@ -137,10 +137,10 @@ metadata:
namespace: default
labels:
app.kubeshark.co/app: hub
helm.sh/chart: kubeshark-52.3.82
helm.sh/chart: kubeshark-52.3.83
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.82"
app.kubernetes.io/version: "52.3.83"
app.kubernetes.io/managed-by: Helm
stringData:
AUTH_SAML_X509_KEY: |
@ -152,10 +152,10 @@ metadata:
name: kubeshark-nginx-config-map
namespace: default
labels:
helm.sh/chart: kubeshark-52.3.82
helm.sh/chart: kubeshark-52.3.83
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.82"
app.kubernetes.io/version: "52.3.83"
app.kubernetes.io/managed-by: Helm
data:
default.conf: |
@ -216,10 +216,10 @@ metadata:
namespace: default
labels:
app.kubeshark.co/app: hub
helm.sh/chart: kubeshark-52.3.82
helm.sh/chart: kubeshark-52.3.83
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.82"
app.kubernetes.io/version: "52.3.83"
app.kubernetes.io/managed-by: Helm
data:
POD_REGEX: '.*'
@ -239,9 +239,11 @@ data:
TELEMETRY_DISABLED: 'false'
SCRIPTING_DISABLED: ''
TARGETED_PODS_UPDATE_DISABLED: ''
PRESET_FILTERS_CHANGING_ENABLED: ''
RECORDING_DISABLED: ''
STOP_TRAFFIC_CAPTURING_DISABLED: 'false'
GLOBAL_FILTER: ""
DEFAULT_FILTER: "!dns and !tcp and !udp and !icmp"
TRAFFIC_SAMPLE_RATE: '100'
JSON_TTL: '5m'
PCAP_TTL: '10s'
@ -249,19 +251,24 @@ data:
TIMEZONE: ' '
CLOUD_LICENSE_ENABLED: 'true'
DUPLICATE_TIMEFRAME: '200ms'
ENABLED_DISSECTORS: 'amqp,dns,http,icmp,kafka,redis,sctp,syscall,tcp,udp,ws'
ENABLED_DISSECTORS: 'amqp,dns,http,icmp,kafka,redis,sctp,syscall,tcp,udp,ws,tls'
DISSECTORS_UPDATING_ENABLED: 'true'
DETECT_DUPLICATES: 'false'
PCAP_DUMP_ENABLE: 'true'
PCAP_TIME_INTERVAL: '1m'
PCAP_MAX_TIME: '1h'
PCAP_MAX_SIZE: '500MB'
PCAP_SRC_DIR: 'pcapdump'
---
# Source: kubeshark/templates/02-cluster-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
helm.sh/chart: kubeshark-52.3.82
helm.sh/chart: kubeshark-52.3.83
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.82"
app.kubernetes.io/version: "52.3.83"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-cluster-role-default
@ -295,10 +302,10 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
helm.sh/chart: kubeshark-52.3.82
helm.sh/chart: kubeshark-52.3.83
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.82"
app.kubernetes.io/version: "52.3.83"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-cluster-role-binding-default
@ -317,10 +324,10 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
helm.sh/chart: kubeshark-52.3.82
helm.sh/chart: kubeshark-52.3.83
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.82"
app.kubernetes.io/version: "52.3.83"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-self-config-role
@ -347,10 +354,10 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
helm.sh/chart: kubeshark-52.3.82
helm.sh/chart: kubeshark-52.3.83
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.82"
app.kubernetes.io/version: "52.3.83"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-self-config-role-binding
@ -370,10 +377,10 @@ kind: Service
metadata:
labels:
app.kubeshark.co/app: hub
helm.sh/chart: kubeshark-52.3.82
helm.sh/chart: kubeshark-52.3.83
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.82"
app.kubernetes.io/version: "52.3.83"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-hub
@ -392,10 +399,10 @@ apiVersion: v1
kind: Service
metadata:
labels:
helm.sh/chart: kubeshark-52.3.82
helm.sh/chart: kubeshark-52.3.83
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.82"
app.kubernetes.io/version: "52.3.83"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-front
@ -414,10 +421,10 @@ kind: Service
apiVersion: v1
metadata:
labels:
helm.sh/chart: kubeshark-52.3.82
helm.sh/chart: kubeshark-52.3.83
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.82"
app.kubernetes.io/version: "52.3.83"
app.kubernetes.io/managed-by: Helm
annotations:
prometheus.io/scrape: 'true'
@ -427,10 +434,10 @@ metadata:
spec:
selector:
app.kubeshark.co/app: worker
helm.sh/chart: kubeshark-52.3.82
helm.sh/chart: kubeshark-52.3.83
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.82"
app.kubernetes.io/version: "52.3.83"
app.kubernetes.io/managed-by: Helm
ports:
- name: metrics
@ -445,10 +452,10 @@ metadata:
labels:
app.kubeshark.co/app: worker
sidecar.istio.io/inject: "false"
helm.sh/chart: kubeshark-52.3.82
helm.sh/chart: kubeshark-52.3.83
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.82"
app.kubernetes.io/version: "52.3.83"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-worker-daemon-set
@ -463,10 +470,10 @@ spec:
metadata:
labels:
app.kubeshark.co/app: worker
helm.sh/chart: kubeshark-52.3.82
helm.sh/chart: kubeshark-52.3.83
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.82"
app.kubernetes.io/version: "52.3.83"
app.kubernetes.io/managed-by: Helm
name: kubeshark-worker-daemon-set
namespace: kubeshark
@ -491,7 +498,7 @@ spec:
- 'auto'
- -staletimeout
- '30'
image: 'docker.io/kubeshark/worker:v52.3.82'
image: 'docker.io/kubeshark/worker:v52.3.83'
imagePullPolicy: Always
name: sniffer
ports:
@ -564,7 +571,7 @@ spec:
- -procfs
- /hostproc
- -disable-ebpf
image: 'docker.io/kubeshark/worker:v52.3.82'
image: 'docker.io/kubeshark/worker:v52.3.83'
imagePullPolicy: Always
name: tracer
env:
@ -660,10 +667,10 @@ kind: Deployment
metadata:
labels:
app.kubeshark.co/app: hub
helm.sh/chart: kubeshark-52.3.82
helm.sh/chart: kubeshark-52.3.83
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.82"
app.kubernetes.io/version: "52.3.83"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-hub
@ -679,10 +686,10 @@ spec:
metadata:
labels:
app.kubeshark.co/app: hub
helm.sh/chart: kubeshark-52.3.82
helm.sh/chart: kubeshark-52.3.83
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.82"
app.kubernetes.io/version: "52.3.83"
app.kubernetes.io/managed-by: Helm
spec:
dnsPolicy: ClusterFirstWithHostNet
@ -710,7 +717,7 @@ spec:
value: 'https://api.kubeshark.co'
- name: PROFILING_ENABLED
value: 'false'
image: 'docker.io/kubeshark/hub:v52.3.82'
image: 'docker.io/kubeshark/hub:v52.3.83'
imagePullPolicy: Always
readinessProbe:
periodSeconds: 1
@ -758,10 +765,10 @@ kind: Deployment
metadata:
labels:
app.kubeshark.co/app: front
helm.sh/chart: kubeshark-52.3.82
helm.sh/chart: kubeshark-52.3.83
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.82"
app.kubernetes.io/version: "52.3.83"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-front
@ -777,16 +784,14 @@ spec:
metadata:
labels:
app.kubeshark.co/app: front
helm.sh/chart: kubeshark-52.3.82
helm.sh/chart: kubeshark-52.3.83
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.82"
app.kubernetes.io/version: "52.3.83"
app.kubernetes.io/managed-by: Helm
spec:
containers:
- env:
- name: REACT_APP_DEFAULT_FILTER
value: '!dns and !tcp and !udp and !icmp'
- name: REACT_APP_AUTH_ENABLED
value: 'true'
- name: REACT_APP_AUTH_TYPE
@ -799,6 +804,8 @@ spec:
value: 'false'
- name: REACT_APP_TARGETED_PODS_UPDATE_DISABLED
value: 'false'
- name: REACT_APP_PRESET_FILTERS_CHANGING_ENABLED
value: 'false'
- name: REACT_APP_BPF_OVERRIDE_DISABLED
value: 'false'
- name: REACT_APP_RECORDING_DISABLED
@ -815,7 +822,7 @@ spec:
value: 'false'
- name: REACT_APP_SENTRY_ENVIRONMENT
value: 'production'
image: 'docker.io/kubeshark/front:v52.3.82'
image: 'docker.io/kubeshark/front:v52.3.83'
imagePullPolicy: Always
name: kubeshark-front
livenessProbe: