Compare commits

...

8 Commits

Author SHA1 Message Date
Alon Girmonsky
bec0b25daa 🔖 Bump the Helm chart version to 52.3.88 2024-11-02 13:11:02 -07:00
Alon Girmonsky
9248f07af0 missing commit 2024-11-02 09:50:30 -07:00
Alon Girmonsky
a1e05db4b0 Improved resource limits and requests Helm templating 2024-11-02 09:49:45 -07:00
Alon Girmonsky
b3f6fdc831 Added an ability to override image names for cases where, when using a CI, one needs to use individual image names (#1636) 2024-10-31 21:18:13 -07:00
Alon Girmonsky
e0c010eb29 🔖 Bump the Helm chart version to 52.3.87 2024-10-30 12:51:15 -07:00
Alon Girmonsky
d9fedc5bec removed debug comments 2024-10-29 21:55:35 -07:00
Alon Girmonsky
d1b4f9dcb1 🔖 Bump the Helm chart version to 52.3.86 2024-10-29 21:53:23 -07:00
Alon Girmonsky
629fb118e8 Revert "Set resource guard to true by default."
This reverts commit a7692a664d.
2024-10-29 21:49:25 -07:00
8 changed files with 152 additions and 80 deletions

View File

@@ -46,17 +46,17 @@ const (
)
type ResourceLimitsHub struct {
CPU string `yaml:"cpu" json:"cpu" default:""`
CPU string `yaml:"cpu" json:"cpu" default:"0"`
Memory string `yaml:"memory" json:"memory" default:"5Gi"`
}
type ResourceLimitsWorker struct {
CPU string `yaml:"cpu" json:"cpu" default:""`
CPU string `yaml:"cpu" json:"cpu" default:"0"`
Memory string `yaml:"memory" json:"memory" default:"3Gi"`
}
type ResourceRequests struct {
CPU string `yaml:"cpu" json:"cpu" default:""`
CPU string `yaml:"cpu" json:"cpu" default:"50m"`
Memory string `yaml:"memory" json:"memory" default:"50Mi"`
}
@@ -89,6 +89,11 @@ type ProxyConfig struct {
Host string `yaml:"host" json:"host" default:"127.0.0.1"`
}
type OverrideImageConfig struct {
Worker string `yaml:"worker" json:"worker"`
Hub string `yaml:"hub" json:"hub"`
Front string `yaml:"front" json:"front"`
}
type OverrideTagConfig struct {
Worker string `yaml:"worker" json:"worker"`
Hub string `yaml:"hub" json:"hub"`
@@ -96,12 +101,13 @@ type OverrideTagConfig struct {
}
type DockerConfig struct {
Registry string `yaml:"registry" json:"registry" default:"docker.io/kubeshark"`
Tag string `yaml:"tag" json:"tag" default:""`
TagLocked bool `yaml:"tagLocked" json:"tagLocked" default:"true"`
ImagePullPolicy string `yaml:"imagePullPolicy" json:"imagePullPolicy" default:"Always"`
ImagePullSecrets []string `yaml:"imagePullSecrets" json:"imagePullSecrets"`
OverrideTag OverrideTagConfig `yaml:"overrideTag" json:"overrideTag"`
Registry string `yaml:"registry" json:"registry" default:"docker.io/kubeshark"`
Tag string `yaml:"tag" json:"tag" default:""`
TagLocked bool `yaml:"tagLocked" json:"tagLocked" default:"true"`
ImagePullPolicy string `yaml:"imagePullPolicy" json:"imagePullPolicy" default:"Always"`
ImagePullSecrets []string `yaml:"imagePullSecrets" json:"imagePullSecrets"`
OverrideImage OverrideImageConfig `yaml:"overrideImage" json:"overrideImage"`
OverrideTag OverrideTagConfig `yaml:"overrideTag" json:"overrideTag"`
}
type ResourcesConfig struct {
@@ -152,7 +158,7 @@ type TelemetryConfig struct {
}
type ResourceGuardConfig struct {
Enabled bool `yaml:"enabled" json:"enabled" default:"true"`
Enabled bool `yaml:"enabled" json:"enabled" default:"false"`
}
type SentryConfig struct {

View File

@@ -1,6 +1,6 @@
apiVersion: v2
name: kubeshark
version: "52.3.85"
version: "52.3.88"
description: The API Traffic Analyzer for Kubernetes
home: https://kubeshark.co
keywords:

View File

@@ -104,6 +104,20 @@ helm install kubeshark kubeshark/kubeshark \
Please refer to [metrics](./metrics.md) documentation for details.
## Override Tag, Tags, Images
In addition to using a private registry, you can further override the global image tag, individual image tags, and complete image names.
Example for overriding image names:
```yaml
docker:
overrideImage:
worker: docker.io/kubeshark/worker:v52.3.87
front: docker.io/kubeshark/front:v52.3.87
hub: docker.io/kubeshark/hub:v52.3.87
```
## Configuration
| Parameter | Description | Default |
@@ -114,7 +128,8 @@ Please refer to [metrics](./metrics.md) documentation for details.
| `tap.docker.tagLocked` | If `false` - use latest minor tag | `true` |
| `tap.docker.imagePullPolicy` | Kubernetes image pull policy | `Always` |
| `tap.docker.imagePullSecrets` | Kubernetes secrets to pull the images | `[]` |
| `tap.docker.overrideTag` | DANGER: Used to override specific images, when testing custom features from the Kubeshark team | `""` |
| `tap.docker.overrideImage` | Can be used to directly override image names | `""` |
| `tap.docker.overrideTag` | Can be used to override image tags | `""` |
| `tap.proxy.hub.srvPort` | Hub server port. Change if already occupied. | `8898` |
| `tap.proxy.worker.srvPort` | Worker server port. Change if already occupied.| `30001` |
| `tap.proxy.front.port` | Front service port. Change if already occupied.| `8899` |
@@ -172,7 +187,7 @@ Please refer to [metrics](./metrics.md) documentation for details.
| `tap.kernelModule.image` | Container image containing PF_RING kernel module with supported kernel version([details](PF_RING.md)) | "kubeshark/pf-ring-module:all" |
| `tap.kernelModule.unloadOnDestroy` | Create additional container which watches for pod termination and unloads PF_RING kernel module. | `false`|
| `tap.telemetry.enabled` | Enable anonymous usage statistics collection | `true` |
| `tap.resourceGuard.enabled` | Enable resource guard worker process, which watches RAM/disk usage and enables/disables traffic capture based on available resources. This means that for any specific node, if resource utilization (CPU, memory, disk) reaches 90% traffic capture will stop automatically. Traffic capture will restart once resources go back to below the 90% level. | `true` |
| `tap.resourceGuard.enabled` | Enable resource guard worker process, which watches RAM/disk usage and enables/disables traffic capture based on available resources | `false` |
| `tap.sentry.enabled` | Enable sending of error logs to Sentry | `false` |
| `tap.sentry.environment` | Sentry environment to label error logs with | `production` |
| `tap.defaultFilter` | Sets the default dashboard KFL filter (e.g. `http`). By default, this value is set to filter out noisy protocols such as DNS, UDP, ICMP and TCP. The user can easily change this in the Dashboard. You can also change this value to change this behavior. | `"!dns and !tcp and !udp and !icmp"` |

View File

@@ -51,7 +51,9 @@ spec:
value: 'https://api.kubeshark.co'
- name: PROFILING_ENABLED
value: '{{ .Values.tap.pprof.enabled }}'
{{- if .Values.tap.docker.overrideTag.hub }}
{{- if .Values.tap.docker.overrideImage.hub }}
image: '{{ .Values.tap.docker.overrideImage.hub }}'
{{- else if .Values.tap.docker.overrideTag.hub }}
image: '{{ .Values.tap.docker.registry }}/hub:{{ .Values.tap.docker.overrideTag.hub }}'
{{ else }}
image: '{{ .Values.tap.docker.registry }}/hub:{{ not (eq .Values.tap.docker.tag "") | ternary .Values.tap.docker.tag (include "kubeshark.defaultVersion" .) }}'
@@ -79,11 +81,19 @@ spec:
port: 8080
resources:
limits:
{{ if ne .Values.tap.resources.hub.limits.cpu "0" }}
cpu: {{ .Values.tap.resources.hub.limits.cpu }}
{{ end }}
{{ if ne .Values.tap.resources.hub.limits.memory "0" }}
memory: {{ .Values.tap.resources.hub.limits.memory }}
{{ end }}
requests:
{{ if ne .Values.tap.resources.hub.requests.cpu "0" }}
cpu: {{ .Values.tap.resources.hub.requests.cpu }}
{{ end }}
{{ if ne .Values.tap.resources.hub.requests.memory "0" }}
memory: {{ .Values.tap.resources.hub.requests.memory }}
{{ end }}
volumeMounts:
- name: saml-x509-volume
mountPath: "/etc/saml/x509"

View File

@@ -66,7 +66,9 @@ spec:
value: '{{ (include "sentry.enabled" .) }}'
- name: REACT_APP_SENTRY_ENVIRONMENT
value: '{{ .Values.tap.sentry.environment }}'
{{- if .Values.tap.docker.overrideTag.front }}
{{- if .Values.tap.docker.overrideImage.front }}
image: '{{ .Values.tap.docker.overrideImage.front }}'
{{- else if .Values.tap.docker.overrideTag.front }}
image: '{{ .Values.tap.docker.registry }}/front:{{ .Values.tap.docker.overrideTag.front }}'
{{ else }}
image: '{{ .Values.tap.docker.registry }}/front:{{ not (eq .Values.tap.docker.tag "") | ternary .Values.tap.docker.tag (include "kubeshark.defaultVersion" .) }}'

View File

@@ -83,7 +83,9 @@ spec:
{{- if .Values.tap.debug }}
- -debug
{{- end }}
{{- if .Values.tap.docker.overrideTag.worker }}
{{- if .Values.tap.docker.overrideImage.worker }}
image: '{{ .Values.tap.docker.overrideImage.worker }}'
{{- else if .Values.tap.docker.overrideTag.worker }}
image: '{{ .Values.tap.docker.registry }}/worker:{{ .Values.tap.docker.overrideTag.worker }}{{ include "kubeshark.dockerTagDebugVersion" . }}'
{{ else }}
image: '{{ .Values.tap.docker.registry }}/worker:{{ not (eq .Values.tap.docker.tag "") | ternary .Values.tap.docker.tag (include "kubeshark.defaultVersion" .) }}{{ include "kubeshark.dockerTagDebugVersion" . }}'
@@ -123,11 +125,19 @@ spec:
value: '{{ .Values.tap.sentry.environment }}'
resources:
limits:
{{ if ne .Values.tap.resources.sniffer.limits.cpu "0" }}
cpu: {{ .Values.tap.resources.sniffer.limits.cpu }}
{{ end }}
{{ if ne .Values.tap.resources.sniffer.limits.memory "0" }}
memory: {{ .Values.tap.resources.sniffer.limits.memory }}
{{ end }}
requests:
{{ if ne .Values.tap.resources.sniffer.requests.cpu "0" }}
cpu: {{ .Values.tap.resources.sniffer.requests.cpu }}
{{ end }}
{{ if ne .Values.tap.resources.sniffer.requests.memory "0" }}
memory: {{ .Values.tap.resources.sniffer.requests.memory }}
{{ end }}
securityContext:
capabilities:
add:
@@ -226,11 +236,19 @@ spec:
value: '{{ .Values.tap.sentry.environment }}'
resources:
limits:
{{ if ne .Values.tap.resources.tracer.limits.cpu "0" }}
cpu: {{ .Values.tap.resources.tracer.limits.cpu }}
{{ end }}
{{ if ne .Values.tap.resources.tracer.limits.memory "0" }}
memory: {{ .Values.tap.resources.tracer.limits.memory }}
{{ end }}
requests:
{{ if ne .Values.tap.resources.tracer.requests.cpu "0" }}
cpu: {{ .Values.tap.resources.tracer.requests.cpu }}
{{ end }}
{{ if ne .Values.tap.resources.tracer.requests.memory "0" }}
memory: {{ .Values.tap.resources.tracer.requests.memory }}
{{ end }}
securityContext:
capabilities:
add:

View File

@@ -6,6 +6,10 @@ tap:
tagLocked: true
imagePullPolicy: Always
imagePullSecrets: []
overrideImage:
worker: ""
hub: ""
front: ""
overrideTag:
worker: ""
hub: ""
@@ -36,24 +40,24 @@ tap:
resources:
hub:
limits:
cpu: ""
cpu: "0"
memory: 5Gi
requests:
cpu: ""
cpu: 50m
memory: 50Mi
sniffer:
limits:
cpu: ""
cpu: "0"
memory: 5Gi
requests:
cpu: ""
cpu: 50m
memory: 50Mi
tracer:
limits:
cpu: ""
cpu: "0"
memory: 5Gi
requests:
cpu: ""
cpu: 50m
memory: 50Mi
serviceMesh: true
tls: true
@@ -99,7 +103,7 @@ tap:
telemetry:
enabled: true
resourceGuard:
enabled: true
enabled: false
sentry:
enabled: false
environment: production

View File

@@ -4,10 +4,10 @@ apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
labels:
helm.sh/chart: kubeshark-52.3.85
helm.sh/chart: kubeshark-52.3.88
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.85"
app.kubernetes.io/version: "52.3.88"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-hub-network-policy
@@ -31,10 +31,10 @@ apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
labels:
helm.sh/chart: kubeshark-52.3.85
helm.sh/chart: kubeshark-52.3.88
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.85"
app.kubernetes.io/version: "52.3.88"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-front-network-policy
@@ -58,10 +58,10 @@ apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
labels:
helm.sh/chart: kubeshark-52.3.85
helm.sh/chart: kubeshark-52.3.88
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.85"
app.kubernetes.io/version: "52.3.88"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-worker-network-policy
@@ -87,10 +87,10 @@ apiVersion: v1
kind: ServiceAccount
metadata:
labels:
helm.sh/chart: kubeshark-52.3.85
helm.sh/chart: kubeshark-52.3.88
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.85"
app.kubernetes.io/version: "52.3.88"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-service-account
@@ -104,10 +104,10 @@ metadata:
namespace: default
labels:
app.kubeshark.co/app: hub
helm.sh/chart: kubeshark-52.3.85
helm.sh/chart: kubeshark-52.3.88
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.85"
app.kubernetes.io/version: "52.3.88"
app.kubernetes.io/managed-by: Helm
stringData:
LICENSE: ''
@@ -121,10 +121,10 @@ metadata:
namespace: default
labels:
app.kubeshark.co/app: hub
helm.sh/chart: kubeshark-52.3.85
helm.sh/chart: kubeshark-52.3.88
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.85"
app.kubernetes.io/version: "52.3.88"
app.kubernetes.io/managed-by: Helm
stringData:
AUTH_SAML_X509_CRT: |
@@ -137,10 +137,10 @@ metadata:
namespace: default
labels:
app.kubeshark.co/app: hub
helm.sh/chart: kubeshark-52.3.85
helm.sh/chart: kubeshark-52.3.88
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.85"
app.kubernetes.io/version: "52.3.88"
app.kubernetes.io/managed-by: Helm
stringData:
AUTH_SAML_X509_KEY: |
@@ -152,10 +152,10 @@ metadata:
name: kubeshark-nginx-config-map
namespace: default
labels:
helm.sh/chart: kubeshark-52.3.85
helm.sh/chart: kubeshark-52.3.88
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.85"
app.kubernetes.io/version: "52.3.88"
app.kubernetes.io/managed-by: Helm
data:
default.conf: |
@@ -216,10 +216,10 @@ metadata:
namespace: default
labels:
app.kubeshark.co/app: hub
helm.sh/chart: kubeshark-52.3.85
helm.sh/chart: kubeshark-52.3.88
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.85"
app.kubernetes.io/version: "52.3.88"
app.kubernetes.io/managed-by: Helm
data:
POD_REGEX: '.*'
@@ -266,10 +266,10 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
helm.sh/chart: kubeshark-52.3.85
helm.sh/chart: kubeshark-52.3.88
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.85"
app.kubernetes.io/version: "52.3.88"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-cluster-role-default
@@ -314,10 +314,10 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
helm.sh/chart: kubeshark-52.3.85
helm.sh/chart: kubeshark-52.3.88
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.85"
app.kubernetes.io/version: "52.3.88"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-cluster-role-binding-default
@@ -336,10 +336,10 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
helm.sh/chart: kubeshark-52.3.85
helm.sh/chart: kubeshark-52.3.88
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.85"
app.kubernetes.io/version: "52.3.88"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-self-config-role
@@ -366,10 +366,10 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
helm.sh/chart: kubeshark-52.3.85
helm.sh/chart: kubeshark-52.3.88
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.85"
app.kubernetes.io/version: "52.3.88"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-self-config-role-binding
@@ -389,10 +389,10 @@ kind: Service
metadata:
labels:
app.kubeshark.co/app: hub
helm.sh/chart: kubeshark-52.3.85
helm.sh/chart: kubeshark-52.3.88
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.85"
app.kubernetes.io/version: "52.3.88"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-hub
@@ -411,10 +411,10 @@ apiVersion: v1
kind: Service
metadata:
labels:
helm.sh/chart: kubeshark-52.3.85
helm.sh/chart: kubeshark-52.3.88
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.85"
app.kubernetes.io/version: "52.3.88"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-front
@@ -433,10 +433,10 @@ kind: Service
apiVersion: v1
metadata:
labels:
helm.sh/chart: kubeshark-52.3.85
helm.sh/chart: kubeshark-52.3.88
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.85"
app.kubernetes.io/version: "52.3.88"
app.kubernetes.io/managed-by: Helm
annotations:
prometheus.io/scrape: 'true'
@@ -446,10 +446,10 @@ metadata:
spec:
selector:
app.kubeshark.co/app: worker
helm.sh/chart: kubeshark-52.3.85
helm.sh/chart: kubeshark-52.3.88
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.85"
app.kubernetes.io/version: "52.3.88"
app.kubernetes.io/managed-by: Helm
ports:
- name: metrics
@@ -464,10 +464,10 @@ metadata:
labels:
app.kubeshark.co/app: worker
sidecar.istio.io/inject: "false"
helm.sh/chart: kubeshark-52.3.85
helm.sh/chart: kubeshark-52.3.88
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.85"
app.kubernetes.io/version: "52.3.88"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-worker-daemon-set
@@ -482,10 +482,10 @@ spec:
metadata:
labels:
app.kubeshark.co/app: worker
helm.sh/chart: kubeshark-52.3.85
helm.sh/chart: kubeshark-52.3.88
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.85"
app.kubernetes.io/version: "52.3.88"
app.kubernetes.io/managed-by: Helm
name: kubeshark-worker-daemon-set
namespace: kubeshark
@@ -506,12 +506,11 @@ spec:
- -procfs
- /hostproc
- -disable-ebpf
- -enable-resource-guard
- -resolution-strategy
- 'auto'
- -staletimeout
- '30'
image: 'docker.io/kubeshark/worker:v52.3.85'
image: 'docker.io/kubeshark/worker:v52.3.88'
imagePullPolicy: Always
name: sniffer
ports:
@@ -541,11 +540,17 @@ spec:
value: 'production'
resources:
limits:
cpu:
memory: 5Gi
requests:
cpu:
cpu: 50m
memory: 50Mi
securityContext:
capabilities:
add:
@@ -585,7 +590,7 @@ spec:
- /hostproc
- -disable-ebpf
- -disable-tls-log
image: 'docker.io/kubeshark/worker:v52.3.85'
image: 'docker.io/kubeshark/worker:v52.3.88'
imagePullPolicy: Always
name: tracer
env:
@@ -605,11 +610,17 @@ spec:
value: 'production'
resources:
limits:
cpu:
memory: 5Gi
requests:
cpu:
cpu: 50m
memory: 50Mi
securityContext:
capabilities:
add:
@@ -681,10 +692,10 @@ kind: Deployment
metadata:
labels:
app.kubeshark.co/app: hub
helm.sh/chart: kubeshark-52.3.85
helm.sh/chart: kubeshark-52.3.88
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.85"
app.kubernetes.io/version: "52.3.88"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-hub
@@ -700,10 +711,10 @@ spec:
metadata:
labels:
app.kubeshark.co/app: hub
helm.sh/chart: kubeshark-52.3.85
helm.sh/chart: kubeshark-52.3.88
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.85"
app.kubernetes.io/version: "52.3.88"
app.kubernetes.io/managed-by: Helm
spec:
dnsPolicy: ClusterFirstWithHostNet
@@ -731,7 +742,7 @@ spec:
value: 'https://api.kubeshark.co'
- name: PROFILING_ENABLED
value: 'false'
image: 'docker.io/kubeshark/hub:v52.3.85'
image: 'docker.io/kubeshark/hub:v52.3.88'
imagePullPolicy: Always
readinessProbe:
periodSeconds: 1
@@ -749,11 +760,17 @@ spec:
port: 8080
resources:
limits:
cpu:
memory: 5Gi
requests:
cpu:
cpu: 50m
memory: 50Mi
volumeMounts:
- name: saml-x509-volume
mountPath: "/etc/saml/x509"
@@ -779,10 +796,10 @@ kind: Deployment
metadata:
labels:
app.kubeshark.co/app: front
helm.sh/chart: kubeshark-52.3.85
helm.sh/chart: kubeshark-52.3.88
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.85"
app.kubernetes.io/version: "52.3.88"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-front
@@ -798,10 +815,10 @@ spec:
metadata:
labels:
app.kubeshark.co/app: front
helm.sh/chart: kubeshark-52.3.85
helm.sh/chart: kubeshark-52.3.88
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.85"
app.kubernetes.io/version: "52.3.88"
app.kubernetes.io/managed-by: Helm
spec:
containers:
@@ -836,7 +853,7 @@ spec:
value: 'false'
- name: REACT_APP_SENTRY_ENVIRONMENT
value: 'production'
image: 'docker.io/kubeshark/front:v52.3.85'
image: 'docker.io/kubeshark/front:v52.3.88'
imagePullPolicy: Always
name: kubeshark-front
livenessProbe: