🔖 Bump the Helm chart version to 52.3.81

Alon Girmonsky 2024-09-14 11:53:26 -07:00
parent ca844394fc
commit 88c72cda82
5 changed files with 102 additions and 79 deletions

Makefile

@@ -84,7 +84,7 @@ kubectl-view-kubeshark-resources: ## This command outputs all Kubernetes resources
./kubectl.sh view-kubeshark-resources
generate-helm-values: ## Generate the Helm values from config.yaml
./bin/kubeshark__ config > ./helm-chart/values.yaml && sed -i 's/^license:.*/license: ""/' helm-chart/values.yaml
./bin/kubeshark__ config > ./helm-chart/values.yaml && sed -i 's/^license:.*/license: ""/' helm-chart/values.yaml && sed -i '1i # find a detailed description here: https://github.com/kubeshark/kubeshark/blob/master/helm-chart/README.md' helm-chart/values.yaml
generate-manifests: ## Generate the manifests from the Helm chart using default configuration
helm template kubeshark -n default ./helm-chart > ./manifests/complete.yaml
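These two targets together rebuild the chart artifacts touched by this commit. A minimal regeneration sketch, assuming a checkout of the repo root with `make`, `helm`, and GNU `sed` on the path (only the `generate-helm-values` and `generate-manifests` targets shown above are used):

    # Regenerate helm-chart/values.yaml from config.yaml; the sed calls blank
    # the license and prepend the README pointer comment
    make generate-helm-values
    # Re-render manifests/complete.yaml from the chart's default values
    make generate-manifests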

helm-chart/Chart.yaml

@@ -1,6 +1,6 @@
apiVersion: v2
name: kubeshark
version: "52.3.79"
version: "52.3.81"
description: The API Traffic Analyzer for Kubernetes
home: https://kubeshark.co
keywords:
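After a version bump like this one, a quick sanity check is to lint the chart and confirm the new version propagates into the rendered labels. A hedged sketch, assuming a checkout of the repo root:

    # Validate the chart after editing Chart.yaml
    helm lint ./helm-chart
    # Confirm the bumped version appears in the generated chart labels
    helm template kubeshark -n default ./helm-chart | grep -m1 'helm.sh/chart: kubeshark-52.3.81'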

helm-chart/README.md

@@ -115,36 +115,43 @@ Please refer to [metrics](./metrics.md) documentation for details.
| Parameter | Description | Default |
|-------------------------------------------|-----------------------------------------------|---------------------------------------------------------|
| `tap.docker.registry` | Docker registry to pull from | `docker.io/kubeshark` |
| `tap.docker.tag` | Tag of the Docker images | `latest` |
| `tap.docker.tagLocked` | Lock the Docker image tags to prevent automatic upgrades to the latest branch image version. | `true` |
| `tap.docker.tagLocked` | If `false`, use the latest minor tag | `true` |
| `tap.docker.imagePullPolicy` | Kubernetes image pull policy | `Always` |
| `tap.docker.imagePullSecrets` | Kubernetes secrets to pull the images | `[]` |
| `tap.proxy.worker.srvPort` | Worker server port | `30001` |
| `tap.proxy.hub.port` | Hub service port | `8898` |
| `tap.proxy.hub.srvPort` | Hub server port | `8898` |
| `tap.proxy.front.port` | Front-facing service port | `8899` |
| `tap.proxy.host` | Proxy server's IP | `127.0.0.1` |
| `tap.namespaces` | List of namespaces for the traffic capture | `[]` |
| `tap.excludedNamespaces` | List of namespaces to explicitly exclude | `[]` |
| `tap.release.repo` | URL of the Helm chart repository | `https://helm.kubeshark.co` |
| `tap.release.name` | Helm release name | `kubeshark` |
| `tap.release.namespace` | Helm release namespace | `default` |
| `tap.persistentStorage` | Use `persistentVolumeClaim` instead of `emptyDir` | `false` |
| `tap.persistentStorageStatic` | Use static persistent volume provisioning (explicitly defined `PersistentVolume` ) | `false` |
| `tap.efsFileSytemIdAndPath` | [EFS file system ID and, optionally, subpath and/or access point](https://github.com/kubernetes-sigs/aws-efs-csi-driver/blob/master/examples/kubernetes/access_points/README.md) `<FileSystemId>:<Path>:<AccessPointId>` | "" |
| `tap.storageLimit` | Limit of either the `emptyDir` or `persistentVolumeClaim` | `500Mi` |
| `tap.storageClass` | Storage class of the `PersistentVolumeClaim` | `standard` |
| `tap.dryRun` | Preview of all pods matching the regex, without tapping them | `false` |
| `tap.pcap` | | `""` |
| `tap.resources.worker.limits.cpu` | CPU limit for worker | `750m` |
| `tap.resources.worker.limits.memory` | Memory limit for worker | `1Gi` |
| `tap.resources.worker.requests.cpu` | CPU request for worker | `50m` |
| `tap.resources.worker.requests.memory` | Memory request for worker | `50Mi` |
| `tap.resources.hub.limits.cpu` | CPU limit for hub | `750m` |
| `tap.resources.hub.limits.memory` | Memory limit for hub | `1Gi` |
| `tap.docker.overrideTag` | DANGER: Used to override specific images when testing custom features from the Kubeshark team | `""` |
| `tap.proxy.hub.srvPort` | Hub server port. Change if already occupied. | `8898` |
| `tap.proxy.worker.srvPort` | Worker server port. Change if already occupied.| `30001` |
| `tap.proxy.front.port` | Front service port. Change if already occupied.| `8899` |
| `tap.proxy.host` | Change to `0.0.0.0` to open up to the world. | `127.0.0.1` |
| `tap.regex` | Target (process traffic from) pods that match regex | `.*` |
| `tap.namespaces` | Target pods in namespaces | `[]` |
| `tap.excludedNamespaces` | Exclude pods in namespaces | `[]` |
| `tap.bpfOverride` | When using AF_PACKET as a traffic capture backend, override any existing pod targeting rules and set an explicit BPF expression (e.g. `net 0.0.0.0/0`). | `[]` |
| `tap.stopped` | Set to `false` to have traffic processing start automatically. When set to `true`, traffic processing is stopped by default, resulting in almost no resource consumption (e.g. Kubeshark is dormant). This property can be dynamically controlled via the dashboard. | `true` |
| `tap.resources.hub.limits.cpu` | CPU limit for hub | `1000m` |
| `tap.resources.hub.limits.memory` | Memory limit for hub | `1500Mi` |
| `tap.resources.hub.requests.cpu` | CPU request for hub | `50m` |
| `tap.resources.hub.requests.memory` | Memory request for hub | `50Mi` |
| `tap.resources.sniffer.limits.cpu` | CPU limit for sniffer | `1000m` |
| `tap.resources.sniffer.limits.memory` | Memory limit for sniffer | `1500Mi` |
| `tap.resources.sniffer.requests.cpu` | CPU request for sniffer | `50m` |
| `tap.resources.sniffer.requests.memory` | Memory request for sniffer | `50Mi` |
| `tap.resources.tracer.limits.cpu` | CPU limit for tracer | `1000m` |
| `tap.resources.tracer.limits.memory` | Memory limit for tracer | `1500Mi` |
| `tap.resources.tracer.requests.cpu` | CPU request for tracer | `50m` |
| `tap.resources.tracer.requests.memory` | Memory request for tracer | `50Mi` |
| `tap.serviceMesh` | Capture traffic from service meshes like Istio, Linkerd, Consul, etc. | `true` |
| `tap.tls` | Capture the encrypted/TLS traffic from cryptography libraries like OpenSSL | `true` |
| `tap.disableTlsLog` | Suppress logging for TLS/eBPF | `false` |
@@ -177,7 +184,6 @@ Please refer to [metrics](./metrics.md) documentation for details.
| `tap.defaultFilter` | Sets the default dashboard KFL filter (e.g. `http`). By default, this value is set to filter out DNS and TCP entries. The user can easily change this in the Dashboard. | `"!dns and !tcp"` |
| `tap.globalFilter` | Prepended to any KFL filter; can be used to limit what is visible in the dashboard. For example, `redact("request.headers.Authorization")` will redact the appropriate field. Another example, `!dns`, will not show any DNS traffic. | `""` |
| `tap.metrics.port` | Pod port used to expose Prometheus metrics | `49100` |
| `tap.stopped` | A flag indicating whether to start Kubeshark with traffic processing stopped, resulting in almost no resource consumption (e.g. Kubeshark is dormant). This property can be dynamically controlled via the dashboard. | `true` |
| `tap.enabledDissectors` | This is an array of strings representing the list of supported protocols. Remove or comment out redundant protocols (e.g., dns). | The default list includes: amqp, dns, http, icmp, kafka, redis, sctp, syscall, tcp, ws. |
| `logs.file` | Logs dump path | `""` |
| `kube.configPath` | Path to the `kubeconfig` file (`$HOME/.kube/config`) | `""` |
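Any of the parameters above can be overridden at install time. A minimal sketch using a few of the documented values, assuming the chart repository from `tap.release.repo` is added under the name `kubeshark`:

    # Add the chart repo documented as tap.release.repo
    helm repo add kubeshark https://helm.kubeshark.co
    # Install with traffic processing started and a larger storage limit
    helm install kubeshark kubeshark/kubeshark -n default \
      --set tap.stopped=false \
      --set tap.storageLimit=1Gi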

helm-chart/values.yaml

@@ -1,3 +1,4 @@
# find a detailed description here: https://github.com/kubeshark/kubeshark/blob/master/helm-chart/README.md
tap:
docker:
registry: docker.io/kubeshark
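For more than a flag or two, a separate values file is usually cleaner than stacked `--set` options. A hedged sketch pinning the images this commit references; the file name `my-values.yaml` is illustrative:

    # Write a small override file; the tap.docker.* keys mirror the table above
    cat > my-values.yaml <<'EOF'
    tap:
      docker:
        registry: docker.io/kubeshark
        tag: v52.3.81
    EOF
    # Apply it to an existing release
    helm upgrade kubeshark kubeshark/kubeshark -n default -f my-values.yaml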

manifests/complete.yaml

@@ -4,10 +4,10 @@ apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
labels:
helm.sh/chart: kubeshark-52.3.79
helm.sh/chart: kubeshark-52.3.81
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.79"
app.kubernetes.io/version: "52.3.81"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-hub-network-policy
@@ -31,10 +31,10 @@ apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
labels:
helm.sh/chart: kubeshark-52.3.79
helm.sh/chart: kubeshark-52.3.81
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.79"
app.kubernetes.io/version: "52.3.81"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-front-network-policy
@@ -58,10 +58,10 @@ apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
labels:
helm.sh/chart: kubeshark-52.3.79
helm.sh/chart: kubeshark-52.3.81
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.79"
app.kubernetes.io/version: "52.3.81"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-worker-network-policy
@@ -87,10 +87,10 @@ apiVersion: v1
kind: ServiceAccount
metadata:
labels:
helm.sh/chart: kubeshark-52.3.79
helm.sh/chart: kubeshark-52.3.81
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.79"
app.kubernetes.io/version: "52.3.81"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-service-account
@@ -104,10 +104,10 @@ metadata:
namespace: default
labels:
app.kubeshark.co/app: hub
helm.sh/chart: kubeshark-52.3.79
helm.sh/chart: kubeshark-52.3.81
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.79"
app.kubernetes.io/version: "52.3.81"
app.kubernetes.io/managed-by: Helm
stringData:
LICENSE: ''
@@ -121,10 +121,10 @@ metadata:
namespace: default
labels:
app.kubeshark.co/app: hub
helm.sh/chart: kubeshark-52.3.79
helm.sh/chart: kubeshark-52.3.81
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.79"
app.kubernetes.io/version: "52.3.81"
app.kubernetes.io/managed-by: Helm
stringData:
AUTH_SAML_X509_CRT: |
@@ -137,10 +137,10 @@ metadata:
namespace: default
labels:
app.kubeshark.co/app: hub
helm.sh/chart: kubeshark-52.3.79
helm.sh/chart: kubeshark-52.3.81
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.79"
app.kubernetes.io/version: "52.3.81"
app.kubernetes.io/managed-by: Helm
stringData:
AUTH_SAML_X509_KEY: |
@@ -152,10 +152,10 @@ metadata:
name: kubeshark-nginx-config-map
namespace: default
labels:
helm.sh/chart: kubeshark-52.3.79
helm.sh/chart: kubeshark-52.3.81
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.79"
app.kubernetes.io/version: "52.3.81"
app.kubernetes.io/managed-by: Helm
data:
default.conf: |
@@ -216,10 +216,10 @@ metadata:
namespace: default
labels:
app.kubeshark.co/app: hub
helm.sh/chart: kubeshark-52.3.79
helm.sh/chart: kubeshark-52.3.81
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.79"
app.kubernetes.io/version: "52.3.81"
app.kubernetes.io/managed-by: Helm
data:
POD_REGEX: '.*'
@@ -258,10 +258,10 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
helm.sh/chart: kubeshark-52.3.79
helm.sh/chart: kubeshark-52.3.81
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.79"
app.kubernetes.io/version: "52.3.81"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-cluster-role-default
@@ -295,10 +295,10 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
helm.sh/chart: kubeshark-52.3.79
helm.sh/chart: kubeshark-52.3.81
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.79"
app.kubernetes.io/version: "52.3.81"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-cluster-role-binding-default
@@ -317,10 +317,10 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
helm.sh/chart: kubeshark-52.3.79
helm.sh/chart: kubeshark-52.3.81
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.79"
app.kubernetes.io/version: "52.3.81"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-self-config-role
@@ -346,10 +346,10 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
helm.sh/chart: kubeshark-52.3.79
helm.sh/chart: kubeshark-52.3.81
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.79"
app.kubernetes.io/version: "52.3.81"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-self-config-role-binding
@@ -369,10 +369,10 @@ kind: Service
metadata:
labels:
app.kubeshark.co/app: hub
helm.sh/chart: kubeshark-52.3.79
helm.sh/chart: kubeshark-52.3.81
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.79"
app.kubernetes.io/version: "52.3.81"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-hub
@@ -391,10 +391,10 @@ apiVersion: v1
kind: Service
metadata:
labels:
helm.sh/chart: kubeshark-52.3.79
helm.sh/chart: kubeshark-52.3.81
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.79"
app.kubernetes.io/version: "52.3.81"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-front
@@ -413,10 +413,10 @@ kind: Service
apiVersion: v1
metadata:
labels:
helm.sh/chart: kubeshark-52.3.79
helm.sh/chart: kubeshark-52.3.81
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.79"
app.kubernetes.io/version: "52.3.81"
app.kubernetes.io/managed-by: Helm
annotations:
prometheus.io/scrape: 'true'
@@ -426,10 +426,10 @@ metadata:
spec:
selector:
app.kubeshark.co/app: worker
helm.sh/chart: kubeshark-52.3.79
helm.sh/chart: kubeshark-52.3.81
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.79"
app.kubernetes.io/version: "52.3.81"
app.kubernetes.io/managed-by: Helm
ports:
- name: metrics
@@ -444,10 +444,10 @@ metadata:
labels:
app.kubeshark.co/app: worker
sidecar.istio.io/inject: "false"
helm.sh/chart: kubeshark-52.3.79
helm.sh/chart: kubeshark-52.3.81
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.79"
app.kubernetes.io/version: "52.3.81"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-worker-daemon-set
@@ -462,10 +462,10 @@ spec:
metadata:
labels:
app.kubeshark.co/app: worker
helm.sh/chart: kubeshark-52.3.79
helm.sh/chart: kubeshark-52.3.81
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.79"
app.kubernetes.io/version: "52.3.81"
app.kubernetes.io/managed-by: Helm
name: kubeshark-worker-daemon-set
namespace: kubeshark
@@ -488,7 +488,7 @@ spec:
- -disable-ebpf
- -resolution-strategy
- 'auto'
image: 'docker.io/kubeshark/worker:v52.3.79'
image: 'docker.io/kubeshark/worker:v52.3.81'
imagePullPolicy: Always
name: sniffer
ports:
@@ -512,6 +512,10 @@ spec:
value: 'https://api.kubeshark.co'
- name: PROFILING_ENABLED
value: 'false'
- name: SENTRY_ENABLED
value: 'false'
- name: SENTRY_ENVIRONMENT
value: 'production'
resources:
limits:
cpu: 1000m
@@ -557,7 +561,7 @@ spec:
- -procfs
- /hostproc
- -disable-ebpf
image: 'docker.io/kubeshark/worker:v52.3.79'
image: 'docker.io/kubeshark/worker:v52.3.81'
imagePullPolicy: Always
name: tracer
env:
@@ -571,6 +575,10 @@ spec:
fieldPath: metadata.namespace
- name: PROFILING_ENABLED
value: 'false'
- name: SENTRY_ENABLED
value: 'false'
- name: SENTRY_ENVIRONMENT
value: 'production'
resources:
limits:
cpu: 1000m
@@ -642,10 +650,10 @@ kind: Deployment
metadata:
labels:
app.kubeshark.co/app: hub
helm.sh/chart: kubeshark-52.3.79
helm.sh/chart: kubeshark-52.3.81
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.79"
app.kubernetes.io/version: "52.3.81"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-hub
@@ -661,10 +669,10 @@ spec:
metadata:
labels:
app.kubeshark.co/app: hub
helm.sh/chart: kubeshark-52.3.79
helm.sh/chart: kubeshark-52.3.81
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.79"
app.kubernetes.io/version: "52.3.81"
app.kubernetes.io/managed-by: Helm
spec:
dnsPolicy: ClusterFirstWithHostNet
@@ -684,11 +692,15 @@ spec:
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: SENTRY_ENABLED
value: 'false'
- name: SENTRY_ENVIRONMENT
value: 'production'
- name: KUBESHARK_CLOUD_API_URL
value: 'https://api.kubeshark.co'
- name: PROFILING_ENABLED
value: 'false'
image: 'docker.io/kubeshark/hub:v52.3.79'
image: 'docker.io/kubeshark/hub:v52.3.81'
imagePullPolicy: Always
readinessProbe:
periodSeconds: 1
@@ -736,10 +748,10 @@ kind: Deployment
metadata:
labels:
app.kubeshark.co/app: front
helm.sh/chart: kubeshark-52.3.79
helm.sh/chart: kubeshark-52.3.81
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.79"
app.kubernetes.io/version: "52.3.81"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-front
@@ -755,10 +767,10 @@ spec:
metadata:
labels:
app.kubeshark.co/app: front
helm.sh/chart: kubeshark-52.3.79
helm.sh/chart: kubeshark-52.3.81
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "52.3.79"
app.kubernetes.io/version: "52.3.81"
app.kubernetes.io/managed-by: Helm
spec:
containers:
@@ -789,7 +801,11 @@ spec:
value: 'true'
- name: REACT_APP_DISSECTORS_UPDATING_ENABLED
value: 'true'
image: 'docker.io/kubeshark/front:v52.3.79'
- name: REACT_APP_SENTRY_ENABLED
value: 'false'
- name: REACT_APP_SENTRY_ENVIRONMENT
value: 'production'
image: 'docker.io/kubeshark/front:v52.3.81'
imagePullPolicy: Always
name: kubeshark-front
livenessProbe:
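Once the bumped chart is rolled out, the labels stamped throughout these manifests make it easy to confirm what is actually running. A short sketch, assuming the default release namespace:

    # List Kubeshark pods and surface the version label set by the chart
    kubectl get pods -n default -l app.kubernetes.io/name=kubeshark \
      -L app.kubernetes.io/version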