# kubeshark/helm-chart/templates/09-worker-daemon-set.yaml
---
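# Worker DaemonSet: runs one sniffer pod on every schedulable node, with an
# optional "tracer" container gated on tap.tls.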
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    app.kubeshark.co/app: worker
    sidecar.istio.io/inject: "false"
    {{- include "kubeshark.labels" . | nindent 4 }}
  annotations:
    {{- if .Values.tap.annotations }}
    {{- toYaml .Values.tap.annotations | nindent 4 }}
    {{- end }}
  name: kubeshark-worker-daemon-set
  namespace: {{ .Release.Namespace }}
spec:
  selector:
    matchLabels:
      app.kubeshark.co/app: worker
      {{- include "kubeshark.labels" . | nindent 6 }}
  template:
    metadata:
      labels:
        app.kubeshark.co/app: worker
        {{- include "kubeshark.labels" . | nindent 8 }}
      name: kubeshark-worker-daemon-set
      namespace: {{ .Release.Namespace }}
    spec:
      containers:
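        # Sniffer: captures traffic on all interfaces (-i any), resolves
        # processes through the host's /proc (mounted at /hostproc), and
        # serves on tap.proxy.worker.srvport; -servicemesh presumably enables
        # capture inside service-mesh sidecars.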
        - command:
            - ./worker
            - -i
            - any
            - -port
            - '{{ .Values.tap.proxy.worker.srvport }}'
            - -servicemesh
            - -procfs
            - /hostproc
            {{ .Values.tap.debug | ternary "- -debug" "" }}
          image: '{{ .Values.tap.docker.registry }}/worker:{{ .Values.tap.docker.tag }}'
          imagePullPolicy: {{ .Values.tap.docker.imagepullpolicy }}
          name: sniffer
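          # Pod identity is injected through the downward API; additional
          # environment comes from the kubeshark-secret Secret. With tap.debug
          # set, the PROFILING_* variables enable periodic pprof profiling
          # (dump path "pprof", 60 s interval).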
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
            {{- if .Values.tap.debug }}
            - name: PROFILING_ENABLED
              value: "true"
            - name: PROFILING_DUMP_PATH
              value: "pprof"
            - name: PROFILING_INTERVAL_SECONDS
              value: "60"
            {{- end }}
          envFrom:
            - secretRef:
                name: kubeshark-secret
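          # Resource bounds come straight from tap.resources.worker; a
          # hypothetical values.yaml fragment:
          #   tap:
          #     resources:
          #       worker:
          #         limits:   { cpu: 750m, memory: 1Gi }
          #         requests: { cpu: 50m,  memory: 50Mi }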
          resources:
            limits:
              cpu: {{ .Values.tap.resources.worker.limits.cpu }}
              memory: {{ .Values.tap.resources.worker.limits.memory }}
            requests:
              cpu: {{ .Values.tap.resources.worker.requests.cpu }}
              memory: {{ .Values.tap.resources.worker.requests.memory }}
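          # Explicit capability set, everything else dropped: NET_RAW/NET_ADMIN
          # (raw sockets and interface control for capture), SYS_ADMIN and
          # SYS_PTRACE (inspecting other processes and their namespaces),
          # DAC_OVERRIDE (read files regardless of permissions), SYS_MODULE
          # (kernel module loading).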
          securityContext:
            capabilities:
              add:
                - NET_RAW
                - NET_ADMIN
                - SYS_ADMIN
                - SYS_PTRACE
                - DAC_OVERRIDE
                - SYS_MODULE
              drop:
                - ALL
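          # Readiness and liveness are both plain TCP dials against the worker
          # port: checked every second, failing after 3 consecutive misses.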
          readinessProbe:
            periodSeconds: 1
            failureThreshold: 3
            successThreshold: 1
            initialDelaySeconds: 5
            tcpSocket:
              port: {{ .Values.tap.proxy.worker.srvport }}
          livenessProbe:
            periodSeconds: 1
            failureThreshold: 3
            successThreshold: 1
            initialDelaySeconds: 5
            tcpSocket:
              port: {{ .Values.tap.proxy.worker.srvport }}
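          # Host /proc and /sys are mounted read-only; /app/data holds capture
          # data and is backed by the "data" volume defined below.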
          volumeMounts:
            - mountPath: /hostproc
              name: proc
              readOnly: true
            - mountPath: /sys
              name: sys
              readOnly: true
            - mountPath: /app/data
              name: data
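        # Tracer: optional container rendered only when tap.tls is enabled; it
        # reuses the worker image for kernel-level TLS tracing. Note that
        # SYS_RESOURCE (commonly required to raise eBPF locked-memory limits)
        # replaces the sniffer's SYS_MODULE.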
        {{- if .Values.tap.tls }}
        - command:
            - ./tracer
            - -procfs
            - /hostproc
            {{ .Values.tap.debug | ternary "- -debug" "" }}
          image: '{{ .Values.tap.docker.registry }}/worker:{{ .Values.tap.docker.tag }}'
          imagePullPolicy: {{ .Values.tap.docker.imagepullpolicy }}
          name: tracer
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          envFrom:
            - secretRef:
                name: kubeshark-secret
          securityContext:
            capabilities:
              add:
                - NET_RAW
                - NET_ADMIN
                - SYS_ADMIN
                - SYS_PTRACE
                - DAC_OVERRIDE
                - SYS_RESOURCE
              drop:
                - ALL
          volumeMounts:
            - mountPath: /hostproc
              name: proc
              readOnly: true
            - mountPath: /sys
              name: sys
              readOnly: true
            - mountPath: /app/data
              name: data
        {{- end }}
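      # The pod runs in the node's network namespace so the sniffer can see
      # host traffic; ClusterFirstWithHostNet keeps cluster DNS resolvable
      # under hostNetwork: true.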
      dnsPolicy: ClusterFirstWithHostNet
      hostNetwork: true
      serviceAccountName: {{ include "kubeshark.serviceAccountName" . }}
      terminationGracePeriodSeconds: 0
      tolerations:
        - effect: NoExecute
          operator: Exists
        {{- if not .Values.tap.ignoretainted }}
        - effect: NoSchedule
          operator: Exists
        {{- end }}
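      # NoExecute taints are always tolerated; NoSchedule taints are tolerated
      # too unless tap.ignoretainted is set. Below, tap.nodeselectorterms can
      # pin workers to specific nodes via raw nodeSelectorTerms entries.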
      {{- if gt (len .Values.tap.nodeselectorterms) 0 }}
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            {{- toYaml .Values.tap.nodeselectorterms | nindent 12 }}
      {{- end }}
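      # Storage behind /app/data: a PVC when tap.persistentstorage is true,
      # otherwise an emptyDir capped at tap.storagelimit.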
      volumes:
        - hostPath:
            path: /proc
          name: proc
        - hostPath:
            path: /sys
          name: sys
        - name: data
          {{- if .Values.tap.persistentstorage }}
          persistentVolumeClaim:
            claimName: kubeshark-persistent-volume-claim
          {{- else }}
          emptyDir:
            sizeLimit: {{ .Values.tap.storagelimit }}
          {{- end }}