diff --git a/helm-chart/templates/04-hub-deployment.yaml b/helm-chart/templates/04-hub-deployment.yaml
new file mode 100644
index 000000000..507d014a2
--- /dev/null
+++ b/helm-chart/templates/04-hub-deployment.yaml
@@ -0,0 +1,54 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: {{ include "kubeshark.fullname" . }}-hub
+  namespace: {{ .Release.Namespace }}
+  labels:
+    app.kubeshark.co/app: hub
+    {{- include "kubeshark.labels" . | nindent 4 }}
+  annotations:
+    {{- if .Values.tap.annotations }}
+    {{- toYaml .Values.tap.annotations | nindent 4 }}
+    {{- end }}
+spec:
+  replicas: 1 # Set the desired number of replicas
+  selector:
+    matchLabels:
+      app.kubeshark.co/app: hub
+  template:
+    metadata:
+      labels:
+        app.kubeshark.co/app: hub
+        sidecar.istio.io/inject: "false"
+    spec:
+      dnsPolicy: ClusterFirstWithHostNet
+      serviceAccountName: {{ include "kubeshark.serviceAccountName" . }}
+      containers:
+        - name: kubeshark-hub
+          command:
+            - ./hub
+            {{ .Values.tap.debug | ternary "- -debug" "" }}
+          envFrom:
+            - configMapRef:
+                name: kubeshark-hub-config
+            - secretRef:
+                name: kubeshark-hub-secret
+          image: '{{ .Values.tap.docker.registry }}/hub:{{ .Values.tap.docker.tag }}'
+          imagePullPolicy: {{ .Values.tap.docker.imagepullpolicy }}
+          readinessProbe:
+            periodSeconds: 1
+            initialDelaySeconds: 3
+            tcpSocket:
+              port: 80
+          livenessProbe:
+            periodSeconds: 1
+            initialDelaySeconds: 3
+            tcpSocket:
+              port: 80
+          resources:
+            limits:
+              cpu: {{ .Values.tap.resources.hub.limits.cpu }}
+              memory: {{ .Values.tap.resources.hub.limits.memory }}
+            requests:
+              cpu: {{ .Values.tap.resources.hub.requests.cpu }}
+              memory: {{ .Values.tap.resources.hub.requests.memory }}
diff --git a/helm-chart/templates/04-hub-pod.yaml b/helm-chart/templates/04-hub-pod.yaml
deleted file mode 100644
index 34559e010..000000000
--- a/helm-chart/templates/04-hub-pod.yaml
+++ /dev/null
@@ -1,58 +0,0 @@
----
-apiVersion: v1
-kind: Pod
-metadata:
-  labels:
-    app: kubeshark-hub
-    app.kubeshark.co/app: hub
-    sidecar.istio.io/inject: "false"
-    {{- include "kubeshark.labels" . | nindent 4 }}
-  annotations:
-    {{- if .Values.tap.annotations }}
-    {{- toYaml .Values.tap.annotations | nindent 4 }}
-    {{- end }}
-  name: kubeshark-hub
-  namespace: {{ .Release.Namespace }}
-spec:
-  containers:
-  - command:
-    - ./hub
-    {{ .Values.tap.debug | ternary "- -debug" "" }}
-    env:
-    - name: POD_REGEX
-      value: '{{ .Values.tap.regex }}'
-    - name: NAMESPACES
-      value: '{{ gt (len .Values.tap.namespaces) 0 | ternary (join "," .Values.tap.namespaces) "" }}'
-    - name: LICENSE
-      value: '{{ .Values.license }}'
-    - name: SCRIPTING_ENV
-      value: '{{ .Values.scripting.env | toJson }}'
-    - name: SCRIPTING_SCRIPTS
-      value: '[]'
-    - name: AUTH_ENABLED
-      value: '{{ .Values.tap.auth.enabled | ternary "true" "" }}'
-    - name: AUTH_APPROVED_EMAILS
-      value: '{{ gt (len .Values.tap.auth.approvedemails) 0 | ternary (join "," .Values.tap.auth.approvedemails) "" }}'
-    - name: AUTH_APPROVED_DOMAINS
-      value: '{{ gt (len .Values.tap.auth.approveddomains) 0 | ternary (join "," .Values.tap.auth.approveddomains) "" }}'
-    image: '{{ .Values.tap.docker.registry }}/hub:{{ .Values.tap.docker.tag }}'
-    imagePullPolicy: {{ .Values.tap.docker.imagepullpolicy }}
-    name: kubeshark-hub
-    resources:
-      limits:
-        cpu: {{ .Values.tap.resources.hub.limits.cpu }}
-        memory: {{ .Values.tap.resources.hub.limits.memory }}
-      requests:
-        cpu: {{ .Values.tap.resources.hub.requests.cpu }}
-        memory: {{ .Values.tap.resources.hub.requests.memory }}
-  dnsPolicy: ClusterFirstWithHostNet
-  serviceAccountName: {{ include "kubeshark.serviceAccountName" . }}
-  terminationGracePeriodSeconds: 0
-  tolerations:
-  - effect: NoExecute
-    operator: Exists
-{{- if not .Values.tap.ignoretainted }}
-  - effect: NoSchedule
-    operator: Exists
-{{- end }}
-status: {}
diff --git a/helm-chart/templates/05-hub-service.yaml b/helm-chart/templates/05-hub-service.yaml
index d414b3e0c..aa2d45c93 100644
--- a/helm-chart/templates/05-hub-service.yaml
+++ b/helm-chart/templates/05-hub-service.yaml
@@ -17,7 +17,7 @@ spec:
     port: 80
     targetPort: 80
   selector:
-    app: kubeshark-hub
-  type: NodePort
+    app.kubeshark.co/app: hub
+  type: ClusterIP
 status:
   loadBalancer: {}
diff --git a/helm-chart/templates/06-front-deployment.yaml b/helm-chart/templates/06-front-deployment.yaml
new file mode 100644
index 000000000..731941026
--- /dev/null
+++ b/helm-chart/templates/06-front-deployment.yaml
@@ -0,0 +1,64 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: {{ include "kubeshark.fullname" . }}-front
+  namespace: {{ .Release.Namespace }}
+  labels:
+    app.kubeshark.co/app: front
+    {{- include "kubeshark.labels" . | nindent 4 }}
+  annotations:
+    {{- if .Values.tap.annotations }}
+    {{- toYaml .Values.tap.annotations | nindent 4 }}
+    {{- end }}
+spec:
+  replicas: 1 # Set the desired number of replicas
+  selector:
+    matchLabels:
+      app.kubeshark.co/app: front
+  template:
+    metadata:
+      labels:
+        app.kubeshark.co/app: front
+    spec:
+      containers:
+        - env:
+            - name: REACT_APP_DEFAULT_FILTER
+              value: ' '
+            - name: REACT_APP_HUB_HOST
+              value: ' '
+            - name: REACT_APP_HUB_PORT
+              value: '{{ .Values.tap.ingress.enabled | ternary "/api" (print ":" .Values.tap.proxy.hub.port) }}'
+          image: '{{ .Values.tap.docker.registry }}/front:{{ .Values.tap.docker.tag }}'
+          imagePullPolicy: {{ .Values.tap.docker.imagepullpolicy }}
+          name: kubeshark-front
+          livenessProbe:
+            failureThreshold: 3
+            periodSeconds: 1
+            successThreshold: 1
+            tcpSocket:
+              port: 80
+          readinessProbe:
+            failureThreshold: 3
+            periodSeconds: 1
+            successThreshold: 1
+            tcpSocket:
+              port: 80
+            timeoutSeconds: 1
+          resources:
+            limits:
+              cpu: 750m
+              memory: 1Gi
+            requests:
+              cpu: 50m
+              memory: 50Mi
+          volumeMounts:
+            - name: nginx-config
+              mountPath: /etc/nginx/conf.d/default.conf
+              subPath: default.conf
+              readOnly: true
+      volumes:
+        - name: nginx-config
+          configMap:
+            name: kubeshark-nginx-config
+      dnsPolicy: ClusterFirstWithHostNet
+      serviceAccountName: {{ include "kubeshark.serviceAccountName" . }}
diff --git a/helm-chart/templates/06-front-pod.yaml b/helm-chart/templates/06-front-pod.yaml
deleted file mode 100644
index 0a3c654fe..000000000
--- a/helm-chart/templates/06-front-pod.yaml
+++ /dev/null
@@ -1,61 +0,0 @@
----
-apiVersion: v1
-kind: Pod
-metadata:
-  labels:
-    app: kubeshark-front
-    app.kubeshark.co/app: front
-    sidecar.istio.io/inject: "false"
-    {{- include "kubeshark.labels" . | nindent 4 }}
-  annotations:
-    {{- if .Values.tap.annotations }}
-    {{- toYaml .Values.tap.annotations | nindent 4 }}
-    {{- end }}
-  name: kubeshark-front
-  namespace: {{ .Release.Namespace }}
-spec:
-  containers:
-  - env:
-    - name: REACT_APP_DEFAULT_FILTER
-      value: ' '
-    - name: REACT_APP_HUB_HOST
-      value: ' '
-    - name: REACT_APP_HUB_PORT
-      value: '{{ .Values.tap.ingress.enabled | ternary "/api" (print ":" .Values.tap.proxy.hub.port) }}'
-    image: '{{ .Values.tap.docker.registry }}/front:{{ .Values.tap.docker.tag }}'
-    imagePullPolicy: {{ .Values.tap.docker.imagepullpolicy }}
-    name: kubeshark-front
-    readinessProbe:
-      failureThreshold: 3
-      periodSeconds: 1
-      successThreshold: 1
-      tcpSocket:
-        port: 80
-      timeoutSeconds: 1
-    resources:
-      limits:
-        cpu: 750m
-        memory: 1Gi
-      requests:
-        cpu: 50m
-        memory: 50Mi
-    volumeMounts:
-    - name: nginx-config
-      mountPath: /etc/nginx/conf.d/default.conf
-      subPath: default.conf
-      readOnly: true
-  volumes:
-  - name: nginx-config
-    configMap:
-      name: kubeshark-nginx-config
-  dnsPolicy: ClusterFirstWithHostNet
-  serviceAccountName: {{ include "kubeshark.serviceAccountName" . }}
-  terminationGracePeriodSeconds: 0
-  tolerations:
-  - effect: NoExecute
-    operator: Exists
-{{- if not .Values.tap.ignoretainted }}
-  - effect: NoSchedule
-    operator: Exists
-{{- end }}
-status: {}
diff --git a/helm-chart/templates/07-front-service.yaml b/helm-chart/templates/07-front-service.yaml
index 943f0efac..12bc721eb 100644
--- a/helm-chart/templates/07-front-service.yaml
+++ b/helm-chart/templates/07-front-service.yaml
@@ -16,7 +16,7 @@ spec:
     port: 80
     targetPort: 80
   selector:
-    app: kubeshark-front
-  type: NodePort
+    app.kubeshark.co/app: front
+  type: ClusterIP
 status:
   loadBalancer: {}
diff --git a/helm-chart/templates/hub-config.yaml b/helm-chart/templates/hub-config.yaml
new file mode 100644
index 000000000..413bdd7cc
--- /dev/null
+++ b/helm-chart/templates/hub-config.yaml
@@ -0,0 +1,16 @@
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  name: kubeshark-hub-config
+  namespace: {{ .Release.Namespace }}
+  labels:
+    app.kubeshark.co/app: hub
+    {{- include "kubeshark.labels" . | nindent 4 }}
+data:
+  POD_REGEX: '{{ .Values.tap.regex }}'
+  NAMESPACES: '{{ gt (len .Values.tap.namespaces) 0 | ternary (join "," .Values.tap.namespaces) "" }}'
+  SCRIPTING_ENV: '{{ .Values.scripting.env | toJson }}'
+  SCRIPTING_SCRIPTS: '[]'
+  AUTH_ENABLED: '{{ .Values.tap.auth.enabled | ternary "true" "" }}'
+  AUTH_APPROVED_EMAILS: '{{ gt (len .Values.tap.auth.approvedemails) 0 | ternary (join "," .Values.tap.auth.approvedemails) "" }}'
+  AUTH_APPROVED_DOMAINS: '{{ gt (len .Values.tap.auth.approveddomains) 0 | ternary (join "," .Values.tap.auth.approveddomains) "" }}'
diff --git a/helm-chart/templates/hub-secret.yaml b/helm-chart/templates/hub-secret.yaml
new file mode 100644
index 000000000..8f4731780
--- /dev/null
+++ b/helm-chart/templates/hub-secret.yaml
@@ -0,0 +1,10 @@
+kind: Secret
+apiVersion: v1
+metadata:
+  name: kubeshark-hub-secret
+  namespace: {{ .Release.Namespace }}
+  labels:
+    app.kubeshark.co/app: hub
+    {{- include "kubeshark.labels" . | nindent 4 }}
+stringData:
+  LICENSE: '{{ .Values.license }}'
diff --git a/kubernetes/provider.go b/kubernetes/provider.go
index 4574ff278..1c47d34ef 100644
--- a/kubernetes/provider.go
+++ b/kubernetes/provider.go
@@ -127,6 +127,21 @@ func (provider *Provider) ListAllRunningPodsMatchingRegex(ctx context.Context, r
 	return matchingPods, nil
 }
 
+func (provider *Provider) ListPodsByAppLabel(ctx context.Context, namespaces string, labels map[string]string) ([]core.Pod, error) {
+	pods, err := provider.clientSet.CoreV1().Pods(namespaces).List(ctx, metav1.ListOptions{
+		LabelSelector: metav1.FormatLabelSelector(
+			&metav1.LabelSelector{
+				MatchLabels: labels,
+			},
+		),
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	return pods.Items, err
+}
+
 func (provider *Provider) GetPodLogs(ctx context.Context, namespace string, podName string, containerName string) (string, error) {
 	podLogOpts := core.PodLogOptions{Container: containerName}
 	req := provider.clientSet.CoreV1().Pods(namespace).GetLogs(podName, &podLogOpts)
diff --git a/kubernetes/proxy.go b/kubernetes/proxy.go
index 571cc7c44..7df22298e 100644
--- a/kubernetes/proxy.go
+++ b/kubernetes/proxy.go
@@ -24,6 +24,7 @@ const selfServicePort = 80
 
 func StartProxy(kubernetesProvider *Provider, proxyHost string, srcPort uint16, selfNamespace string, selfServiceName string) (*http.Server, error) {
 	log.Info().
+		Str("proxy-host", proxyHost).
 		Str("namespace", selfNamespace).
 		Str("service", selfServiceName).
 		Int("src-port", int(srcPort)).
@@ -101,7 +102,7 @@ func getRerouteHttpHandlerSelfStatic(proxyHandler http.Handler, selfNamespace st
 }
 
 func NewPortForward(kubernetesProvider *Provider, namespace string, podRegex *regexp.Regexp, srcPort uint16, dstPort uint16, ctx context.Context) (*portforward.PortForwarder, error) {
-	pods, err := kubernetesProvider.ListAllRunningPodsMatchingRegex(ctx, podRegex, []string{namespace})
+	pods, err := kubernetesProvider.ListPodsByAppLabel(ctx, namespace, map[string]string{"app.kubeshark.co/app": "hub"})
 	if err != nil {
 		return nil, err
 	} else if len(pods) == 0 {
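Note on the `NewPortForward` change above: with the hub managed by a Deployment, its pod name is generated rather than fixed, so the proxy now resolves it via the stable `app.kubeshark.co/app=hub` label. A minimal usage sketch of the new `ListPodsByAppLabel` helper follows (not part of the patch; the import path, the `printHubPods` name, and the `"kubeshark"` namespace are illustrative assumptions):

```go
package main

import (
	"context"
	"fmt"

	// Assumed import path for the CLI's kubernetes package.
	"github.com/kubeshark/kubeshark/kubernetes"
)

// printHubPods lists hub pods the same way NewPortForward now does:
// by label selector instead of matching generated pod names with a regex.
func printHubPods(ctx context.Context, provider *kubernetes.Provider) error {
	pods, err := provider.ListPodsByAppLabel(ctx, "kubeshark",
		map[string]string{"app.kubeshark.co/app": "hub"})
	if err != nil {
		return err
	}
	for _, pod := range pods {
		// Deployment-managed pods get generated names (kubeshark-hub-<hash>),
		// so the label is the stable handle used for port-forwarding.
		fmt.Println(pod.Name, pod.Status.Phase)
	}
	return nil
}
```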