mirror of https://github.com/kubeshark/kubeshark.git synced 2025-05-09 09:09:02 +00:00

🔨 Add tracer as a separate container to worker DaemonSet ()

* 🔨 Add `tracer` as a separate container to worker `DaemonSet`

* 🔥 Delete some of the unused connector methods

* 🔨 Set `POD_NAME` and `POD_NAMESPACE` environment variables in worker `DaemonSet`

* 🔨 Set `POD_NAME` and `POD_NAMESPACE` environment variables in hub `Deployment`

* Fix the labels

* Fix the self config role

* Restrict it to specific resource names

* Run `make generate-manifests`
M. Mert Yildiran 2023-09-23 18:23:32 -07:00 committed by GitHub
parent d94ce4dce3
commit 41dacbff1a
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
11 changed files with 226 additions and 167 deletions

View File

@@ -69,7 +69,16 @@ func updateLicense(licenseKey string) {
log.Error().Err(err).Send()
}
connector.PostLicenseSingle(config.Config.License)
kubernetesProvider, err := getKubernetesProviderForCli(false, false)
if err != nil {
log.Error().Err(err).Send()
return
}
err = kubernetes.SetSecret(kubernetesProvider, "LICENSE", config.Config.License)
if err != nil {
log.Error().Err(err).Send()
return
}
log.Info().Msg("Updated the license. Exiting.")

View File

@@ -69,6 +69,7 @@ func tap() {
kubernetesProvider, err := getKubernetesProviderForCli(false, false)
if err != nil {
log.Error().Err(err).Send()
return
}
@@ -199,7 +200,7 @@ func watchHubPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, c
ready.Lock()
ready.Hub = true
ready.Unlock()
postHubStarted(ctx, kubernetesProvider, cancel, false)
postHubStarted(ctx, kubernetesProvider, cancel)
}
ready.Lock()
@@ -405,35 +406,7 @@ func watchHubEvents(ctx context.Context, kubernetesProvider *kubernetes.Provider
}
}
func postHubStarted(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc, update bool) {
if update {
// Pod regex
connector.PostRegexToHub(config.Config.Tap.PodRegexStr, state.targetNamespaces)
// License
if config.Config.License != "" {
connector.PostLicense(config.Config.License)
}
// Scripting
connector.PostEnv(config.Config.Scripting.Env)
scripts, err := config.Config.Scripting.GetScripts()
if err != nil {
log.Error().Err(err).Send()
}
for _, script := range scripts {
_, err = connector.PostScript(script)
if err != nil {
log.Error().Err(err).Send()
}
}
connector.PostScriptDone()
}
func postHubStarted(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc) {
if config.Config.Scripting.Source != "" && config.Config.Scripting.WatchScripts {
watchScripts(false)
}

View File

@@ -34,18 +34,19 @@ metadata:
{{- if .Values.tap.annotations }}
{{- toYaml .Values.tap.annotations | nindent 4 }}
{{- end }}
name: kubeshark-self-secrets-role
name: kubeshark-self-config-role
namespace: {{ .Release.Namespace }}
rules:
- apiGroups:
- "v1"
- ""
- v1
resourceNames:
- kubeshark-secret
- kubeshark-config-map
resources:
- secrets
- configmaps
verbs:
- get
- watch
- update
- patch
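Because the role above is now scoped with `resourceNames`, the Kubeshark service account should still be able to `get` and `update` the named `kubeshark-secret` and `kubeshark-config-map` objects, but nothing else in the namespace. A hedged client-go sketch for verifying that with a `SelfSubjectAccessReview` (not part of this commit; the in-cluster config and namespace are assumptions):

```go
package main

import (
	"context"
	"fmt"

	authorizationv1 "k8s.io/api/authorization/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

// canGetNamedSecret asks the API server whether the current identity may
// "get" the kubeshark-secret object, which is what the resourceNames-scoped
// Role above is meant to allow.
func canGetNamedSecret(clientset kubernetes.Interface, namespace string) (bool, error) {
	review := &authorizationv1.SelfSubjectAccessReview{
		Spec: authorizationv1.SelfSubjectAccessReviewSpec{
			ResourceAttributes: &authorizationv1.ResourceAttributes{
				Namespace: namespace,
				Verb:      "get",
				Resource:  "secrets",
				Name:      "kubeshark-secret",
			},
		},
	}
	resp, err := clientset.AuthorizationV1().SelfSubjectAccessReviews().Create(context.TODO(), review, metav1.CreateOptions{})
	if err != nil {
		return false, err
	}
	return resp.Status.Allowed, nil
}

func main() {
	cfg, err := rest.InClusterConfig() // assumes this runs inside the cluster
	if err != nil {
		panic(err)
	}
	allowed, err := canGetNamedSecret(kubernetes.NewForConfigOrDie(cfg), "default")
	if err != nil {
		panic(err)
	}
	fmt.Println("get kubeshark-secret allowed:", allowed)
}
```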

View File

@@ -22,19 +22,19 @@ subjects:
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: kubeshark-self-secrets-role-binding
labels:
{{- include "kubeshark.labels" . | nindent 4 }}
annotations:
{{- if .Values.tap.annotations }}
{{- toYaml .Values.tap.annotations | nindent 4 }}
{{- end }}
name: kubeshark-self-config-role-binding
namespace: {{ .Release.Namespace }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubeshark-self-config-role
subjects:
- kind: ServiceAccount
name: {{ include "kubeshark.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
roleRef:
kind: Role
name: kubeshark-self-secrets-role
apiGroup: rbac.authorization.k8s.io

View File

@@ -1,8 +1,7 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "kubeshark.fullname" . }}-hub
namespace: {{ .Release.Namespace }}
labels:
app.kubeshark.co/app: hub
{{- include "kubeshark.labels" . | nindent 4 }}
@@ -10,16 +9,19 @@ metadata:
{{- if .Values.tap.annotations }}
{{- toYaml .Values.tap.annotations | nindent 4 }}
{{- end }}
name: {{ include "kubeshark.fullname" . }}-hub
namespace: {{ .Release.Namespace }}
spec:
replicas: 1 # Set the desired number of replicas
selector:
matchLabels:
app.kubeshark.co/app: hub
{{- include "kubeshark.labels" . | nindent 6 }}
template:
metadata:
labels:
app.kubeshark.co/app: hub
sidecar.istio.io/inject: "false"
{{- include "kubeshark.labels" . | nindent 8 }}
spec:
dnsPolicy: ClusterFirstWithHostNet
serviceAccountName: {{ include "kubeshark.serviceAccountName" . }}
@@ -28,6 +30,15 @@ spec:
command:
- ./hub
{{ .Values.tap.debug | ternary "- -debug" "" }}
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
envFrom:
- configMapRef:
name: kubeshark-config-map
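The `POD_NAME` and `POD_NAMESPACE` variables added above are filled in by the Kubernetes Downward API (`fieldRef` on the pod's own metadata), so the hub sees them as ordinary environment variables. A minimal sketch of how a containerized process can consume them, illustrative only and not the hub's actual code:

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// Both values are injected by the Downward API (fieldRef) entries in the
	// Deployment spec above.
	podName := os.Getenv("POD_NAME")
	podNamespace := os.Getenv("POD_NAMESPACE")

	if podName == "" || podNamespace == "" {
		fmt.Println("POD_NAME/POD_NAMESPACE not set; probably running outside Kubernetes")
		return
	}
	fmt.Printf("running as pod %s in namespace %s\n", podName, podNamespace)
}
```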

View File

@@ -1,8 +1,6 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "kubeshark.fullname" . }}-front
namespace: {{ .Release.Namespace }}
labels:
app.kubeshark.co/app: front
{{- include "kubeshark.labels" . | nindent 4 }}
@@ -10,15 +8,19 @@ metadata:
{{- if .Values.tap.annotations }}
{{- toYaml .Values.tap.annotations | nindent 4 }}
{{- end }}
name: {{ include "kubeshark.fullname" . }}-front
namespace: {{ .Release.Namespace }}
spec:
replicas: 1 # Set the desired number of replicas
selector:
matchLabels:
app.kubeshark.co/app: front
{{- include "kubeshark.labels" . | nindent 6 }}
template:
metadata:
labels:
app.kubeshark.co/app: front
{{- include "kubeshark.labels" . | nindent 8 }}
spec:
containers:
- env:

View File

@@ -33,13 +33,21 @@ spec:
- -port
- '{{ .Values.tap.proxy.worker.srvport }}'
- -servicemesh
{{ .Values.tap.tls | ternary "- -tls" "" }}
- -procfs
- /hostproc
{{ .Values.tap.debug | ternary "- -debug" "" }}
image: '{{ .Values.tap.docker.registry }}/worker:{{ .Values.tap.docker.tag }}'
imagePullPolicy: {{ .Values.tap.docker.imagepullpolicy }}
name: kubeshark-worker-daemon-set
name: sniffer
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
envFrom:
- secretRef:
name: kubeshark-secret
@@ -67,7 +75,6 @@ spec:
- SYS_ADMIN
- SYS_PTRACE
- DAC_OVERRIDE
- SYS_RESOURCE
- SYS_MODULE
drop:
- ALL
@@ -92,10 +99,60 @@ spec:
- mountPath: /sys
name: sys
readOnly: true
{{- if .Values.tap.persistentstorage }}
{{- if .Values.tap.persistentstorage }}
- mountPath: /app/data
name: kubeshark-persistent-volume
{{- end }}
{{- end }}
{{- if .Values.tap.tls }}
- command:
- ./tracer
- -procfs
- /hostproc
{{ .Values.tap.debug | ternary "- -debug" "" }}
image: '{{ .Values.tap.docker.registry }}/worker:{{ .Values.tap.docker.tag }}'
imagePullPolicy: {{ .Values.tap.docker.imagepullpolicy }}
name: tracer
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
envFrom:
- secretRef:
name: kubeshark-secret
resources:
limits:
cpu: {{ .Values.tap.resources.worker.limits.cpu }}
memory: {{ .Values.tap.resources.worker.limits.memory }}
requests:
cpu: {{ .Values.tap.resources.worker.requests.cpu }}
memory: {{ .Values.tap.resources.worker.requests.memory }}
securityContext:
capabilities:
add:
- SYS_ADMIN
- SYS_PTRACE
- DAC_OVERRIDE
- SYS_RESOURCE
- SYS_MODULE
drop:
- ALL
volumeMounts:
- mountPath: /hostproc
name: proc
readOnly: true
- mountPath: /sys
name: sys
readOnly: true
{{- if .Values.tap.persistentstorage }}
- mountPath: /app/data
name: kubeshark-persistent-volume
{{- end }}
{{- end }}
dnsPolicy: ClusterFirstWithHostNet
hostNetwork: true
serviceAccountName: {{ include "kubeshark.serviceAccountName" . }}

View File

@@ -10,7 +10,7 @@ data:
POD_REGEX: '{{ .Values.tap.regex }}'
NAMESPACES: '{{ gt (len .Values.tap.namespaces) 0 | ternary (join "," .Values.tap.namespaces) "" }}'
SCRIPTING_ENV: '{{ .Values.scripting.env | toJson }}'
SCRIPTING_SCRIPTS: '[]'
SCRIPTING_SCRIPTS: '{}'
AUTH_ENABLED: '{{ .Values.tap.auth.enabled | ternary "true" "" }}'
AUTH_APPROVED_EMAILS: '{{ gt (len .Values.tap.auth.approvedemails) 0 | ternary (join "," .Values.tap.auth.approvedemails) "" }}'
AUTH_APPROVED_DOMAINS: '{{ gt (len .Values.tap.auth.approveddomains) 0 | ternary (join "," .Values.tap.auth.approveddomains) "" }}'
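The `SCRIPTING_SCRIPTS` default changes from a JSON array (`[]`) to a JSON object (`{}`), which suggests scripts are now keyed (for example by index) rather than stored as a flat list. A hedged sketch of the difference in how such a value would be decoded; the `Script` struct and key type here are assumptions, not Kubeshark's actual types:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Script is a stand-in for the real script type; fields are illustrative.
type Script struct {
	Title string `json:"title"`
	Code  string `json:"code"`
}

func main() {
	// With the old '[]' default, the natural decode target is a slice.
	var asList []Script
	_ = json.Unmarshal([]byte(`[]`), &asList)

	// With the new '{}' default, the natural decode target is a map,
	// e.g. keyed by script index.
	var asMap map[string]Script
	if err := json.Unmarshal([]byte(`{}`), &asMap); err != nil {
		fmt.Println("decode error:", err)
		return
	}
	fmt.Printf("decoded %d scripts\n", len(asMap))
}
```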

View File

@@ -90,39 +90,6 @@ func (connector *Connector) PostWorkerPodToHub(pod *v1.Pod) {
}
}
type postRegexRequest struct {
Regex string `json:"regex"`
Namespaces []string `json:"namespaces"`
}
func (connector *Connector) PostRegexToHub(regex string, namespaces []string) {
postRegexUrl := fmt.Sprintf("%s/pods/regex", connector.url)
payload := postRegexRequest{
Regex: regex,
Namespaces: namespaces,
}
if payloadMarshalled, err := json.Marshal(payload); err != nil {
log.Error().Err(err).Msg("Failed to marshal the pod regex:")
} else {
ok := false
for !ok {
var resp *http.Response
if resp, err = utils.Post(postRegexUrl, "application/json", bytes.NewBuffer(payloadMarshalled), connector.client, config.Config.License); err != nil || resp.StatusCode != http.StatusOK {
if _, ok := err.(*url.Error); ok {
break
}
log.Warn().Err(err).Msg("Failed sending the pod regex to Hub. Retrying...")
} else {
log.Debug().Str("regex", regex).Strs("namespaces", namespaces).Msg("Reported pod regex to Hub:")
return
}
time.Sleep(DefaultSleep)
}
}
}
type postLicenseRequest struct {
License string `json:"license"`
}
@@ -154,53 +121,6 @@ func (connector *Connector) PostLicense(license string) {
}
}
func (connector *Connector) PostLicenseSingle(license string) {
postLicenseUrl := fmt.Sprintf("%s/license", connector.url)
payload := postLicenseRequest{
License: license,
}
if payloadMarshalled, err := json.Marshal(payload); err != nil {
log.Error().Err(err).Msg("Failed to marshal the payload:")
} else {
var resp *http.Response
if resp, err = utils.Post(postLicenseUrl, "application/json", bytes.NewBuffer(payloadMarshalled), connector.client, config.Config.License); err != nil || resp.StatusCode != http.StatusOK {
log.Warn().Err(err).Msg("Failed sending the license to Hub.")
} else {
log.Debug().Str("license", license).Msg("Reported license to Hub:")
return
}
}
}
func (connector *Connector) PostEnv(env map[string]interface{}) {
if len(env) == 0 {
return
}
postEnvUrl := fmt.Sprintf("%s/scripts/env", connector.url)
if envMarshalled, err := json.Marshal(env); err != nil {
log.Error().Err(err).Msg("Failed to marshal the env:")
} else {
ok := false
for !ok {
var resp *http.Response
if resp, err = utils.Post(postEnvUrl, "application/json", bytes.NewBuffer(envMarshalled), connector.client, config.Config.License); err != nil || resp.StatusCode != http.StatusOK {
if _, ok := err.(*url.Error); ok {
break
}
log.Warn().Err(err).Msg("Failed sending the scripting environment variables to Hub. Retrying...")
} else {
log.Debug().Interface("env", env).Msg("Reported scripting environment variables to Hub:")
return
}
time.Sleep(DefaultSleep)
}
}
}
func (connector *Connector) PostScript(script *misc.Script) (index int64, err error) {
postScriptUrl := fmt.Sprintf("%s/scripts", connector.url)
@@ -323,26 +243,6 @@ func (connector *Connector) DeleteScript(index int64) (err error) {
return
}
func (connector *Connector) PostScriptDone() {
postScripDonetUrl := fmt.Sprintf("%s/scripts/done", connector.url)
ok := false
var err error
for !ok {
var resp *http.Response
if resp, err = utils.Post(postScripDonetUrl, "application/json", nil, connector.client, config.Config.License); err != nil || resp.StatusCode != http.StatusOK {
if _, ok := err.(*url.Error); ok {
break
}
log.Warn().Err(err).Msg("Failed sending the POST scripts done to Hub. Retrying...")
} else {
log.Debug().Msg("Reported POST scripts done to Hub.")
return
}
time.Sleep(DefaultSleep)
}
}
func (connector *Connector) PostPcapsMerge(out *os.File) {
postEnvUrl := fmt.Sprintf("%s/pcaps/merge", connector.url)

kubernetes/config.go Normal file (+26)
View File

@@ -0,0 +1,26 @@
package kubernetes
import (
"context"
"github.com/kubeshark/kubeshark/config"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
SUFFIX_SECRET = "secret"
)
func SetSecret(provider *Provider, key string, value string) (err error) {
var secret *v1.Secret
secret, err = provider.clientSet.CoreV1().Secrets(config.Config.Tap.Release.Namespace).Get(context.TODO(), SelfResourcesPrefix+SUFFIX_SECRET, metav1.GetOptions{})
if err != nil {
return
}
secret.StringData[key] = value
_, err = provider.clientSet.CoreV1().Secrets(config.Config.Tap.Release.Namespace).Update(context.TODO(), secret, metav1.UpdateOptions{})
return
}
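One caveat worth noting about the new file above: Secrets read back from the API server carry their contents in `Data`, while `StringData` is a write-only convenience field that normally comes back empty, so assigning into it directly can panic on a nil map. A minimal defensive sketch under that assumption, reusing the identifiers from the file above (not the committed code):

```go
// SetSecretSafe is an illustrative variant of SetSecret that guards against
// a nil StringData map on the Secret object fetched from the API server.
func SetSecretSafe(provider *Provider, key string, value string) error {
	secrets := provider.clientSet.CoreV1().Secrets(config.Config.Tap.Release.Namespace)

	secret, err := secrets.Get(context.TODO(), SelfResourcesPrefix+SUFFIX_SECRET, metav1.GetOptions{})
	if err != nil {
		return err
	}

	// StringData is write-only and is normally empty on objects read back
	// from the API, so initialize it before assigning.
	if secret.StringData == nil {
		secret.StringData = map[string]string{}
	}
	secret.StringData[key] = value

	_, err = secrets.Update(context.TODO(), secret, metav1.UpdateOptions{})
	return err
}
```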

View File

@@ -94,7 +94,7 @@ data:
POD_REGEX: '.*'
NAMESPACES: ''
SCRIPTING_ENV: '{}'
SCRIPTING_SCRIPTS: '[]'
SCRIPTING_SCRIPTS: '{}'
AUTH_ENABLED: ''
AUTH_APPROVED_EMAILS: ''
AUTH_APPROVED_DOMAINS: ''
@@ -161,27 +161,27 @@ metadata:
app.kubernetes.io/version: "50.4"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-self-secrets-role
name: kubeshark-self-config-role
namespace: default
rules:
- apiGroups:
- "v1"
- ""
- v1
resourceNames:
- kubeshark-secret
- kubeshark-config-map
resources:
- secrets
- configmaps
verbs:
- get
- watch
- update
- patch
---
# Source: kubeshark/templates/03-cluster-role-binding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: kubeshark-self-secrets-role-binding
labels:
helm.sh/chart: kubeshark-50.4
app.kubernetes.io/name: kubeshark
@@ -189,15 +189,16 @@ metadata:
app.kubernetes.io/version: "50.4"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-self-config-role-binding
namespace: default
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubeshark-self-config-role
subjects:
- kind: ServiceAccount
name: kubeshark-service-account
namespace: default
roleRef:
kind: Role
name: kubeshark-self-secrets-role
apiGroup: rbac.authorization.k8s.io
---
# Source: kubeshark/templates/05-hub-service.yaml
apiVersion: v1
@@ -288,13 +289,21 @@ spec:
- -port
- '8897'
- -servicemesh
- -tls
- -procfs
- /hostproc
image: 'docker.io/kubeshark/worker:latest'
imagePullPolicy: Always
name: kubeshark-worker-daemon-set
name: sniffer
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
envFrom:
- secretRef:
name: kubeshark-secret
@@ -313,7 +322,6 @@ spec:
- SYS_ADMIN
- SYS_PTRACE
- DAC_OVERRIDE
- SYS_RESOURCE
- SYS_MODULE
drop:
- ALL
@@ -338,6 +346,50 @@ spec:
- mountPath: /sys
name: sys
readOnly: true
- command:
- ./tracer
- -procfs
- /hostproc
image: 'docker.io/kubeshark/worker:latest'
imagePullPolicy: Always
name: tracer
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
envFrom:
- secretRef:
name: kubeshark-secret
resources:
limits:
cpu: 750m
memory: 1Gi
requests:
cpu: 50m
memory: 50Mi
securityContext:
capabilities:
add:
- SYS_ADMIN
- SYS_PTRACE
- DAC_OVERRIDE
- SYS_RESOURCE
- SYS_MODULE
drop:
- ALL
volumeMounts:
- mountPath: /hostproc
name: proc
readOnly: true
- mountPath: /sys
name: sys
readOnly: true
dnsPolicy: ClusterFirstWithHostNet
hostNetwork: true
serviceAccountName: kubeshark-service-account
@@ -368,8 +420,6 @@ spec:
apiVersion: apps/v1
kind: Deployment
metadata:
name: kubeshark-hub
namespace: default
labels:
app.kubeshark.co/app: hub
helm.sh/chart: kubeshark-50.4
@@ -378,16 +428,27 @@ metadata:
app.kubernetes.io/version: "50.4"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-hub
namespace: default
spec:
replicas: 1 # Set the desired number of replicas
selector:
matchLabels:
app.kubeshark.co/app: hub
helm.sh/chart: kubeshark-50.4
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "50.4"
app.kubernetes.io/managed-by: Helm
template:
metadata:
labels:
app.kubeshark.co/app: hub
sidecar.istio.io/inject: "false"
helm.sh/chart: kubeshark-50.4
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "50.4"
app.kubernetes.io/managed-by: Helm
spec:
dnsPolicy: ClusterFirstWithHostNet
serviceAccountName: kubeshark-service-account
@@ -396,6 +457,15 @@ spec:
command:
- ./hub
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
envFrom:
- configMapRef:
name: kubeshark-config-map
@@ -429,8 +499,6 @@ spec:
apiVersion: apps/v1
kind: Deployment
metadata:
name: kubeshark-front
namespace: default
labels:
app.kubeshark.co/app: front
helm.sh/chart: kubeshark-50.4
@ -439,15 +507,27 @@ metadata:
app.kubernetes.io/version: "50.4"
app.kubernetes.io/managed-by: Helm
annotations:
name: kubeshark-front
namespace: default
spec:
replicas: 1 # Set the desired number of replicas
selector:
matchLabels:
app.kubeshark.co/app: front
helm.sh/chart: kubeshark-50.4
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "50.4"
app.kubernetes.io/managed-by: Helm
template:
metadata:
labels:
app.kubeshark.co/app: front
helm.sh/chart: kubeshark-50.4
app.kubernetes.io/name: kubeshark
app.kubernetes.io/instance: kubeshark
app.kubernetes.io/version: "50.4"
app.kubernetes.io/managed-by: Helm
spec:
containers:
- env: