diff --git a/cmd/pro.go b/cmd/pro.go
index e996c669f..9666e0924 100644
--- a/cmd/pro.go
+++ b/cmd/pro.go
@@ -69,7 +69,16 @@ func updateLicense(licenseKey string) {
         log.Error().Err(err).Send()
     }
 
-    connector.PostLicenseSingle(config.Config.License)
+    kubernetesProvider, err := getKubernetesProviderForCli(false, false)
+    if err != nil {
+        log.Error().Err(err).Send()
+        return
+    }
+    err = kubernetes.SetSecret(kubernetesProvider, "LICENSE", config.Config.License)
+    if err != nil {
+        log.Error().Err(err).Send()
+        return
+    }
 
     log.Info().Msg("Updated the license. Exiting.")
 
diff --git a/cmd/tapRunner.go b/cmd/tapRunner.go
index 037c1f250..da835a581 100644
--- a/cmd/tapRunner.go
+++ b/cmd/tapRunner.go
@@ -69,6 +69,7 @@ func tap() {
 
     kubernetesProvider, err := getKubernetesProviderForCli(false, false)
     if err != nil {
+        log.Error().Err(err).Send()
         return
     }
 
@@ -199,7 +200,7 @@ func watchHubPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, c
             ready.Lock()
             ready.Hub = true
             ready.Unlock()
-            postHubStarted(ctx, kubernetesProvider, cancel, false)
+            postHubStarted(ctx, kubernetesProvider, cancel)
         }
 
         ready.Lock()
@@ -405,35 +406,7 @@ func watchHubEvents(ctx context.Context, kubernetesProvider *kubernetes.Provider
     }
 }
 
-func postHubStarted(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc, update bool) {
-
-    if update {
-        // Pod regex
-        connector.PostRegexToHub(config.Config.Tap.PodRegexStr, state.targetNamespaces)
-
-        // License
-        if config.Config.License != "" {
-            connector.PostLicense(config.Config.License)
-        }
-
-        // Scripting
-        connector.PostEnv(config.Config.Scripting.Env)
-
-        scripts, err := config.Config.Scripting.GetScripts()
-        if err != nil {
-            log.Error().Err(err).Send()
-        }
-
-        for _, script := range scripts {
-            _, err = connector.PostScript(script)
-            if err != nil {
-                log.Error().Err(err).Send()
-            }
-        }
-
-        connector.PostScriptDone()
-    }
-
+func postHubStarted(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc) {
     if config.Config.Scripting.Source != "" && config.Config.Scripting.WatchScripts {
         watchScripts(false)
     }
diff --git a/helm-chart/templates/02-cluster-role.yaml b/helm-chart/templates/02-cluster-role.yaml
index 58be9fcc1..e99dbbae7 100644
--- a/helm-chart/templates/02-cluster-role.yaml
+++ b/helm-chart/templates/02-cluster-role.yaml
@@ -34,18 +34,19 @@ metadata:
     {{- if .Values.tap.annotations }}
     {{- toYaml .Values.tap.annotations | nindent 4 }}
     {{- end }}
-  name: kubeshark-self-secrets-role
+  name: kubeshark-self-config-role
   namespace: {{ .Release.Namespace }}
 rules:
   - apiGroups:
-      - "v1"
       - ""
+      - v1
     resourceNames:
       - kubeshark-secret
+      - kubeshark-config-map
     resources:
       - secrets
+      - configmaps
     verbs:
       - get
       - watch
       - update
-      - patch
diff --git a/helm-chart/templates/03-cluster-role-binding.yaml b/helm-chart/templates/03-cluster-role-binding.yaml
index 2a7f065db..b20a809b4 100644
--- a/helm-chart/templates/03-cluster-role-binding.yaml
+++ b/helm-chart/templates/03-cluster-role-binding.yaml
@@ -22,19 +22,19 @@ subjects:
 apiVersion: rbac.authorization.k8s.io/v1
 kind: RoleBinding
 metadata:
-  name: kubeshark-self-secrets-role-binding
   labels:
     {{- include "kubeshark.labels" . | nindent 4 }}
   annotations:
     {{- if .Values.tap.annotations }}
     {{- toYaml .Values.tap.annotations | nindent 4 }}
     {{- end }}
+  name: kubeshark-self-config-role-binding
   namespace: {{ .Release.Namespace }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: kubeshark-self-config-role
 subjects:
   - kind: ServiceAccount
     name: {{ include "kubeshark.serviceAccountName" . }}
     namespace: {{ .Release.Namespace }}
-roleRef:
-  kind: Role
-  name: kubeshark-self-secrets-role
-  apiGroup: rbac.authorization.k8s.io
diff --git a/helm-chart/templates/04-hub-deployment.yaml b/helm-chart/templates/04-hub-deployment.yaml
index b6c12d299..cf6ff5162 100644
--- a/helm-chart/templates/04-hub-deployment.yaml
+++ b/helm-chart/templates/04-hub-deployment.yaml
@@ -1,8 +1,7 @@
+---
 apiVersion: apps/v1
 kind: Deployment
 metadata:
-  name: {{ include "kubeshark.fullname" . }}-hub
-  namespace: {{ .Release.Namespace }}
   labels:
     app.kubeshark.co/app: hub
     {{- include "kubeshark.labels" . | nindent 4 }}
@@ -10,16 +9,19 @@ metadata:
     {{- if .Values.tap.annotations }}
     {{- toYaml .Values.tap.annotations | nindent 4 }}
     {{- end }}
+  name: {{ include "kubeshark.fullname" . }}-hub
+  namespace: {{ .Release.Namespace }}
 spec:
   replicas: 1 # Set the desired number of replicas
   selector:
     matchLabels:
       app.kubeshark.co/app: hub
+      {{- include "kubeshark.labels" . | nindent 6 }}
   template:
     metadata:
       labels:
         app.kubeshark.co/app: hub
-        sidecar.istio.io/inject: "false"
+        {{- include "kubeshark.labels" . | nindent 8 }}
     spec:
       dnsPolicy: ClusterFirstWithHostNet
       serviceAccountName: {{ include "kubeshark.serviceAccountName" . }}
@@ -28,6 +30,15 @@ spec:
           command:
             - ./hub
             {{ .Values.tap.debug | ternary "- -debug" "" }}
+          env:
+            - name: POD_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.name
+            - name: POD_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
           envFrom:
             - configMapRef:
                 name: kubeshark-config-map
diff --git a/helm-chart/templates/06-front-deployment.yaml b/helm-chart/templates/06-front-deployment.yaml
index 5998da290..dc885de4f 100644
--- a/helm-chart/templates/06-front-deployment.yaml
+++ b/helm-chart/templates/06-front-deployment.yaml
@@ -1,8 +1,6 @@
 apiVersion: apps/v1
 kind: Deployment
 metadata:
-  name: {{ include "kubeshark.fullname" . }}-front
-  namespace: {{ .Release.Namespace }}
   labels:
     app.kubeshark.co/app: front
     {{- include "kubeshark.labels" . | nindent 4 }}
@@ -10,15 +8,19 @@ metadata:
     {{- if .Values.tap.annotations }}
     {{- toYaml .Values.tap.annotations | nindent 4 }}
     {{- end }}
+  name: {{ include "kubeshark.fullname" . }}-front
+  namespace: {{ .Release.Namespace }}
 spec:
   replicas: 1 # Set the desired number of replicas
   selector:
     matchLabels:
       app.kubeshark.co/app: front
+      {{- include "kubeshark.labels" . | nindent 6 }}
   template:
     metadata:
       labels:
         app.kubeshark.co/app: front
+        {{- include "kubeshark.labels" . | nindent 8 }}
     spec:
       containers:
         - env:
diff --git a/helm-chart/templates/09-worker-daemon-set.yaml b/helm-chart/templates/09-worker-daemon-set.yaml
index 0901127ad..e52f3bce9 100644
--- a/helm-chart/templates/09-worker-daemon-set.yaml
+++ b/helm-chart/templates/09-worker-daemon-set.yaml
@@ -33,13 +33,21 @@ spec:
             - -port
             - '{{ .Values.tap.proxy.worker.srvport }}'
             - -servicemesh
-            {{ .Values.tap.tls | ternary "- -tls" "" }}
             - -procfs
             - /hostproc
             {{ .Values.tap.debug | ternary "- -debug" "" }}
           image: '{{ .Values.tap.docker.registry }}/worker:{{ .Values.tap.docker.tag }}'
           imagePullPolicy: {{ .Values.tap.docker.imagepullpolicy }}
-          name: kubeshark-worker-daemon-set
+          name: sniffer
+          env:
+            - name: POD_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.name
+            - name: POD_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
           envFrom:
             - secretRef:
                 name: kubeshark-secret
@@ -67,7 +75,6 @@ spec:
                 - SYS_ADMIN
                 - SYS_PTRACE
                 - DAC_OVERRIDE
-                - SYS_RESOURCE
                 - SYS_MODULE
               drop:
                 - ALL
@@ -92,10 +99,60 @@ spec:
             - mountPath: /sys
               name: sys
               readOnly: true
-{{- if .Values.tap.persistentstorage }}
+            {{- if .Values.tap.persistentstorage }}
             - mountPath: /app/data
               name: kubeshark-persistent-volume
-{{- end }}
+            {{- end }}
+        {{- if .Values.tap.tls }}
+        - command:
+            - ./tracer
+            - -procfs
+            - /hostproc
+            {{ .Values.tap.debug | ternary "- -debug" "" }}
+          image: '{{ .Values.tap.docker.registry }}/worker:{{ .Values.tap.docker.tag }}'
+          imagePullPolicy: {{ .Values.tap.docker.imagepullpolicy }}
+          name: tracer
+          env:
+            - name: POD_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.name
+            - name: POD_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+          envFrom:
+            - secretRef:
+                name: kubeshark-secret
+          resources:
+            limits:
+              cpu: {{ .Values.tap.resources.worker.limits.cpu }}
+              memory: {{ .Values.tap.resources.worker.limits.memory }}
+            requests:
+              cpu: {{ .Values.tap.resources.worker.requests.cpu }}
+              memory: {{ .Values.tap.resources.worker.requests.memory }}
+          securityContext:
+            capabilities:
+              add:
+                - SYS_ADMIN
+                - SYS_PTRACE
+                - DAC_OVERRIDE
+                - SYS_RESOURCE
+                - SYS_MODULE
+              drop:
+                - ALL
+          volumeMounts:
+            - mountPath: /hostproc
+              name: proc
+              readOnly: true
+            - mountPath: /sys
+              name: sys
+              readOnly: true
+            {{- if .Values.tap.persistentstorage }}
+            - mountPath: /app/data
+              name: kubeshark-persistent-volume
+            {{- end }}
+        {{- end }}
       dnsPolicy: ClusterFirstWithHostNet
       hostNetwork: true
       serviceAccountName: {{ include "kubeshark.serviceAccountName" . }}
diff --git a/helm-chart/templates/12-config-map.yaml b/helm-chart/templates/12-config-map.yaml
index 14b56b013..ae3187447 100644
--- a/helm-chart/templates/12-config-map.yaml
+++ b/helm-chart/templates/12-config-map.yaml
@@ -10,7 +10,7 @@ data:
   POD_REGEX: '{{ .Values.tap.regex }}'
   NAMESPACES: '{{ gt (len .Values.tap.namespaces) 0 | ternary (join "," .Values.tap.namespaces) "" }}'
   SCRIPTING_ENV: '{{ .Values.scripting.env | toJson }}'
-  SCRIPTING_SCRIPTS: '[]'
+  SCRIPTING_SCRIPTS: '{}'
   AUTH_ENABLED: '{{ .Values.tap.auth.enabled | ternary "true" "" }}'
   AUTH_APPROVED_EMAILS: '{{ gt (len .Values.tap.auth.approvedemails) 0 | ternary (join "," .Values.tap.auth.approvedemails) "" }}'
   AUTH_APPROVED_DOMAINS: '{{ gt (len .Values.tap.auth.approveddomains) 0 | ternary (join "," .Values.tap.auth.approveddomains) "" }}'
diff --git a/internal/connect/hub.go b/internal/connect/hub.go
index 40c1d6641..02af3fad3 100644
--- a/internal/connect/hub.go
+++ b/internal/connect/hub.go
@@ -90,39 +90,6 @@ func (connector *Connector) PostWorkerPodToHub(pod *v1.Pod) {
     }
 }
 
-type postRegexRequest struct {
-    Regex      string   `json:"regex"`
-    Namespaces []string `json:"namespaces"`
-}
-
-func (connector *Connector) PostRegexToHub(regex string, namespaces []string) {
-    postRegexUrl := fmt.Sprintf("%s/pods/regex", connector.url)
-
-    payload := postRegexRequest{
-        Regex:      regex,
-        Namespaces: namespaces,
-    }
-
-    if payloadMarshalled, err := json.Marshal(payload); err != nil {
-        log.Error().Err(err).Msg("Failed to marshal the pod regex:")
-    } else {
-        ok := false
-        for !ok {
-            var resp *http.Response
-            if resp, err = utils.Post(postRegexUrl, "application/json", bytes.NewBuffer(payloadMarshalled), connector.client, config.Config.License); err != nil || resp.StatusCode != http.StatusOK {
-                if _, ok := err.(*url.Error); ok {
-                    break
-                }
-                log.Warn().Err(err).Msg("Failed sending the pod regex to Hub. Retrying...")
-            } else {
-                log.Debug().Str("regex", regex).Strs("namespaces", namespaces).Msg("Reported pod regex to Hub:")
-                return
-            }
-            time.Sleep(DefaultSleep)
-        }
-    }
-}
-
 type postLicenseRequest struct {
     License string `json:"license"`
 }
@@ -154,53 +121,6 @@ func (connector *Connector) PostLicense(license string) {
     }
 }
 
-func (connector *Connector) PostLicenseSingle(license string) {
-    postLicenseUrl := fmt.Sprintf("%s/license", connector.url)
-
-    payload := postLicenseRequest{
-        License: license,
-    }
-
-    if payloadMarshalled, err := json.Marshal(payload); err != nil {
-        log.Error().Err(err).Msg("Failed to marshal the payload:")
-    } else {
-        var resp *http.Response
-        if resp, err = utils.Post(postLicenseUrl, "application/json", bytes.NewBuffer(payloadMarshalled), connector.client, config.Config.License); err != nil || resp.StatusCode != http.StatusOK {
-            log.Warn().Err(err).Msg("Failed sending the license to Hub.")
-        } else {
-            log.Debug().Str("license", license).Msg("Reported license to Hub:")
-            return
-        }
-    }
-}
-
-func (connector *Connector) PostEnv(env map[string]interface{}) {
-    if len(env) == 0 {
-        return
-    }
-
-    postEnvUrl := fmt.Sprintf("%s/scripts/env", connector.url)
-
-    if envMarshalled, err := json.Marshal(env); err != nil {
-        log.Error().Err(err).Msg("Failed to marshal the env:")
-    } else {
-        ok := false
-        for !ok {
-            var resp *http.Response
-            if resp, err = utils.Post(postEnvUrl, "application/json", bytes.NewBuffer(envMarshalled), connector.client, config.Config.License); err != nil || resp.StatusCode != http.StatusOK {
-                if _, ok := err.(*url.Error); ok {
-                    break
-                }
-                log.Warn().Err(err).Msg("Failed sending the scripting environment variables to Hub. Retrying...")
-            } else {
-                log.Debug().Interface("env", env).Msg("Reported scripting environment variables to Hub:")
-                return
-            }
-            time.Sleep(DefaultSleep)
-        }
-    }
-}
-
 func (connector *Connector) PostScript(script *misc.Script) (index int64, err error) {
     postScriptUrl := fmt.Sprintf("%s/scripts", connector.url)
 
@@ -323,26 +243,6 @@ func (connector *Connector) DeleteScript(index int64) (err error) {
     return
 }
 
-func (connector *Connector) PostScriptDone() {
-    postScripDonetUrl := fmt.Sprintf("%s/scripts/done", connector.url)
-
-    ok := false
-    var err error
-    for !ok {
-        var resp *http.Response
-        if resp, err = utils.Post(postScripDonetUrl, "application/json", nil, connector.client, config.Config.License); err != nil || resp.StatusCode != http.StatusOK {
-            if _, ok := err.(*url.Error); ok {
-                break
-            }
-            log.Warn().Err(err).Msg("Failed sending the POST scripts done to Hub. Retrying...")
-        } else {
-            log.Debug().Msg("Reported POST scripts done to Hub.")
-            return
-        }
-        time.Sleep(DefaultSleep)
-    }
-}
-
 func (connector *Connector) PostPcapsMerge(out *os.File) {
     postEnvUrl := fmt.Sprintf("%s/pcaps/merge", connector.url)
 
diff --git a/kubernetes/config.go b/kubernetes/config.go
new file mode 100644
index 000000000..f8d662172
--- /dev/null
+++ b/kubernetes/config.go
@@ -0,0 +1,26 @@
+package kubernetes
+
+import (
+    "context"
+
+    "github.com/kubeshark/kubeshark/config"
+    v1 "k8s.io/api/core/v1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+const (
+    SUFFIX_SECRET = "secret"
+)
+
+func SetSecret(provider *Provider, key string, value string) (err error) {
+    var secret *v1.Secret
+    secret, err = provider.clientSet.CoreV1().Secrets(config.Config.Tap.Release.Namespace).Get(context.TODO(), SelfResourcesPrefix+SUFFIX_SECRET, metav1.GetOptions{})
+    if err != nil {
+        return
+    }
+
+    secret.StringData[key] = value
+
+    _, err = provider.clientSet.CoreV1().Secrets(config.Config.Tap.Release.Namespace).Update(context.TODO(), secret, metav1.UpdateOptions{})
+    return
+}
diff --git a/manifests/complete.yaml b/manifests/complete.yaml
index 0eaf2ae26..aebf5a4df 100644
--- a/manifests/complete.yaml
+++ b/manifests/complete.yaml
@@ -94,7 +94,7 @@ data:
   POD_REGEX: '.*'
   NAMESPACES: ''
   SCRIPTING_ENV: '{}'
-  SCRIPTING_SCRIPTS: '[]'
+  SCRIPTING_SCRIPTS: '{}'
   AUTH_ENABLED: ''
   AUTH_APPROVED_EMAILS: ''
   AUTH_APPROVED_DOMAINS: ''
@@ -161,27 +161,27 @@ metadata:
     app.kubernetes.io/version: "50.4"
     app.kubernetes.io/managed-by: Helm
   annotations:
-  name: kubeshark-self-secrets-role
+  name: kubeshark-self-config-role
   namespace: default
 rules:
   - apiGroups:
-      - "v1"
       - ""
+      - v1
     resourceNames:
      - kubeshark-secret
+      - kubeshark-config-map
    resources:
      - secrets
+      - configmaps
    verbs:
      - get
      - watch
      - update
-      - patch
 ---
 # Source: kubeshark/templates/03-cluster-role-binding.yaml
 apiVersion: rbac.authorization.k8s.io/v1
 kind: RoleBinding
 metadata:
-  name: kubeshark-self-secrets-role-binding
   labels:
     helm.sh/chart: kubeshark-50.4
     app.kubernetes.io/name: kubeshark
@@ -189,15 +189,16 @@ metadata:
     app.kubernetes.io/version: "50.4"
     app.kubernetes.io/managed-by: Helm
   annotations:
+  name: kubeshark-self-config-role-binding
   namespace: default
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: kubeshark-self-config-role
 subjects:
   - kind: ServiceAccount
     name: kubeshark-service-account
     namespace: default
-roleRef:
-  kind: Role
-  name: kubeshark-self-secrets-role
-  apiGroup: rbac.authorization.k8s.io
 ---
 # Source: kubeshark/templates/05-hub-service.yaml
 apiVersion: v1
@@ -288,13 +289,21 @@ spec:
             - -port
             - '8897'
             - -servicemesh
-            - -tls
            - -procfs
            - /hostproc
 
          image: 'docker.io/kubeshark/worker:latest'
          imagePullPolicy: Always
-          name: kubeshark-worker-daemon-set
+          name: sniffer
+          env:
+            - name: POD_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.name
+            - name: POD_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
          envFrom:
            - secretRef:
                name: kubeshark-secret
@@ -313,7 +322,6 @@ spec:
                - SYS_ADMIN
                - SYS_PTRACE
                - DAC_OVERRIDE
-                - SYS_RESOURCE
                - SYS_MODULE
              drop:
                - ALL
@@ -338,6 +346,50 @@ spec:
            - mountPath: /sys
              name: sys
              readOnly: true
+        - command:
+            - ./tracer
+            - -procfs
+            - /hostproc
+
+          image: 'docker.io/kubeshark/worker:latest'
+          imagePullPolicy: Always
+          name: tracer
+          env:
+            - name: POD_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.name
+            - name: POD_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+          envFrom:
+            - secretRef:
+                name: kubeshark-secret
+          resources:
+            limits:
+              cpu: 750m
+              memory: 1Gi
+            requests:
+              cpu: 50m
+              memory: 50Mi
+          securityContext:
+            capabilities:
+              add:
+                - SYS_ADMIN
+                - SYS_PTRACE
+                - DAC_OVERRIDE
+                - SYS_RESOURCE
+                - SYS_MODULE
+              drop:
+                - ALL
+          volumeMounts:
+            - mountPath: /hostproc
+              name: proc
+              readOnly: true
+            - mountPath: /sys
+              name: sys
+              readOnly: true
      dnsPolicy: ClusterFirstWithHostNet
      hostNetwork: true
      serviceAccountName: kubeshark-service-account
@@ -368,8 +420,6 @@
 apiVersion: apps/v1
 kind: Deployment
 metadata:
-  name: kubeshark-hub
-  namespace: default
   labels:
     app.kubeshark.co/app: hub
     helm.sh/chart: kubeshark-50.4
@@ -378,16 +428,27 @@ metadata:
     app.kubernetes.io/version: "50.4"
     app.kubernetes.io/managed-by: Helm
   annotations:
+  name: kubeshark-hub
+  namespace: default
 spec:
   replicas: 1 # Set the desired number of replicas
   selector:
     matchLabels:
       app.kubeshark.co/app: hub
+      helm.sh/chart: kubeshark-50.4
+      app.kubernetes.io/name: kubeshark
+      app.kubernetes.io/instance: kubeshark
+      app.kubernetes.io/version: "50.4"
+      app.kubernetes.io/managed-by: Helm
   template:
     metadata:
       labels:
         app.kubeshark.co/app: hub
-        sidecar.istio.io/inject: "false"
+        helm.sh/chart: kubeshark-50.4
+        app.kubernetes.io/name: kubeshark
+        app.kubernetes.io/instance: kubeshark
+        app.kubernetes.io/version: "50.4"
+        app.kubernetes.io/managed-by: Helm
     spec:
       dnsPolicy: ClusterFirstWithHostNet
      serviceAccountName: kubeshark-service-account
@@ -396,6 +457,15 @@ spec:
          command:
            - ./hub
 
+          env:
+            - name: POD_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.name
+            - name: POD_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
          envFrom:
            - configMapRef:
                name: kubeshark-config-map
@@ -429,8 +499,6 @@
 apiVersion: apps/v1
 kind: Deployment
 metadata:
-  name: kubeshark-front
-  namespace: default
   labels:
     app.kubeshark.co/app: front
     helm.sh/chart: kubeshark-50.4
@@ -439,15 +507,27 @@ metadata:
     app.kubernetes.io/version: "50.4"
     app.kubernetes.io/managed-by: Helm
   annotations:
+  name: kubeshark-front
+  namespace: default
 spec:
   replicas: 1 # Set the desired number of replicas
   selector:
     matchLabels:
       app.kubeshark.co/app: front
+      helm.sh/chart: kubeshark-50.4
+      app.kubernetes.io/name: kubeshark
+      app.kubernetes.io/instance: kubeshark
+      app.kubernetes.io/version: "50.4"
+      app.kubernetes.io/managed-by: Helm
   template:
     metadata:
       labels:
         app.kubeshark.co/app: front
+        helm.sh/chart: kubeshark-50.4
+        app.kubernetes.io/name: kubeshark
+        app.kubernetes.io/instance: kubeshark
+        app.kubernetes.io/version: "50.4"
+        app.kubernetes.io/managed-by: Helm
     spec:
       containers:
         - env: