Compare commits

...

31 Commits
39.6 ... 40.2

Author SHA1 Message Date
M. Mert Yildiran
139336d4ee Template hostPort(s) in the Helm chart 2023-05-10 14:38:38 +03:00
M. Mert Yildiran
f68fed0de8 🐛 Fix the effect of proxy config port changes 2023-05-10 01:28:43 +03:00
M. Mert Yildiran
1d7d242e6c Generate the missing new line in 08-persistent-volume-claim.yaml 2023-05-09 00:00:07 +03:00
M. Mert Yildiran
aa904e23c7 Add --persistentstorage option to tap command 2023-05-08 23:57:22 +03:00
M. Mert Yildiran
baf0e65337 Template the Helm chart based on persistentstorage value 2023-05-08 23:52:14 +03:00
M. Mert Yildiran
a33a3467fc Add persistentstorage option 2023-05-08 00:50:56 +03:00
M. Mert Yildiran
a9b598bc41 ⬆️ Bump the Helm chart version 2023-05-04 21:45:55 +03:00
M. Mert Yildiran
0aee367ad5 Omit the license string in helm-chart and manifests commands 2023-05-04 21:37:55 +03:00
M. Mert Yildiran
8c7d9ea8fd Fix the updateLicense method 2023-05-04 21:33:38 +03:00
M. Mert Yildiran
fab0f713ed 🐛 Pass the license string 2023-05-04 21:18:34 +03:00
M. Mert Yildiran
2563cc1922 🐛 Fix the imagePullPolicy to imagepullpolicy in helm-chart command 2023-04-24 02:03:58 +03:00
Jay R. Wren
26c9f42eba 📚 Remove kubeshark tap -A example from README.md (#1339) 2023-04-21 18:35:24 -07:00
M. Mert Yildiran
00dd3a93df Update the Helm chart version 2023-04-20 21:20:50 +03:00
M. Mert Yildiran
d02293ab55 Add a workflow for publishing Helm chart 2023-04-20 21:20:07 +03:00
M. Mert Yildiran
60cfa92efb Apply the same Kubernetes tolerations to all pods 2023-04-20 20:27:22 +03:00
M. Mert Yildiran
01b187aaa3 Update a log message 2023-04-20 20:22:33 +03:00
M. Mert Yildiran
38d121556c Add storageclass option to config.yaml 2023-04-20 20:20:24 +03:00
M. Mert Yildiran
2d73b46b44 Change the Docker Hub repository in the badges to kubeshark/worker 2023-04-20 20:10:52 +03:00
M. Mert Yildiran
466b9099bd Ignore the Kubernetes version check in certain commands while creating the Kubernetes provider 2023-04-20 20:09:19 +03:00
M. Mert Yildiran
bbe3338c3c Rename 08-persistent-volume.yaml to 08-persistent-volume-claim.yaml 2023-04-20 20:04:47 +03:00
M. Mert Yildiran
2780791068 Clean YAML files before generating the new ones in manifests and helm-chart command 2023-04-20 04:00:37 +03:00
M. Mert Yildiran
e65656c1df 🔥 Delete permissionFiles folder 2023-04-20 03:52:15 +03:00
M. Mert Yildiran
df7d1ac10c Give the permission of listing or watching the persistentvolumeclaims to the ClusterRole 2023-04-20 03:01:25 +03:00
M. Mert Yildiran
c342885cae Set the default storage limit to 200Mi 2023-04-20 02:48:18 +03:00
M. Mert Yildiran
44adb397c1 🔥 Remove the old DaemonSet manifests 2023-04-20 00:26:01 +03:00
M. Mert Yildiran
657ea8570c Add PersistentVolumeClaim and mount it to worker DaemonSet 2023-04-20 00:09:22 +03:00
M. Mert Yildiran
686dd5fba1 🔥 Remove the -A flag and allnamespaces field from config.yaml 2023-04-19 20:52:28 +03:00
M. Mert Yildiran
90e6e99386 Run the manifests --dump and helm-chart commands 2023-04-19 20:30:11 +03:00
M. Mert Yildiran
aa9109df12 Update helm-chart command 2023-04-19 20:29:17 +03:00
M. Mert Yildiran
9a37781355 🔥 Remove the grace period 2023-04-19 05:18:59 +03:00
M. Mert Yildiran
5ce10b626f Pass every config through environment variables and don't make HTTP calls in first tap command 2023-04-18 03:21:23 +03:00
48 changed files with 643 additions and 520 deletions

35
.github/workflows/helm.yml vendored Normal file
View File

@@ -0,0 +1,35 @@
on:
  push:
    tags:
      - '*'
name: Release Helm Charts
jobs:
  release:
    # depending on default permission settings for your org (contents being read-only or read-write for workloads), you will have to add permissions
    # see: https://docs.github.com/en/actions/security-guides/automatic-token-authentication#modifying-the-permissions-for-the-github_token
    permissions:
      contents: write
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 0
      - name: Configure Git
        run: |
          git config user.name "$GITHUB_ACTOR"
          git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
      - name: Install Helm
        uses: azure/setup-helm@v3
      - name: Run chart-releaser
        uses: helm/chart-releaser-action@v1.5.0
        with:
          charts_dir: .
          charts_repo_url: https://kubeshark.github.io/kubeshark
        env:
          CR_TOKEN: "${{ secrets.HELM_TOKEN }}"

View File

@@ -9,10 +9,10 @@
<a href="https://github.com/kubeshark/kubeshark/releases/latest">
<img alt="GitHub Latest Release" src="https://img.shields.io/github/v/release/kubeshark/kubeshark?logo=GitHub&style=flat-square">
</a>
<a href="https://hub.docker.com/r/kubeshark/kubeshark">
<a href="https://hub.docker.com/r/kubeshark/worker">
<img alt="Docker pulls" src="https://img.shields.io/docker/pulls/kubeshark/kubeshark?color=%23099cec&logo=Docker&style=flat-square">
</a>
<a href="https://hub.docker.com/r/kubeshark/kubeshark">
<a href="https://hub.docker.com/r/kubeshark/worker">
<img alt="Image size" src="https://img.shields.io/docker/image-size/kubeshark/kubeshark/latest?logo=Docker&style=flat-square">
</a>
<a href="https://discord.gg/WkvRGMUcx7">
@@ -45,10 +45,6 @@ Download **Kubeshark**'s binary distribution [latest release](https://github.com
kubeshark tap
```
```shell
kubeshark tap -A
```
```shell
kubeshark tap -n sock-shop "(catalo*|front-end*)"
```

View File

@@ -2,48 +2,16 @@ package check
import (
"context"
"embed"
"fmt"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/kubernetes"
"github.com/rs/zerolog/log"
rbac "k8s.io/api/rbac/v1"
"k8s.io/client-go/kubernetes/scheme"
)
func KubernetesPermissions(ctx context.Context, embedFS embed.FS, kubernetesProvider *kubernetes.Provider) bool {
func KubernetesPermissions(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool {
log.Info().Str("procedure", "kubernetes-permissions").Msg("Checking:")
var filePath string
if config.Config.IsNsRestrictedMode() {
filePath = "permissionFiles/permissions-ns-tap.yaml"
} else {
filePath = "permissionFiles/permissions-all-namespaces-tap.yaml"
}
data, err := embedFS.ReadFile(filePath)
if err != nil {
log.Error().Err(err).Msg("While checking Kubernetes permissions!")
return false
}
decode := scheme.Codecs.UniversalDeserializer().Decode
obj, _, err := decode(data, nil, nil)
if err != nil {
log.Error().Err(err).Msg("While checking Kubernetes permissions!")
return false
}
switch resource := obj.(type) {
case *rbac.Role:
return checkRulesPermissions(ctx, kubernetesProvider, resource.Rules, config.Config.Tap.SelfNamespace)
case *rbac.ClusterRole:
return checkRulesPermissions(ctx, kubernetesProvider, resource.Rules, "")
}
log.Error().Msg("While checking Kubernetes permissions! Resource of types 'Role' or 'ClusterRole' are not found in permission files.")
return false
return checkRulesPermissions(ctx, kubernetesProvider, kubernetesProvider.BuildClusterRole().Rules, "")
}
func checkRulesPermissions(ctx context.Context, kubernetesProvider *kubernetes.Provider, rules []rbac.PolicyRule, namespace string) bool {

View File

@@ -12,14 +12,14 @@ func ServerConnection(kubernetesProvider *kubernetes.Provider) bool {
var connectedToHub, connectedToFront bool
if err := checkProxy(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort), "/echo", kubernetesProvider); err != nil {
if err := checkProxy(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.Port), "/echo", kubernetesProvider); err != nil {
log.Error().Err(err).Msg("Couldn't connect to Hub using proxy!")
} else {
connectedToHub = true
log.Info().Msg("Connected successfully to Hub using proxy.")
}
if err := checkProxy(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Front.SrcPort), "", kubernetesProvider); err != nil {
if err := checkProxy(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Front.Port), "", kubernetesProvider); err != nil {
log.Error().Err(err).Msg("Couldn't connect to Front using proxy!")
} else {
connectedToFront = true

View File

@@ -2,7 +2,6 @@ package cmd
import (
"context"
"embed"
"fmt"
"os"
@@ -12,11 +11,6 @@ import (
"github.com/rs/zerolog/log"
)
var (
//go:embed permissionFiles
embedFS embed.FS
)
func runCheck() {
log.Info().Msg(fmt.Sprintf("Checking the %s resources...", misc.Software))
@@ -30,7 +24,7 @@ func runCheck() {
}
if checkPassed {
checkPassed = check.KubernetesPermissions(ctx, embedFS, kubernetesProvider)
checkPassed = check.KubernetesPermissions(ctx, kubernetesProvider)
}
if checkPassed {

View File

@@ -5,7 +5,7 @@ import (
)
func performCleanCommand() {
kubernetesProvider, err := getKubernetesProviderForCli(false)
kubernetesProvider, err := getKubernetesProviderForCli(false, false)
if err != nil {
return
}

View File

@@ -58,7 +58,7 @@ func startProxyReportErrorIfAny(kubernetesProvider *kubernetes.Provider, ctx con
}
}
func getKubernetesProviderForCli(silent bool) (*kubernetes.Provider, error) {
func getKubernetesProviderForCli(silent bool, dontCheckVersion bool) (*kubernetes.Provider, error) {
kubeConfigPath := config.Config.KubeConfigPath()
kubernetesProvider, err := kubernetes.NewProvider(kubeConfigPath, config.Config.Kube.Context)
if err != nil {
@@ -75,15 +75,17 @@ func getKubernetesProviderForCli(silent bool) (*kubernetes.Provider, error) {
return nil, err
}
kubernetesVersion, err := kubernetesProvider.GetKubernetesVersion()
if err != nil {
handleKubernetesProviderError(err)
return nil, err
}
if !dontCheckVersion {
kubernetesVersion, err := kubernetesProvider.GetKubernetesVersion()
if err != nil {
handleKubernetesProviderError(err)
return nil, err
}
if err := kubernetes.ValidateKubernetesVersion(kubernetesVersion); err != nil {
handleKubernetesProviderError(err)
return nil, err
if err := kubernetes.ValidateKubernetesVersion(kubernetesVersion); err != nil {
handleKubernetesProviderError(err)
return nil, err
}
}
return kubernetesProvider, nil

View File

@@ -36,12 +36,12 @@ func init() {
log.Debug().Err(err).Send()
}
consoleCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.SrcPort, "Provide a custom port for the Hub")
consoleCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.Port, "Provide a custom port for the Hub")
consoleCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the Hub")
}
func runConsole() {
hubUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort)
hubUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.Port)
response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl))
if err != nil || response.StatusCode != 200 {
log.Info().Msg(fmt.Sprintf(utils.Yellow, "Couldn't connect to Hub. Establishing proxy..."))
@@ -51,10 +51,10 @@ func runConsole() {
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
log.Info().Str("host", config.Config.Tap.Proxy.Host).Uint16("port", config.Config.Tap.Proxy.Hub.SrcPort).Msg("Connecting to:")
log.Info().Str("host", config.Config.Tap.Proxy.Host).Uint16("port", config.Config.Tap.Proxy.Hub.Port).Msg("Connecting to:")
u := url.URL{
Scheme: "ws",
Host: fmt.Sprintf("%s:%d", config.Config.Tap.Proxy.Host, config.Config.Tap.Proxy.Hub.SrcPort),
Host: fmt.Sprintf("%s:%d", config.Config.Tap.Proxy.Host, config.Config.Tap.Proxy.Hub.Port),
Path: "/scripts/logs",
}

View File

@@ -6,10 +6,12 @@ import (
"os"
"path/filepath"
"sort"
"strings"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/kubernetes"
"github.com/kubeshark/kubeshark/misc"
"github.com/kubeshark/kubeshark/misc/fsUtils"
"github.com/kubeshark/kubeshark/utils"
"github.com/ohler55/ojg/jp"
"github.com/ohler55/ojg/oj"
@@ -128,19 +130,23 @@ var hubPodMappings = map[string]interface{}{
},
{
"name": "NAMESPACES",
"value": "{{ .Values.tap.allnamespaces | ternary \"\" .Values.tap.namespaces }}",
},
{
"name": "STORAGE_LIMIT",
"value": "{{ .Values.tap.storagelimit }}",
"value": "{{ gt (len .Values.tap.namespaces) 0 | ternary (join \",\" .Values.tap.namespaces) \"\" }}",
},
{
"name": "LICENSE",
"value": "{{ .Values.license }}",
},
{
"name": "SCRIPTING_ENV",
"value": "{}",
},
{
"name": "SCRIPTING_SCRIPTS",
"value": "[]",
},
},
"spec.containers[0].image": "{{ .Values.tap.docker.registry }}/hub:{{ .Values.tap.docker.tag }}",
"spec.containers[0].imagePullPolicy": "{{ .Values.tap.docker.imagePullPolicy }}",
"spec.containers[0].imagePullPolicy": "{{ .Values.tap.docker.imagepullpolicy }}",
"spec.containers[0].resources.limits.cpu": "{{ .Values.tap.resources.hub.limits.cpu }}",
"spec.containers[0].resources.limits.memory": "{{ .Values.tap.resources.hub.limits.memory }}",
"spec.containers[0].resources.requests.cpu": "{{ .Values.tap.resources.hub.requests.cpu }}",
@@ -151,18 +157,24 @@ var hubServiceMappings = serviceAccountMappings
var frontPodMappings = map[string]interface{}{
"metadata.namespace": "{{ .Values.tap.selfnamespace }}",
"spec.containers[0].image": "{{ .Values.tap.docker.registry }}/front:{{ .Values.tap.docker.tag }}",
"spec.containers[0].imagePullPolicy": "{{ .Values.tap.docker.imagePullPolicy }}",
"spec.containers[0].imagePullPolicy": "{{ .Values.tap.docker.imagepullpolicy }}",
}
var frontServiceMappings = serviceAccountMappings
var persistentVolumeMappings = map[string]interface{}{
"metadata.namespace": "{{ .Values.tap.selfnamespace }}",
"spec.resources.requests.storage": "{{ .Values.tap.storagelimit }}",
"spec.storageClassName": "{{ .Values.tap.storageclass }}",
}
var workerDaemonSetMappings = map[string]interface{}{
"metadata.namespace": "{{ .Values.tap.selfnamespace }}",
"spec.template.spec.containers[0].image": "{{ .Values.tap.docker.registry }}/worker:{{ .Values.tap.docker.tag }}",
"spec.template.spec.containers[0].imagePullPolicy": "{{ .Values.tap.docker.imagePullPolicy }}",
"spec.template.spec.containers[0].imagePullPolicy": "{{ .Values.tap.docker.imagepullpolicy }}",
"spec.template.spec.containers[0].resources.limits.cpu": "{{ .Values.tap.resources.worker.limits.cpu }}",
"spec.template.spec.containers[0].resources.limits.memory": "{{ .Values.tap.resources.worker.limits.memory }}",
"spec.template.spec.containers[0].resources.requests.cpu": "{{ .Values.tap.resources.worker.requests.cpu }}",
"spec.template.spec.containers[0].resources.requests.memory": "{{ .Values.tap.resources.worker.requests.memory }}",
"spec.template.spec.containers[0].command[0]": "{{ .Values.tap.debug | ternary \"./worker -debug\" \"./worker\" }}",
"spec.template.spec.containers[0].command[4]": "{{ .Values.tap.proxy.worker.srvport }}",
"spec.template.spec.containers[0].command[6]": "{{ .Values.tap.packetcapture }}",
}
@@ -179,6 +191,7 @@ func runHelmChart() {
hubService,
frontPod,
frontService,
persistentVolume,
workerDaemonSet,
err := generateManifests()
if err != nil {
@@ -187,15 +200,16 @@ func runHelmChart() {
}
err = dumpHelmChart(map[string]interface{}{
"00-namespace.yaml": template(namespace, namespaceMappings),
"01-service-account.yaml": template(serviceAccount, serviceAccountMappings),
"02-cluster-role.yaml": template(clusterRole, clusterRoleMappings),
"03-cluster-role-binding.yaml": template(clusterRoleBinding, clusterRoleBindingMappings),
"04-hub-pod.yaml": template(hubPod, hubPodMappings),
"05-hub-service.yaml": template(hubService, hubServiceMappings),
"06-front-pod.yaml": template(frontPod, frontPodMappings),
"07-front-service.yaml": template(frontService, frontServiceMappings),
"08-worker-daemon-set.yaml": template(workerDaemonSet, workerDaemonSetMappings),
"00-namespace.yaml": template(namespace, namespaceMappings),
"01-service-account.yaml": template(serviceAccount, serviceAccountMappings),
"02-cluster-role.yaml": template(clusterRole, clusterRoleMappings),
"03-cluster-role-binding.yaml": template(clusterRoleBinding, clusterRoleBindingMappings),
"04-hub-pod.yaml": template(hubPod, hubPodMappings),
"05-hub-service.yaml": template(hubService, hubServiceMappings),
"06-front-pod.yaml": template(frontPod, frontPodMappings),
"07-front-service.yaml": template(frontService, frontServiceMappings),
"08-persistent-volume-claim.yaml": template(persistentVolume, persistentVolumeMappings),
"09-worker-daemon-set.yaml": template(workerDaemonSet, workerDaemonSetMappings),
})
if err != nil {
log.Error().Err(err).Send()
@@ -245,10 +259,76 @@ func template(object interface{}, mappings map[string]interface{}) (template int
return
}
func handleHubPod(manifest string) string {
lines := strings.Split(manifest, "\n")
for i, line := range lines {
if strings.HasPrefix(strings.TrimSpace(line), "hostPort:") {
lines[i] = " hostPort: {{ .Values.tap.proxy.hub.srvport }}"
}
}
return strings.Join(lines, "\n")
}
func handleFrontPod(manifest string) string {
lines := strings.Split(manifest, "\n")
for i, line := range lines {
if strings.HasPrefix(strings.TrimSpace(line), "hostPort:") {
lines[i] = " hostPort: {{ .Values.tap.proxy.front.srvport }}"
}
}
return strings.Join(lines, "\n")
}
func handlePVCManifest(manifest string) string {
return fmt.Sprintf("{{- if .Values.tap.persistentstorage }}\n%s{{- end }}\n", manifest)
}
func handleDaemonSetManifest(manifest string) string {
lines := strings.Split(manifest, "\n")
for i, line := range lines {
if strings.TrimSpace(line) == "- mountPath: /app/data" {
lines[i] = fmt.Sprintf("{{- if .Values.tap.persistentstorage }}\n%s", line)
}
if strings.TrimSpace(line) == "name: kubeshark-persistent-volume" {
lines[i] = fmt.Sprintf("%s\n{{- end }}", line)
}
if strings.TrimSpace(line) == "- name: kubeshark-persistent-volume" {
lines[i] = fmt.Sprintf("{{- if .Values.tap.persistentstorage }}\n%s", line)
}
if strings.TrimSpace(line) == "claimName: kubeshark-persistent-volume-claim" {
lines[i] = fmt.Sprintf("%s\n{{- end }}", line)
}
if strings.HasPrefix(strings.TrimSpace(line), "- containerPort:") {
lines[i] = " - containerPort: {{ .Values.tap.proxy.worker.srvport }}"
}
if strings.HasPrefix(strings.TrimSpace(line), "hostPort:") {
lines[i] = " hostPort: {{ .Values.tap.proxy.worker.srvport }}"
}
}
return strings.Join(lines, "\n")
}
func dumpHelmChart(objects map[string]interface{}) error {
folder := filepath.Join(".", "helm-chart")
templatesFolder := filepath.Join(folder, "templates")
err := os.MkdirAll(templatesFolder, os.ModePerm)
err := fsUtils.RemoveFilesByExtension(templatesFolder, "yaml")
if err != nil {
return err
}
err = os.MkdirAll(templatesFolder, os.ModePerm)
if err != nil {
return err
}
@@ -267,6 +347,22 @@ func dumpHelmChart(objects map[string]interface{}) error {
return err
}
if filename == "04-hub-pod.yaml" {
manifest = handleHubPod(manifest)
}
if filename == "06-front-pod.yaml" {
manifest = handleFrontPod(manifest)
}
if filename == "08-persistent-volume-claim.yaml" {
manifest = handlePVCManifest(manifest)
}
if filename == "09-worker-daemon-set.yaml" {
manifest = handleDaemonSetManifest(manifest)
}
path := filepath.Join(templatesFolder, filename)
err = os.WriteFile(path, []byte(manifestHeader+manifest), 0644)
if err != nil {

View File

@@ -18,7 +18,7 @@ var logsCmd = &cobra.Command{
Use: "logs",
Short: "Create a ZIP file with logs for GitHub issues or troubleshooting",
RunE: func(cmd *cobra.Command, args []string) error {
kubernetesProvider, err := getKubernetesProviderForCli(false)
kubernetesProvider, err := getKubernetesProviderForCli(false, false)
if err != nil {
return nil
}

View File

@@ -10,6 +10,7 @@ import (
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/docker"
"github.com/kubeshark/kubeshark/kubernetes"
"github.com/kubeshark/kubeshark/misc/fsUtils"
"github.com/kubeshark/kubeshark/utils"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
@@ -49,6 +50,7 @@ func runManifests() {
hubService,
frontPod,
frontService,
persistentVolume,
workerDaemonSet,
err := generateManifests()
if err != nil {
@@ -58,15 +60,16 @@ func runManifests() {
if config.Config.Manifests.Dump {
err = dumpManifests(map[string]interface{}{
"00-namespace.yaml": namespace,
"01-service-account.yaml": serviceAccount,
"02-cluster-role.yaml": clusterRole,
"03-cluster-role-binding.yaml": clusterRoleBinding,
"04-hub-pod.yaml": hubPod,
"05-hub-service.yaml": hubService,
"06-front-pod.yaml": frontPod,
"07-front-service.yaml": frontService,
"08-worker-daemon-set.yaml": workerDaemonSet,
"00-namespace.yaml": namespace,
"01-service-account.yaml": serviceAccount,
"02-cluster-role.yaml": clusterRole,
"03-cluster-role-binding.yaml": clusterRoleBinding,
"04-hub-pod.yaml": hubPod,
"05-hub-service.yaml": hubService,
"06-front-pod.yaml": frontPod,
"07-front-service.yaml": frontService,
"08-persistent-volume-claim.yaml": persistentVolume,
"09-worker-daemon-set.yaml": workerDaemonSet,
})
} else {
err = printManifests([]interface{}{
@@ -96,11 +99,16 @@ func generateManifests() (
hubService *v1.Service,
frontPod *v1.Pod,
frontService *v1.Service,
persistentVolumeClaim *v1.PersistentVolumeClaim,
workerDaemonSet *kubernetes.DaemonSet,
err error,
) {
config.Config.License = ""
persistentStorage := config.Config.Tap.PersistentStorage
config.Config.Tap.PersistentStorage = true
var kubernetesProvider *kubernetes.Provider
kubernetesProvider, err = getKubernetesProviderForCli(true)
kubernetesProvider, err = getKubernetesProviderForCli(true, true)
if err != nil {
return
}
@@ -138,13 +146,18 @@ func generateManifests() (
ImagePullPolicy: config.Config.ImagePullPolicy(),
ImagePullSecrets: config.Config.ImagePullSecrets(),
Debug: config.Config.Tap.Debug,
}, config.Config.Tap.Proxy.Host, fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.SrcPort))
}, config.Config.Tap.Proxy.Host, fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.Port))
if err != nil {
return
}
frontService = kubernetesProvider.BuildFrontService(config.Config.Tap.SelfNamespace)
persistentVolumeClaim, err = kubernetesProvider.BuildPersistentVolumeClaim()
if err != nil {
return
}
workerDaemonSet, err = kubernetesProvider.BuildWorkerDaemonSet(
docker.GetWorkerImage(),
kubernetes.WorkerDaemonSetName,
@@ -160,12 +173,20 @@ func generateManifests() (
return
}
config.Config.Tap.PersistentStorage = persistentStorage
return
}
func dumpManifests(objects map[string]interface{}) error {
folder := filepath.Join(".", "manifests")
err := os.MkdirAll(folder, os.ModePerm)
err := fsUtils.RemoveFilesByExtension(folder, "yaml")
if err != nil {
return err
}
err = os.MkdirAll(folder, os.ModePerm)
if err != nil {
return err
}
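Both generator commands now wipe stale YAML (via fsUtils.RemoveFilesByExtension) before writing new files. Per the commit list, regenerating both output trees is a sketch along these lines (no flags beyond --dump are assumed):
```shell
# Rebuild the static manifests and the Helm chart templates from the CLI.
kubeshark manifests --dump
kubeshark helm-chart
```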

View File

@@ -1,25 +0,0 @@
# This example shows permissions that enrich the logs with additional info
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubeshark-runner-debug-clusterrole
rules:
- apiGroups: ["events.k8s.io"]
resources: ["events"]
verbs: ["watch"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["get"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubeshark-runner-debug-clusterrolebindings
subjects:
- kind: User
name: user-with-clusterwide-access
apiGroup: rbac.authorization.k8s.io
roleRef:
kind: ClusterRole
name: kubeshark-runner-debug-clusterrole
apiGroup: rbac.authorization.k8s.io

View File

@@ -1,37 +0,0 @@
# This example shows permissions that are required for Kubeshark to resolve IPs to service names
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubeshark-resolver-clusterrole
rules:
- apiGroups: [""]
resources: ["serviceaccounts"]
verbs: ["get", "create"]
- apiGroups: ["rbac.authorization.k8s.io"]
resources: ["clusterroles"]
verbs: ["get", "list", "create", "delete"]
- apiGroups: ["rbac.authorization.k8s.io"]
resources: ["clusterrolebindings"]
verbs: ["get", "list", "create", "delete"]
- apiGroups: ["", "apps", "extensions"]
resources: ["pods"]
verbs: ["get", "list", "watch"]
- apiGroups: ["", "apps", "extensions"]
resources: ["services"]
verbs: ["get", "list", "watch"]
- apiGroups: ["", "apps", "extensions"]
resources: ["endpoints"]
verbs: ["get", "list", "watch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubeshark-resolver-clusterrolebindings
subjects:
- kind: User
name: user-with-clusterwide-access
apiGroup: rbac.authorization.k8s.io
roleRef:
kind: ClusterRole
name: kubeshark-resolver-clusterrole
apiGroup: rbac.authorization.k8s.io

View File

@@ -1,40 +0,0 @@
# This example shows the permissions that are required in order to run the `kubeshark tap` command
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubeshark-runner-clusterrole
rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["list", "watch", "create"]
- apiGroups: [""]
resources: ["services"]
verbs: ["get", "create"]
- apiGroups: ["apps"]
resources: ["daemonsets"]
verbs: ["create", "patch"]
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["services/proxy"]
verbs: ["get", "create"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["create"]
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubeshark-runner-clusterrolebindings
subjects:
- kind: User
name: user-with-clusterwide-access
apiGroup: rbac.authorization.k8s.io
roleRef:
kind: ClusterRole
name: kubeshark-runner-clusterrole
apiGroup: rbac.authorization.k8s.io

View File

@@ -1,25 +0,0 @@
# This example shows permissions that enrich the logs with additional info in namespace-restricted mode
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubeshark-runner-debug-role
rules:
- apiGroups: ["events.k8s.io"]
resources: ["events"]
verbs: ["watch"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["get"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubeshark-runner-debug-rolebindings
subjects:
- kind: User
name: user-with-restricted-access
apiGroup: rbac.authorization.k8s.io
roleRef:
kind: Role
name: kubeshark-runner-debug-role
apiGroup: rbac.authorization.k8s.io

View File

@@ -1,37 +0,0 @@
# This example shows permissions that are required for Kubeshark to resolve IPs to service names in namespace-restricted mode
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubeshark-resolver-role
rules:
- apiGroups: [""]
resources: ["serviceaccounts"]
verbs: ["get", "list", "create", "delete"]
- apiGroups: ["rbac.authorization.k8s.io"]
resources: ["roles"]
verbs: ["get", "list", "create", "delete"]
- apiGroups: ["rbac.authorization.k8s.io"]
resources: ["rolebindings"]
verbs: ["get", "list", "create", "delete"]
- apiGroups: ["", "apps", "extensions"]
resources: ["pods"]
verbs: ["get", "list", "watch"]
- apiGroups: ["", "apps", "extensions"]
resources: ["services"]
verbs: ["get", "list", "watch"]
- apiGroups: ["", "apps", "extensions"]
resources: ["endpoints"]
verbs: ["get", "list", "watch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubeshark-resolver-rolebindings
subjects:
- kind: User
name: user-with-restricted-access
apiGroup: rbac.authorization.k8s.io
roleRef:
kind: Role
name: kubeshark-resolver-role
apiGroup: rbac.authorization.k8s.io

View File

@@ -1,37 +0,0 @@
# This example shows the permissions that are required in order to run the `kubeshark tap` command in namespace-restricted mode
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubeshark-runner-role
rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["list", "watch", "create"]
- apiGroups: [""]
resources: ["services"]
verbs: ["get", "create", "delete"]
- apiGroups: ["apps"]
resources: ["daemonsets"]
verbs: ["create", "patch", "delete"]
- apiGroups: [""]
resources: ["services/proxy"]
verbs: ["get", "create", "delete"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["create", "delete"]
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubeshark-runner-rolebindings
subjects:
- kind: User
name: user-with-restricted-access
apiGroup: rbac.authorization.k8s.io
roleRef:
kind: Role
name: kubeshark-runner-role
apiGroup: rbac.authorization.k8s.io

View File

@@ -40,19 +40,19 @@ func init() {
log.Debug().Err(err).Send()
}
proCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.SrcPort, "Provide a custom port for the Hub")
proCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.Port, "Provide a custom port for the Hub")
proCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the Hub")
}
func acquireLicense() {
hubUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort)
hubUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.Port)
response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl))
if err != nil || response.StatusCode != 200 {
log.Info().Msg(fmt.Sprintf(utils.Yellow, "Couldn't connect to Hub. Establishing proxy..."))
runProxy(false, true)
}
connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort), connect.DefaultRetries, connect.DefaultTimeout)
connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.Port), connect.DefaultRetries, connect.DefaultTimeout)
log.Info().Str("url", PRO_URL).Msg("Opening in the browser:")
utils.OpenBrowser(PRO_URL)
@@ -61,17 +61,19 @@ func acquireLicense() {
}
func updateLicense(licenseKey string) {
log.Info().Str("key", licenseKey).Msg("Received license:")
config.Config.License = licenseKey
err := config.WriteConfig(&config.Config)
if err != nil {
log.Error().Err(err).Send()
}
connector.PostLicenseSingle(config.Config.License)
log.Info().Msg("Updated the license. Exiting.")
go func() {
connector.PostLicense(config.Config.License)
log.Info().Msg("Updated the license. Exiting.")
time.Sleep(2 * time.Second)
os.Exit(0)
}()
@@ -105,8 +107,6 @@ func runLicenseRecieverServer() {
licenseKey := string(data)
log.Info().Str("key", licenseKey).Msg("Received license:")
updateLicense(licenseKey)
})

View File

@@ -24,7 +24,7 @@ func init() {
log.Debug().Err(err).Send()
}
proxyCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.SrcPort, "Provide a custom port for the front-end proxy/port-forward")
proxyCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.SrcPort, "Provide a custom port for the Hub proxy/port-forward")
proxyCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.Port, "Provide a custom port for the front-end proxy/port-forward")
proxyCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.Port, "Provide a custom port for the Hub proxy/port-forward")
proxyCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the proxy/port-forward")
}

View File

@@ -15,7 +15,7 @@ import (
)
func runProxy(block bool, noBrowser bool) {
kubernetesProvider, err := getKubernetesProviderForCli(false)
kubernetesProvider, err := getKubernetesProviderForCli(false, false)
if err != nil {
return
}
@@ -63,12 +63,12 @@ func runProxy(block bool, noBrowser bool) {
var establishedProxy bool
hubUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort)
hubUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.Port)
response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl))
if err == nil && response.StatusCode == 200 {
log.Info().
Str("service", kubernetes.HubServiceName).
Int("port", int(config.Config.Tap.Proxy.Hub.SrcPort)).
Int("port", int(config.Config.Tap.Proxy.Hub.Port)).
Msg("Found a running service.")
okToOpen("Hub", hubUrl, true)
@@ -79,8 +79,8 @@ func runProxy(block bool, noBrowser bool) {
kubernetes.HubServiceName,
kubernetes.HubPodName,
configStructs.ProxyHubPortLabel,
config.Config.Tap.Proxy.Hub.SrcPort,
config.Config.Tap.Proxy.Hub.DstPort,
config.Config.Tap.Proxy.Hub.Port,
configStructs.ContainerPort,
"/echo",
)
connector := connect.NewConnector(hubUrl, connect.DefaultRetries, connect.DefaultTimeout)
@@ -93,12 +93,12 @@ func runProxy(block bool, noBrowser bool) {
okToOpen("Hub", hubUrl, true)
}
frontUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Front.SrcPort)
frontUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Front.Port)
response, err = http.Get(fmt.Sprintf("%s/", frontUrl))
if err == nil && response.StatusCode == 200 {
log.Info().
Str("service", kubernetes.FrontServiceName).
Int("port", int(config.Config.Tap.Proxy.Front.SrcPort)).
Int("port", int(config.Config.Tap.Proxy.Front.Port)).
Msg("Found a running service.")
okToOpen("Kubeshark", frontUrl, noBrowser)
@@ -109,8 +109,8 @@ func runProxy(block bool, noBrowser bool) {
kubernetes.FrontServiceName,
kubernetes.FrontPodName,
configStructs.ProxyFrontPortLabel,
config.Config.Tap.Proxy.Front.SrcPort,
config.Config.Tap.Proxy.Front.DstPort,
config.Config.Tap.Proxy.Front.Port,
configStructs.ContainerPort,
"",
)
connector := connect.NewConnector(frontUrl, connect.DefaultRetries, connect.DefaultTimeout)

View File

@@ -34,7 +34,7 @@ func init() {
log.Debug().Err(err).Send()
}
scriptsCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.SrcPort, "Provide a custom port for the Hub")
scriptsCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.Port, "Provide a custom port for the Hub")
scriptsCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the Hub")
}
@@ -44,14 +44,14 @@ func runScripts() {
return
}
hubUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort)
hubUrl := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.Port)
response, err := http.Get(fmt.Sprintf("%s/echo", hubUrl))
if err != nil || response.StatusCode != 200 {
log.Info().Msg(fmt.Sprintf(utils.Yellow, "Couldn't connect to Hub. Establishing proxy..."))
runProxy(false, true)
}
connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort), connect.DefaultRetries, connect.DefaultTimeout)
connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.Port), connect.DefaultRetries, connect.DefaultTimeout)
watchScripts(true)
}

View File

@@ -47,13 +47,14 @@ func init() {
tapCmd.Flags().StringP(configStructs.DockerTagLabel, "t", defaultTapConfig.Docker.Tag, "The tag of the Docker images that are going to be pulled")
tapCmd.Flags().String(configStructs.DockerImagePullPolicy, defaultTapConfig.Docker.ImagePullPolicy, "ImagePullPolicy for the Docker images")
tapCmd.Flags().StringSlice(configStructs.DockerImagePullSecrets, defaultTapConfig.Docker.ImagePullSecrets, "ImagePullSecrets for the Docker images")
tapCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.SrcPort, "Provide a custom port for the front-end proxy/port-forward")
tapCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.SrcPort, "Provide a custom port for the Hub proxy/port-forward")
tapCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.Port, "Provide a custom port for the front-end proxy/port-forward")
tapCmd.Flags().Uint16(configStructs.ProxyHubPortLabel, defaultTapConfig.Proxy.Hub.Port, "Provide a custom port for the Hub proxy/port-forward")
tapCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the proxy/port-forward")
tapCmd.Flags().StringSliceP(configStructs.NamespacesLabel, "n", defaultTapConfig.Namespaces, "Namespaces selector")
tapCmd.Flags().BoolP(configStructs.AllNamespacesLabel, "A", defaultTapConfig.AllNamespaces, "Tap all namespaces")
tapCmd.Flags().StringP(configStructs.SelfNamespaceLabel, "s", defaultTapConfig.SelfNamespace, "Self-namespace of Kubeshark")
tapCmd.Flags().String(configStructs.StorageLimitLabel, defaultTapConfig.StorageLimit, "Override the default storage limit. (per node)")
tapCmd.Flags().Bool(configStructs.PersistentStorageLabel, defaultTapConfig.PersistentStorage, "Enable persistent storage (PersistentVolumeClaim)")
tapCmd.Flags().String(configStructs.StorageLimitLabel, defaultTapConfig.StorageLimit, "Override the default storage limit (per node)")
tapCmd.Flags().String(configStructs.StorageClassLabel, defaultTapConfig.StorageClass, "Override the default storage class of the PersistentVolumeClaim (per node)")
tapCmd.Flags().Bool(configStructs.DryRunLabel, defaultTapConfig.DryRun, "Preview of all pods matching the regex, without tapping them")
tapCmd.Flags().StringP(configStructs.PcapLabel, "p", defaultTapConfig.Pcap, fmt.Sprintf("Capture from a PCAP snapshot of %s (.tar.gz) using your Docker Daemon instead of Kubernetes", misc.Software))
tapCmd.Flags().Bool(configStructs.ServiceMeshLabel, defaultTapConfig.ServiceMesh, "Capture the encrypted traffic if the cluster is configured with a service mesh and with mTLS")
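A sketch of a tap invocation exercising the new flags registered above (values are illustrative; only the flag names come from this diff):
```shell
# Illustrative: enable the PersistentVolumeClaim and override its size and class.
kubeshark tap --persistentstorage --storagelimit 500Mi --storageclass standard -n sock-shop
```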

View File

@@ -13,6 +13,7 @@ import (
"github.com/docker/docker/client"
"github.com/docker/go-connections/nat"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/config/configStructs"
"github.com/kubeshark/kubeshark/docker"
"github.com/kubeshark/kubeshark/internal/connect"
"github.com/kubeshark/kubeshark/kubernetes"
@@ -141,10 +142,10 @@ func createAndStartContainers(
hostConfigFront := &container.HostConfig{
PortBindings: nat.PortMap{
nat.Port(fmt.Sprintf("%d/tcp", config.Config.Tap.Proxy.Front.DstPort)): []nat.PortBinding{
nat.Port(fmt.Sprintf("%d/tcp", configStructs.ContainerPort)): []nat.PortBinding{
{
HostIP: hostIP,
HostPort: fmt.Sprintf("%d", config.Config.Tap.Proxy.Front.SrcPort),
HostPort: fmt.Sprintf("%d", config.Config.Tap.Proxy.Front.Port),
},
},
},
@@ -156,7 +157,7 @@ func createAndStartContainers(
Env: []string{
"REACT_APP_DEFAULT_FILTER= ",
"REACT_APP_HUB_HOST= ",
fmt.Sprintf("REACT_APP_HUB_PORT=%d", config.Config.Tap.Proxy.Hub.SrcPort),
fmt.Sprintf("REACT_APP_HUB_PORT=%d", config.Config.Tap.Proxy.Hub.Port),
},
}, hostConfigFront, nil, nil, nameFront)
if err != nil {
@@ -165,16 +166,16 @@ func createAndStartContainers(
hostConfigHub := &container.HostConfig{
PortBindings: nat.PortMap{
nat.Port(fmt.Sprintf("%d/tcp", config.Config.Tap.Proxy.Hub.DstPort)): []nat.PortBinding{
nat.Port(fmt.Sprintf("%d/tcp", config.Config.Tap.Proxy.Hub.SrvPort)): []nat.PortBinding{
{
HostIP: hostIP,
HostPort: fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.SrcPort),
HostPort: fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.Port),
},
},
},
}
cmdHub := []string{"-port", fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.DstPort)}
cmdHub := []string{"-port", fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.SrvPort)}
if config.DebugMode {
cmdHub = append(cmdHub, fmt.Sprintf("-%s", config.DebugFlag))
}
@@ -183,13 +184,13 @@ func createAndStartContainers(
Image: imageHub,
Cmd: cmdHub,
Tty: false,
ExposedPorts: nat.PortSet{nat.Port(fmt.Sprintf("%d/tcp", config.Config.Tap.Proxy.Hub.DstPort)): {}},
ExposedPorts: nat.PortSet{nat.Port(fmt.Sprintf("%d/tcp", config.Config.Tap.Proxy.Hub.SrvPort)): {}},
}, hostConfigHub, nil, nil, nameHub)
if err != nil {
return
}
cmdWorker := []string{"-f", "./import", "-port", fmt.Sprintf("%d", config.Config.Tap.Proxy.Worker.DstPort)}
cmdWorker := []string{"-f", "./import", "-port", fmt.Sprintf("%d", config.Config.Tap.Proxy.Worker.SrvPort)}
if config.DebugMode {
cmdWorker = append(cmdWorker, fmt.Sprintf("-%s", config.DebugFlag))
}
@@ -328,14 +329,19 @@ func pcap(tarPath string) {
},
}
connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort), connect.DefaultRetries, connect.DefaultTimeout)
connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.Port), connect.DefaultRetries, connect.DefaultTimeout)
connector.PostWorkerPodToHub(workerPod)
// License
if config.Config.License != "" {
connector.PostLicense(config.Config.License)
}
log.Info().
Str("url", kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort)).
Str("url", kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.Port)).
Msg(fmt.Sprintf(utils.Green, "Hub is available at:"))
url := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Front.SrcPort)
url := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Front.Port)
log.Info().Str("url", url).Msg(fmt.Sprintf(utils.Green, fmt.Sprintf("%s is available at:", misc.Software)))
if !config.Config.HeadlessMode {

View File

@@ -58,11 +58,11 @@ func tap() {
log.Info().
Str("limit", config.Config.Tap.StorageLimit).
Msg(fmt.Sprintf("%s will store the traffic up to a limit (per node). Oldest TCP streams will be removed once the limit is reached.", misc.Software))
Msg(fmt.Sprintf("%s will store the traffic up to a limit (per node). Oldest TCP/UDP streams will be removed once the limit is reached.", misc.Software))
connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort), connect.DefaultRetries, connect.DefaultTimeout)
connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.Port), connect.DefaultRetries, connect.DefaultTimeout)
kubernetesProvider, err := getKubernetesProviderForCli(false)
kubernetesProvider, err := getKubernetesProviderForCli(false, false)
if err != nil {
return
}
@@ -409,8 +409,8 @@ func postHubStarted(ctx context.Context, kubernetesProvider *kubernetes.Provider
kubernetes.HubServiceName,
kubernetes.HubPodName,
configStructs.ProxyHubPortLabel,
config.Config.Tap.Proxy.Hub.SrcPort,
config.Config.Tap.Proxy.Hub.DstPort,
config.Config.Tap.Proxy.Hub.Port,
configStructs.ContainerPort,
"/echo",
)
@@ -431,43 +431,36 @@ func postHubStarted(ctx context.Context, kubernetesProvider *kubernetes.Provider
if err != nil {
log.Error().Err(err).Send()
}
} else {
// Pod regex
connector.PostRegexToHub(config.Config.Tap.PodRegexStr, state.targetNamespaces)
// Grace period
log.Info().Msg("Waiting for worker containers...")
time.Sleep(5 * time.Second)
}
// License
if config.Config.License != "" {
connector.PostLicense(config.Config.License)
}
// Storage limit
connector.PostStorageLimitToHub(config.Config.Tap.StorageLimitBytes())
// Scripting
connector.PostEnv(config.Config.Scripting.Env)
// Pod regex
connector.PostRegexToHub(config.Config.Tap.PodRegexStr, state.targetNamespaces)
// License
if config.Config.License != "" {
connector.PostLicense(config.Config.License)
}
// Scripting
connector.PostEnv(config.Config.Scripting.Env)
scripts, err := config.Config.Scripting.GetScripts()
if err != nil {
log.Error().Err(err).Send()
}
for _, script := range scripts {
_, err = connector.PostScript(script)
scripts, err := config.Config.Scripting.GetScripts()
if err != nil {
log.Error().Err(err).Send()
}
}
connector.PostScriptDone()
for _, script := range scripts {
_, err = connector.PostScript(script)
if err != nil {
log.Error().Err(err).Send()
}
}
connector.PostScriptDone()
}
if !update {
// Hub proxy URL
url := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.SrcPort)
url := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Hub.Port)
log.Info().Str("url", url).Msg(fmt.Sprintf(utils.Green, "Hub is available at:"))
}
@@ -483,12 +476,12 @@ func postFrontStarted(ctx context.Context, kubernetesProvider *kubernetes.Provid
kubernetes.FrontServiceName,
kubernetes.FrontPodName,
configStructs.ProxyFrontPortLabel,
config.Config.Tap.Proxy.Front.SrcPort,
config.Config.Tap.Proxy.Front.DstPort,
config.Config.Tap.Proxy.Front.Port,
configStructs.ContainerPort,
"",
)
url := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Front.SrcPort)
url := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Front.Port)
log.Info().Str("url", url).Msg(fmt.Sprintf(utils.Green, fmt.Sprintf("%s is available at:", misc.Software)))
if !config.Config.HeadlessMode {

View File

@@ -4,8 +4,6 @@ import (
"fmt"
"regexp"
"github.com/kubeshark/kubeshark/utils"
"github.com/rs/zerolog/log"
v1 "k8s.io/api/core/v1"
)
@@ -18,15 +16,18 @@ const (
ProxyHubPortLabel = "proxy-hub-port"
ProxyHostLabel = "proxy-host"
NamespacesLabel = "namespaces"
AllNamespacesLabel = "allnamespaces"
SelfNamespaceLabel = "selfnamespace"
PersistentStorageLabel = "persistentstorage"
StorageLimitLabel = "storagelimit"
StorageClassLabel = "storageclass"
DryRunLabel = "dryrun"
PcapLabel = "pcap"
ServiceMeshLabel = "servicemesh"
TlsLabel = "tls"
IgnoreTaintedLabel = "ignoreTainted"
DebugLabel = "debug"
ContainerPort = 80
ContainerPortStr = "80"
)
type ResourceLimits struct {
@@ -45,18 +46,17 @@ type ResourceRequirements struct {
}
type WorkerConfig struct {
SrcPort uint16 `yaml:"port" default:"8897"`
DstPort uint16 `yaml:"srvport" default:"8897"`
SrvPort uint16 `yaml:"srvport" default:"8897"`
}
type HubConfig struct {
SrcPort uint16 `yaml:"port" default:"8898"`
DstPort uint16 `yaml:"srvport" default:"80"`
Port uint16 `yaml:"port" default:"8898"`
SrvPort uint16 `yaml:"srvport" default:"8898"`
}
type FrontConfig struct {
SrcPort uint16 `yaml:"port" default:"8899"`
DstPort uint16 `yaml:"srvport" default:"80"`
Port uint16 `yaml:"port" default:"8899"`
SrvPort uint16 `yaml:"srvport" default:"8899"`
}
type ProxyConfig struct {
@@ -83,9 +83,10 @@ type TapConfig struct {
Proxy ProxyConfig `yaml:"proxy"`
PodRegexStr string `yaml:"regex" default:".*"`
Namespaces []string `yaml:"namespaces"`
AllNamespaces bool `yaml:"allnamespaces" default:"true"`
SelfNamespace string `yaml:"selfnamespace" default:"kubeshark"`
StorageLimit string `yaml:"storagelimit" default:"200MB"`
PersistentStorage bool `yaml:"persistentstorage" default:"false"`
StorageLimit string `yaml:"storagelimit" default:"200Mi"`
StorageClass string `yaml:"storageclass" default:"standard"`
DryRun bool `yaml:"dryrun" default:"false"`
Pcap string `yaml:"pcap" default:""`
Resources ResourcesConfig `yaml:"resources"`
@@ -103,24 +104,11 @@ func (config *TapConfig) PodRegex() *regexp.Regexp {
return podRegex
}
func (config *TapConfig) StorageLimitBytes() int64 {
storageLimitBytes, err := utils.HumanReadableToBytes(config.StorageLimit)
if err != nil {
log.Fatal().Err(err).Send()
}
return storageLimitBytes
}
func (config *TapConfig) Validate() error {
_, compileErr := regexp.Compile(config.PodRegexStr)
if compileErr != nil {
return fmt.Errorf("%s is not a valid regex %s", config.PodRegexStr, compileErr)
}
_, parseHumanDataSizeErr := utils.HumanReadableToBytes(config.StorageLimit)
if parseHumanDataSizeErr != nil {
return fmt.Errorf("Could not parse --%s value %s", StorageLimitLabel, config.StorageLimit)
}
return nil
}
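On the CLI side, the renamed proxy fields and the new storage fields keep the yaml tags shown above, so a config fragment would look roughly like this (a sketch; the file name and values are illustrative, not defaults enforced by this diff):
```shell
# Sketch of a config fragment using the renamed/new keys; values are illustrative.
cat > kubeshark-config.yaml <<'EOF'
tap:
  proxy:
    hub:
      port: 8898      # local proxy/port-forward port
      srvport: 8898   # server-side port (the old DstPort default of 80 is gone)
  persistentstorage: true
  storagelimit: 200Mi
  storageclass: standard
EOF
```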

2
go.mod
View File

@@ -6,7 +6,6 @@ require (
github.com/creasty/defaults v1.5.2
github.com/docker/docker v20.10.24+incompatible
github.com/docker/go-connections v0.4.0
github.com/docker/go-units v0.4.0
github.com/fsnotify/fsnotify v1.5.1
github.com/gin-gonic/gin v1.7.7
github.com/google/go-github/v37 v37.0.0
@@ -40,6 +39,7 @@ require (
github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/docker/distribution v2.8.0+incompatible // indirect
github.com/docker/go-units v0.4.0 // indirect
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
github.com/fvbommel/sortorder v1.0.2 // indirect

17
go.sum
View File

@@ -27,6 +27,7 @@ cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW
cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
cloud.google.com/go v0.98.0/go.mod h1:ua6Ush4NALrHk5QXDWnjvZHN93OuF0HfuEPq9I1X0cM=
cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
cloud.google.com/go v0.100.2 h1:t9Iw5QH5v4XtlEQaCtUY7x6sCABps8sW0acw7e2WQ6Y=
cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
@@ -113,8 +114,10 @@ github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL
github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 h1:7aWHqerlJ41y6FOsEUvknqgXnGmJyJSbjhAWq5pO4F8=
github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
@@ -522,6 +525,7 @@ github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zM
github.com/otiai10/copy v1.10.0 h1:znyI7l134wNg/wDktoVQPxPkgvhDfGCYUasey+h0rDQ=
github.com/otiai10/copy v1.10.0/go.mod h1:rSaLseMUsZFFbsFGc7wCJnnkTAvdc5L6VWxPE4308Ww=
github.com/otiai10/mint v1.5.1 h1:XaPLeE+9vGbuyEHem1JNk3bYc7KKqyI/na0/mLd/Kks=
github.com/otiai10/mint v1.5.1/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
@@ -631,6 +635,7 @@ github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo=
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs=
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
@@ -645,6 +650,7 @@ github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
@@ -786,6 +792,8 @@ golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -818,6 +826,7 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -901,11 +910,17 @@ golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -917,6 +932,7 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -1158,6 +1174,7 @@ gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/readline.v1 v1.0.0-20160726135117-62c6fe619375/go.mod h1:lNEQeAhU009zbRxng+XOj5ITVgY24WcbNnQopyfKoYQ=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/sourcemap.v1 v1.0.5 h1:inv58fC9f9J3TK2Y2R1NPntXEn3/wjWHkonhIUODNTI=
gopkg.in/sourcemap.v1 v1.0.5/go.mod h1:2RlvNNSMglmRrcvhfuzp4hQHwOtjxlbjX7UPY/GXb78=

View File

@@ -1,5 +1,5 @@
apiVersion: v2
appVersion: "39.6"
appVersion: "40.1"
description: The API Traffic Analyzer for Kubernetes
home: https://kubeshark.co
keywords:
@@ -22,4 +22,4 @@ name: kubeshark
sources:
- https://github.com/kubeshark/kubeshark/tree/master/helm-chart
type: application
version: "39.6"
version: "40.1"

View File

@@ -19,6 +19,7 @@ rules:
- pods
- services
- endpoints
- persistentvolumeclaims
verbs:
- list
- get

View File

@@ -18,14 +18,19 @@ spec:
- name: POD_REGEX
value: '{{ .Values.tap.regex }}'
- name: NAMESPACES
value: '{{ .Values.tap.allnamespaces | ternary "" .Values.tap.namespaces }}'
- name: STORAGE_LIMIT
value: '{{ .Values.tap.storagelimit }}'
value: '{{ gt (len .Values.tap.namespaces) 0 | ternary (join "," .Values.tap.namespaces) "" }}'
- name: LICENSE
value: '{{ .Values.license }}'
- name: SCRIPTING_ENV
value: '{}'
- name: SCRIPTING_SCRIPTS
value: '[]'
image: '{{ .Values.tap.docker.registry }}/hub:{{ .Values.tap.docker.tag }}'
imagePullPolicy: '{{ .Values.tap.docker.imagePullPolicy }}'
imagePullPolicy: '{{ .Values.tap.docker.imagepullpolicy }}'
name: kubeshark-hub
ports:
- containerPort: 80
hostPort: {{ .Values.tap.proxy.hub.srvport }}
resources:
limits:
cpu: '{{ .Values.tap.resources.hub.limits.cpu }}'

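Note on the NAMESPACES change above: the old allnamespaces ternary is replaced by a pipeline over the namespaces list itself. A minimal sketch of how that pipeline evaluates, assuming Go's text/template plus sprig (the same function set Helm exposes); the values map is a stand-in for .Values.tap and is illustrative only:

package main

import (
	"os"
	"text/template"

	"github.com/Masterminds/sprig/v3"
)

func main() {
	// Stand-in for .Values.tap; illustrative values only.
	values := map[string]interface{}{
		"namespaces": []string{"default", "sock-shop"},
	}
	// The expression from the hub Deployment: join the list with commas,
	// or render an empty string (meaning "all namespaces") when it is empty.
	const expr = `{{ gt (len .namespaces) 0 | ternary (join "," .namespaces) "" }}`
	tmpl := template.Must(template.New("ns").Funcs(sprig.TxtFuncMap()).Parse(expr))
	_ = tmpl.Execute(os.Stdout, values) // prints: default,sock-shop
}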

@@ -20,8 +20,11 @@ spec:
- name: REACT_APP_HUB_PORT
value: "8898"
image: '{{ .Values.tap.docker.registry }}/front:{{ .Values.tap.docker.tag }}'
imagePullPolicy: '{{ .Values.tap.docker.imagePullPolicy }}'
imagePullPolicy: '{{ .Values.tap.docker.imagepullpolicy }}'
name: kubeshark-front
ports:
- containerPort: 80
hostPort: {{ .Values.tap.proxy.front.srvport }}
readinessProbe:
failureThreshold: 3
periodSeconds: 1


@@ -0,0 +1,22 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
{{- if .Values.tap.persistentstorage }}
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  creationTimestamp: null
  labels:
    kubeshark-cli-version: v1
    kubeshark-created-by: kubeshark
    kubeshark-managed-by: kubeshark
  name: kubeshark-persistent-volume-claim
  namespace: '{{ .Values.tap.selfnamespace }}'
spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: '{{ .Values.tap.storagelimit }}'
  storageClassName: '{{ .Values.tap.storageclass }}'
status: {}
{{- end }}

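The claim is rendered only when tap.persistentstorage is true; otherwise the chart emits nothing for this file beyond the document separator. A tiny illustration of the {{- if }} gate, runnable with Go's text/template (the template is trimmed down; only the gate matters here):

package main

import (
	"os"
	"text/template"
)

// Trimmed-down stand-in for the template above.
const pvc = `---
{{- if .persistentstorage }}
apiVersion: v1
kind: PersistentVolumeClaim
{{- end }}
`

func main() {
	t := template.Must(template.New("pvc").Parse(pvc))
	// With persistentstorage false, only "---" survives the render.
	_ = t.Execute(os.Stdout, map[string]bool{"persistentstorage": false})
}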

@@ -32,7 +32,7 @@ spec:
- -i
- any
- -port
- "8897"
- '{{ .Values.tap.proxy.worker.srvport }}'
- -packet-capture
- '{{ .Values.tap.packetcapture }}'
- -servicemesh
@@ -40,8 +40,11 @@ spec:
- -procfs
- /hostproc
image: '{{ .Values.tap.docker.registry }}/worker:{{ .Values.tap.docker.tag }}'
imagePullPolicy: '{{ .Values.tap.docker.imagePullPolicy }}'
imagePullPolicy: '{{ .Values.tap.docker.imagepullpolicy }}'
name: kubeshark-worker-daemon-set
ports:
- containerPort: {{ .Values.tap.proxy.worker.srvport }}
hostPort: {{ .Values.tap.proxy.worker.srvport }}
resources:
limits:
cpu: '{{ .Values.tap.resources.worker.limits.cpu }}'
@@ -67,6 +70,10 @@ spec:
- mountPath: /sys
name: sys
readOnly: true
{{- if .Values.tap.persistentstorage }}
- mountPath: /app/data
name: kubeshark-persistent-volume
{{- end }}
dnsPolicy: ClusterFirstWithHostNet
hostNetwork: true
serviceAccountName: kubeshark-service-account
@@ -83,3 +90,8 @@ spec:
- hostPath:
path: /sys
name: sys
{{- if .Values.tap.persistentstorage }}
- name: kubeshark-persistent-volume
persistentVolumeClaim:
claimName: kubeshark-persistent-volume-claim
{{- end }}


@@ -6,20 +6,20 @@ tap:
imagepullsecrets: []
proxy:
worker:
port: 8897
srvport: 8897
hub:
port: 8898
srvport: 80
srvport: 8898
front:
port: 8899
srvport: 80
srvport: 8899
host: 127.0.0.1
regex: .*
namespaces: []
allnamespaces: true
selfnamespace: kubeshark
storagelimit: 200MB
persistentstorage: false
storagelimit: 200Mi
storageclass: standard
dryrun: false
pcap: ""
resources:

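The values above split each proxy entry into port (the local port the CLI proxies from) and srvport (the port served inside the cluster, now also used as the hostPort). A hedged sketch of the corresponding config structs; field and key names are inferred from values.yaml and the Go call sites in this diff (config.Config.Tap.Proxy.Hub.SrvPort and friends), while the exact types are assumptions:

// Sketch only; the real definitions live in config/configStructs.
type PortMapping struct {
	Port    uint16 `yaml:"port"`    // local port the CLI's proxy listens on
	SrvPort uint16 `yaml:"srvport"` // in-cluster container/host port
}

type ProxyConfig struct {
	Worker PortMapping `yaml:"worker"`
	Hub    PortMapping `yaml:"hub"`
	Front  PortMapping `yaml:"front"`
	Host   string      `yaml:"host"`
}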

@@ -87,36 +87,6 @@ func (connector *Connector) PostWorkerPodToHub(pod *v1.Pod) {
}
}
type postStorageLimit struct {
Limit int64 `json:"limit"`
}
func (connector *Connector) PostStorageLimitToHub(limit int64) {
payload := &postStorageLimit{
Limit: limit,
}
postStorageLimitUrl := fmt.Sprintf("%s/pcaps/set-storage-limit", connector.url)
if payloadMarshalled, err := json.Marshal(payload); err != nil {
log.Error().Err(err).Msg("Failed to marshal the storage limit:")
} else {
ok := false
for !ok {
var resp *http.Response
if resp, err = utils.Post(postStorageLimitUrl, "application/json", bytes.NewBuffer(payloadMarshalled), connector.client); err != nil || resp.StatusCode != http.StatusOK {
if _, ok := err.(*url.Error); ok {
break
}
log.Warn().Err(err).Msg("Failed sending the storage limit to Hub. Retrying...")
} else {
log.Debug().Int("limit", int(limit)).Msg("Reported storage limit to Hub:")
return
}
time.Sleep(DefaultSleep)
}
}
}
type postRegexRequest struct {
Regex string `json:"regex"`
Namespaces []string `json:"namespaces"`
@@ -181,6 +151,26 @@ func (connector *Connector) PostLicense(license string) {
}
}
func (connector *Connector) PostLicenseSingle(license string) {
postLicenseUrl := fmt.Sprintf("%s/license", connector.url)
payload := postLicenseRequest{
License: license,
}
if payloadMarshalled, err := json.Marshal(payload); err != nil {
log.Error().Err(err).Msg("Failed to marshal the payload:")
} else {
var resp *http.Response
if resp, err = utils.Post(postLicenseUrl, "application/json", bytes.NewBuffer(payloadMarshalled), connector.client); err != nil || resp.StatusCode != http.StatusOK {
log.Warn().Err(err).Msg("Failed sending the license to Hub.")
} else {
log.Debug().Str("license", license).Msg("Reported license to Hub:")
return
}
}
}
func (connector *Connector) PostEnv(env map[string]interface{}) {
if len(env) == 0 {
return

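Net effect of this hunk: the retrying PostStorageLimitToHub is gone (the limit now travels as the PVC's storage request rather than an HTTP call), and license reporting gains a single-shot variant. A minimal sketch of that fire-once POST pattern with hypothetical names; the real code goes through utils.Post and zerolog:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// postJSONOnce posts a JSON payload exactly once and reports any failure
// to the caller instead of retrying in a loop.
func postJSONOnce(client *http.Client, url string, payload interface{}) error {
	body, err := json.Marshal(payload)
	if err != nil {
		return err
	}
	resp, err := client.Post(url, "application/json", bytes.NewBuffer(body))
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status: %s", resp.Status)
	}
	return nil
}

func main() {
	// Hypothetical endpoint; fails unless a Hub is actually reachable.
	_ = postJSONOnce(http.DefaultClient, "http://localhost:8898/license", map[string]string{"license": ""})
}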

@@ -14,6 +14,9 @@ const (
ServiceAccountName = SelfResourcesPrefix + "service-account"
WorkerDaemonSetName = SelfResourcesPrefix + "worker-daemon-set"
WorkerPodName = SelfResourcesPrefix + "worker"
PersistentVolumeName = SelfResourcesPrefix + "persistent-volume"
PersistentVolumeClaimName = SelfResourcesPrefix + "persistent-volume-claim"
PersistentVolumeHostPath = "/app/data"
MinKubernetesServerVersion = "1.16.0"
)

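For orientation: SelfResourcesPrefix is defined elsewhere in this package; assuming it expands to "kubeshark-" (consistent with the manifest names in this diff), the new constants resolve as follows. Fragment for illustration only:

const SelfResourcesPrefix = "kubeshark-" // assumption; defined elsewhere in the real package

const (
	PersistentVolumeName      = SelfResourcesPrefix + "persistent-volume"       // "kubeshark-persistent-volume"
	PersistentVolumeClaimName = SelfResourcesPrefix + "persistent-volume-claim" // "kubeshark-persistent-volume-claim"
	PersistentVolumeHostPath  = "/app/data"                                     // mount path inside the worker pods
)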

@@ -3,7 +3,7 @@ package kubernetes
import (
"bytes"
"context"
"errors"
"encoding/json"
"fmt"
"io"
"net/url"
@@ -110,14 +110,6 @@ func NewProviderInCluster() (*Provider, error) {
}, nil
}
func (provider *Provider) CurrentNamespace() (string, error) {
if provider.kubernetesConfig == nil {
return "", errors.New("kubernetesConfig is nil, The CLI will not work with in-cluster kubernetes config, use a kubeconfig file when initializing the Provider")
}
ns, _, err := provider.kubernetesConfig.Namespace()
return ns, err
}
func (provider *Provider) WaitUtilNamespaceDeleted(ctx context.Context, name string) error {
fieldSelector := fmt.Sprintf("metadata.name=%s", name)
var limit int64 = 1
@@ -212,10 +204,33 @@ func (provider *Provider) BuildHubPod(opts *PodOptions) (*core.Pod, error) {
command = append(command, "-debug")
}
// Scripting environment variables
scriptingEnvMarshalled, err := json.Marshal(config.Config.Scripting.Env)
if err != nil {
return nil, err
}
// Scripting scripts
scripts, err := config.Config.Scripting.GetScripts()
if err != nil {
return nil, err
}
if scripts == nil {
scripts = []*misc.Script{}
}
scriptsMarshalled, err := json.Marshal(scripts)
if err != nil {
return nil, err
}
containers := []core.Container{
{
Name: opts.PodName,
Image: opts.PodImage,
Name: opts.PodName,
Image: opts.PodImage,
Ports: []core.ContainerPort{{
HostPort: int32(config.Config.Tap.Proxy.Hub.SrvPort),
ContainerPort: configStructs.ContainerPort,
}},
ImagePullPolicy: opts.ImagePullPolicy,
Command: command,
Resources: core.ResourceRequirements{
@@ -238,12 +253,16 @@ func (provider *Provider) BuildHubPod(opts *PodOptions) (*core.Pod, error) {
Value: strings.Join(provider.GetNamespaces(), ","),
},
{
Name: "STORAGE_LIMIT",
Value: config.Config.Tap.StorageLimit,
Name: "LICENSE",
Value: config.Config.License,
},
{
Name: "LICENSE",
Value: "",
Name: "SCRIPTING_ENV",
Value: string(scriptingEnvMarshalled),
},
{
Name: "SCRIPTING_SCRIPTS",
Value: string(scriptsMarshalled),
},
},
},
@@ -266,17 +285,8 @@ func (provider *Provider) BuildHubPod(opts *PodOptions) (*core.Pod, error) {
Containers: containers,
DNSPolicy: core.DNSClusterFirstWithHostNet,
TerminationGracePeriodSeconds: new(int64),
Tolerations: []core.Toleration{
{
Operator: core.TolerationOpExists,
Effect: core.TaintEffectNoExecute,
},
{
Operator: core.TolerationOpExists,
Effect: core.TaintEffectNoSchedule,
},
},
ImagePullSecrets: opts.ImagePullSecrets,
Tolerations: provider.BuildTolerations(),
ImagePullSecrets: opts.ImagePullSecrets,
},
}
@@ -316,15 +326,19 @@ func (provider *Provider) BuildFrontPod(opts *PodOptions, hubHost string, hubPor
containers := []core.Container{
{
Name: opts.PodName,
Image: docker.GetFrontImage(),
Name: opts.PodName,
Image: docker.GetFrontImage(),
Ports: []core.ContainerPort{{
HostPort: int32(config.Config.Tap.Proxy.Front.SrvPort),
ContainerPort: configStructs.ContainerPort,
}},
ImagePullPolicy: opts.ImagePullPolicy,
VolumeMounts: volumeMounts,
ReadinessProbe: &core.Probe{
FailureThreshold: 3,
ProbeHandler: core.ProbeHandler{
TCPSocket: &core.TCPSocketAction{
Port: intstr.Parse("80"),
Port: intstr.Parse(configStructs.ContainerPortStr),
},
},
PeriodSeconds: 1,
@@ -376,17 +390,8 @@ func (provider *Provider) BuildFrontPod(opts *PodOptions, hubHost string, hubPor
Volumes: volumes,
DNSPolicy: core.DNSClusterFirstWithHostNet,
TerminationGracePeriodSeconds: new(int64),
Tolerations: []core.Toleration{
{
Operator: core.TolerationOpExists,
Effect: core.TaintEffectNoExecute,
},
{
Operator: core.TolerationOpExists,
Effect: core.TaintEffectNoSchedule,
},
},
ImagePullSecrets: opts.ImagePullSecrets,
Tolerations: provider.BuildTolerations(),
ImagePullSecrets: opts.ImagePullSecrets,
},
}
@@ -422,8 +427,8 @@ func (provider *Provider) BuildHubService(namespace string) *core.Service {
Ports: []core.ServicePort{
{
Name: HubServiceName,
TargetPort: intstr.FromInt(80),
Port: 80,
TargetPort: intstr.FromInt(configStructs.ContainerPort),
Port: configStructs.ContainerPort,
},
},
Type: core.ServiceTypeClusterIP,
@@ -447,8 +452,8 @@ func (provider *Provider) BuildFrontService(namespace string) *core.Service {
Ports: []core.ServicePort{
{
Name: FrontServiceName,
TargetPort: intstr.FromInt(80),
Port: 80,
TargetPort: intstr.FromInt(configStructs.ContainerPort),
Port: configStructs.ContainerPort,
},
},
Type: core.ServiceTypeClusterIP,
@@ -560,9 +565,22 @@ func (provider *Provider) BuildClusterRole() *rbac.ClusterRole {
},
Rules: []rbac.PolicyRule{
{
APIGroups: []string{"", "extensions", "apps"},
Resources: []string{"pods", "services", "endpoints"},
Verbs: []string{"list", "get", "watch"},
APIGroups: []string{
"",
"extensions",
"apps",
},
Resources: []string{
"pods",
"services",
"endpoints",
"persistentvolumeclaims",
},
Verbs: []string{
"list",
"get",
"watch",
},
},
},
}
@@ -661,6 +679,11 @@ func (provider *Provider) RemoveService(ctx context.Context, namespace string, s
return provider.handleRemovalError(err)
}
func (provider *Provider) RemovePersistentVolumeClaim(ctx context.Context, namespace string, persistentVolumeClaimName string) error {
err := provider.clientSet.CoreV1().PersistentVolumeClaims(namespace).Delete(ctx, persistentVolumeClaimName, metav1.DeleteOptions{})
return provider.handleRemovalError(err)
}
func (provider *Provider) RemoveDaemonSet(ctx context.Context, namespace string, daemonSetName string) error {
err := provider.clientSet.AppsV1().DaemonSets(namespace).Delete(ctx, daemonSetName, metav1.DeleteOptions{})
return provider.handleRemovalError(err)
@@ -676,6 +699,38 @@ func (provider *Provider) handleRemovalError(err error) error {
return err
}
func (provider *Provider) BuildPersistentVolumeClaim() (*core.PersistentVolumeClaim, error) {
capacity, err := resource.ParseQuantity(config.Config.Tap.StorageLimit)
if err != nil {
return nil, fmt.Errorf("invalid capacity for the workers: %s", config.Config.Tap.StorageLimit)
}
storageClassName := config.Config.Tap.StorageClass
return &core.PersistentVolumeClaim{
TypeMeta: metav1.TypeMeta{
Kind: "PersistentVolumeClaim",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: PersistentVolumeClaimName,
Namespace: config.Config.Tap.SelfNamespace,
Labels: buildWithDefaultLabels(map[string]string{
fmt.Sprintf("%s-cli-version", misc.Program): misc.RBACVersion,
}, provider),
},
Spec: core.PersistentVolumeClaimSpec{
Resources: core.ResourceRequirements{
Requests: core.ResourceList{
core.ResourceStorage: capacity,
},
},
AccessModes: []core.PersistentVolumeAccessMode{core.ReadWriteMany},
StorageClassName: &storageClassName,
},
}, nil
}
func (provider *Provider) BuildWorkerDaemonSet(
podImage string,
podName string,
@@ -711,7 +766,7 @@ func (provider *Provider) BuildWorkerDaemonSet(
"-i",
"any",
"-port",
"8897",
fmt.Sprintf("%d", config.Config.Tap.Proxy.Worker.SrvPort),
"-packet-capture",
config.Config.Tap.PacketCapture,
}
@@ -794,13 +849,40 @@ func (provider *Provider) BuildWorkerDaemonSet(
ReadOnly: true,
}
// Persistent volume and its mount
persistentVolume := core.Volume{
Name: PersistentVolumeName,
VolumeSource: core.VolumeSource{
PersistentVolumeClaim: &core.PersistentVolumeClaimVolumeSource{
ClaimName: PersistentVolumeClaimName,
},
},
}
persistentVolumeMount := core.VolumeMount{
Name: PersistentVolumeName,
MountPath: PersistentVolumeHostPath,
}
// VolumeMount(s)
volumeMounts := []core.VolumeMount{
procfsVolumeMount,
sysfsVolumeMount,
}
if config.Config.Tap.PersistentStorage {
volumeMounts = append(volumeMounts, persistentVolumeMount)
}
// Containers
containers := []core.Container{
{
Name: podName,
Image: podImage,
Name: podName,
Image: podImage,
Ports: []core.ContainerPort{{
HostPort: int32(config.Config.Tap.Proxy.Worker.SrvPort),
ContainerPort: int32(config.Config.Tap.Proxy.Worker.SrvPort),
}},
ImagePullPolicy: imagePullPolicy,
VolumeMounts: []core.VolumeMount{procfsVolumeMount, sysfsVolumeMount},
VolumeMounts: volumeMounts,
Command: command,
Resources: core.ResourceRequirements{
Limits: core.ResourceList{
@@ -822,18 +904,13 @@ func (provider *Provider) BuildWorkerDaemonSet(
},
}
// Tolerations
tolerations := []core.Toleration{
{
Operator: core.TolerationOpExists,
Effect: core.TaintEffectNoExecute,
},
// Volume(s)
volumes := []core.Volume{
procfsVolume,
sysfsVolume,
}
if !config.Config.Tap.IgnoreTainted {
tolerations = append(tolerations, core.Toleration{
Operator: core.TolerationOpExists,
Effect: core.TaintEffectNoSchedule,
})
if config.Config.Tap.PersistentStorage {
volumes = append(volumes, persistentVolume)
}
// Pod
@@ -849,10 +926,10 @@ func (provider *Provider) BuildWorkerDaemonSet(
ServiceAccountName: ServiceAccountName,
HostNetwork: true,
Containers: containers,
Volumes: []core.Volume{procfsVolume, sysfsVolume},
Volumes: volumes,
DNSPolicy: core.DNSClusterFirstWithHostNet,
TerminationGracePeriodSeconds: new(int64),
Tolerations: tolerations,
Tolerations: provider.BuildTolerations(),
ImagePullSecrets: imagePullSecrets,
},
}
@@ -890,6 +967,28 @@ func (provider *Provider) BuildWorkerDaemonSet(
}, nil
}
func (provider *Provider) BuildTolerations() []core.Toleration {
tolerations := []core.Toleration{
{
Operator: core.TolerationOpExists,
Effect: core.TaintEffectNoExecute,
},
}
if !config.Config.Tap.IgnoreTainted {
tolerations = append(tolerations, core.Toleration{
Operator: core.TolerationOpExists,
Effect: core.TaintEffectNoSchedule,
})
}
return tolerations
}
func (provider *Provider) CreatePersistentVolumeClaim(ctx context.Context, namespace string, persistentVolumeClaim *core.PersistentVolumeClaim) (*core.PersistentVolumeClaim, error) {
return provider.clientSet.CoreV1().PersistentVolumeClaims(namespace).Create(ctx, persistentVolumeClaim, metav1.CreateOptions{})
}
func (provider *Provider) ApplyWorkerDaemonSet(
ctx context.Context,
namespace string,
@@ -1105,16 +1204,10 @@ func (provider *Provider) GetKubernetesVersion() (*semver.SemVersion, error) {
}
func (provider *Provider) GetNamespaces() []string {
if config.Config.Tap.AllNamespaces {
return []string{K8sAllNamespaces}
} else if len(config.Config.Tap.Namespaces) > 0 {
if len(config.Config.Tap.Namespaces) > 0 {
return utils.Unique(config.Config.Tap.Namespaces)
} else {
currentNamespace, err := provider.CurrentNamespace()
if err != nil {
log.Fatal().Err(err).Msg("Error getting current namespace!")
}
return []string{currentNamespace}
return []string{K8sAllNamespaces}
}
}

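With allnamespaces removed, an empty tap.namespaces list now means cluster-wide capture (the K8sAllNamespaces sentinel), and the CurrentNamespace fallback disappears along with the method itself. utils.Unique deduplicates the user-supplied list; a minimal sketch of such a helper, assuming a generic implementation (the real one may differ):

// Unique returns s with duplicates removed, preserving first-seen order.
func Unique[T comparable](s []T) []T {
	seen := make(map[T]struct{}, len(s))
	out := make([]T, 0, len(s))
	for _, v := range s {
		if _, ok := seen[v]; !ok {
			seen[v] = struct{}{}
			out = append(out, v)
		}
	}
	return out
}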

@@ -111,7 +111,12 @@ func (d *DaemonSet) GenerateApplyConfiguration(name string, namespace string, po
// Volumes
for _, v := range p.Volumes {
volume := applyconfcore.Volume()
volume.WithName(v.Name).WithHostPath(applyconfcore.HostPathVolumeSource().WithPath(v.HostPath.Path))
if v.HostPath != nil {
volume.WithName(v.Name).WithHostPath(applyconfcore.HostPathVolumeSource().WithPath(v.HostPath.Path))
}
if v.PersistentVolumeClaim != nil {
volume.WithName(v.Name).WithPersistentVolumeClaim(applyconfcore.PersistentVolumeClaimVolumeSource().WithClaimName(v.PersistentVolumeClaim.ClaimName))
}
podSpec.WithVolumes(volume)
}

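The loop above previously assumed every volume was a HostPath; it now branches on whichever volume source is populated. A self-contained sketch of the two shapes it builds, using the same client-go apply-configuration builders:

package main

import (
	"fmt"

	applyconfcore "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	// HostPath volume, as used for /proc and /sys.
	hostPath := applyconfcore.Volume().
		WithName("proc").
		WithHostPath(applyconfcore.HostPathVolumeSource().WithPath("/proc"))

	// PersistentVolumeClaim volume, as used for the new persistent storage.
	pvc := applyconfcore.Volume().
		WithName("kubeshark-persistent-volume").
		WithPersistentVolumeClaim(
			applyconfcore.PersistentVolumeClaimVolumeSource().
				WithClaimName("kubeshark-persistent-volume-claim"),
		)

	fmt.Println(*hostPath.Name, *pvc.Name)
}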

@@ -3,6 +3,7 @@ package kubernetes
import (
"context"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/config/configStructs"
"github.com/kubeshark/kubeshark/docker"
"github.com/rs/zerolog/log"
@@ -21,6 +22,21 @@ func CreateWorkers(
tls bool,
debug bool,
) error {
if config.Config.Tap.PersistentStorage {
persistentVolumeClaim, err := kubernetesProvider.BuildPersistentVolumeClaim()
if err != nil {
return err
}
if _, err = kubernetesProvider.CreatePersistentVolumeClaim(
ctx,
namespace,
persistentVolumeClaim,
); err != nil {
return err
}
}
image := docker.GetWorkerImage()
var serviceAccountName string

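Note that when persistentstorage is on, the claim is created on every tap run and any error is returned as-is. A hedged fragment (not in the diff) of how a caller could tolerate a claim that already exists across repeated runs, assuming apimachinery's standard errors helpers (k8serrors is k8s.io/apimachinery/pkg/api/errors):

// Sketch only; CreatePersistentVolumeClaim's signature is taken from this diff.
if _, err := kubernetesProvider.CreatePersistentVolumeClaim(ctx, namespace, persistentVolumeClaim); err != nil && !k8serrors.IsAlreadyExists(err) {
	return err
}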

@@ -19,6 +19,7 @@ rules:
- pods
- services
- endpoints
- persistentvolumeclaims
verbs:
- list
- get


@@ -18,12 +18,17 @@ spec:
- name: POD_REGEX
value: .*
- name: NAMESPACES
- name: STORAGE_LIMIT
value: 200MB
- name: LICENSE
- name: SCRIPTING_ENV
value: '{}'
- name: SCRIPTING_SCRIPTS
value: '[]'
image: docker.io/kubeshark/hub:latest
imagePullPolicy: Always
name: kubeshark-hub
ports:
- containerPort: 80
hostPort: 8898
resources:
limits:
cpu: 750m


@@ -22,6 +22,9 @@ spec:
image: docker.io/kubeshark/front:latest
imagePullPolicy: Always
name: kubeshark-front
ports:
- containerPort: 80
hostPort: 8899
readinessProbe:
failureThreshold: 3
periodSeconds: 1


@@ -0,0 +1,20 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY KUBESHARK CLI. DO NOT EDIT!
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  creationTimestamp: null
  labels:
    kubeshark-cli-version: v1
    kubeshark-created-by: kubeshark
    kubeshark-managed-by: kubeshark
  name: kubeshark-persistent-volume-claim
  namespace: kubeshark
spec:
  accessModes:
  - ReadWriteMany
  resources:
    requests:
      storage: 200Mi
  storageClassName: standard
status: {}


@@ -42,6 +42,9 @@ spec:
image: docker.io/kubeshark/worker:latest
imagePullPolicy: Always
name: kubeshark-worker-daemon-set
ports:
- containerPort: 8897
hostPort: 8897
resources:
limits:
cpu: 750m
@@ -67,6 +70,8 @@ spec:
- mountPath: /sys
name: sys
readOnly: true
- mountPath: /app/data
name: kubeshark-persistent-volume
dnsPolicy: ClusterFirstWithHostNet
hostNetwork: true
serviceAccountName: kubeshark-service-account
@@ -83,3 +88,6 @@ spec:
- hostPath:
path: /sys
name: sys
- name: kubeshark-persistent-volume
persistentVolumeClaim:
claimName: kubeshark-persistent-volume-claim

misc/fsUtils/globUtils.go Normal file

@@ -0,0 +1,22 @@
package fsUtils

import (
	"fmt"
	"os"
	"path/filepath"
)

// RemoveFilesByExtension deletes every file directly under dirPath whose
// name ends with the given extension. The leading slash in the pattern is
// harmless; filepath.Join cleans the resulting path.
func RemoveFilesByExtension(dirPath string, ext string) error {
	files, err := filepath.Glob(filepath.Join(dirPath, fmt.Sprintf("/*.%s", ext)))
	if err != nil {
		return err
	}

	for _, f := range files {
		if err := os.Remove(f); err != nil {
			return err
		}
	}

	return nil
}

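Hypothetical usage, tied to the "clean YAML files before generating the new ones" behavior of the manifests and helm-chart commands; the dumpDir variable below is made up:

// Remove previously generated YAML before writing the fresh set.
if err := fsUtils.RemoveFilesByExtension(dumpDir, "yaml"); err != nil {
	log.Error().Err(err).Msg("Failed to clean the old YAML files:")
}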

@@ -108,6 +108,11 @@ func cleanUpRestrictedMode(ctx context.Context, kubernetesProvider *kubernetes.P
handleDeletionError(err, resourceDesc, &leftoverResources)
}
if err := kubernetesProvider.RemovePersistentVolumeClaim(ctx, selfResourcesNamespace, kubernetes.PersistentVolumeClaimName); err != nil {
resourceDesc := fmt.Sprintf("Persistent Volume %s in namespace %s", kubernetes.PersistentVolumeClaimName, selfResourcesNamespace)
handleDeletionError(err, resourceDesc, &leftoverResources)
}
if err := kubernetesProvider.RemoveDaemonSet(ctx, selfResourcesNamespace, kubernetes.WorkerDaemonSetName); err != nil {
resourceDesc := fmt.Sprintf("DaemonSet %s in namespace %s", kubernetes.WorkerDaemonSetName, selfResourcesNamespace)
handleDeletionError(err, resourceDesc, &leftoverResources)


@@ -94,7 +94,7 @@ func createSelfHubPod(ctx context.Context, kubernetesProvider *kubernetes.Provid
}
func createFrontPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, opts *kubernetes.PodOptions) error {
pod, err := kubernetesProvider.BuildFrontPod(opts, config.Config.Tap.Proxy.Host, fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.SrcPort))
pod, err := kubernetesProvider.BuildFrontPod(opts, config.Config.Tap.Proxy.Host, fmt.Sprintf("%d", config.Config.Tap.Proxy.Hub.Port))
if err != nil {
return err
}


@@ -1,7 +0,0 @@
package utils

import "github.com/docker/go-units"

func HumanReadableToBytes(humanReadableSize string) (int64, error) {
	return units.FromHumanSize(humanReadableSize)
}
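This helper became dead code: the storage limit is no longer converted to bytes and POSTed to the Hub, it is handed verbatim to resource.ParseQuantity in BuildPersistentVolumeClaim. The two parsers also disagree on units, which is presumably why the default moved from 200MB to 200Mi. A small contrast, runnable as-is:

package main

import (
	"fmt"

	units "github.com/docker/go-units"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	n, _ := units.FromHumanSize("200MB") // decimal SI: 200 * 1000 * 1000
	fmt.Println(n)                       // 200000000

	q, err := resource.ParseQuantity("200Mi") // binary: 200 * 1024 * 1024
	fmt.Println(q.Value(), err)               // 209715200 <nil>

	// The old default would not survive the new code path: "MB" is not a
	// valid Kubernetes quantity suffix, so ParseQuantity rejects it.
	_, err = resource.ParseQuantity("200MB")
	fmt.Println(err != nil) // true
}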