diff --git a/Makefile b/Makefile index e11d22086..247593930 100644 --- a/Makefile +++ b/Makefile @@ -14,15 +14,15 @@ export VER?=0.0 help: ## This help. @awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) -build-debug: ## Build kubeshark CLI for debuging. +build-debug: ## Build CLI for debugging. export GCLFAGS='-gcflags="all=-N -l"' ${MAKE} build-base -build: ## Build kubeshark CLI. +build: ## Build CLI. export LDFLAGS_EXT='-s -w' ${MAKE} build-base -build-base: ## Build kubeshark CLI binary (select the platform via GOOS / GOARCH env variables). +build-base: ## Build CLI binary (select the platform via GOOS / GOARCH env variables). go build ${GCLFAGS} -ldflags="${LDFLAGS_EXT} \ -X 'github.com/kubeshark/kubeshark/kubeshark.GitCommitHash=$(COMMIT_HASH)' \ -X 'github.com/kubeshark/kubeshark/kubeshark.Branch=$(GIT_BRANCH)' \ @@ -32,7 +32,7 @@ build-base: ## Build kubeshark CLI binary (select the platform via GOOS / GOARCH -o bin/kubeshark_$(SUFFIX) kubeshark.go && \ cd bin && shasum -a 256 kubeshark_${SUFFIX} > kubeshark_${SUFFIX}.sha256 -build-all: ## Build kubeshark CLI for all supported platforms. +build-all: ## Build CLI for all supported platforms. echo "Compiling for every OS and Platform" && \ mkdir -p bin && sed s/_VER_/$(VER)/g RELEASE.md.TEMPLATE > bin/README.md && \ $(MAKE) build GOOS=linux GOARCH=amd64 && \ diff --git a/cmd/check.go b/cmd/check.go index 9c561cfd8..5be1bca1b 100644 --- a/cmd/check.go +++ b/cmd/check.go @@ -1,14 +1,17 @@ package cmd import ( + "fmt" + + "github.com/kubeshark/kubeshark/misc" "github.com/spf13/cobra" ) var checkCmd = &cobra.Command{ Use: "check", - Short: "Check the Kubeshark resources for potential problems", + Short: fmt.Sprintf("Check the %s resources for potential problems", misc.Software), RunE: func(cmd *cobra.Command, args []string) error { - runKubesharkCheck() + runCheck() return nil }, } diff --git a/cmd/check/imagePullInCluster.go b/cmd/check/imagePullInCluster.go index 0ff0334b2..31571ec4b 100644 --- a/cmd/check/imagePullInCluster.go +++ b/cmd/check/imagePullInCluster.go @@ -8,6 +8,7 @@ import ( "github.com/kubeshark/kubeshark/docker" "github.com/kubeshark/kubeshark/kubernetes" + "github.com/kubeshark/kubeshark/misc" "github.com/kubeshark/kubeshark/utils" "github.com/rs/zerolog/log" core "k8s.io/api/core/v1" @@ -18,7 +19,7 @@ func ImagePullInCluster(ctx context.Context, kubernetesProvider *kubernetes.Prov log.Info().Str("procedure", "image-pull-in-cluster").Msg("Checking:") namespace := "default" - podName := "kubeshark-test" + podName := fmt.Sprintf("%s-test", misc.Program) defer func() { if err := kubernetesProvider.RemovePod(ctx, namespace, podName); err != nil { @@ -40,7 +41,7 @@ func ImagePullInCluster(ctx context.Context, kubernetesProvider *kubernetes.Prov } if err := checkImagePulled(ctx, kubernetesProvider, namespace, podName); err != nil { - log.Printf("%v cluster is not able to pull kubeshark containers from docker hub, err: %v", fmt.Sprintf(utils.Red, "✗"), err) + log.Printf("%v cluster is not able to pull %s containers from docker hub, err: %v", fmt.Sprintf(utils.Red, "✗"), misc.Program, err) log.Error(). Str("namespace", namespace). Str("pod", podName).
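The technique applied throughout this patch is centralized branding: every hardcoded "kubeshark"/"Kubeshark" string is replaced with references to the misc.Program and misc.Software constants that the patch introduces in misc/consts.go further down. A minimal, self-contained Go sketch of the pattern, with the constants inlined here purely for illustration:

package main

import "fmt"

// Branding constants, mirroring the misc package added later in this diff:
// Software is the display name, Program is the binary / resource-prefix name.
const (
	Software = "Kubeshark"
	Program  = "kubeshark"
)

func main() {
	// Call sites derive names from the constants instead of hardcoding
	// "kubeshark-test", "kubeshark-front", etc. at each site.
	podName := fmt.Sprintf("%s-test", Program)
	fmt.Printf("%s check pod: %s\n", Software, podName)
}

Keeping the display name, program name, and website in one package means a rebrand or white-label build touches a single file instead of every message and resource name.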
diff --git a/cmd/checkRunner.go b/cmd/checkRunner.go index 69765518b..bf20455a7 100644 --- a/cmd/checkRunner.go +++ b/cmd/checkRunner.go @@ -7,6 +7,7 @@ import ( "os" "github.com/kubeshark/kubeshark/cmd/check" + "github.com/kubeshark/kubeshark/misc" "github.com/kubeshark/kubeshark/utils" "github.com/rs/zerolog/log" ) @@ -16,8 +17,8 @@ var ( embedFS embed.FS ) -func runKubesharkCheck() { - log.Info().Msg("Checking the Kubeshark resources...") +func runCheck() { + log.Info().Msg(fmt.Sprintf("Checking the %s resources...", misc.Software)) ctx, cancel := context.WithCancel(context.Background()) defer cancel() // cancel will be called when this function exits @@ -47,9 +48,9 @@ func runKubesharkCheck() { log.Info().Msg(fmt.Sprintf(utils.Green, "All checks are passed.")) } else { log.Error(). - Str("command1", fmt.Sprintf("kubeshark %s", cleanCmd.Use)). - Str("command2", fmt.Sprintf("kubeshark %s", tapCmd.Use)). - Msg(fmt.Sprintf(utils.Red, "There are issues in your Kubeshark resources! Run these commands:")) + Str("command1", fmt.Sprintf("%s %s", misc.Program, cleanCmd.Use)). + Str("command2", fmt.Sprintf("%s %s", misc.Program, tapCmd.Use)). + Msg(fmt.Sprintf(utils.Red, fmt.Sprintf("There are issues in your %s resources! Run these commands:", misc.Software))) os.Exit(1) } } diff --git a/cmd/clean.go b/cmd/clean.go index 958775ba9..ea04490e5 100644 --- a/cmd/clean.go +++ b/cmd/clean.go @@ -1,12 +1,15 @@ package cmd import ( + "fmt" + + "github.com/kubeshark/kubeshark/misc" "github.com/spf13/cobra" ) var cleanCmd = &cobra.Command{ Use: "clean", - Short: "Removes all kubeshark resources", + Short: fmt.Sprintf("Removes all %s resources", misc.Software), RunE: func(cmd *cobra.Command, args []string) error { performCleanCommand() return nil diff --git a/cmd/cleanRunner.go b/cmd/cleanRunner.go index fa89d1d19..8d0675073 100644 --- a/cmd/cleanRunner.go +++ b/cmd/cleanRunner.go @@ -10,5 +10,5 @@ func performCleanCommand() { return } - finishKubesharkExecution(kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.SelfNamespace) + finishSelfExecution(kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.SelfNamespace) } diff --git a/cmd/common.go b/cmd/common.go index 7f84bf8c3..84d83a080 100644 --- a/cmd/common.go +++ b/cmd/common.go @@ -88,25 +88,25 @@ func getKubernetesProviderForCli() (*kubernetes.Provider, error) { func handleKubernetesProviderError(err error) { var clusterBehindProxyErr *kubernetes.ClusterBehindProxyError if ok := errors.As(err, &clusterBehindProxyErr); ok { - log.Error().Msg(fmt.Sprintf("Cannot establish http-proxy connection to the Kubernetes cluster. If you’re using Lens or similar tool, please run kubeshark with regular kubectl config using --%v %v=$HOME/.kube/config flag", config.SetCommandName, config.KubeConfigPathConfigName)) + log.Error().Msg(fmt.Sprintf("Cannot establish http-proxy connection to the Kubernetes cluster. 
If you’re using Lens or a similar tool, please run '%s' with a regular kubectl config using the --%v %v=$HOME/.kube/config flag", misc.Program, config.SetCommandName, config.KubeConfigPathConfigName)) } else { log.Error().Err(err).Send() } } -func finishKubesharkExecution(kubernetesProvider *kubernetes.Provider, isNsRestrictedMode bool, kubesharkResourcesNamespace string) { +func finishSelfExecution(kubernetesProvider *kubernetes.Provider, isNsRestrictedMode bool, selfNamespace string) { removalCtx, cancel := context.WithTimeout(context.Background(), cleanupTimeout) defer cancel() dumpLogsIfNeeded(removalCtx, kubernetesProvider) - resources.CleanUpKubesharkResources(removalCtx, cancel, kubernetesProvider, isNsRestrictedMode, kubesharkResourcesNamespace) + resources.CleanUpSelfResources(removalCtx, cancel, kubernetesProvider, isNsRestrictedMode, selfNamespace) } func dumpLogsIfNeeded(ctx context.Context, kubernetesProvider *kubernetes.Provider) { if !config.Config.DumpLogs { return } - kubesharkDir := misc.GetDotFolderPath() - filePath := path.Join(kubesharkDir, fmt.Sprintf("kubeshark_logs_%s.zip", time.Now().Format("2006_01_02__15_04_05"))) + dotDir := misc.GetDotFolderPath() + filePath := path.Join(dotDir, fmt.Sprintf("%s_logs_%s.zip", misc.Program, time.Now().Format("2006_01_02__15_04_05"))) if err := fsUtils.DumpLogs(ctx, kubernetesProvider, filePath); err != nil { log.Error().Err(err).Msg("Failed to dump logs.") } diff --git a/cmd/config.go b/cmd/config.go index 77dcacabc..15ac9bd8a 100644 --- a/cmd/config.go +++ b/cmd/config.go @@ -6,6 +6,7 @@ import ( "github.com/creasty/defaults" "github.com/kubeshark/kubeshark/config" "github.com/kubeshark/kubeshark/config/configStructs" + "github.com/kubeshark/kubeshark/misc" "github.com/kubeshark/kubeshark/utils" "github.com/rs/zerolog/log" "github.com/spf13/cobra" @@ -13,7 +14,7 @@ import ( var configCmd = &cobra.Command{ Use: "config", - Short: "Generate Kubeshark config with default values", + Short: fmt.Sprintf("Generate %s config with default values", misc.Software), RunE: func(cmd *cobra.Command, args []string) error { configWithDefaults, err := config.GetConfigWithDefaults() if err != nil { diff --git a/cmd/logs.go b/cmd/logs.go index 3b8e8c65b..2b0bacfd6 100644 --- a/cmd/logs.go +++ b/cmd/logs.go @@ -2,11 +2,13 @@ package cmd import ( "context" + "fmt" "github.com/creasty/defaults" "github.com/kubeshark/kubeshark/config" "github.com/kubeshark/kubeshark/config/configStructs" "github.com/kubeshark/kubeshark/errormessage" + "github.com/kubeshark/kubeshark/misc" "github.com/kubeshark/kubeshark/misc/fsUtils" "github.com/rs/zerolog/log" "github.com/spf13/cobra" @@ -44,5 +46,5 @@ func init() { log.Debug().Err(err).Send() } - logsCmd.Flags().StringP(configStructs.FileLogsName, "f", defaultLogsConfig.FileStr, "Path for zip file (default current \\kubeshark_logs.zip)") + logsCmd.Flags().StringP(configStructs.FileLogsName, "f", defaultLogsConfig.FileStr, fmt.Sprintf("Path for zip file (default current \\%s_logs.zip)", misc.Program)) } diff --git a/cmd/proxyRunner.go b/cmd/proxyRunner.go index ea835d67f..f6be44a98 100644 --- a/cmd/proxyRunner.go +++ b/cmd/proxyRunner.go @@ -9,6 +9,7 @@ import ( "github.com/kubeshark/kubeshark/config/configStructs" "github.com/kubeshark/kubeshark/internal/connect" "github.com/kubeshark/kubeshark/kubernetes" + "github.com/kubeshark/kubeshark/misc" "github.com/kubeshark/kubeshark/utils" "github.com/rs/zerolog/log" ) @@ -25,7 +26,7 @@ func runProxy() { exists, err := kubernetesProvider.DoesServiceExist(ctx,
config.Config.SelfNamespace, kubernetes.FrontServiceName) if err != nil { log.Error(). - Str("service", "kubeshark"). + Str("service", misc.Program). Err(err). Msg("Failed to found service!") cancel() @@ -35,7 +36,7 @@ func runProxy() { if !exists { log.Error(). Str("service", kubernetes.FrontServiceName). - Str("command", fmt.Sprintf("kubeshark %s", tapCmd.Use)). + Str("command", fmt.Sprintf("%s %s", misc.Program, tapCmd.Use)). Msg("Service not found! You should run the command first:") cancel() return @@ -68,7 +69,7 @@ func runProxy() { } func okToOpen(url string) { - log.Info().Str("url", url).Msg(fmt.Sprintf(utils.Green, "Kubeshark is available at:")) + log.Info().Str("url", url).Msg(fmt.Sprintf(utils.Green, fmt.Sprintf("%s is available at:", misc.Software))) if !config.Config.HeadlessMode { utils.OpenBrowser(url) diff --git a/cmd/root.go b/cmd/root.go index f4d7330da..8b8c4c28e 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -5,16 +5,17 @@ import ( "github.com/creasty/defaults" "github.com/kubeshark/kubeshark/config" + "github.com/kubeshark/kubeshark/misc" "github.com/rs/zerolog/log" "github.com/spf13/cobra" ) var rootCmd = &cobra.Command{ Use: "kubeshark", - Short: "Kubeshark: The Observability and Monitoring Tool For Kubernetes", - Long: `Kubeshark: The Observability and Monitoring Tool For Kubernetes + Short: fmt.Sprintf("%s: The Observability and Monitoring Tool For Kubernetes", misc.Software), + Long: fmt.Sprintf(`%s: The Observability and Monitoring Tool For Kubernetes An extensible Kubernetes-aware network sniffer and kernel tracer. -For more info: https://kubeshark.co`, +For more info: %s`, misc.Software, misc.Website), PersistentPreRunE: func(cmd *cobra.Command, args []string) error { if err := config.InitConfig(cmd); err != nil { log.Fatal().Err(err).Send() diff --git a/cmd/tap.go b/cmd/tap.go index 7cef6ba0f..efc0454ac 100644 --- a/cmd/tap.go +++ b/cmd/tap.go @@ -2,11 +2,13 @@ package cmd import ( "errors" + "fmt" "github.com/creasty/defaults" "github.com/kubeshark/kubeshark/config" "github.com/kubeshark/kubeshark/config/configStructs" "github.com/kubeshark/kubeshark/errormessage" + "github.com/kubeshark/kubeshark/misc" "github.com/rs/zerolog/log" "github.com/spf13/cobra" ) @@ -32,7 +34,7 @@ var tapCmd = &cobra.Command{ log.Info(). Str("limit", config.Config.Tap.StorageLimit). - Msg("Kubeshark will store the traffic up to a limit. Oldest entries will be removed once the limit is reached.") + Msg(fmt.Sprintf("%s will store the traffic up to a limit. 
Oldest entries will be removed once the limit is reached.", misc.Software)) return nil }, @@ -55,7 +57,7 @@ func init() { tapCmd.Flags().BoolP(configStructs.AllNamespacesLabel, "A", defaultTapConfig.AllNamespaces, "Tap all namespaces.") tapCmd.Flags().String(configStructs.StorageLimitLabel, defaultTapConfig.StorageLimit, "Override the default max entries db size.") tapCmd.Flags().Bool(configStructs.DryRunLabel, defaultTapConfig.DryRun, "Preview of all pods matching the regex, without tapping them.") - tapCmd.Flags().StringP(configStructs.PcapLabel, "p", defaultTapConfig.Pcap, "Capture from a PCAP snapshot of Kubeshark (.tar.gz) using your Docker Daemon instead of Kubernetes.") + tapCmd.Flags().StringP(configStructs.PcapLabel, "p", defaultTapConfig.Pcap, fmt.Sprintf("Capture from a PCAP snapshot of %s (.tar.gz) using your Docker Daemon instead of Kubernetes.", misc.Software)) tapCmd.Flags().Bool(configStructs.ServiceMeshLabel, defaultTapConfig.ServiceMesh, "Capture the encrypted traffic if the cluster is configured with a service mesh and with mTLS.") tapCmd.Flags().Bool(configStructs.TlsLabel, defaultTapConfig.Tls, "Capture the traffic that's encrypted with OpenSSL or Go crypto/tls libraries.") tapCmd.Flags().Bool(configStructs.DebugLabel, defaultTapConfig.Debug, "Enable the debug mode.") diff --git a/cmd/tapPcapRunner.go b/cmd/tapPcapRunner.go index df2c1d959..00a0653ff 100644 --- a/cmd/tapPcapRunner.go +++ b/cmd/tapPcapRunner.go @@ -16,6 +16,7 @@ import ( "github.com/kubeshark/kubeshark/docker" "github.com/kubeshark/kubeshark/internal/connect" "github.com/kubeshark/kubeshark/kubernetes" + "github.com/kubeshark/kubeshark/misc" "github.com/kubeshark/kubeshark/utils" "github.com/rs/zerolog/log" v1 "k8s.io/api/core/v1" @@ -127,9 +128,9 @@ func createAndStartContainers( ) { log.Info().Msg("Creating containers...") - nameFront := "kubeshark-front" - nameHub := "kubeshark-hub" - nameWorker := "kubeshark-worker" + nameFront := fmt.Sprintf("%s-front", misc.Program) + nameHub := fmt.Sprintf("%s-hub", misc.Program) + nameWorker := fmt.Sprintf("%s-worker", misc.Program) err = cleanUpOldContainers(ctx, cli, nameFront, nameHub, nameWorker) if err != nil { @@ -336,7 +337,7 @@ func pcap(tarPath string) { Msg(fmt.Sprintf(utils.Green, "Hub is available at:")) url := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Front.SrcPort) - log.Info().Str("url", url).Msg(fmt.Sprintf(utils.Green, "Kubeshark is available at:")) + log.Info().Str("url", url).Msg(fmt.Sprintf(utils.Green, fmt.Sprintf("%s is available at:", misc.Software))) if !config.Config.HeadlessMode { utils.OpenBrowser(url) diff --git a/cmd/tapRunner.go b/cmd/tapRunner.go index 1a505cd32..beb0c8338 100644 --- a/cmd/tapRunner.go +++ b/cmd/tapRunner.go @@ -9,6 +9,7 @@ import ( "github.com/kubeshark/kubeshark/docker" "github.com/kubeshark/kubeshark/internal/connect" + "github.com/kubeshark/kubeshark/misc" "github.com/kubeshark/kubeshark/resources" "github.com/kubeshark/kubeshark/utils" @@ -26,9 +27,9 @@ import ( const cleanupTimeout = time.Minute type tapState struct { - startTime time.Time - targetNamespaces []string - kubesharkServiceAccountExists bool + startTime time.Time + targetNamespaces []string + selfServiceAccountExists bool } var state tapState @@ -61,7 +62,7 @@ func tap() { if config.Config.IsNsRestrictedMode() { if len(state.targetNamespaces) != 1 || !utils.Contains(state.targetNamespaces, config.Config.SelfNamespace) { - log.Error().Msg(fmt.Sprintf("Kubeshark can't resolve IPs in other namespaces when running in namespace restricted 
mode. You can use the same namespace for --%s and --%s", configStructs.NamespacesLabel, config.SelfNamespaceConfigName)) + log.Error().Msg(fmt.Sprintf("%s can't resolve IPs in other namespaces when running in namespace restricted mode. You can use the same namespace for --%s and --%s", misc.Software, configStructs.NamespacesLabel, config.SelfNamespaceConfigName)) return } } @@ -76,13 +77,13 @@ func tap() { return } - log.Info().Msg("Waiting for the creation of Kubeshark resources...") - if state.kubesharkServiceAccountExists, err = resources.CreateHubResources(ctx, kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.SelfNamespace, config.Config.Tap.Resources.Hub, config.Config.ImagePullPolicy(), config.Config.Tap.Debug); err != nil { + log.Info().Msg(fmt.Sprintf("Waiting for the creation of %s resources...", misc.Software)) + if state.selfServiceAccountExists, err = resources.CreateHubResources(ctx, kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.SelfNamespace, config.Config.Tap.Resources.Hub, config.Config.ImagePullPolicy(), config.Config.Tap.Debug); err != nil { var statusError *k8serrors.StatusError if errors.As(err, &statusError) && (statusError.ErrStatus.Reason == metav1.StatusReasonAlreadyExists) { - log.Warn().Msg("Kubeshark is already running in this namespace, change the `kubeshark-resources-namespace` configuration or run `kubeshark clean` to remove the currently running Kubeshark instance") + log.Warn().Msg(fmt.Sprintf("%s is already running in this namespace, change the `selfnamespace` configuration or run `%s clean` to remove the currently running %s instance", misc.Software, misc.Program, misc.Software)) } else { - defer resources.CleanUpKubesharkResources(ctx, cancel, kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.SelfNamespace) + defer resources.CleanUpSelfResources(ctx, cancel, kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.SelfNamespace) log.Error().Err(errormessage.FormatError(err)).Msg("Error creating resources!") } @@ -100,7 +101,7 @@ func tap() { } func finishTapExecution(kubernetesProvider *kubernetes.Provider) { - finishKubesharkExecution(kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.SelfNamespace) + finishSelfExecution(kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.SelfNamespace) } /* @@ -124,15 +125,15 @@ func printTargettedPodsPreview(ctx context.Context, kubernetesProvider *kubernet func startWorkerSyncer(ctx context.Context, cancel context.CancelFunc, provider *kubernetes.Provider, targetNamespaces []string, startTime time.Time) error { workerSyncer, err := kubernetes.CreateAndStartWorkerSyncer(ctx, provider, kubernetes.WorkerSyncerConfig{ - TargetNamespaces: targetNamespaces, - PodFilterRegex: *config.Config.Tap.PodRegex(), - SelfNamespace: config.Config.SelfNamespace, - WorkerResources: config.Config.Tap.Resources.Worker, - ImagePullPolicy: config.Config.ImagePullPolicy(), - KubesharkServiceAccountExists: state.kubesharkServiceAccountExists, - ServiceMesh: config.Config.Tap.ServiceMesh, - Tls: config.Config.Tap.Tls, - Debug: config.Config.Tap.Debug, + TargetNamespaces: targetNamespaces, + PodFilterRegex: *config.Config.Tap.PodRegex(), + SelfNamespace: config.Config.SelfNamespace, + WorkerResources: config.Config.Tap.Resources.Worker, + ImagePullPolicy: config.Config.ImagePullPolicy(), + SelfServiceAccountExists: state.selfServiceAccountExists, + ServiceMesh: config.Config.Tap.ServiceMesh, + Tls: config.Config.Tap.Tls, + Debug: 
config.Config.Tap.Debug, }, startTime) if err != nil { @@ -176,7 +177,7 @@ func printNoPodsFoundSuggestion(targetNamespaces []string) { if !utils.Contains(targetNamespaces, kubernetes.K8sAllNamespaces) { suggestionStr = ". You can also try selecting a different namespace with -n or target all namespaces with -A" } - log.Warn().Msg(fmt.Sprintf("Did not find any currently running pods that match the regex argument, kubeshark will automatically target matching pods if any are created later%s", suggestionStr)) + log.Warn().Msg(fmt.Sprintf("Did not find any currently running pods that match the regex argument, %s will automatically target matching pods if any are created later%s", misc.Software, suggestionStr)) } func getK8sTapManagerErrorText(err kubernetes.K8sTapManagerError) string { @@ -436,7 +437,7 @@ func postFrontStarted(ctx context.Context, kubernetesProvider *kubernetes.Provid startProxyReportErrorIfAny(kubernetesProvider, ctx, cancel, kubernetes.FrontServiceName, configStructs.ProxyHubPortLabel, config.Config.Tap.Proxy.Front.SrcPort, config.Config.Tap.Proxy.Front.DstPort, "") url := kubernetes.GetLocalhostOnPort(config.Config.Tap.Proxy.Front.SrcPort) - log.Info().Str("url", url).Msg(fmt.Sprintf(utils.Green, "Kubeshark is available at:")) + log.Info().Str("url", url).Msg(fmt.Sprintf(utils.Green, fmt.Sprintf("%s is available at:", misc.Software))) if !config.Config.HeadlessMode { utils.OpenBrowser(url) diff --git a/config/configStruct.go b/config/configStruct.go index 4f0a533bb..12923666d 100644 --- a/config/configStruct.go +++ b/config/configStruct.go @@ -46,7 +46,7 @@ func (config *ConfigStruct) ImagePullPolicy() v1.PullPolicy { } func (config *ConfigStruct) IsNsRestrictedMode() bool { - return config.SelfNamespace != "kubeshark" // Notice "kubeshark" string must match the default SelfNamespace + return config.SelfNamespace != misc.Program // Notice "kubeshark" string must match the default SelfNamespace } func (config *ConfigStruct) KubeConfigPath() string { diff --git a/config/configStructs/logsConfig.go b/config/configStructs/logsConfig.go index 8f100adf4..7392b267e 100644 --- a/config/configStructs/logsConfig.go +++ b/config/configStructs/logsConfig.go @@ -4,6 +4,8 @@ import ( "fmt" "os" "path" + + "github.com/kubeshark/kubeshark/misc" ) const ( @@ -18,7 +20,7 @@ func (config *LogsConfig) Validate() error { if config.FileStr == "" { _, err := os.Getwd() if err != nil { - return fmt.Errorf("failed to get PWD, %v (try using `kubeshark logs -f )`", err) + return fmt.Errorf("failed to get PWD, %v (try using `%s logs -f )`", err, misc.Program) } } @@ -28,7 +30,7 @@ func (config *LogsConfig) Validate() error { func (config *LogsConfig) FilePath() string { if config.FileStr == "" { pwd, _ := os.Getwd() - return path.Join(pwd, "kubeshark_logs.zip") + return path.Join(pwd, fmt.Sprintf("%s_logs.zip", misc.Program)) } return config.FileStr diff --git a/errormessage/errormessage.go b/errormessage/errormessage.go index a904518d5..fa7c3dff4 100644 --- a/errormessage/errormessage.go +++ b/errormessage/errormessage.go @@ -6,6 +6,7 @@ import ( regexpsyntax "regexp/syntax" "github.com/kubeshark/kubeshark/config" + "github.com/kubeshark/kubeshark/misc" k8serrors "k8s.io/apimachinery/pkg/api/errors" ) @@ -16,9 +17,10 @@ func FormatError(err error) error { var errorNew error if k8serrors.IsForbidden(err) { errorNew = fmt.Errorf("insufficient permissions: %w. 
"+ - "supply the required permission or control Kubeshark's access to namespaces by setting %s "+ + "supply the required permission or control %s's access to namespaces by setting %s "+ "in the config file or setting the targetted namespace with --%s %s=", err, + misc.Software, config.SelfNamespaceConfigName, config.SetCommandName, config.SelfNamespaceConfigName) diff --git a/kubernetes/consts.go b/kubernetes/consts.go index 0c32f342d..7a221ef36 100644 --- a/kubernetes/consts.go +++ b/kubernetes/consts.go @@ -1,26 +1,25 @@ package kubernetes const ( - KubesharkResourcesPrefix = "kubeshark-" - FrontPodName = KubesharkResourcesPrefix + "front" + SelfResourcesPrefix = "kubeshark-" + FrontPodName = SelfResourcesPrefix + "front" FrontServiceName = FrontPodName - HubPodName = KubesharkResourcesPrefix + "hub" + HubPodName = SelfResourcesPrefix + "hub" HubServiceName = HubPodName - ClusterRoleBindingName = KubesharkResourcesPrefix + "cluster-role-binding" - ClusterRoleName = KubesharkResourcesPrefix + "cluster-role" + ClusterRoleBindingName = SelfResourcesPrefix + "cluster-role-binding" + ClusterRoleName = SelfResourcesPrefix + "cluster-role" K8sAllNamespaces = "" - RoleBindingName = KubesharkResourcesPrefix + "role-binding" - RoleName = KubesharkResourcesPrefix + "role" - ServiceAccountName = KubesharkResourcesPrefix + "service-account" - WorkerDaemonSetName = KubesharkResourcesPrefix + "worker-daemon-set" - WorkerPodName = KubesharkResourcesPrefix + "worker" - ConfigMapName = KubesharkResourcesPrefix + "config" + RoleBindingName = SelfResourcesPrefix + "role-binding" + RoleName = SelfResourcesPrefix + "role" + ServiceAccountName = SelfResourcesPrefix + "service-account" + WorkerDaemonSetName = SelfResourcesPrefix + "worker-daemon-set" + WorkerPodName = SelfResourcesPrefix + "worker" + ConfigMapName = SelfResourcesPrefix + "config" MinKubernetesServerVersion = "1.16.0" ) const ( - LabelPrefixApp = "app.kubernetes.io/" - LabelManagedBy = LabelPrefixApp + "managed-by" - LabelCreatedBy = LabelPrefixApp + "created-by" - LabelValueKubeshark = "kubeshark" + LabelPrefixApp = "app.kubernetes.io/" + LabelManagedBy = LabelPrefixApp + "managed-by" + LabelCreatedBy = LabelPrefixApp + "created-by" ) diff --git a/kubernetes/provider.go b/kubernetes/provider.go index 778a3cde9..e751aa5c4 100644 --- a/kubernetes/provider.go +++ b/kubernetes/provider.go @@ -11,6 +11,7 @@ import ( "regexp" "github.com/kubeshark/kubeshark/docker" + "github.com/kubeshark/kubeshark/misc" "github.com/kubeshark/kubeshark/semver" "github.com/rs/zerolog/log" auth "k8s.io/api/authorization/v1" @@ -56,21 +57,21 @@ func NewProvider(kubeConfigPath string, contextName string) (*Provider, error) { if err != nil { if clientcmd.IsEmptyConfig(err) { return nil, fmt.Errorf("couldn't find the kube config file, or file is empty (%s)\n"+ - "you can set alternative kube config file path by adding the kube-config-path field to the kubeshark config file, err: %w", kubeConfigPath, err) + "you can set alternative kube config file path by adding the kube-config-path field to the %s config file, err: %w", kubeConfigPath, misc.Program, err) } if clientcmd.IsConfigurationInvalid(err) { return nil, fmt.Errorf("invalid kube config file (%s)\n"+ - "you can set alternative kube config file path by adding the kube-config-path field to the kubeshark config file, err: %w", kubeConfigPath, err) + "you can set alternative kube config file path by adding the kube-config-path field to the %s config file, err: %w", kubeConfigPath, misc.Program, err) } return nil, 
fmt.Errorf("error while using kube config (%s)\n"+ - "you can set alternative kube config file path by adding the kube-config-path field to the kubeshark config file, err: %w", kubeConfigPath, err) + "you can set alternative kube config file path by adding the kube-config-path field to the %s config file, err: %w", kubeConfigPath, misc.Program, err) } clientSet, err := getClientSet(restClientConfig) if err != nil { return nil, fmt.Errorf("error while using kube config (%s)\n"+ - "you can set alternative kube config file path by adding the kube-config-path field to the kubeshark config file, err: %w", kubeConfigPath, err) + "you can set alternative kube config file path by adding the kube-config-path field to the %s config file, err: %w", kubeConfigPath, misc.Program, err) } log.Debug(). @@ -83,8 +84,8 @@ func NewProvider(kubeConfigPath string, contextName string) (*Provider, error) { clientSet: clientSet, kubernetesConfig: kubernetesConfig, clientConfig: *restClientConfig, - managedBy: LabelValueKubeshark, - createdBy: LabelValueKubeshark, + managedBy: misc.Program, + createdBy: misc.Program, }, nil } @@ -103,14 +104,14 @@ func NewProviderInCluster() (*Provider, error) { clientSet: clientSet, kubernetesConfig: nil, // not relevant in cluster clientConfig: *restClientConfig, - managedBy: LabelValueKubeshark, - createdBy: LabelValueKubeshark, + managedBy: misc.Program, + createdBy: misc.Program, }, nil } func (provider *Provider) CurrentNamespace() (string, error) { if provider.kubernetesConfig == nil { - return "", errors.New("kubernetesConfig is nil, kubeshark cli will not work with in-cluster kubernetes config, use a kubeconfig file when initializing the Provider") + return "", errors.New("kubernetesConfig is nil, the CLI will not work with in-cluster kubernetes config, use a kubeconfig file when initializing the Provider") } ns, _, err := provider.kubernetesConfig.Namespace() return ns, err @@ -466,14 +467,14 @@ func (provider *Provider) doesResourceExist(resource interface{}, err error) (bo return resource != nil, nil } -func (provider *Provider) CreateKubesharkRBAC(ctx context.Context, namespace string, serviceAccountName string, clusterRoleName string, clusterRoleBindingName string, version string, resources []string) error { +func (provider *Provider) CreateSelfRBAC(ctx context.Context, namespace string, serviceAccountName string, clusterRoleName string, clusterRoleBindingName string, version string, resources []string) error { serviceAccount := &core.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ Name: serviceAccountName, Labels: map[string]string{ - "kubeshark-cli-version": version, - LabelManagedBy: provider.managedBy, - LabelCreatedBy: provider.createdBy, + fmt.Sprintf("%s-cli-version", misc.Program): version, + LabelManagedBy: provider.managedBy, + LabelCreatedBy: provider.createdBy, }, }, } @@ -481,9 +482,9 @@ func (provider *Provider) CreateKubesharkRBAC(ctx context.Context, namespace str ObjectMeta: metav1.ObjectMeta{ Name: clusterRoleName, Labels: map[string]string{ - "kubeshark-cli-version": version, - LabelManagedBy: provider.managedBy, - LabelCreatedBy: provider.createdBy, + fmt.Sprintf("%s-cli-version", misc.Program): version, + LabelManagedBy: provider.managedBy, + LabelCreatedBy: provider.createdBy, }, }, Rules: []rbac.PolicyRule{ @@ -498,9 +499,9 @@ func (provider *Provider) CreateKubesharkRBAC(ctx context.Context, namespace str ObjectMeta: metav1.ObjectMeta{ Name: clusterRoleBindingName, Labels: map[string]string{ - "kubeshark-cli-version": version, - LabelManagedBy:
provider.managedBy, - LabelCreatedBy: provider.createdBy, + fmt.Sprintf("%s-cli-version", misc.Program): version, + LabelManagedBy: provider.managedBy, + LabelCreatedBy: provider.createdBy, }, }, RoleRef: rbac.RoleRef{ @@ -531,14 +532,14 @@ func (provider *Provider) CreateKubesharkRBAC(ctx context.Context, namespace str return nil } -func (provider *Provider) CreateKubesharkRBACNamespaceRestricted(ctx context.Context, namespace string, serviceAccountName string, roleName string, roleBindingName string, version string) error { +func (provider *Provider) CreateSelfRBACNamespaceRestricted(ctx context.Context, namespace string, serviceAccountName string, roleName string, roleBindingName string, version string) error { serviceAccount := &core.ServiceAccount{ ObjectMeta: metav1.ObjectMeta{ Name: serviceAccountName, Labels: map[string]string{ - "kubeshark-cli-version": version, - LabelManagedBy: provider.managedBy, - LabelCreatedBy: provider.createdBy, + fmt.Sprintf("%s-cli-version", misc.Program): version, + LabelManagedBy: provider.managedBy, + LabelCreatedBy: provider.createdBy, }, }, } @@ -546,9 +547,9 @@ func (provider *Provider) CreateKubesharkRBACNamespaceRestricted(ctx context.Con ObjectMeta: metav1.ObjectMeta{ Name: roleName, Labels: map[string]string{ - "kubeshark-cli-version": version, - LabelManagedBy: provider.managedBy, - LabelCreatedBy: provider.createdBy, + fmt.Sprintf("%s-cli-version", misc.Program): version, + LabelManagedBy: provider.managedBy, + LabelCreatedBy: provider.createdBy, }, }, Rules: []rbac.PolicyRule{ @@ -563,9 +564,9 @@ func (provider *Provider) CreateKubesharkRBACNamespaceRestricted(ctx context.Con ObjectMeta: metav1.ObjectMeta{ Name: roleBindingName, Labels: map[string]string{ - "kubeshark-cli-version": version, - LabelManagedBy: provider.managedBy, - LabelCreatedBy: provider.createdBy, + fmt.Sprintf("%s-cli-version", misc.Program): version, + LabelManagedBy: provider.managedBy, + LabelCreatedBy: provider.createdBy, }, }, RoleRef: rbac.RoleRef{ @@ -840,7 +841,7 @@ func (provider *Provider) ResetWorkerDaemonSet(ctx context.Context, namespace st workerContainer.WithImage(podImage) nodeSelectorRequirement := applyconfcore.NodeSelectorRequirement() - nodeSelectorRequirement.WithKey("kubeshark-non-existing-label") + nodeSelectorRequirement.WithKey(fmt.Sprintf("%s-non-existing-label", misc.Program)) nodeSelectorRequirement.WithOperator(core.NodeSelectorOpExists) nodeSelectorTerm := applyconfcore.NodeSelectorTerm() nodeSelectorTerm.WithMatchExpressions(nodeSelectorRequirement) diff --git a/kubernetes/proxy.go b/kubernetes/proxy.go index 36a43e948..9d39aa5c1 100644 --- a/kubernetes/proxy.go +++ b/kubernetes/proxy.go @@ -19,12 +19,12 @@ import ( ) const k8sProxyApiPrefix = "/" -const kubesharkServicePort = 80 +const selfServicePort = 80 -func StartProxy(kubernetesProvider *Provider, proxyHost string, srcPort uint16, kubesharkNamespace string, kubesharkServiceName string, cancel context.CancelFunc) (*http.Server, error) { +func StartProxy(kubernetesProvider *Provider, proxyHost string, srcPort uint16, selfNamespace string, selfServiceName string, cancel context.CancelFunc) (*http.Server, error) { log.Info(). - Str("namespace", kubesharkNamespace). - Str("service", kubesharkServiceName). + Str("namespace", selfNamespace). + Str("service", selfServiceName). Int("src-port", int(srcPort)). 
Msg("Starting proxy...") @@ -40,8 +40,8 @@ func StartProxy(kubernetesProvider *Provider, proxyHost string, srcPort uint16, return nil, err } mux := http.NewServeMux() - mux.Handle(k8sProxyApiPrefix, getRerouteHttpHandlerKubesharkAPI(proxyHandler, kubesharkNamespace, kubesharkServiceName)) - mux.Handle("/static/", getRerouteHttpHandlerKubesharkStatic(proxyHandler, kubesharkNamespace, kubesharkServiceName)) + mux.Handle(k8sProxyApiPrefix, getRerouteHttpHandlerSelfAPI(proxyHandler, selfNamespace, selfServiceName)) + mux.Handle("/static/", getRerouteHttpHandlerSelfStatic(proxyHandler, selfNamespace, selfServiceName)) l, err := net.Listen("tcp", fmt.Sprintf("%s:%d", proxyHost, int(srcPort))) if err != nil { @@ -62,15 +62,15 @@ func StartProxy(kubernetesProvider *Provider, proxyHost string, srcPort uint16, return server, nil } -func getKubesharkHubProxiedHostAndPath(kubesharkNamespace string, kubesharkServiceName string) string { - return fmt.Sprintf("/api/v1/namespaces/%s/services/%s:%d/proxy", kubesharkNamespace, kubesharkServiceName, kubesharkServicePort) +func getSelfHubProxiedHostAndPath(selfNamespace string, selfServiceName string) string { + return fmt.Sprintf("/api/v1/namespaces/%s/services/%s:%d/proxy", selfNamespace, selfServiceName, selfServicePort) } func GetLocalhostOnPort(port uint16) string { return fmt.Sprintf("http://localhost:%d", port) } -func getRerouteHttpHandlerKubesharkAPI(proxyHandler http.Handler, kubesharkNamespace string, kubesharkServiceName string) http.Handler { +func getRerouteHttpHandlerSelfAPI(proxyHandler http.Handler, selfNamespace string, selfServiceName string) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Access-Control-Allow-Credentials", "true") @@ -82,19 +82,19 @@ func getRerouteHttpHandlerKubesharkAPI(proxyHandler http.Handler, kubesharkNames return } - proxiedPath := getKubesharkHubProxiedHostAndPath(kubesharkNamespace, kubesharkServiceName) + proxiedPath := getSelfHubProxiedHostAndPath(selfNamespace, selfServiceName) //avoid redirecting several times if !strings.Contains(r.URL.Path, proxiedPath) { - r.URL.Path = fmt.Sprintf("%s%s", getKubesharkHubProxiedHostAndPath(kubesharkNamespace, kubesharkServiceName), r.URL.Path) + r.URL.Path = fmt.Sprintf("%s%s", getSelfHubProxiedHostAndPath(selfNamespace, selfServiceName), r.URL.Path) } proxyHandler.ServeHTTP(w, r) }) } -func getRerouteHttpHandlerKubesharkStatic(proxyHandler http.Handler, kubesharkNamespace string, kubesharkServiceName string) http.Handler { +func getRerouteHttpHandlerSelfStatic(proxyHandler http.Handler, selfNamespace string, selfServiceName string) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - r.URL.Path = strings.Replace(r.URL.Path, "/static/", fmt.Sprintf("%s/static/", getKubesharkHubProxiedHostAndPath(kubesharkNamespace, kubesharkServiceName)), 1) + r.URL.Path = strings.Replace(r.URL.Path, "/static/", fmt.Sprintf("%s/static/", getSelfHubProxiedHostAndPath(selfNamespace, selfServiceName)), 1) proxyHandler.ServeHTTP(w, r) }) } diff --git a/kubernetes/utils.go b/kubernetes/utils.go index b23feb37c..1fe11e7ed 100644 --- a/kubernetes/utils.go +++ b/kubernetes/utils.go @@ -49,16 +49,16 @@ func getMinimizedContainerStatuses(fullPod core.Pod) []core.ContainerStatus { } func excludeSelfPods(pods []core.Pod) []core.Pod { - kubesharkPrefixRegex := regexp.MustCompile("^" + KubesharkResourcesPrefix) + selfPrefixRegex := regexp.MustCompile("^" + 
SelfResourcesPrefix) - nonKubesharkPods := make([]core.Pod, 0) + nonSelfPods := make([]core.Pod, 0) for _, pod := range pods { - if !kubesharkPrefixRegex.MatchString(pod.Name) { - nonKubesharkPods = append(nonKubesharkPods, pod) + if !selfPrefixRegex.MatchString(pod.Name) { + nonSelfPods = append(nonSelfPods, pod) } } - return nonKubesharkPods + return nonSelfPods } func getPodArrayDiff(oldPods []core.Pod, newPods []core.Pod) (added []core.Pod, removed []core.Pod) { diff --git a/kubernetes/workerSyncer.go b/kubernetes/workerSyncer.go index 67b964c77..49ea4c464 100644 --- a/kubernetes/workerSyncer.go +++ b/kubernetes/workerSyncer.go @@ -9,6 +9,7 @@ import ( "github.com/kubeshark/base/pkg/models" "github.com/kubeshark/kubeshark/debounce" "github.com/kubeshark/kubeshark/docker" + "github.com/kubeshark/kubeshark/misc" "github.com/kubeshark/kubeshark/utils" "github.com/rs/zerolog/log" v1 "k8s.io/api/core/v1" @@ -36,15 +37,15 @@ type WorkerSyncer struct { } type WorkerSyncerConfig struct { - TargetNamespaces []string - PodFilterRegex regexp.Regexp - SelfNamespace string - WorkerResources Resources - ImagePullPolicy v1.PullPolicy - KubesharkServiceAccountExists bool - ServiceMesh bool - Tls bool - Debug bool + TargetNamespaces []string + PodFilterRegex regexp.Regexp + SelfNamespace string + WorkerResources Resources + ImagePullPolicy v1.PullPolicy + SelfServiceAccountExists bool + ServiceMesh bool + Tls bool + Debug bool } func CreateAndStartWorkerSyncer(ctx context.Context, kubernetesProvider *Provider, config WorkerSyncerConfig, startTime time.Time) (*WorkerSyncer, error) { @@ -74,8 +75,8 @@ func CreateAndStartWorkerSyncer(ctx context.Context, kubernetesProvider *Provide } func (workerSyncer *WorkerSyncer) watchWorkerPods() { - kubesharkResourceRegex := regexp.MustCompile(fmt.Sprintf("^%s.*", WorkerPodName)) - podWatchHelper := NewPodWatchHelper(workerSyncer.kubernetesProvider, kubesharkResourceRegex) + selfResourceRegex := regexp.MustCompile(fmt.Sprintf("^%s.*", WorkerPodName)) + podWatchHelper := NewPodWatchHelper(workerSyncer.kubernetesProvider, selfResourceRegex) eventChan, errorChan := FilteredWatch(workerSyncer.context, podWatchHelper, []string{workerSyncer.config.SelfNamespace}, podWatchHelper) for { @@ -88,7 +89,7 @@ func (workerSyncer *WorkerSyncer) watchWorkerPods() { pod, err := wEvent.ToPod() if err != nil { - log.Error().Str("pod", WorkerPodName).Err(err).Msg("While parsing Kubeshark resource!") + log.Error().Str("pod", WorkerPodName).Err(err).Msg(fmt.Sprintf("While parsing %s resource!", misc.Software)) continue } @@ -118,8 +119,8 @@ func (workerSyncer *WorkerSyncer) watchWorkerPods() { } func (workerSyncer *WorkerSyncer) watchWorkerEvents() { - kubesharkResourceRegex := regexp.MustCompile(fmt.Sprintf("^%s.*", WorkerPodName)) - eventWatchHelper := NewEventWatchHelper(workerSyncer.kubernetesProvider, kubesharkResourceRegex, "pod") + selfResourceRegex := regexp.MustCompile(fmt.Sprintf("^%s.*", WorkerPodName)) + eventWatchHelper := NewEventWatchHelper(workerSyncer.kubernetesProvider, selfResourceRegex, "pod") eventChan, errorChan := FilteredWatch(workerSyncer.context, eventWatchHelper, []string{workerSyncer.config.SelfNamespace}, eventWatchHelper) for { @@ -341,7 +342,7 @@ func (workerSyncer *WorkerSyncer) updateWorkers() error { if len(workerSyncer.nodeToTargettedPodMap) > 0 { var serviceAccountName string - if workerSyncer.config.KubesharkServiceAccountExists { + if workerSyncer.config.SelfServiceAccountExists { serviceAccountName = ServiceAccountName } else { serviceAccountName =
"" diff --git a/misc/consts.go b/misc/consts.go index 11a7cb7bd..eaf10237c 100644 --- a/misc/consts.go +++ b/misc/consts.go @@ -1,11 +1,15 @@ package misc import ( + "fmt" "os" "path" ) var ( + Software = "Kubeshark" + Program = "kubeshark" + Website = "https://kubeshark.co" Ver = "0.0" Branch = "develop" GitCommitHash = "" // this var is overridden using ldflags in makefile when building @@ -19,5 +23,5 @@ func GetDotFolderPath() string { if homeDirErr != nil { return "" } - return path.Join(home, ".kubeshark") + return path.Join(home, fmt.Sprintf(".%s", Program)) } diff --git a/misc/fsUtils/kubesharkLogsUtils.go b/misc/fsUtils/kubesharkLogsUtils.go index ea252275f..bc0728f1a 100644 --- a/misc/fsUtils/kubesharkLogsUtils.go +++ b/misc/fsUtils/kubesharkLogsUtils.go @@ -9,18 +9,19 @@ import ( "github.com/kubeshark/kubeshark/config" "github.com/kubeshark/kubeshark/kubernetes" + "github.com/kubeshark/kubeshark/misc" "github.com/rs/zerolog/log" ) func DumpLogs(ctx context.Context, provider *kubernetes.Provider, filePath string) error { - podExactRegex := regexp.MustCompile("^" + kubernetes.KubesharkResourcesPrefix) + podExactRegex := regexp.MustCompile("^" + kubernetes.SelfResourcesPrefix) pods, err := provider.ListAllPodsMatchingRegex(ctx, podExactRegex, []string{config.Config.SelfNamespace}) if err != nil { return err } if len(pods) == 0 { - return fmt.Errorf("no kubeshark pods found in namespace %s", config.Config.SelfNamespace) + return fmt.Errorf("no %s pods found in namespace %s", misc.Software, config.Config.SelfNamespace) } newZipFile, err := os.Create(filePath) diff --git a/misc/version/versionCheck.go b/misc/version/versionCheck.go index 33a9a9498..d3b0405e8 100644 --- a/misc/version/versionCheck.go +++ b/misc/version/versionCheck.go @@ -21,7 +21,7 @@ func CheckNewerVersion() { log.Info().Msg("Checking for a newer version...") start := time.Now() client := github.NewClient(nil) - latestRelease, _, err := client.Repositories.GetLatestRelease(context.Background(), "kubeshark", "kubeshark") + latestRelease, _, err := client.Repositories.GetLatestRelease(context.Background(), misc.Program, misc.Program) if err != nil { log.Error().Msg("Failed to get latest release.") return @@ -72,9 +72,9 @@ func CheckNewerVersion() { if greater { var downloadCommand string if runtime.GOOS == "windows" { - downloadCommand = fmt.Sprintf("curl -LO %v/kubeshark.exe", strings.Replace(*latestRelease.HTMLURL, "tag", "download", 1)) + downloadCommand = fmt.Sprintf("curl -LO %v/%s.exe", strings.Replace(*latestRelease.HTMLURL, "tag", "download", 1), misc.Program) } else { - downloadCommand = "sh <(curl -Ls https://kubeshark.co/install)" + downloadCommand = fmt.Sprintf("sh <(curl -Ls %s/install)", misc.Website) } msg := fmt.Sprintf("Update available!
%v -> %v run:", misc.Ver, gitHubVersion) log.Warn().Str("command", downloadCommand).Msg(fmt.Sprintf(utils.Yellow, msg)) diff --git a/resources/cleanResources.go b/resources/cleanResources.go index 19db3cf77..46de640bc 100644 --- a/resources/cleanResources.go +++ b/resources/cleanResources.go @@ -6,20 +6,21 @@ import ( "github.com/kubeshark/kubeshark/errormessage" "github.com/kubeshark/kubeshark/kubernetes" + "github.com/kubeshark/kubeshark/misc" "github.com/kubeshark/kubeshark/utils" "github.com/rs/zerolog/log" "k8s.io/apimachinery/pkg/util/wait" ) -func CleanUpKubesharkResources(ctx context.Context, cancel context.CancelFunc, kubernetesProvider *kubernetes.Provider, isNsRestrictedMode bool, kubesharkResourcesNamespace string) { - log.Warn().Msg("Removing Kubeshark resources...") +func CleanUpSelfResources(ctx context.Context, cancel context.CancelFunc, kubernetesProvider *kubernetes.Provider, isNsRestrictedMode bool, selfResourcesNamespace string) { + log.Warn().Msg(fmt.Sprintf("Removing %s resources...", misc.Software)) var leftoverResources []string if isNsRestrictedMode { - leftoverResources = cleanUpRestrictedMode(ctx, kubernetesProvider, kubesharkResourcesNamespace) + leftoverResources = cleanUpRestrictedMode(ctx, kubernetesProvider, selfResourcesNamespace) } else { - leftoverResources = cleanUpNonRestrictedMode(ctx, cancel, kubernetesProvider, kubesharkResourcesNamespace) + leftoverResources = cleanUpNonRestrictedMode(ctx, cancel, kubernetesProvider, selfResourcesNamespace) } if len(leftoverResources) > 0 { @@ -31,14 +32,14 @@ func CleanUpKubesharkResources(ctx context.Context, cancel context.CancelFunc, k } } -func cleanUpNonRestrictedMode(ctx context.Context, cancel context.CancelFunc, kubernetesProvider *kubernetes.Provider, kubesharkResourcesNamespace string) []string { +func cleanUpNonRestrictedMode(ctx context.Context, cancel context.CancelFunc, kubernetesProvider *kubernetes.Provider, selfResourcesNamespace string) []string { leftoverResources := make([]string, 0) - if err := kubernetesProvider.RemoveNamespace(ctx, kubesharkResourcesNamespace); err != nil { - resourceDesc := fmt.Sprintf("Namespace %s", kubesharkResourcesNamespace) + if err := kubernetesProvider.RemoveNamespace(ctx, selfResourcesNamespace); err != nil { + resourceDesc := fmt.Sprintf("Namespace %s", selfResourcesNamespace) handleDeletionError(err, resourceDesc, &leftoverResources) } else { - defer waitUntilNamespaceDeleted(ctx, cancel, kubernetesProvider, kubesharkResourcesNamespace) + defer waitUntilNamespaceDeleted(ctx, cancel, kubernetesProvider, selfResourcesNamespace) } if resources, err := kubernetesProvider.ListManagedClusterRoles(ctx); err != nil { @@ -68,98 +69,98 @@ func cleanUpNonRestrictedMode(ctx context.Context, cancel context.CancelFunc, ku return leftoverResources } -func waitUntilNamespaceDeleted(ctx context.Context, cancel context.CancelFunc, kubernetesProvider *kubernetes.Provider, kubesharkResourcesNamespace string) { +func waitUntilNamespaceDeleted(ctx context.Context, cancel context.CancelFunc, kubernetesProvider *kubernetes.Provider, selfResourcesNamespace string) { // Call cancel if a terminating signal was received. Allows user to skip the wait. go func() { utils.WaitForTermination(ctx, cancel) }() - if err := kubernetesProvider.WaitUtilNamespaceDeleted(ctx, kubesharkResourcesNamespace); err != nil { + if err := kubernetesProvider.WaitUtilNamespaceDeleted(ctx, selfResourcesNamespace); err != nil { switch { case ctx.Err() == context.Canceled: log.Printf("Do nothing. 
User interrupted the wait") log.Warn(). - Str("namespace", kubesharkResourcesNamespace). + Str("namespace", selfResourcesNamespace). Msg("Did nothing. User interrupted the wait.") case err == wait.ErrWaitTimeout: log.Warn(). - Str("namespace", kubesharkResourcesNamespace). + Str("namespace", selfResourcesNamespace). Msg("Timed out while deleting the namespace.") default: log.Warn(). Err(errormessage.FormatError(err)). - Str("namespace", kubesharkResourcesNamespace). + Str("namespace", selfResourcesNamespace). Msg("Unknown error while deleting the namespace.") } } } -func cleanUpRestrictedMode(ctx context.Context, kubernetesProvider *kubernetes.Provider, kubesharkResourcesNamespace string) []string { +func cleanUpRestrictedMode(ctx context.Context, kubernetesProvider *kubernetes.Provider, selfResourcesNamespace string) []string { leftoverResources := make([]string, 0) - if err := kubernetesProvider.RemoveService(ctx, kubesharkResourcesNamespace, kubernetes.FrontServiceName); err != nil { - resourceDesc := fmt.Sprintf("Service %s in namespace %s", kubernetes.FrontServiceName, kubesharkResourcesNamespace) + if err := kubernetesProvider.RemoveService(ctx, selfResourcesNamespace, kubernetes.FrontServiceName); err != nil { + resourceDesc := fmt.Sprintf("Service %s in namespace %s", kubernetes.FrontServiceName, selfResourcesNamespace) handleDeletionError(err, resourceDesc, &leftoverResources) } - if err := kubernetesProvider.RemoveService(ctx, kubesharkResourcesNamespace, kubernetes.HubServiceName); err != nil { - resourceDesc := fmt.Sprintf("Service %s in namespace %s", kubernetes.HubServiceName, kubesharkResourcesNamespace) + if err := kubernetesProvider.RemoveService(ctx, selfResourcesNamespace, kubernetes.HubServiceName); err != nil { + resourceDesc := fmt.Sprintf("Service %s in namespace %s", kubernetes.HubServiceName, selfResourcesNamespace) handleDeletionError(err, resourceDesc, &leftoverResources) } - if err := kubernetesProvider.RemoveDaemonSet(ctx, kubesharkResourcesNamespace, kubernetes.WorkerDaemonSetName); err != nil { - resourceDesc := fmt.Sprintf("DaemonSet %s in namespace %s", kubernetes.WorkerDaemonSetName, kubesharkResourcesNamespace) + if err := kubernetesProvider.RemoveDaemonSet(ctx, selfResourcesNamespace, kubernetes.WorkerDaemonSetName); err != nil { + resourceDesc := fmt.Sprintf("DaemonSet %s in namespace %s", kubernetes.WorkerDaemonSetName, selfResourcesNamespace) handleDeletionError(err, resourceDesc, &leftoverResources) } - if err := kubernetesProvider.RemoveConfigMap(ctx, kubesharkResourcesNamespace, kubernetes.ConfigMapName); err != nil { - resourceDesc := fmt.Sprintf("ConfigMap %s in namespace %s", kubernetes.ConfigMapName, kubesharkResourcesNamespace) + if err := kubernetesProvider.RemoveConfigMap(ctx, selfResourcesNamespace, kubernetes.ConfigMapName); err != nil { + resourceDesc := fmt.Sprintf("ConfigMap %s in namespace %s", kubernetes.ConfigMapName, selfResourcesNamespace) handleDeletionError(err, resourceDesc, &leftoverResources) } - if resources, err := kubernetesProvider.ListManagedServiceAccounts(ctx, kubesharkResourcesNamespace); err != nil { - resourceDesc := fmt.Sprintf("ServiceAccounts in namespace %s", kubesharkResourcesNamespace) + if resources, err := kubernetesProvider.ListManagedServiceAccounts(ctx, selfResourcesNamespace); err != nil { + resourceDesc := fmt.Sprintf("ServiceAccounts in namespace %s", selfResourcesNamespace) handleDeletionError(err, resourceDesc, &leftoverResources) } else { for _, resource := range resources.Items { - if err := 
kubernetesProvider.RemoveServiceAccount(ctx, kubesharkResourcesNamespace, resource.Name); err != nil { - resourceDesc := fmt.Sprintf("ServiceAccount %s in namespace %s", resource.Name, kubesharkResourcesNamespace) + if err := kubernetesProvider.RemoveServiceAccount(ctx, selfResourcesNamespace, resource.Name); err != nil { + resourceDesc := fmt.Sprintf("ServiceAccount %s in namespace %s", resource.Name, selfResourcesNamespace) handleDeletionError(err, resourceDesc, &leftoverResources) } } } - if resources, err := kubernetesProvider.ListManagedRoles(ctx, kubesharkResourcesNamespace); err != nil { - resourceDesc := fmt.Sprintf("Roles in namespace %s", kubesharkResourcesNamespace) + if resources, err := kubernetesProvider.ListManagedRoles(ctx, selfResourcesNamespace); err != nil { + resourceDesc := fmt.Sprintf("Roles in namespace %s", selfResourcesNamespace) handleDeletionError(err, resourceDesc, &leftoverResources) } else { for _, resource := range resources.Items { - if err := kubernetesProvider.RemoveRole(ctx, kubesharkResourcesNamespace, resource.Name); err != nil { - resourceDesc := fmt.Sprintf("Role %s in namespace %s", resource.Name, kubesharkResourcesNamespace) + if err := kubernetesProvider.RemoveRole(ctx, selfResourcesNamespace, resource.Name); err != nil { + resourceDesc := fmt.Sprintf("Role %s in namespace %s", resource.Name, selfResourcesNamespace) handleDeletionError(err, resourceDesc, &leftoverResources) } } } - if resources, err := kubernetesProvider.ListManagedRoleBindings(ctx, kubesharkResourcesNamespace); err != nil { - resourceDesc := fmt.Sprintf("RoleBindings in namespace %s", kubesharkResourcesNamespace) + if resources, err := kubernetesProvider.ListManagedRoleBindings(ctx, selfResourcesNamespace); err != nil { + resourceDesc := fmt.Sprintf("RoleBindings in namespace %s", selfResourcesNamespace) handleDeletionError(err, resourceDesc, &leftoverResources) } else { for _, resource := range resources.Items { - if err := kubernetesProvider.RemoveRoleBinding(ctx, kubesharkResourcesNamespace, resource.Name); err != nil { - resourceDesc := fmt.Sprintf("RoleBinding %s in namespace %s", resource.Name, kubesharkResourcesNamespace) + if err := kubernetesProvider.RemoveRoleBinding(ctx, selfResourcesNamespace, resource.Name); err != nil { + resourceDesc := fmt.Sprintf("RoleBinding %s in namespace %s", resource.Name, selfResourcesNamespace) handleDeletionError(err, resourceDesc, &leftoverResources) } } } - if err := kubernetesProvider.RemovePod(ctx, kubesharkResourcesNamespace, kubernetes.HubPodName); err != nil { - resourceDesc := fmt.Sprintf("Pod %s in namespace %s", kubernetes.HubPodName, kubesharkResourcesNamespace) + if err := kubernetesProvider.RemovePod(ctx, selfResourcesNamespace, kubernetes.HubPodName); err != nil { + resourceDesc := fmt.Sprintf("Pod %s in namespace %s", kubernetes.HubPodName, selfResourcesNamespace) handleDeletionError(err, resourceDesc, &leftoverResources) } - if err := kubernetesProvider.RemovePod(ctx, kubesharkResourcesNamespace, kubernetes.FrontPodName); err != nil { - resourceDesc := fmt.Sprintf("Pod %s in namespace %s", kubernetes.FrontPodName, kubesharkResourcesNamespace) + if err := kubernetesProvider.RemovePod(ctx, selfResourcesNamespace, kubernetes.FrontPodName); err != nil { + resourceDesc := fmt.Sprintf("Pod %s in namespace %s", kubernetes.FrontPodName, selfResourcesNamespace) handleDeletionError(err, resourceDesc, &leftoverResources) } diff --git a/resources/createResources.go b/resources/createResources.go index 35d6fcab6..11925e57b 100644 --- 
a/resources/createResources.go +++ b/resources/createResources.go @@ -2,6 +2,7 @@ package resources import ( "context" + "fmt" "github.com/kubeshark/kubeshark/config" "github.com/kubeshark/kubeshark/docker" @@ -19,13 +20,13 @@ func CreateHubResources(ctx context.Context, kubernetesProvider *kubernetes.Prov } } - kubesharkServiceAccountExists, err := createRBACIfNecessary(ctx, kubernetesProvider, isNsRestrictedMode, selfNamespace, []string{"pods", "services", "endpoints"}) + selfServiceAccountExists, err := createRBACIfNecessary(ctx, kubernetesProvider, isNsRestrictedMode, selfNamespace, []string{"pods", "services", "endpoints"}) if err != nil { - log.Warn().Err(errormessage.FormatError(err)).Msg("Failed to ensure the resources required for IP resolving. Kubeshark will not resolve target IPs to names.") + log.Warn().Err(errormessage.FormatError(err)).Msg(fmt.Sprintf("Failed to ensure the resources required for IP resolving. %s will not resolve target IPs to names.", misc.Software)) } var serviceAccountName string - if kubesharkServiceAccountExists { + if selfServiceAccountExists { serviceAccountName = kubernetes.ServiceAccountName } else { serviceAccountName = "" @@ -51,30 +52,30 @@ func CreateHubResources(ctx context.Context, kubernetesProvider *kubernetes.Prov Debug: debug, } - if err := createKubesharkHubPod(ctx, kubernetesProvider, opts); err != nil { - return kubesharkServiceAccountExists, err + if err := createSelfHubPod(ctx, kubernetesProvider, opts); err != nil { + return selfServiceAccountExists, err } if err := createFrontPod(ctx, kubernetesProvider, frontOpts); err != nil { - return kubesharkServiceAccountExists, err + return selfServiceAccountExists, err } // TODO: Why the port values need to be 80? _, err = kubernetesProvider.CreateService(ctx, selfNamespace, kubernetes.HubServiceName, kubernetes.HubServiceName, 80, 80) if err != nil { - return kubesharkServiceAccountExists, err + return selfServiceAccountExists, err } log.Info().Str("service", kubernetes.HubServiceName).Msg("Successfully created a service.") _, err = kubernetesProvider.CreateService(ctx, selfNamespace, kubernetes.FrontServiceName, kubernetes.FrontServiceName, 80, int32(config.Config.Tap.Proxy.Front.DstPort)) if err != nil { - return kubesharkServiceAccountExists, err + return selfServiceAccountExists, err } log.Info().Str("service", kubernetes.FrontServiceName).Msg("Successfully created a service.") - return kubesharkServiceAccountExists, nil + return selfServiceAccountExists, nil } func createSelfNamespace(ctx context.Context, kubernetesProvider *kubernetes.Provider, selfNamespace string) error { @@ -84,11 +85,11 @@ func createSelfNamespace(ctx context.Context, kubernetesProvider *kubernetes.Pro func createRBACIfNecessary(ctx context.Context, kubernetesProvider *kubernetes.Provider, isNsRestrictedMode bool, selfNamespace string, resources []string) (bool, error) { if !isNsRestrictedMode { - if err := kubernetesProvider.CreateKubesharkRBAC(ctx, selfNamespace, kubernetes.ServiceAccountName, kubernetes.ClusterRoleName, kubernetes.ClusterRoleBindingName, misc.RBACVersion, resources); err != nil { + if err := kubernetesProvider.CreateSelfRBAC(ctx, selfNamespace, kubernetes.ServiceAccountName, kubernetes.ClusterRoleName, kubernetes.ClusterRoleBindingName, misc.RBACVersion, resources); err != nil { return false, err } } else { - if err := kubernetesProvider.CreateKubesharkRBACNamespaceRestricted(ctx, selfNamespace, kubernetes.ServiceAccountName, kubernetes.RoleName, kubernetes.RoleBindingName, misc.RBACVersion); err 
!= nil { + if err := kubernetesProvider.CreateSelfRBACNamespaceRestricted(ctx, selfNamespace, kubernetes.ServiceAccountName, kubernetes.RoleName, kubernetes.RoleBindingName, misc.RBACVersion); err != nil { return false, err } } @@ -96,7 +97,7 @@ func createRBACIfNecessary(ctx context.Context, kubernetesProvider *kubernetes.P return true, nil } -func createKubesharkHubPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, opts *kubernetes.PodOptions) error { +func createSelfHubPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, opts *kubernetes.PodOptions) error { pod, err := kubernetesProvider.BuildHubPod(opts) if err != nil { return err
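As context for the proxy renames in kubernetes/proxy.go above: the path the CLI builds is the standard Kubernetes apiserver service-proxy route. A standalone sketch of that helper (the function body is taken from the patch; the surrounding listener and mux plumbing are omitted):

package main

import "fmt"

// The apiserver forwards "/api/v1/namespaces/<ns>/services/<name>:<port>/proxy"
// to the in-cluster service; the CLI prepends this prefix to request paths so
// the local proxy reaches the front and hub services.
const selfServicePort = 80

func getSelfHubProxiedHostAndPath(selfNamespace string, selfServiceName string) string {
	return fmt.Sprintf("/api/v1/namespaces/%s/services/%s:%d/proxy", selfNamespace, selfServiceName, selfServicePort)
}

func main() {
	fmt.Println(getSelfHubProxiedHostAndPath("kubeshark", "kubeshark-front"))
	// Output: /api/v1/namespaces/kubeshark/services/kubeshark-front:80/proxy
}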