diff --git a/cmd/check/kubernetesPermissions.go b/cmd/check/kubernetesPermissions.go
index 4a85f41f8..c4052b6ee 100644
--- a/cmd/check/kubernetesPermissions.go
+++ b/cmd/check/kubernetesPermissions.go
@@ -41,7 +41,7 @@ func TapKubernetesPermissions(ctx context.Context, embedFS embed.FS, kubernetesP
 	switch resource := obj.(type) {
 	case *rbac.Role:
-		return checkRulesPermissions(ctx, kubernetesProvider, resource.Rules, config.Config.KubesharkResourcesNamespace)
+		return checkRulesPermissions(ctx, kubernetesProvider, resource.Rules, config.Config.ResourcesNamespace)
 	case *rbac.ClusterRole:
 		return checkRulesPermissions(ctx, kubernetesProvider, resource.Rules, "")
 	}
diff --git a/cmd/check/kubernetesResources.go b/cmd/check/kubernetesResources.go
index e02d81dad..eb51d3051 100644
--- a/cmd/check/kubernetesResources.go
+++ b/cmd/check/kubernetesResources.go
@@ -13,20 +13,20 @@ import (
 func KubernetesResources(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool {
 	log.Printf("\nk8s-components\n--------------------")
-	exist, err := kubernetesProvider.DoesNamespaceExist(ctx, config.Config.KubesharkResourcesNamespace)
-	allResourcesExist := checkResourceExist(config.Config.KubesharkResourcesNamespace, "namespace", exist, err)
+	exist, err := kubernetesProvider.DoesNamespaceExist(ctx, config.Config.ResourcesNamespace)
+	allResourcesExist := checkResourceExist(config.Config.ResourcesNamespace, "namespace", exist, err)
-	exist, err = kubernetesProvider.DoesConfigMapExist(ctx, config.Config.KubesharkResourcesNamespace, kubernetes.ConfigMapName)
+	exist, err = kubernetesProvider.DoesConfigMapExist(ctx, config.Config.ResourcesNamespace, kubernetes.ConfigMapName)
 	allResourcesExist = checkResourceExist(kubernetes.ConfigMapName, "config map", exist, err) && allResourcesExist
-	exist, err = kubernetesProvider.DoesServiceAccountExist(ctx, config.Config.KubesharkResourcesNamespace, kubernetes.ServiceAccountName)
+	exist, err = kubernetesProvider.DoesServiceAccountExist(ctx, config.Config.ResourcesNamespace, kubernetes.ServiceAccountName)
 	allResourcesExist = checkResourceExist(kubernetes.ServiceAccountName, "service account", exist, err) && allResourcesExist
 	if config.Config.IsNsRestrictedMode() {
-		exist, err = kubernetesProvider.DoesRoleExist(ctx, config.Config.KubesharkResourcesNamespace, kubernetes.RoleName)
+		exist, err = kubernetesProvider.DoesRoleExist(ctx, config.Config.ResourcesNamespace, kubernetes.RoleName)
 		allResourcesExist = checkResourceExist(kubernetes.RoleName, "role", exist, err) && allResourcesExist
-		exist, err = kubernetesProvider.DoesRoleBindingExist(ctx, config.Config.KubesharkResourcesNamespace, kubernetes.RoleBindingName)
+		exist, err = kubernetesProvider.DoesRoleBindingExist(ctx, config.Config.ResourcesNamespace, kubernetes.RoleBindingName)
 		allResourcesExist = checkResourceExist(kubernetes.RoleBindingName, "role binding", exist, err) && allResourcesExist
 	} else {
 		exist, err = kubernetesProvider.DoesClusterRoleExist(ctx, kubernetes.ClusterRoleName)
@@ -36,7 +36,7 @@ func KubernetesResources(ctx context.Context, kubernetesProvider *kubernetes.Pro
 		allResourcesExist = checkResourceExist(kubernetes.ClusterRoleBindingName, "cluster role binding", exist, err) && allResourcesExist
 	}
-	exist, err = kubernetesProvider.DoesServiceExist(ctx, config.Config.KubesharkResourcesNamespace, kubernetes.HubServiceName)
+	exist, err = kubernetesProvider.DoesServiceExist(ctx, config.Config.ResourcesNamespace, kubernetes.HubServiceName)
 	allResourcesExist = checkResourceExist(kubernetes.HubServiceName, "service", exist, err) && allResourcesExist
 	allResourcesExist = checkPodResourcesExist(ctx, kubernetesProvider) && allResourcesExist
@@ -45,7 +45,7 @@ func KubernetesResources(ctx context.Context, kubernetesProvider *kubernetes.Pro
 }
 func checkPodResourcesExist(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool {
-	if pods, err := kubernetesProvider.ListPodsByAppLabel(ctx, config.Config.KubesharkResourcesNamespace, kubernetes.HubPodName); err != nil {
+	if pods, err := kubernetesProvider.ListPodsByAppLabel(ctx, config.Config.ResourcesNamespace, kubernetes.HubPodName); err != nil {
 		log.Printf("%v error checking if '%v' pod is running, err: %v", fmt.Sprintf(utils.Red, "✗"), kubernetes.HubPodName, err)
 		return false
 	} else if len(pods) == 0 {
@@ -58,7 +58,7 @@ func checkPodResourcesExist(ctx context.Context, kubernetesProvider *kubernetes.
 	log.Printf("%v '%v' pod running", fmt.Sprintf(utils.Green, "√"), kubernetes.HubPodName)
-	if pods, err := kubernetesProvider.ListPodsByAppLabel(ctx, config.Config.KubesharkResourcesNamespace, kubernetes.TapperPodName); err != nil {
+	if pods, err := kubernetesProvider.ListPodsByAppLabel(ctx, config.Config.ResourcesNamespace, kubernetes.TapperPodName); err != nil {
 		log.Printf("%v error checking if '%v' pods are running, err: %v", fmt.Sprintf(utils.Red, "✗"), kubernetes.TapperPodName, err)
 		return false
 	} else {
diff --git a/cmd/check/serverConnection.go b/cmd/check/serverConnection.go
index 3155619cb..3ac9182e3 100644
--- a/cmd/check/serverConnection.go
+++ b/cmd/check/serverConnection.go
@@ -46,7 +46,7 @@ func checkProxy(serverUrl string, kubernetesProvider *kubernetes.Provider) error
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
-	httpServer, err := kubernetes.StartProxy(kubernetesProvider, config.Config.Tap.ProxyHost, config.Config.Hub.PortForward.SrcPort, config.Config.Hub.PortForward.DstPort, config.Config.KubesharkResourcesNamespace, kubernetes.HubServiceName, cancel)
+	httpServer, err := kubernetes.StartProxy(kubernetesProvider, config.Config.Tap.ProxyHost, config.Config.Hub.PortForward.SrcPort, config.Config.Hub.PortForward.DstPort, config.Config.ResourcesNamespace, kubernetes.HubServiceName, cancel)
 	if err != nil {
 		return err
 	}
@@ -68,7 +68,7 @@ func checkPortForward(serverUrl string, kubernetesProvider *kubernetes.Provider)
 	defer cancel()
 	podRegex, _ := regexp.Compile(kubernetes.HubPodName)
-	forwarder, err := kubernetes.NewPortForward(kubernetesProvider, config.Config.KubesharkResourcesNamespace, podRegex, config.Config.Tap.GuiPort, config.Config.Tap.GuiPort, ctx, cancel)
+	forwarder, err := kubernetes.NewPortForward(kubernetesProvider, config.Config.ResourcesNamespace, podRegex, config.Config.Tap.GuiPort, config.Config.Tap.GuiPort, ctx, cancel)
 	if err != nil {
 		return err
 	}
diff --git a/cmd/cleanRunner.go b/cmd/cleanRunner.go
index 31e24fe7b..f86067fe2 100644
--- a/cmd/cleanRunner.go
+++ b/cmd/cleanRunner.go
@@ -10,5 +10,5 @@ func performCleanCommand() {
 		return
 	}
-	finishKubesharkExecution(kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.KubesharkResourcesNamespace)
+	finishKubesharkExecution(kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.ResourcesNamespace)
 }
diff --git a/cmd/common.go b/cmd/common.go
index 0e024dd9f..9ce0a2faf 100644
--- a/cmd/common.go
+++ b/cmd/common.go
@@ -24,7 +24,7 @@ import (
 )
 func startProxyReportErrorIfAny(kubernetesProvider *kubernetes.Provider, ctx context.Context, cancel context.CancelFunc, serviceName string, srcPort uint16, dstPort uint16, healthCheck string) {
-	httpServer, err := kubernetes.StartProxy(kubernetesProvider, config.Config.Tap.ProxyHost, srcPort, dstPort, config.Config.KubesharkResourcesNamespace, serviceName, cancel)
+	httpServer, err := kubernetes.StartProxy(kubernetesProvider, config.Config.Tap.ProxyHost, srcPort, dstPort, config.Config.ResourcesNamespace, serviceName, cancel)
 	if err != nil {
 		log.Printf(utils.Error, fmt.Sprintf("Error occured while running k8s proxy %v\n"+
 			"Try setting different port by using --%s", errormessage.FormatError(err), configStructs.GuiPortTapName))
@@ -40,7 +40,7 @@ func startProxyReportErrorIfAny(kubernetesProvider *kubernetes.Provider, ctx con
 	}
 	podRegex, _ := regexp.Compile(kubernetes.HubPodName)
-	if _, err := kubernetes.NewPortForward(kubernetesProvider, config.Config.KubesharkResourcesNamespace, podRegex, srcPort, dstPort, ctx, cancel); err != nil {
+	if _, err := kubernetes.NewPortForward(kubernetesProvider, config.Config.ResourcesNamespace, podRegex, srcPort, dstPort, ctx, cancel); err != nil {
 		log.Printf(utils.Error, fmt.Sprintf("Error occured while running port forward [%s] %v\n"+
 			"Try setting different port by using --%s", podRegex, errormessage.FormatError(err), configStructs.GuiPortTapName))
 		cancel()
@@ -109,8 +109,8 @@ func dumpLogsIfNeeded(ctx context.Context, kubernetesProvider *kubernetes.Provid
 	}
 }
-func getSerializedKubesharkAgentConfig(kubesharkAgentConfig *models.Config) (string, error) {
-	serializedConfig, err := json.Marshal(kubesharkAgentConfig)
+func getSerializedTapConfig(conf *models.Config) (string, error) {
+	serializedConfig, err := json.Marshal(conf)
 	if err != nil {
 		return "", err
 	}
diff --git a/cmd/tapRunner.go b/cmd/tapRunner.go
index 8a11a0099..ee117f45e 100644
--- a/cmd/tapRunner.go
+++ b/cmd/tapRunner.go
@@ -55,17 +55,17 @@ func RunKubesharkTap() {
 	state.targetNamespaces = getNamespaces(kubernetesProvider)
-	kubesharkAgentConfig := getTapKubesharkAgentConfig()
-	serializedKubesharkConfig, err := getSerializedKubesharkAgentConfig(kubesharkAgentConfig)
+	conf := getTapConfig()
+	serializedKubesharkConfig, err := getSerializedTapConfig(conf)
 	if err != nil {
 		log.Printf(utils.Error, fmt.Sprintf("Error serializing kubeshark config: %v", errormessage.FormatError(err)))
 		return
 	}
 	if config.Config.IsNsRestrictedMode() {
-		if len(state.targetNamespaces) != 1 || !utils.Contains(state.targetNamespaces, config.Config.KubesharkResourcesNamespace) {
+		if len(state.targetNamespaces) != 1 || !utils.Contains(state.targetNamespaces, config.Config.ResourcesNamespace) {
 			log.Printf("Not supported mode. Kubeshark can't resolve IPs in other namespaces when running in namespace restricted mode.\n"+
-				"You can use the same namespace for --%s and --%s", configStructs.NamespacesTapName, config.KubesharkResourcesNamespaceConfigName)
+				"You can use the same namespace for --%s and --%s", configStructs.NamespacesTapName, config.ResourcesNamespaceConfigName)
 			return
 		}
 	}
@@ -87,13 +87,13 @@ func RunKubesharkTap() {
 		return
 	}
-	log.Printf("Waiting for Kubeshark Agent to start...")
-	if state.kubesharkServiceAccountExists, err = resources.CreateTapKubesharkResources(ctx, kubernetesProvider, serializedKubesharkConfig, config.Config.IsNsRestrictedMode(), config.Config.KubesharkResourcesNamespace, config.Config.AgentImage, config.Config.Tap.MaxEntriesDBSizeBytes(), config.Config.Tap.HubResources, config.Config.ImagePullPolicy(), config.Config.LogLevel(), config.Config.Tap.Profiler); err != nil {
+	log.Printf("Waiting for Kubeshark deployment to finish...")
+	if state.kubesharkServiceAccountExists, err = resources.CreateTapKubesharkResources(ctx, kubernetesProvider, serializedKubesharkConfig, config.Config.IsNsRestrictedMode(), config.Config.ResourcesNamespace, config.Config.Tap.MaxEntriesDBSizeBytes(), config.Config.Tap.HubResources, config.Config.ImagePullPolicy(), config.Config.LogLevel(), config.Config.Tap.Profiler); err != nil {
 		var statusError *k8serrors.StatusError
 		if errors.As(err, &statusError) && (statusError.ErrStatus.Reason == metav1.StatusReasonAlreadyExists) {
 			log.Print("Kubeshark is already running in this namespace, change the `kubeshark-resources-namespace` configuration or run `kubeshark clean` to remove the currently running Kubeshark instance")
 		} else {
-			defer resources.CleanUpKubesharkResources(ctx, cancel, kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.KubesharkResourcesNamespace)
+			defer resources.CleanUpKubesharkResources(ctx, cancel, kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.ResourcesNamespace)
 			log.Printf(utils.Error, fmt.Sprintf("Error creating resources: %v", errormessage.FormatError(err)))
 		}
@@ -111,24 +111,23 @@ func RunKubesharkTap() {
 }
 func finishTapExecution(kubernetesProvider *kubernetes.Provider) {
-	finishKubesharkExecution(kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.KubesharkResourcesNamespace)
+	finishKubesharkExecution(kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.ResourcesNamespace)
 }
-func getTapKubesharkAgentConfig() *models.Config {
-	kubesharkAgentConfig := models.Config{
+func getTapConfig() *models.Config {
+	conf := models.Config{
 		MaxDBSizeBytes: config.Config.Tap.MaxEntriesDBSizeBytes(),
 		InsertionFilter: config.Config.Tap.GetInsertionFilter(),
-		AgentImage: config.Config.AgentImage,
 		PullPolicy: config.Config.ImagePullPolicyStr,
 		LogLevel: config.Config.LogLevel(),
 		TapperResources: config.Config.Tap.TapperResources,
-		KubesharkResourcesNamespace: config.Config.KubesharkResourcesNamespace,
+		KubesharkResourcesNamespace: config.Config.ResourcesNamespace,
 		AgentDatabasePath: models.DataDirPath,
 		ServiceMap: config.Config.ServiceMap,
 		OAS: config.Config.OAS,
 	}
-	return &kubesharkAgentConfig
+	return &conf
 }
 /*
@@ -154,8 +153,7 @@ func startTapperSyncer(ctx context.Context, cancel context.CancelFunc, provider
 	tapperSyncer, err := kubernetes.CreateAndStartKubesharkTapperSyncer(ctx, provider, kubernetes.TapperSyncerConfig{
 		TargetNamespaces: targetNamespaces,
 		PodFilterRegex: *config.Config.Tap.PodRegex(),
-		KubesharkResourcesNamespace: config.Config.KubesharkResourcesNamespace,
-		AgentImage: config.Config.AgentImage,
+		KubesharkResourcesNamespace: config.Config.ResourcesNamespace,
 		TapperResources: config.Config.Tap.TapperResources,
 		ImagePullPolicy: config.Config.ImagePullPolicy(),
 		LogLevel: config.Config.LogLevel(),
@@ -232,7 +230,7 @@ func getErrorDisplayTextForK8sTapManagerError(err kubernetes.K8sTapManagerError)
 func watchHubPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc) {
 	podExactRegex := regexp.MustCompile(fmt.Sprintf("^%s$", kubernetes.HubPodName))
 	podWatchHelper := kubernetes.NewPodWatchHelper(kubernetesProvider, podExactRegex)
-	eventChan, errorChan := kubernetes.FilteredWatch(ctx, podWatchHelper, []string{config.Config.KubesharkResourcesNamespace}, podWatchHelper)
+	eventChan, errorChan := kubernetes.FilteredWatch(ctx, podWatchHelper, []string{config.Config.ResourcesNamespace}, podWatchHelper)
 	isPodReady := false
 	hubTimeoutSec := config.GetIntEnvConfig(config.HubTimeoutSec, 120)
@@ -283,7 +281,7 @@ func watchHubPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, c
 				continue
 			}
-			log.Printf("[ERROR] Agent creation, watching %v namespace, error: %v", config.Config.KubesharkResourcesNamespace, err)
+			log.Printf("[ERROR] Hub pod creation, watching %v namespace, error: %v", config.Config.ResourcesNamespace, err)
 			cancel()
 		case <-timeAfter:
@@ -301,7 +299,7 @@ func watchHubPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, c
 func watchFrontPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc) {
 	podExactRegex := regexp.MustCompile(fmt.Sprintf("^%s$", kubernetes.FrontPodName))
 	podWatchHelper := kubernetes.NewPodWatchHelper(kubernetesProvider, podExactRegex)
-	eventChan, errorChan := kubernetes.FilteredWatch(ctx, podWatchHelper, []string{config.Config.KubesharkResourcesNamespace}, podWatchHelper)
+	eventChan, errorChan := kubernetes.FilteredWatch(ctx, podWatchHelper, []string{config.Config.ResourcesNamespace}, podWatchHelper)
 	isPodReady := false
 	hubTimeoutSec := config.GetIntEnvConfig(config.HubTimeoutSec, 120)
@@ -351,7 +349,7 @@ func watchFrontPod(ctx context.Context, kubernetesProvider *kubernetes.Provider,
 				continue
 			}
-			log.Printf("[ERROR] Agent creation, watching %v namespace, error: %v", config.Config.KubesharkResourcesNamespace, err)
+			log.Printf("[ERROR] Front pod creation, watching %v namespace, error: %v", config.Config.ResourcesNamespace, err)
 			cancel()
 		case <-timeAfter:
@@ -360,7 +358,7 @@ func watchFrontPod(ctx context.Context, kubernetesProvider *kubernetes.Provider,
 			cancel()
 		case <-ctx.Done():
-			log.Printf("Watching Hub pod loop, ctx done")
+			log.Printf("Watching Front pod loop, ctx done")
 			return
 		}
 	}
@@ -369,7 +367,7 @@ func watchFrontPod(ctx context.Context, kubernetesProvider *kubernetes.Provider,
 func watchHubEvents(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc) {
 	podExactRegex := regexp.MustCompile(fmt.Sprintf("^%s", kubernetes.HubPodName))
 	eventWatchHelper := kubernetes.NewEventWatchHelper(kubernetesProvider, podExactRegex, "pod")
-	eventChan, errorChan := kubernetes.FilteredWatch(ctx, eventWatchHelper, []string{config.Config.KubesharkResourcesNamespace}, eventWatchHelper)
+	eventChan, errorChan := kubernetes.FilteredWatch(ctx, eventWatchHelper, []string{config.Config.ResourcesNamespace}, eventWatchHelper)
 	for {
 		select {
 		case wEvent, ok := <-eventChan:
diff --git a/cmd/viewRunner.go b/cmd/viewRunner.go
index 2465bf607..12645ffcd 100644
--- a/cmd/viewRunner.go
+++ b/cmd/viewRunner.go
@@ -25,7 +25,7 @@ func runKubesharkView() {
 	url := config.Config.View.Url
 	if url == "" {
-		exists, err := kubernetesProvider.DoesServiceExist(ctx, config.Config.KubesharkResourcesNamespace, kubernetes.HubServiceName)
+		exists, err := kubernetesProvider.DoesServiceExist(ctx, config.Config.ResourcesNamespace, kubernetes.HubServiceName)
 		if err != nil {
 			log.Printf("Failed to found kubeshark service %v", err)
 			cancel()
diff --git a/config/configStruct.go b/config/configStruct.go
index 2d6fa3bac..3bb3458d7 100644
--- a/config/configStruct.go
+++ b/config/configStruct.go
@@ -15,9 +15,9 @@ import (
 )
 const (
-	KubesharkResourcesNamespaceConfigName = "kubeshark-resources-namespace"
-	ConfigFilePathCommandName             = "config-path"
-	KubeConfigPathConfigName              = "kube-config-path"
+	ResourcesNamespaceConfigName = "resources-namespace"
+	ConfigFilePathCommandName    = "config-path"
+	KubeConfigPathConfigName     = "kube-config-path"
 )
 type PortForward struct {
@@ -54,26 +54,25 @@ func CreateDefaultConfig() ConfigStruct {
 }
 type ConfigStruct struct {
-	Hub HubConfig `yaml:"hub"`
-	Front FrontConfig `yaml:"front"`
-	Tap configStructs.TapConfig `yaml:"tap"`
-	Check configStructs.CheckConfig `yaml:"check"`
-	Install configStructs.InstallConfig `yaml:"install"`
-	Version configStructs.VersionConfig `yaml:"version"`
-	View configStructs.ViewConfig `yaml:"view"`
-	Logs configStructs.LogsConfig `yaml:"logs"`
-	Config configStructs.ConfigConfig `yaml:"config,omitempty"`
-	AgentImage string `yaml:"agent-image,omitempty" readonly:""`
-	ImagePullPolicyStr string `yaml:"image-pull-policy" default:"Always"`
-	KubesharkResourcesNamespace string `yaml:"kubeshark-resources-namespace" default:"kubeshark"`
-	DumpLogs bool `yaml:"dump-logs" default:"false"`
-	KubeConfigPathStr string `yaml:"kube-config-path"`
-	KubeContext string `yaml:"kube-context"`
-	ConfigFilePath string `yaml:"config-path,omitempty" readonly:""`
-	HeadlessMode bool `yaml:"headless" default:"false"`
-	LogLevelStr string `yaml:"log-level,omitempty" default:"INFO" readonly:""`
-	ServiceMap bool `yaml:"service-map" default:"true"`
-	OAS models.OASConfig `yaml:"oas"`
+	Hub HubConfig `yaml:"hub"`
+	Front FrontConfig `yaml:"front"`
+	Tap configStructs.TapConfig `yaml:"tap"`
+	Check configStructs.CheckConfig `yaml:"check"`
+	Install configStructs.InstallConfig `yaml:"install"`
+	Version configStructs.VersionConfig `yaml:"version"`
+	View configStructs.ViewConfig `yaml:"view"`
+	Logs configStructs.LogsConfig `yaml:"logs"`
+	Config configStructs.ConfigConfig `yaml:"config,omitempty"`
+	ImagePullPolicyStr string `yaml:"image-pull-policy" default:"Always"`
+	ResourcesNamespace string `yaml:"resources-namespace" default:"kubeshark"`
+	DumpLogs bool `yaml:"dump-logs" default:"false"`
+	KubeConfigPathStr string `yaml:"kube-config-path"`
+	KubeContext string `yaml:"kube-context"`
+	ConfigFilePath string `yaml:"config-path,omitempty" readonly:""`
+	HeadlessMode bool `yaml:"headless" default:"false"`
+	LogLevelStr string `yaml:"log-level,omitempty" default:"INFO" readonly:""`
+	ServiceMap bool `yaml:"service-map" default:"true"`
+	OAS models.OASConfig `yaml:"oas"`
 }
 func (config *ConfigStruct) validate() error {
@@ -85,7 +84,6 @@ func (config *ConfigStruct) validate() error {
 	}
 }
 func (config *ConfigStruct) SetDefaults() {
-	config.AgentImage = "kubeshark/hub:latest"
 	config.ConfigFilePath = path.Join(kubeshark.GetKubesharkFolderPath(), "config.yaml")
 }
@@ -94,7 +92,7 @@ func (config *ConfigStruct) ImagePullPolicy() v1.PullPolicy {
 }
 func (config *ConfigStruct) IsNsRestrictedMode() bool {
-	return config.KubesharkResourcesNamespace != "kubeshark" // Notice "kubeshark" string must match the default KubesharkResourcesNamespace
+	return config.ResourcesNamespace != "kubeshark" // Notice "kubeshark" string must match the default KubesharkResourcesNamespace
 }
 func (config *ConfigStruct) KubeConfigPath() string {
diff --git a/errormessage/errormessage.go b/errormessage/errormessage.go
index 6c478ec99..31681850c 100644
--- a/errormessage/errormessage.go
+++ b/errormessage/errormessage.go
@@ -19,9 +19,9 @@ func FormatError(err error) error {
 			"supply the required permission or control Kubeshark's access to namespaces by setting %s "+
 			"in the config file or setting the tapped namespace with --%s %s=",
 			err,
-			config.KubesharkResourcesNamespaceConfigName,
+			config.ResourcesNamespaceConfigName,
 			config.SetCommandName,
-			config.KubesharkResourcesNamespaceConfigName)
+			config.ResourcesNamespaceConfigName)
 	} else if syntaxError, isSyntaxError := asRegexSyntaxError(err); isSyntaxError {
 		errorNew = fmt.Errorf("regex %s is invalid: %w", syntaxError.Expr, err)
 	} else {
diff --git a/kubernetes/consts.go b/kubernetes/consts.go
index ac24ce6c0..3e4fe7c1b 100644
--- a/kubernetes/consts.go
+++ b/kubernetes/consts.go
@@ -19,10 +19,8 @@ const (
 )
 const (
-	LabelPrefixApp = "app.kubernetes.io/"
-	LabelManagedBy = LabelPrefixApp + "managed-by"
-	LabelCreatedBy = LabelPrefixApp + "created-by"
-	LabelValueKubeshark = "kubeshark"
-	LabelValueKubesharkCLI = "kubeshark-cli"
-	LabelValueKubesharkAgent = "kubeshark-agent"
+	LabelPrefixApp = "app.kubernetes.io/"
+	LabelManagedBy = LabelPrefixApp + "managed-by"
+	LabelCreatedBy = LabelPrefixApp + "created-by"
+	LabelValueKubeshark = "kubeshark"
 )
diff --git a/kubernetes/kubesharkTapperSyncer.go b/kubernetes/kubesharkTapperSyncer.go
index ef0a87ef9..4ecebf057 100644
--- a/kubernetes/kubesharkTapperSyncer.go
+++ b/kubernetes/kubesharkTapperSyncer.go
@@ -40,7 +40,6 @@ type TapperSyncerConfig struct {
 	TargetNamespaces []string
 	PodFilterRegex regexp.Regexp
 	KubesharkResourcesNamespace string
-	AgentImage string
 	TapperResources models.Resources
 	ImagePullPolicy core.PullPolicy
 	LogLevel logging.Level
@@ -312,6 +311,8 @@ func (tapperSyncer *KubesharkTapperSyncer) updateKubesharkTappers() error {
 	log.Printf("Updating DaemonSet to run on nodes: %v", nodesToTap)
+	image := "kubeshark/worker:latest"
+
 	if len(tapperSyncer.nodeToTappedPodMap) > 0 {
 		var serviceAccountName string
 		if tapperSyncer.config.KubesharkServiceAccountExists {
@@ -329,7 +330,7 @@ func (tapperSyncer *KubesharkTapperSyncer) updateKubesharkTappers() error {
 			tapperSyncer.context,
 			tapperSyncer.config.KubesharkResourcesNamespace,
 			TapperDaemonSetName,
-			"kubeshark/worker:latest",
+			image,
 			TapperPodName,
 			fmt.Sprintf("%s.%s.svc", HubPodName, tapperSyncer.config.KubesharkResourcesNamespace),
 			nodeNames,
@@ -350,7 +351,7 @@ func (tapperSyncer *KubesharkTapperSyncer) updateKubesharkTappers() error {
 			tapperSyncer.context,
 			tapperSyncer.config.KubesharkResourcesNamespace,
 			TapperDaemonSetName,
-			tapperSyncer.config.AgentImage,
+			image,
 			TapperPodName); err != nil {
 			return err
 		}
diff --git a/kubernetes/provider.go b/kubernetes/provider.go
index e2050b13a..7031312b8 100644
--- a/kubernetes/provider.go
+++ b/kubernetes/provider.go
@@ -85,7 +85,7 @@ func NewProvider(kubeConfigPath string, contextName string) (*Provider, error) {
 		kubernetesConfig: kubernetesConfig,
 		clientConfig: *restClientConfig,
 		managedBy: LabelValueKubeshark,
-		createdBy: LabelValueKubesharkCLI,
+		createdBy: LabelValueKubeshark,
 	}, nil
 }
@@ -105,7 +105,7 @@ func NewProviderInCluster() (*Provider, error) {
 		kubernetesConfig: nil, // not relevant in cluster
 		clientConfig: *restClientConfig,
 		managedBy: LabelValueKubeshark,
-		createdBy: LabelValueKubesharkAgent,
+		createdBy: LabelValueKubeshark,
 	}, nil
 }
@@ -838,10 +838,10 @@ func (provider *Provider) ApplyKubesharkTapperDaemonSet(ctx context.Context, nam
 		kubesharkCmd = append(kubesharkCmd, "--procfs", procfsMountPath)
 	}
-	agentContainer := applyconfcore.Container()
-	agentContainer.WithName(tapperPodName)
-	agentContainer.WithImage(podImage)
-	agentContainer.WithImagePullPolicy(imagePullPolicy)
+	workerContainer := applyconfcore.Container()
+	workerContainer.WithName(tapperPodName)
+	workerContainer.WithImage(podImage)
+	workerContainer.WithImagePullPolicy(imagePullPolicy)
 	caps := applyconfcore.Capabilities().WithDrop("ALL")
@@ -860,15 +860,15 @@ func (provider *Provider) ApplyKubesharkTapperDaemonSet(ctx context.Context, nam
 		}
 	}
-	agentContainer.WithSecurityContext(applyconfcore.SecurityContext().WithCapabilities(caps))
+	workerContainer.WithSecurityContext(applyconfcore.SecurityContext().WithCapabilities(caps))
-	agentContainer.WithCommand(kubesharkCmd...)
-	agentContainer.WithEnv(
+	workerContainer.WithCommand(kubesharkCmd...)
+	workerContainer.WithEnv(
 		applyconfcore.EnvVar().WithName(utils.LogLevelEnvVar).WithValue(logLevel.String()),
 		applyconfcore.EnvVar().WithName(utils.HostModeEnvVar).WithValue("1"),
 		applyconfcore.EnvVar().WithName(utils.KubesharkFilteringOptionsEnvVar).WithValue(string(kubesharkApiFilteringOptionsJsonStr)),
 	)
-	agentContainer.WithEnv(
+	workerContainer.WithEnv(
 		applyconfcore.EnvVar().WithName(utils.NodeNameEnvVar).WithValueFrom(
 			applyconfcore.EnvVarSource().WithFieldRef(
 				applyconfcore.ObjectFieldSelector().WithAPIVersion("v1").WithFieldPath("spec.nodeName"),
@@ -891,16 +891,16 @@ func (provider *Provider) ApplyKubesharkTapperDaemonSet(ctx context.Context, nam
 	if err != nil {
 		return fmt.Errorf("invalid memory request for %s container", tapperPodName)
 	}
-	agentResourceLimits := core.ResourceList{
+	workerResourceLimits := core.ResourceList{
 		"cpu": cpuLimit,
 		"memory": memLimit,
 	}
-	agentResourceRequests := core.ResourceList{
+	workerResourceRequests := core.ResourceList{
 		"cpu": cpuRequests,
 		"memory": memRequests,
 	}
-	agentResources := applyconfcore.ResourceRequirements().WithRequests(agentResourceRequests).WithLimits(agentResourceLimits)
-	agentContainer.WithResources(agentResources)
+	workerResources := applyconfcore.ResourceRequirements().WithRequests(workerResourceRequests).WithLimits(workerResourceLimits)
+	workerContainer.WithResources(workerResources)
 	matchFields := make([]*applyconfcore.NodeSelectorTermApplyConfiguration, 0)
 	for _, nodeName := range nodeNames {
@@ -934,14 +934,14 @@ func (provider *Provider) ApplyKubesharkTapperDaemonSet(ctx context.Context, nam
 	procfsVolume := applyconfcore.Volume()
 	procfsVolume.WithName(procfsVolumeName).WithHostPath(applyconfcore.HostPathVolumeSource().WithPath("/proc"))
 	procfsVolumeMount := applyconfcore.VolumeMount().WithName(procfsVolumeName).WithMountPath(procfsMountPath).WithReadOnly(true)
-	agentContainer.WithVolumeMounts(procfsVolumeMount)
+	workerContainer.WithVolumeMounts(procfsVolumeMount)
 	// We need access to /sys in order to install certain eBPF tracepoints
 	//
 	sysfsVolume := applyconfcore.Volume()
 	sysfsVolume.WithName(sysfsVolumeName).WithHostPath(applyconfcore.HostPathVolumeSource().WithPath("/sys"))
 	sysfsVolumeMount := applyconfcore.VolumeMount().WithName(sysfsVolumeName).WithMountPath(sysfsMountPath).WithReadOnly(true)
-	agentContainer.WithVolumeMounts(sysfsVolumeMount)
+	workerContainer.WithVolumeMounts(sysfsVolumeMount)
 	podSpec := applyconfcore.PodSpec()
 	podSpec.WithHostNetwork(true)
@@ -950,7 +950,7 @@ func (provider *Provider) ApplyKubesharkTapperDaemonSet(ctx context.Context, nam
 	if serviceAccountName != "" {
 		podSpec.WithServiceAccountName(serviceAccountName)
 	}
-	podSpec.WithContainers(agentContainer)
+	podSpec.WithContainers(workerContainer)
 	podSpec.WithAffinity(affinity)
 	podSpec.WithTolerations(noExecuteToleration, noScheduleToleration)
 	podSpec.WithVolumes(procfsVolume, sysfsVolume)
@@ -984,9 +984,9 @@ func (provider *Provider) ApplyKubesharkTapperDaemonSet(ctx context.Context, nam
 }
 func (provider *Provider) ResetKubesharkTapperDaemonSet(ctx context.Context, namespace string, daemonSetName string, podImage string, tapperPodName string) error {
-	agentContainer := applyconfcore.Container()
-	agentContainer.WithName(tapperPodName)
-	agentContainer.WithImage(podImage)
+	workerContainer := applyconfcore.Container()
+	workerContainer.WithName(tapperPodName)
+	workerContainer.WithImage(podImage)
 	nodeSelectorRequirement := applyconfcore.NodeSelectorRequirement()
 	nodeSelectorRequirement.WithKey("kubeshark-non-existing-label")
@@ -1001,7 +1001,7 @@ func (provider *Provider) ResetKubesharkTapperDaemonSet(ctx context.Context, nam
 	affinity.WithNodeAffinity(nodeAffinity)
 	podSpec := applyconfcore.PodSpec()
-	podSpec.WithContainers(agentContainer)
+	podSpec.WithContainers(workerContainer)
 	podSpec.WithAffinity(affinity)
 	podTemplate := applyconfcore.PodTemplateSpec()
diff --git a/kubeshark/fsUtils/kubesharkLogsUtils.go b/kubeshark/fsUtils/kubesharkLogsUtils.go
index e9034324e..1d10b3056 100644
--- a/kubeshark/fsUtils/kubesharkLogsUtils.go
+++ b/kubeshark/fsUtils/kubesharkLogsUtils.go
@@ -14,13 +14,13 @@ import (
 func DumpLogs(ctx context.Context, provider *kubernetes.Provider, filePath string) error {
 	podExactRegex := regexp.MustCompile("^" + kubernetes.KubesharkResourcesPrefix)
-	pods, err := provider.ListAllPodsMatchingRegex(ctx, podExactRegex, []string{config.Config.KubesharkResourcesNamespace})
+	pods, err := provider.ListAllPodsMatchingRegex(ctx, podExactRegex, []string{config.Config.ResourcesNamespace})
 	if err != nil {
 		return err
 	}
 	if len(pods) == 0 {
-		return fmt.Errorf("no kubeshark pods found in namespace %s", config.Config.KubesharkResourcesNamespace)
+		return fmt.Errorf("no kubeshark pods found in namespace %s", config.Config.ResourcesNamespace)
 	}
 	newZipFile, err := os.Create(filePath)
@@ -49,17 +49,17 @@ func DumpLogs(ctx context.Context, provider *kubernetes.Provider, filePath strin
 		}
 	}
-	events, err := provider.GetNamespaceEvents(ctx, config.Config.KubesharkResourcesNamespace)
+	events, err := provider.GetNamespaceEvents(ctx, config.Config.ResourcesNamespace)
 	if err != nil {
 		log.Printf("Failed to get k8b events, %v", err)
 	} else {
-		log.Printf("Successfully read events for k8b namespace: %s", config.Config.KubesharkResourcesNamespace)
+		log.Printf("Successfully read events for k8b namespace: %s", config.Config.ResourcesNamespace)
 	}
-	if err := AddStrToZip(zipWriter, events, fmt.Sprintf("%s_events.log", config.Config.KubesharkResourcesNamespace)); err != nil {
+	if err := AddStrToZip(zipWriter, events, fmt.Sprintf("%s_events.log", config.Config.ResourcesNamespace)); err != nil {
 		log.Printf("Failed write logs, %v", err)
 	} else {
-		log.Printf("Successfully added events for k8b namespace: %s", config.Config.KubesharkResourcesNamespace)
+		log.Printf("Successfully added events for k8b namespace: %s", config.Config.ResourcesNamespace)
 	}
 	if err := AddFileToZip(zipWriter, config.Config.ConfigFilePath); err != nil {
diff --git a/resources/createResources.go b/resources/createResources.go
index 3668da03b..fe9c41125 100644
--- a/resources/createResources.go
+++ b/resources/createResources.go
@@ -15,7 +15,7 @@ import (
 	core "k8s.io/api/core/v1"
 )
-func CreateTapKubesharkResources(ctx context.Context, kubernetesProvider *kubernetes.Provider, serializedKubesharkConfig string, isNsRestrictedMode bool, kubesharkResourcesNamespace string, agentImage string, maxEntriesDBSizeBytes int64, hubResources models.Resources, imagePullPolicy core.PullPolicy, logLevel logging.Level, profiler bool) (bool, error) {
+func CreateTapKubesharkResources(ctx context.Context, kubernetesProvider *kubernetes.Provider, serializedKubesharkConfig string, isNsRestrictedMode bool, kubesharkResourcesNamespace string, maxEntriesDBSizeBytes int64, hubResources models.Resources, imagePullPolicy core.PullPolicy, logLevel logging.Level, profiler bool) (bool, error) {
 	if !isNsRestrictedMode {
 		if err := createKubesharkNamespace(ctx, kubernetesProvider, kubesharkResourcesNamespace); err != nil {
 			return false, err
@@ -41,7 +41,7 @@ func CreateTapKubesharkResources(ctx context.Context, kubernetesProvider *kubern
 	opts := &kubernetes.HubOptions{
 		Namespace: kubesharkResourcesNamespace,
 		PodName: kubernetes.HubPodName,
-		PodImage: agentImage,
+		PodImage: "kubeshark/hub:latest",
 		KratosImage: "",
 		KetoImage: "",
 		ServiceAccountName: serviceAccountName,
@@ -56,7 +56,7 @@ func CreateTapKubesharkResources(ctx context.Context, kubernetesProvider *kubern
 	frontOpts := &kubernetes.HubOptions{
 		Namespace: kubesharkResourcesNamespace,
 		PodName: kubernetes.FrontPodName,
-		PodImage: agentImage,
+		PodImage: "kubeshark/worker:latest",
 		KratosImage: "",
 		KetoImage: "",
 		ServiceAccountName: serviceAccountName,