🔨 Replace the Agent naming in functions and variables
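The hunks below rename the Agent-era identifiers used by the CLI: config.Config.KubesharkResourcesNamespace becomes config.Config.ResourcesNamespace, getTapKubesharkAgentConfig and getSerializedKubesharkAgentConfig become getTapConfig and getSerializedTapConfig, and the standalone AgentImage argument is dropped where the tap resources and the tapper syncer are configured. For orientation only, a minimal sketch of how the renamed field might sit in the CLI config struct follows; the struct name and the YAML tag are assumptions, not part of this commit:

// Sketch only, not taken from this commit. Only ResourcesNamespace and the
// ResourcesNamespaceConfigName flag name are visible in the diff; everything
// else here is assumed for illustration.
package config

// Flag name inferred from the log message in the tap hunk below.
const ResourcesNamespaceConfigName = "kubeshark-resources-namespace"

type ConfigStruct struct {
    // Formerly KubesharkResourcesNamespace; callers throughout this diff now read ResourcesNamespace.
    ResourcesNamespace string `yaml:"kubeshark-resources-namespace"`
}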
@@ -41,7 +41,7 @@ func TapKubernetesPermissions(ctx context.Context, embedFS embed.FS, kubernetesP
     switch resource := obj.(type) {
     case *rbac.Role:
-        return checkRulesPermissions(ctx, kubernetesProvider, resource.Rules, config.Config.KubesharkResourcesNamespace)
+        return checkRulesPermissions(ctx, kubernetesProvider, resource.Rules, config.Config.ResourcesNamespace)
     case *rbac.ClusterRole:
         return checkRulesPermissions(ctx, kubernetesProvider, resource.Rules, "")
     }
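checkRulesPermissions itself is not shown in this diff; only its call sites above are. As a hedged sketch of how such an RBAC pre-flight check is typically done (assuming a plain client-go clientset rather than Kubeshark's own kubernetes.Provider, and an assumed (bool, error) return), it could look like this:

// Assumed sketch: asks the API server whether the current identity may perform
// each verb on each resource listed in the RBAC rules, scoped to namespace
// ("" means cluster-wide, matching the ClusterRole branch above).
package permissions

import (
    "context"

    authv1 "k8s.io/api/authorization/v1"
    rbac "k8s.io/api/rbac/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
)

func checkRulesPermissions(ctx context.Context, clientset kubernetes.Interface, rules []rbac.PolicyRule, namespace string) (bool, error) {
    allowedAll := true
    for _, rule := range rules {
        for _, group := range rule.APIGroups {
            for _, resource := range rule.Resources {
                for _, verb := range rule.Verbs {
                    review := &authv1.SelfSubjectAccessReview{
                        Spec: authv1.SelfSubjectAccessReviewSpec{
                            ResourceAttributes: &authv1.ResourceAttributes{
                                Namespace: namespace,
                                Group:     group,
                                Resource:  resource,
                                Verb:      verb,
                            },
                        },
                    }
                    resp, err := clientset.AuthorizationV1().SelfSubjectAccessReviews().Create(ctx, review, metav1.CreateOptions{})
                    if err != nil {
                        return false, err
                    }
                    if !resp.Status.Allowed {
                        // Keep scanning so every missing permission can be reported.
                        allowedAll = false
                    }
                }
            }
        }
    }
    return allowedAll, nil
}

The namespace argument mirrors the call sites above: the Role branch passes config.Config.ResourcesNamespace, while the ClusterRole branch passes "" for a cluster-wide check.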
@@ -13,20 +13,20 @@ import (
 func KubernetesResources(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool {
     log.Printf("\nk8s-components\n--------------------")

-    exist, err := kubernetesProvider.DoesNamespaceExist(ctx, config.Config.KubesharkResourcesNamespace)
-    allResourcesExist := checkResourceExist(config.Config.KubesharkResourcesNamespace, "namespace", exist, err)
+    exist, err := kubernetesProvider.DoesNamespaceExist(ctx, config.Config.ResourcesNamespace)
+    allResourcesExist := checkResourceExist(config.Config.ResourcesNamespace, "namespace", exist, err)

-    exist, err = kubernetesProvider.DoesConfigMapExist(ctx, config.Config.KubesharkResourcesNamespace, kubernetes.ConfigMapName)
+    exist, err = kubernetesProvider.DoesConfigMapExist(ctx, config.Config.ResourcesNamespace, kubernetes.ConfigMapName)
     allResourcesExist = checkResourceExist(kubernetes.ConfigMapName, "config map", exist, err) && allResourcesExist

-    exist, err = kubernetesProvider.DoesServiceAccountExist(ctx, config.Config.KubesharkResourcesNamespace, kubernetes.ServiceAccountName)
+    exist, err = kubernetesProvider.DoesServiceAccountExist(ctx, config.Config.ResourcesNamespace, kubernetes.ServiceAccountName)
     allResourcesExist = checkResourceExist(kubernetes.ServiceAccountName, "service account", exist, err) && allResourcesExist

     if config.Config.IsNsRestrictedMode() {
-        exist, err = kubernetesProvider.DoesRoleExist(ctx, config.Config.KubesharkResourcesNamespace, kubernetes.RoleName)
+        exist, err = kubernetesProvider.DoesRoleExist(ctx, config.Config.ResourcesNamespace, kubernetes.RoleName)
         allResourcesExist = checkResourceExist(kubernetes.RoleName, "role", exist, err) && allResourcesExist

-        exist, err = kubernetesProvider.DoesRoleBindingExist(ctx, config.Config.KubesharkResourcesNamespace, kubernetes.RoleBindingName)
+        exist, err = kubernetesProvider.DoesRoleBindingExist(ctx, config.Config.ResourcesNamespace, kubernetes.RoleBindingName)
         allResourcesExist = checkResourceExist(kubernetes.RoleBindingName, "role binding", exist, err) && allResourcesExist
     } else {
         exist, err = kubernetesProvider.DoesClusterRoleExist(ctx, kubernetes.ClusterRoleName)
@@ -36,7 +36,7 @@ func KubernetesResources(ctx context.Context, kubernetesProvider *kubernetes.Pro
         allResourcesExist = checkResourceExist(kubernetes.ClusterRoleBindingName, "cluster role binding", exist, err) && allResourcesExist
     }

-    exist, err = kubernetesProvider.DoesServiceExist(ctx, config.Config.KubesharkResourcesNamespace, kubernetes.HubServiceName)
+    exist, err = kubernetesProvider.DoesServiceExist(ctx, config.Config.ResourcesNamespace, kubernetes.HubServiceName)
     allResourcesExist = checkResourceExist(kubernetes.HubServiceName, "service", exist, err) && allResourcesExist

     allResourcesExist = checkPodResourcesExist(ctx, kubernetesProvider) && allResourcesExist
@@ -45,7 +45,7 @@ func KubernetesResources(ctx context.Context, kubernetesProvider *kubernetes.Pro
 }

 func checkPodResourcesExist(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool {
-    if pods, err := kubernetesProvider.ListPodsByAppLabel(ctx, config.Config.KubesharkResourcesNamespace, kubernetes.HubPodName); err != nil {
+    if pods, err := kubernetesProvider.ListPodsByAppLabel(ctx, config.Config.ResourcesNamespace, kubernetes.HubPodName); err != nil {
         log.Printf("%v error checking if '%v' pod is running, err: %v", fmt.Sprintf(utils.Red, "✗"), kubernetes.HubPodName, err)
         return false
     } else if len(pods) == 0 {
@@ -58,7 +58,7 @@ func checkPodResourcesExist(ctx context.Context, kubernetesProvider *kubernetes.

     log.Printf("%v '%v' pod running", fmt.Sprintf(utils.Green, "√"), kubernetes.HubPodName)

-    if pods, err := kubernetesProvider.ListPodsByAppLabel(ctx, config.Config.KubesharkResourcesNamespace, kubernetes.TapperPodName); err != nil {
+    if pods, err := kubernetesProvider.ListPodsByAppLabel(ctx, config.Config.ResourcesNamespace, kubernetes.TapperPodName); err != nil {
         log.Printf("%v error checking if '%v' pods are running, err: %v", fmt.Sprintf(utils.Red, "✗"), kubernetes.TapperPodName, err)
         return false
     } else {
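checkResourceExist is likewise only visible through its call sites above (resource name, a human-readable kind, the exist flag, and the lookup error, folded into allResourcesExist with &&). A minimal sketch consistent with the ✗/√ logging style of this hunk, assuming the surrounding file's existing log, fmt, and utils imports, might be:

// Assumed sketch; the real helper lives elsewhere in the CLI and is not part of this diff.
func checkResourceExist(resourceName string, resourceType string, exist bool, err error) bool {
    if err != nil {
        // The lookup itself failed, so treat the resource as missing.
        log.Printf("%v error checking if '%v' %v exists, err: %v", fmt.Sprintf(utils.Red, "✗"), resourceName, resourceType, err)
        return false
    }
    if !exist {
        log.Printf("%v '%v' %v doesn't exist", fmt.Sprintf(utils.Red, "✗"), resourceName, resourceType)
        return false
    }
    log.Printf("%v '%v' %v exists", fmt.Sprintf(utils.Green, "√"), resourceName, resourceType)
    return true
}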
@@ -46,7 +46,7 @@ func checkProxy(serverUrl string, kubernetesProvider *kubernetes.Provider) error
     ctx, cancel := context.WithCancel(context.Background())
     defer cancel()

-    httpServer, err := kubernetes.StartProxy(kubernetesProvider, config.Config.Tap.ProxyHost, config.Config.Hub.PortForward.SrcPort, config.Config.Hub.PortForward.DstPort, config.Config.KubesharkResourcesNamespace, kubernetes.HubServiceName, cancel)
+    httpServer, err := kubernetes.StartProxy(kubernetesProvider, config.Config.Tap.ProxyHost, config.Config.Hub.PortForward.SrcPort, config.Config.Hub.PortForward.DstPort, config.Config.ResourcesNamespace, kubernetes.HubServiceName, cancel)
     if err != nil {
         return err
     }
@@ -68,7 +68,7 @@ func checkPortForward(serverUrl string, kubernetesProvider *kubernetes.Provider)
     defer cancel()

     podRegex, _ := regexp.Compile(kubernetes.HubPodName)
-    forwarder, err := kubernetes.NewPortForward(kubernetesProvider, config.Config.KubesharkResourcesNamespace, podRegex, config.Config.Tap.GuiPort, config.Config.Tap.GuiPort, ctx, cancel)
+    forwarder, err := kubernetes.NewPortForward(kubernetesProvider, config.Config.ResourcesNamespace, podRegex, config.Config.Tap.GuiPort, config.Config.Tap.GuiPort, ctx, cancel)
     if err != nil {
         return err
     }
@@ -10,5 +10,5 @@ func performCleanCommand() {
         return
     }

-    finishKubesharkExecution(kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.KubesharkResourcesNamespace)
+    finishKubesharkExecution(kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.ResourcesNamespace)
 }
@@ -24,7 +24,7 @@ import (
 )

 func startProxyReportErrorIfAny(kubernetesProvider *kubernetes.Provider, ctx context.Context, cancel context.CancelFunc, serviceName string, srcPort uint16, dstPort uint16, healthCheck string) {
-    httpServer, err := kubernetes.StartProxy(kubernetesProvider, config.Config.Tap.ProxyHost, srcPort, dstPort, config.Config.KubesharkResourcesNamespace, serviceName, cancel)
+    httpServer, err := kubernetes.StartProxy(kubernetesProvider, config.Config.Tap.ProxyHost, srcPort, dstPort, config.Config.ResourcesNamespace, serviceName, cancel)
     if err != nil {
         log.Printf(utils.Error, fmt.Sprintf("Error occured while running k8s proxy %v\n"+
             "Try setting different port by using --%s", errormessage.FormatError(err), configStructs.GuiPortTapName))
@@ -40,7 +40,7 @@ func startProxyReportErrorIfAny(kubernetesProvider *kubernetes.Provider, ctx con
     }

     podRegex, _ := regexp.Compile(kubernetes.HubPodName)
-    if _, err := kubernetes.NewPortForward(kubernetesProvider, config.Config.KubesharkResourcesNamespace, podRegex, srcPort, dstPort, ctx, cancel); err != nil {
+    if _, err := kubernetes.NewPortForward(kubernetesProvider, config.Config.ResourcesNamespace, podRegex, srcPort, dstPort, ctx, cancel); err != nil {
         log.Printf(utils.Error, fmt.Sprintf("Error occured while running port forward [%s] %v\n"+
             "Try setting different port by using --%s", podRegex, errormessage.FormatError(err), configStructs.GuiPortTapName))
         cancel()
@@ -109,8 +109,8 @@ func dumpLogsIfNeeded(ctx context.Context, kubernetesProvider *kubernetes.Provid
     }
 }

-func getSerializedKubesharkAgentConfig(kubesharkAgentConfig *models.Config) (string, error) {
-    serializedConfig, err := json.Marshal(kubesharkAgentConfig)
+func getSerializedTapConfig(conf *models.Config) (string, error) {
+    serializedConfig, err := json.Marshal(conf)
     if err != nil {
         return "", err
     }
@@ -55,17 +55,17 @@ func RunKubesharkTap() {

     state.targetNamespaces = getNamespaces(kubernetesProvider)

-    kubesharkAgentConfig := getTapKubesharkAgentConfig()
-    serializedKubesharkConfig, err := getSerializedKubesharkAgentConfig(kubesharkAgentConfig)
+    conf := getTapConfig()
+    serializedKubesharkConfig, err := getSerializedTapConfig(conf)
     if err != nil {
         log.Printf(utils.Error, fmt.Sprintf("Error serializing kubeshark config: %v", errormessage.FormatError(err)))
         return
     }

     if config.Config.IsNsRestrictedMode() {
-        if len(state.targetNamespaces) != 1 || !utils.Contains(state.targetNamespaces, config.Config.KubesharkResourcesNamespace) {
+        if len(state.targetNamespaces) != 1 || !utils.Contains(state.targetNamespaces, config.Config.ResourcesNamespace) {
             log.Printf("Not supported mode. Kubeshark can't resolve IPs in other namespaces when running in namespace restricted mode.\n"+
-                "You can use the same namespace for --%s and --%s", configStructs.NamespacesTapName, config.KubesharkResourcesNamespaceConfigName)
+                "You can use the same namespace for --%s and --%s", configStructs.NamespacesTapName, config.ResourcesNamespaceConfigName)
             return
         }
     }
@@ -87,13 +87,13 @@ func RunKubesharkTap() {
         return
     }

-    log.Printf("Waiting for Kubeshark Agent to start...")
-    if state.kubesharkServiceAccountExists, err = resources.CreateTapKubesharkResources(ctx, kubernetesProvider, serializedKubesharkConfig, config.Config.IsNsRestrictedMode(), config.Config.KubesharkResourcesNamespace, config.Config.AgentImage, config.Config.Tap.MaxEntriesDBSizeBytes(), config.Config.Tap.HubResources, config.Config.ImagePullPolicy(), config.Config.LogLevel(), config.Config.Tap.Profiler); err != nil {
+    log.Printf("Waiting for Kubeshark deployment to finish...")
+    if state.kubesharkServiceAccountExists, err = resources.CreateTapKubesharkResources(ctx, kubernetesProvider, serializedKubesharkConfig, config.Config.IsNsRestrictedMode(), config.Config.ResourcesNamespace, config.Config.Tap.MaxEntriesDBSizeBytes(), config.Config.Tap.HubResources, config.Config.ImagePullPolicy(), config.Config.LogLevel(), config.Config.Tap.Profiler); err != nil {
         var statusError *k8serrors.StatusError
         if errors.As(err, &statusError) && (statusError.ErrStatus.Reason == metav1.StatusReasonAlreadyExists) {
             log.Print("Kubeshark is already running in this namespace, change the `kubeshark-resources-namespace` configuration or run `kubeshark clean` to remove the currently running Kubeshark instance")
         } else {
-            defer resources.CleanUpKubesharkResources(ctx, cancel, kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.KubesharkResourcesNamespace)
+            defer resources.CleanUpKubesharkResources(ctx, cancel, kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.ResourcesNamespace)
             log.Printf(utils.Error, fmt.Sprintf("Error creating resources: %v", errormessage.FormatError(err)))
         }
@@ -111,24 +111,23 @@ func RunKubesharkTap() {
 }

 func finishTapExecution(kubernetesProvider *kubernetes.Provider) {
-    finishKubesharkExecution(kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.KubesharkResourcesNamespace)
+    finishKubesharkExecution(kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.ResourcesNamespace)
 }

-func getTapKubesharkAgentConfig() *models.Config {
-    kubesharkAgentConfig := models.Config{
+func getTapConfig() *models.Config {
+    conf := models.Config{
         MaxDBSizeBytes:              config.Config.Tap.MaxEntriesDBSizeBytes(),
         InsertionFilter:             config.Config.Tap.GetInsertionFilter(),
-        AgentImage:                  config.Config.AgentImage,
         PullPolicy:                  config.Config.ImagePullPolicyStr,
         LogLevel:                    config.Config.LogLevel(),
         TapperResources:             config.Config.Tap.TapperResources,
-        KubesharkResourcesNamespace: config.Config.KubesharkResourcesNamespace,
+        KubesharkResourcesNamespace: config.Config.ResourcesNamespace,
         AgentDatabasePath:           models.DataDirPath,
         ServiceMap:                  config.Config.ServiceMap,
         OAS:                         config.Config.OAS,
     }

-    return &kubesharkAgentConfig
+    return &conf
 }

 /*
@@ -154,8 +153,7 @@ func startTapperSyncer(ctx context.Context, cancel context.CancelFunc, provider
     tapperSyncer, err := kubernetes.CreateAndStartKubesharkTapperSyncer(ctx, provider, kubernetes.TapperSyncerConfig{
         TargetNamespaces:            targetNamespaces,
         PodFilterRegex:              *config.Config.Tap.PodRegex(),
-        KubesharkResourcesNamespace: config.Config.KubesharkResourcesNamespace,
-        AgentImage:                  config.Config.AgentImage,
+        KubesharkResourcesNamespace: config.Config.ResourcesNamespace,
         TapperResources:             config.Config.Tap.TapperResources,
         ImagePullPolicy:             config.Config.ImagePullPolicy(),
         LogLevel:                    config.Config.LogLevel(),
@@ -232,7 +230,7 @@ func getErrorDisplayTextForK8sTapManagerError(err kubernetes.K8sTapManagerError)
 func watchHubPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc) {
     podExactRegex := regexp.MustCompile(fmt.Sprintf("^%s$", kubernetes.HubPodName))
     podWatchHelper := kubernetes.NewPodWatchHelper(kubernetesProvider, podExactRegex)
-    eventChan, errorChan := kubernetes.FilteredWatch(ctx, podWatchHelper, []string{config.Config.KubesharkResourcesNamespace}, podWatchHelper)
+    eventChan, errorChan := kubernetes.FilteredWatch(ctx, podWatchHelper, []string{config.Config.ResourcesNamespace}, podWatchHelper)
     isPodReady := false

     hubTimeoutSec := config.GetIntEnvConfig(config.HubTimeoutSec, 120)
@@ -283,7 +281,7 @@ func watchHubPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, c
                 continue
             }

-            log.Printf("[ERROR] Agent creation, watching %v namespace, error: %v", config.Config.KubesharkResourcesNamespace, err)
+            log.Printf("[ERROR] Hub pod creation, watching %v namespace, error: %v", config.Config.ResourcesNamespace, err)
             cancel()

         case <-timeAfter:
@@ -301,7 +299,7 @@ func watchHubPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, c
 func watchFrontPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc) {
     podExactRegex := regexp.MustCompile(fmt.Sprintf("^%s$", kubernetes.FrontPodName))
     podWatchHelper := kubernetes.NewPodWatchHelper(kubernetesProvider, podExactRegex)
-    eventChan, errorChan := kubernetes.FilteredWatch(ctx, podWatchHelper, []string{config.Config.KubesharkResourcesNamespace}, podWatchHelper)
+    eventChan, errorChan := kubernetes.FilteredWatch(ctx, podWatchHelper, []string{config.Config.ResourcesNamespace}, podWatchHelper)
     isPodReady := false

     hubTimeoutSec := config.GetIntEnvConfig(config.HubTimeoutSec, 120)
@@ -351,7 +349,7 @@ func watchFrontPod(ctx context.Context, kubernetesProvider *kubernetes.Provider,
                 continue
             }

-            log.Printf("[ERROR] Agent creation, watching %v namespace, error: %v", config.Config.KubesharkResourcesNamespace, err)
+            log.Printf("[ERROR] Front pod creation, watching %v namespace, error: %v", config.Config.ResourcesNamespace, err)
             cancel()

         case <-timeAfter:
@@ -360,7 +358,7 @@ func watchFrontPod(ctx context.Context, kubernetesProvider *kubernetes.Provider,
                 cancel()
             }
         case <-ctx.Done():
-            log.Printf("Watching Hub pod loop, ctx done")
+            log.Printf("Watching Front pod loop, ctx done")
             return
         }
     }
@@ -369,7 +367,7 @@ func watchFrontPod(ctx context.Context, kubernetesProvider *kubernetes.Provider,
 func watchHubEvents(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc) {
     podExactRegex := regexp.MustCompile(fmt.Sprintf("^%s", kubernetes.HubPodName))
     eventWatchHelper := kubernetes.NewEventWatchHelper(kubernetesProvider, podExactRegex, "pod")
-    eventChan, errorChan := kubernetes.FilteredWatch(ctx, eventWatchHelper, []string{config.Config.KubesharkResourcesNamespace}, eventWatchHelper)
+    eventChan, errorChan := kubernetes.FilteredWatch(ctx, eventWatchHelper, []string{config.Config.ResourcesNamespace}, eventWatchHelper)
     for {
         select {
         case wEvent, ok := <-eventChan:
@@ -25,7 +25,7 @@ func runKubesharkView() {
     url := config.Config.View.Url

     if url == "" {
-        exists, err := kubernetesProvider.DoesServiceExist(ctx, config.Config.KubesharkResourcesNamespace, kubernetes.HubServiceName)
+        exists, err := kubernetesProvider.DoesServiceExist(ctx, config.Config.ResourcesNamespace, kubernetes.HubServiceName)
         if err != nil {
             log.Printf("Failed to found kubeshark service %v", err)
             cancel()