Mirror of https://github.com/kubeshark/kubeshark.git (synced 2025-08-14 06:37:12 +00:00)
🔨 Replace the Agent naming in functions and variables

commit 0e8ea1e31b (parent 671aa783c5)
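The rename is mechanical: identifiers and configuration keys that carried the old "Agent" / "KubesharkResources" wording get plainer names (ResourcesNamespace, getTapConfig, workerContainer, and so on), the agent-image setting is dropped in favour of fixed kubeshark/hub:latest and kubeshark/worker:latest image strings, and a few log messages are corrected along the way. A minimal, self-contained Go sketch of the central rename as it looks after this commit; the struct below is a trimmed stand-in for the real ConfigStruct, not the full type:

package main

import "fmt"

// Trimmed stand-in for ConfigStruct: only the renamed field is shown.
// Before this commit the field was KubesharkResourcesNamespace with the
// yaml key "kubeshark-resources-namespace".
type ConfigStruct struct {
	ResourcesNamespace string `yaml:"resources-namespace" default:"kubeshark"`
}

// IsNsRestrictedMode mirrors the updated method: any namespace other than the
// default "kubeshark" switches the CLI into namespace-restricted mode.
func (config *ConfigStruct) IsNsRestrictedMode() bool {
	return config.ResourcesNamespace != "kubeshark"
}

func main() {
	cfg := ConfigStruct{ResourcesNamespace: "monitoring"} // "monitoring" is just an example value
	fmt.Println(cfg.IsNsRestrictedMode())                 // true
}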
@@ -41,7 +41,7 @@ func TapKubernetesPermissions(ctx context.Context, embedFS embed.FS, kubernetesP
 
 	switch resource := obj.(type) {
 	case *rbac.Role:
-		return checkRulesPermissions(ctx, kubernetesProvider, resource.Rules, config.Config.KubesharkResourcesNamespace)
+		return checkRulesPermissions(ctx, kubernetesProvider, resource.Rules, config.Config.ResourcesNamespace)
 	case *rbac.ClusterRole:
 		return checkRulesPermissions(ctx, kubernetesProvider, resource.Rules, "")
 	}
@@ -13,20 +13,20 @@ import (
 func KubernetesResources(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool {
 	log.Printf("\nk8s-components\n--------------------")
 
-	exist, err := kubernetesProvider.DoesNamespaceExist(ctx, config.Config.KubesharkResourcesNamespace)
-	allResourcesExist := checkResourceExist(config.Config.KubesharkResourcesNamespace, "namespace", exist, err)
+	exist, err := kubernetesProvider.DoesNamespaceExist(ctx, config.Config.ResourcesNamespace)
+	allResourcesExist := checkResourceExist(config.Config.ResourcesNamespace, "namespace", exist, err)
 
-	exist, err = kubernetesProvider.DoesConfigMapExist(ctx, config.Config.KubesharkResourcesNamespace, kubernetes.ConfigMapName)
+	exist, err = kubernetesProvider.DoesConfigMapExist(ctx, config.Config.ResourcesNamespace, kubernetes.ConfigMapName)
 	allResourcesExist = checkResourceExist(kubernetes.ConfigMapName, "config map", exist, err) && allResourcesExist
 
-	exist, err = kubernetesProvider.DoesServiceAccountExist(ctx, config.Config.KubesharkResourcesNamespace, kubernetes.ServiceAccountName)
+	exist, err = kubernetesProvider.DoesServiceAccountExist(ctx, config.Config.ResourcesNamespace, kubernetes.ServiceAccountName)
 	allResourcesExist = checkResourceExist(kubernetes.ServiceAccountName, "service account", exist, err) && allResourcesExist
 
 	if config.Config.IsNsRestrictedMode() {
-		exist, err = kubernetesProvider.DoesRoleExist(ctx, config.Config.KubesharkResourcesNamespace, kubernetes.RoleName)
+		exist, err = kubernetesProvider.DoesRoleExist(ctx, config.Config.ResourcesNamespace, kubernetes.RoleName)
 		allResourcesExist = checkResourceExist(kubernetes.RoleName, "role", exist, err) && allResourcesExist
 
-		exist, err = kubernetesProvider.DoesRoleBindingExist(ctx, config.Config.KubesharkResourcesNamespace, kubernetes.RoleBindingName)
+		exist, err = kubernetesProvider.DoesRoleBindingExist(ctx, config.Config.ResourcesNamespace, kubernetes.RoleBindingName)
 		allResourcesExist = checkResourceExist(kubernetes.RoleBindingName, "role binding", exist, err) && allResourcesExist
 	} else {
 		exist, err = kubernetesProvider.DoesClusterRoleExist(ctx, kubernetes.ClusterRoleName)
@@ -36,7 +36,7 @@ func KubernetesResources(ctx context.Context, kubernetesProvider *kubernetes.Pro
 		allResourcesExist = checkResourceExist(kubernetes.ClusterRoleBindingName, "cluster role binding", exist, err) && allResourcesExist
 	}
 
-	exist, err = kubernetesProvider.DoesServiceExist(ctx, config.Config.KubesharkResourcesNamespace, kubernetes.HubServiceName)
+	exist, err = kubernetesProvider.DoesServiceExist(ctx, config.Config.ResourcesNamespace, kubernetes.HubServiceName)
 	allResourcesExist = checkResourceExist(kubernetes.HubServiceName, "service", exist, err) && allResourcesExist
 
 	allResourcesExist = checkPodResourcesExist(ctx, kubernetesProvider) && allResourcesExist
@@ -45,7 +45,7 @@ func KubernetesResources(ctx context.Context, kubernetesProvider *kubernetes.Pro
 }
 
 func checkPodResourcesExist(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool {
-	if pods, err := kubernetesProvider.ListPodsByAppLabel(ctx, config.Config.KubesharkResourcesNamespace, kubernetes.HubPodName); err != nil {
+	if pods, err := kubernetesProvider.ListPodsByAppLabel(ctx, config.Config.ResourcesNamespace, kubernetes.HubPodName); err != nil {
 		log.Printf("%v error checking if '%v' pod is running, err: %v", fmt.Sprintf(utils.Red, "✗"), kubernetes.HubPodName, err)
 		return false
 	} else if len(pods) == 0 {
@@ -58,7 +58,7 @@ func checkPodResourcesExist(ctx context.Context, kubernetesProvider *kubernetes.
 
 	log.Printf("%v '%v' pod running", fmt.Sprintf(utils.Green, "√"), kubernetes.HubPodName)
 
-	if pods, err := kubernetesProvider.ListPodsByAppLabel(ctx, config.Config.KubesharkResourcesNamespace, kubernetes.TapperPodName); err != nil {
+	if pods, err := kubernetesProvider.ListPodsByAppLabel(ctx, config.Config.ResourcesNamespace, kubernetes.TapperPodName); err != nil {
 		log.Printf("%v error checking if '%v' pods are running, err: %v", fmt.Sprintf(utils.Red, "✗"), kubernetes.TapperPodName, err)
 		return false
 	} else {
@@ -46,7 +46,7 @@ func checkProxy(serverUrl string, kubernetesProvider *kubernetes.Provider) error
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 
-	httpServer, err := kubernetes.StartProxy(kubernetesProvider, config.Config.Tap.ProxyHost, config.Config.Hub.PortForward.SrcPort, config.Config.Hub.PortForward.DstPort, config.Config.KubesharkResourcesNamespace, kubernetes.HubServiceName, cancel)
+	httpServer, err := kubernetes.StartProxy(kubernetesProvider, config.Config.Tap.ProxyHost, config.Config.Hub.PortForward.SrcPort, config.Config.Hub.PortForward.DstPort, config.Config.ResourcesNamespace, kubernetes.HubServiceName, cancel)
 	if err != nil {
 		return err
 	}
@@ -68,7 +68,7 @@ func checkPortForward(serverUrl string, kubernetesProvider *kubernetes.Provider)
 	defer cancel()
 
 	podRegex, _ := regexp.Compile(kubernetes.HubPodName)
-	forwarder, err := kubernetes.NewPortForward(kubernetesProvider, config.Config.KubesharkResourcesNamespace, podRegex, config.Config.Tap.GuiPort, config.Config.Tap.GuiPort, ctx, cancel)
+	forwarder, err := kubernetes.NewPortForward(kubernetesProvider, config.Config.ResourcesNamespace, podRegex, config.Config.Tap.GuiPort, config.Config.Tap.GuiPort, ctx, cancel)
 	if err != nil {
 		return err
 	}
@@ -10,5 +10,5 @@ func performCleanCommand() {
 		return
 	}
 
-	finishKubesharkExecution(kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.KubesharkResourcesNamespace)
+	finishKubesharkExecution(kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.ResourcesNamespace)
 }
@@ -24,7 +24,7 @@ import (
 )
 
 func startProxyReportErrorIfAny(kubernetesProvider *kubernetes.Provider, ctx context.Context, cancel context.CancelFunc, serviceName string, srcPort uint16, dstPort uint16, healthCheck string) {
-	httpServer, err := kubernetes.StartProxy(kubernetesProvider, config.Config.Tap.ProxyHost, srcPort, dstPort, config.Config.KubesharkResourcesNamespace, serviceName, cancel)
+	httpServer, err := kubernetes.StartProxy(kubernetesProvider, config.Config.Tap.ProxyHost, srcPort, dstPort, config.Config.ResourcesNamespace, serviceName, cancel)
 	if err != nil {
 		log.Printf(utils.Error, fmt.Sprintf("Error occured while running k8s proxy %v\n"+
 			"Try setting different port by using --%s", errormessage.FormatError(err), configStructs.GuiPortTapName))
@@ -40,7 +40,7 @@ func startProxyReportErrorIfAny(kubernetesProvider *kubernetes.Provider, ctx con
 	}
 
 	podRegex, _ := regexp.Compile(kubernetes.HubPodName)
-	if _, err := kubernetes.NewPortForward(kubernetesProvider, config.Config.KubesharkResourcesNamespace, podRegex, srcPort, dstPort, ctx, cancel); err != nil {
+	if _, err := kubernetes.NewPortForward(kubernetesProvider, config.Config.ResourcesNamespace, podRegex, srcPort, dstPort, ctx, cancel); err != nil {
 		log.Printf(utils.Error, fmt.Sprintf("Error occured while running port forward [%s] %v\n"+
 			"Try setting different port by using --%s", podRegex, errormessage.FormatError(err), configStructs.GuiPortTapName))
 		cancel()
@@ -109,8 +109,8 @@ func dumpLogsIfNeeded(ctx context.Context, kubernetesProvider *kubernetes.Provid
 	}
 }
 
-func getSerializedKubesharkAgentConfig(kubesharkAgentConfig *models.Config) (string, error) {
-	serializedConfig, err := json.Marshal(kubesharkAgentConfig)
+func getSerializedTapConfig(conf *models.Config) (string, error) {
+	serializedConfig, err := json.Marshal(conf)
 	if err != nil {
 		return "", err
 	}
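getSerializedKubesharkAgentConfig becomes getSerializedTapConfig but keeps the same behaviour: JSON-encode the tap config before it is handed to resources.CreateTapKubesharkResources (see the RunKubesharkTap hunks below). A small self-contained sketch of that pattern; the Config fields here are placeholders rather than the real models.Config:

package main

import (
	"encoding/json"
	"fmt"
)

// Placeholder stand-in for models.Config; field names are illustrative only.
type Config struct {
	MaxDBSizeBytes int64  `json:"maxDBSizeBytes"`
	LogLevel       string `json:"logLevel"`
}

// getSerializedTapConfig mirrors the renamed helper: JSON-encode the config or return the error.
func getSerializedTapConfig(conf *Config) (string, error) {
	serializedConfig, err := json.Marshal(conf)
	if err != nil {
		return "", err
	}
	return string(serializedConfig), nil
}

func main() {
	out, _ := getSerializedTapConfig(&Config{MaxDBSizeBytes: 200 * 1000 * 1000, LogLevel: "info"})
	fmt.Println(out)
}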
@@ -55,17 +55,17 @@ func RunKubesharkTap() {
 
 	state.targetNamespaces = getNamespaces(kubernetesProvider)
 
-	kubesharkAgentConfig := getTapKubesharkAgentConfig()
-	serializedKubesharkConfig, err := getSerializedKubesharkAgentConfig(kubesharkAgentConfig)
+	conf := getTapConfig()
+	serializedKubesharkConfig, err := getSerializedTapConfig(conf)
 	if err != nil {
 		log.Printf(utils.Error, fmt.Sprintf("Error serializing kubeshark config: %v", errormessage.FormatError(err)))
 		return
 	}
 
 	if config.Config.IsNsRestrictedMode() {
-		if len(state.targetNamespaces) != 1 || !utils.Contains(state.targetNamespaces, config.Config.KubesharkResourcesNamespace) {
+		if len(state.targetNamespaces) != 1 || !utils.Contains(state.targetNamespaces, config.Config.ResourcesNamespace) {
 			log.Printf("Not supported mode. Kubeshark can't resolve IPs in other namespaces when running in namespace restricted mode.\n"+
-				"You can use the same namespace for --%s and --%s", configStructs.NamespacesTapName, config.KubesharkResourcesNamespaceConfigName)
+				"You can use the same namespace for --%s and --%s", configStructs.NamespacesTapName, config.ResourcesNamespaceConfigName)
 			return
 		}
 	}
@@ -87,13 +87,13 @@ func RunKubesharkTap() {
 		return
 	}
 
-	log.Printf("Waiting for Kubeshark Agent to start...")
-	if state.kubesharkServiceAccountExists, err = resources.CreateTapKubesharkResources(ctx, kubernetesProvider, serializedKubesharkConfig, config.Config.IsNsRestrictedMode(), config.Config.KubesharkResourcesNamespace, config.Config.AgentImage, config.Config.Tap.MaxEntriesDBSizeBytes(), config.Config.Tap.HubResources, config.Config.ImagePullPolicy(), config.Config.LogLevel(), config.Config.Tap.Profiler); err != nil {
+	log.Printf("Waiting for Kubeshark deployment to finish...")
+	if state.kubesharkServiceAccountExists, err = resources.CreateTapKubesharkResources(ctx, kubernetesProvider, serializedKubesharkConfig, config.Config.IsNsRestrictedMode(), config.Config.ResourcesNamespace, config.Config.Tap.MaxEntriesDBSizeBytes(), config.Config.Tap.HubResources, config.Config.ImagePullPolicy(), config.Config.LogLevel(), config.Config.Tap.Profiler); err != nil {
 		var statusError *k8serrors.StatusError
 		if errors.As(err, &statusError) && (statusError.ErrStatus.Reason == metav1.StatusReasonAlreadyExists) {
 			log.Print("Kubeshark is already running in this namespace, change the `kubeshark-resources-namespace` configuration or run `kubeshark clean` to remove the currently running Kubeshark instance")
 		} else {
-			defer resources.CleanUpKubesharkResources(ctx, cancel, kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.KubesharkResourcesNamespace)
+			defer resources.CleanUpKubesharkResources(ctx, cancel, kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.ResourcesNamespace)
 			log.Printf(utils.Error, fmt.Sprintf("Error creating resources: %v", errormessage.FormatError(err)))
 		}
 
@@ -111,24 +111,23 @@ func RunKubesharkTap() {
 }
 
 func finishTapExecution(kubernetesProvider *kubernetes.Provider) {
-	finishKubesharkExecution(kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.KubesharkResourcesNamespace)
+	finishKubesharkExecution(kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.ResourcesNamespace)
 }
 
-func getTapKubesharkAgentConfig() *models.Config {
-	kubesharkAgentConfig := models.Config{
+func getTapConfig() *models.Config {
+	conf := models.Config{
 		MaxDBSizeBytes: config.Config.Tap.MaxEntriesDBSizeBytes(),
 		InsertionFilter: config.Config.Tap.GetInsertionFilter(),
-		AgentImage: config.Config.AgentImage,
 		PullPolicy: config.Config.ImagePullPolicyStr,
 		LogLevel: config.Config.LogLevel(),
 		TapperResources: config.Config.Tap.TapperResources,
-		KubesharkResourcesNamespace: config.Config.KubesharkResourcesNamespace,
+		KubesharkResourcesNamespace: config.Config.ResourcesNamespace,
 		AgentDatabasePath: models.DataDirPath,
 		ServiceMap: config.Config.ServiceMap,
 		OAS: config.Config.OAS,
 	}
 
-	return &kubesharkAgentConfig
+	return &conf
 }
 
 /*
@@ -154,8 +153,7 @@ func startTapperSyncer(ctx context.Context, cancel context.CancelFunc, provider
 	tapperSyncer, err := kubernetes.CreateAndStartKubesharkTapperSyncer(ctx, provider, kubernetes.TapperSyncerConfig{
 		TargetNamespaces: targetNamespaces,
 		PodFilterRegex: *config.Config.Tap.PodRegex(),
-		KubesharkResourcesNamespace: config.Config.KubesharkResourcesNamespace,
-		AgentImage: config.Config.AgentImage,
+		KubesharkResourcesNamespace: config.Config.ResourcesNamespace,
 		TapperResources: config.Config.Tap.TapperResources,
 		ImagePullPolicy: config.Config.ImagePullPolicy(),
 		LogLevel: config.Config.LogLevel(),
@@ -232,7 +230,7 @@ func getErrorDisplayTextForK8sTapManagerError(err kubernetes.K8sTapManagerError)
 func watchHubPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc) {
 	podExactRegex := regexp.MustCompile(fmt.Sprintf("^%s$", kubernetes.HubPodName))
 	podWatchHelper := kubernetes.NewPodWatchHelper(kubernetesProvider, podExactRegex)
-	eventChan, errorChan := kubernetes.FilteredWatch(ctx, podWatchHelper, []string{config.Config.KubesharkResourcesNamespace}, podWatchHelper)
+	eventChan, errorChan := kubernetes.FilteredWatch(ctx, podWatchHelper, []string{config.Config.ResourcesNamespace}, podWatchHelper)
 	isPodReady := false
 
 	hubTimeoutSec := config.GetIntEnvConfig(config.HubTimeoutSec, 120)
@@ -283,7 +281,7 @@ func watchHubPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, c
 				continue
 			}
 
-			log.Printf("[ERROR] Agent creation, watching %v namespace, error: %v", config.Config.KubesharkResourcesNamespace, err)
+			log.Printf("[ERROR] Hub pod creation, watching %v namespace, error: %v", config.Config.ResourcesNamespace, err)
 			cancel()
 
 		case <-timeAfter:
@@ -301,7 +299,7 @@ func watchHubPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, c
 func watchFrontPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc) {
 	podExactRegex := regexp.MustCompile(fmt.Sprintf("^%s$", kubernetes.FrontPodName))
 	podWatchHelper := kubernetes.NewPodWatchHelper(kubernetesProvider, podExactRegex)
-	eventChan, errorChan := kubernetes.FilteredWatch(ctx, podWatchHelper, []string{config.Config.KubesharkResourcesNamespace}, podWatchHelper)
+	eventChan, errorChan := kubernetes.FilteredWatch(ctx, podWatchHelper, []string{config.Config.ResourcesNamespace}, podWatchHelper)
 	isPodReady := false
 
 	hubTimeoutSec := config.GetIntEnvConfig(config.HubTimeoutSec, 120)
@@ -351,7 +349,7 @@ func watchFrontPod(ctx context.Context, kubernetesProvider *kubernetes.Provider,
 				continue
 			}
 
-			log.Printf("[ERROR] Agent creation, watching %v namespace, error: %v", config.Config.KubesharkResourcesNamespace, err)
+			log.Printf("[ERROR] Front pod creation, watching %v namespace, error: %v", config.Config.ResourcesNamespace, err)
 			cancel()
 
 		case <-timeAfter:
@@ -360,7 +358,7 @@ func watchFrontPod(ctx context.Context, kubernetesProvider *kubernetes.Provider,
 				cancel()
 			}
 		case <-ctx.Done():
-			log.Printf("Watching Hub pod loop, ctx done")
+			log.Printf("Watching Front pod loop, ctx done")
 			return
 		}
 	}
@@ -369,7 +367,7 @@ func watchFrontPod(ctx context.Context, kubernetesProvider *kubernetes.Provider,
 func watchHubEvents(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc) {
 	podExactRegex := regexp.MustCompile(fmt.Sprintf("^%s", kubernetes.HubPodName))
 	eventWatchHelper := kubernetes.NewEventWatchHelper(kubernetesProvider, podExactRegex, "pod")
-	eventChan, errorChan := kubernetes.FilteredWatch(ctx, eventWatchHelper, []string{config.Config.KubesharkResourcesNamespace}, eventWatchHelper)
+	eventChan, errorChan := kubernetes.FilteredWatch(ctx, eventWatchHelper, []string{config.Config.ResourcesNamespace}, eventWatchHelper)
 	for {
 		select {
 		case wEvent, ok := <-eventChan:
@@ -25,7 +25,7 @@ func runKubesharkView() {
 	url := config.Config.View.Url
 
 	if url == "" {
-		exists, err := kubernetesProvider.DoesServiceExist(ctx, config.Config.KubesharkResourcesNamespace, kubernetes.HubServiceName)
+		exists, err := kubernetesProvider.DoesServiceExist(ctx, config.Config.ResourcesNamespace, kubernetes.HubServiceName)
 		if err != nil {
 			log.Printf("Failed to found kubeshark service %v", err)
 			cancel()
@@ -15,7 +15,7 @@ import (
 )
 
 const (
-	KubesharkResourcesNamespaceConfigName = "kubeshark-resources-namespace"
+	ResourcesNamespaceConfigName = "resources-namespace"
 	ConfigFilePathCommandName = "config-path"
 	KubeConfigPathConfigName = "kube-config-path"
 )
@@ -63,9 +63,8 @@ type ConfigStruct struct {
 	View configStructs.ViewConfig `yaml:"view"`
 	Logs configStructs.LogsConfig `yaml:"logs"`
 	Config configStructs.ConfigConfig `yaml:"config,omitempty"`
-	AgentImage string `yaml:"agent-image,omitempty" readonly:""`
 	ImagePullPolicyStr string `yaml:"image-pull-policy" default:"Always"`
-	KubesharkResourcesNamespace string `yaml:"kubeshark-resources-namespace" default:"kubeshark"`
+	ResourcesNamespace string `yaml:"resources-namespace" default:"kubeshark"`
 	DumpLogs bool `yaml:"dump-logs" default:"false"`
 	KubeConfigPathStr string `yaml:"kube-config-path"`
 	KubeContext string `yaml:"kube-context"`
@@ -85,7 +84,6 @@ func (config *ConfigStruct) validate() error {
 }
 
 func (config *ConfigStruct) SetDefaults() {
-	config.AgentImage = "kubeshark/hub:latest"
 	config.ConfigFilePath = path.Join(kubeshark.GetKubesharkFolderPath(), "config.yaml")
 }
 
@@ -94,7 +92,7 @@ func (config *ConfigStruct) ImagePullPolicy() v1.PullPolicy {
 }
 
 func (config *ConfigStruct) IsNsRestrictedMode() bool {
-	return config.KubesharkResourcesNamespace != "kubeshark" // Notice "kubeshark" string must match the default KubesharkResourcesNamespace
+	return config.ResourcesNamespace != "kubeshark" // Notice "kubeshark" string must match the default KubesharkResourcesNamespace
 }
 
 func (config *ConfigStruct) KubeConfigPath() string {
@@ -19,9 +19,9 @@ func FormatError(err error) error {
 			"supply the required permission or control Kubeshark's access to namespaces by setting %s "+
 			"in the config file or setting the tapped namespace with --%s %s=<NAMEPSACE>",
 			err,
-			config.KubesharkResourcesNamespaceConfigName,
+			config.ResourcesNamespaceConfigName,
 			config.SetCommandName,
-			config.KubesharkResourcesNamespaceConfigName)
+			config.ResourcesNamespaceConfigName)
 	} else if syntaxError, isSyntaxError := asRegexSyntaxError(err); isSyntaxError {
 		errorNew = fmt.Errorf("regex %s is invalid: %w", syntaxError.Expr, err)
 	} else {
@@ -23,6 +23,4 @@ const (
 	LabelManagedBy = LabelPrefixApp + "managed-by"
 	LabelCreatedBy = LabelPrefixApp + "created-by"
 	LabelValueKubeshark = "kubeshark"
-	LabelValueKubesharkCLI = "kubeshark-cli"
-	LabelValueKubesharkAgent = "kubeshark-agent"
 )
@@ -40,7 +40,6 @@ type TapperSyncerConfig struct {
 	TargetNamespaces []string
 	PodFilterRegex regexp.Regexp
 	KubesharkResourcesNamespace string
-	AgentImage string
 	TapperResources models.Resources
 	ImagePullPolicy core.PullPolicy
 	LogLevel logging.Level
@@ -312,6 +311,8 @@ func (tapperSyncer *KubesharkTapperSyncer) updateKubesharkTappers() error {
 
 	log.Printf("Updating DaemonSet to run on nodes: %v", nodesToTap)
 
+	image := "kubeshark/worker:latest"
+
 	if len(tapperSyncer.nodeToTappedPodMap) > 0 {
 		var serviceAccountName string
 		if tapperSyncer.config.KubesharkServiceAccountExists {
@@ -329,7 +330,7 @@ func (tapperSyncer *KubesharkTapperSyncer) updateKubesharkTappers() error {
 			tapperSyncer.context,
 			tapperSyncer.config.KubesharkResourcesNamespace,
 			TapperDaemonSetName,
-			"kubeshark/worker:latest",
+			image,
 			TapperPodName,
 			fmt.Sprintf("%s.%s.svc", HubPodName, tapperSyncer.config.KubesharkResourcesNamespace),
 			nodeNames,
@@ -350,7 +351,7 @@ func (tapperSyncer *KubesharkTapperSyncer) updateKubesharkTappers() error {
 			tapperSyncer.context,
 			tapperSyncer.config.KubesharkResourcesNamespace,
 			TapperDaemonSetName,
-			tapperSyncer.config.AgentImage,
+			image,
 			TapperPodName); err != nil {
 			return err
 		}
@@ -85,7 +85,7 @@ func NewProvider(kubeConfigPath string, contextName string) (*Provider, error) {
 		kubernetesConfig: kubernetesConfig,
 		clientConfig: *restClientConfig,
 		managedBy: LabelValueKubeshark,
-		createdBy: LabelValueKubesharkCLI,
+		createdBy: LabelValueKubeshark,
 	}, nil
 }
 
@@ -105,7 +105,7 @@ func NewProviderInCluster() (*Provider, error) {
 		kubernetesConfig: nil, // not relevant in cluster
 		clientConfig: *restClientConfig,
 		managedBy: LabelValueKubeshark,
-		createdBy: LabelValueKubesharkAgent,
+		createdBy: LabelValueKubeshark,
 	}, nil
 }
 
@@ -838,10 +838,10 @@ func (provider *Provider) ApplyKubesharkTapperDaemonSet(ctx context.Context, nam
 		kubesharkCmd = append(kubesharkCmd, "--procfs", procfsMountPath)
 	}
 
-	agentContainer := applyconfcore.Container()
-	agentContainer.WithName(tapperPodName)
-	agentContainer.WithImage(podImage)
-	agentContainer.WithImagePullPolicy(imagePullPolicy)
+	workerContainer := applyconfcore.Container()
+	workerContainer.WithName(tapperPodName)
+	workerContainer.WithImage(podImage)
+	workerContainer.WithImagePullPolicy(imagePullPolicy)
 
 	caps := applyconfcore.Capabilities().WithDrop("ALL")
 
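The same pattern repeats through the rest of the DaemonSet code: the builder previously called agentContainer is now workerContainer. A minimal, self-contained sketch of that builder usage with client-go apply configurations; the container name and resource quantities below are illustrative, not the project's actual values (the worker image string is the one hard-coded by this commit):

package main

import (
	"fmt"

	core "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	applyconfcore "k8s.io/client-go/applyconfigurations/core/v1"
)

func main() {
	// Build the worker (formerly "agent") container spec with apply configurations.
	workerContainer := applyconfcore.Container()
	workerContainer.WithName("kubeshark-worker") // illustrative name
	workerContainer.WithImage("kubeshark/worker:latest")
	workerContainer.WithImagePullPolicy(core.PullAlways)

	// Resource limits/requests mirror the workerResourceLimits/workerResourceRequests rename;
	// the quantities are placeholders.
	workerResourceLimits := core.ResourceList{
		"cpu":    resource.MustParse("750m"),
		"memory": resource.MustParse("1Gi"),
	}
	workerResourceRequests := core.ResourceList{
		"cpu":    resource.MustParse("50m"),
		"memory": resource.MustParse("50Mi"),
	}
	workerResources := applyconfcore.ResourceRequirements().WithRequests(workerResourceRequests).WithLimits(workerResourceLimits)
	workerContainer.WithResources(workerResources)

	// The container is then attached to a pod spec, as in the DaemonSet apply below.
	podSpec := applyconfcore.PodSpec()
	podSpec.WithContainers(workerContainer)

	fmt.Println(*workerContainer.Name, *workerContainer.Image)
}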
@@ -860,15 +860,15 @@ func (provider *Provider) ApplyKubesharkTapperDaemonSet(ctx context.Context, nam
 		}
 	}
 
-	agentContainer.WithSecurityContext(applyconfcore.SecurityContext().WithCapabilities(caps))
+	workerContainer.WithSecurityContext(applyconfcore.SecurityContext().WithCapabilities(caps))
 
-	agentContainer.WithCommand(kubesharkCmd...)
-	agentContainer.WithEnv(
+	workerContainer.WithCommand(kubesharkCmd...)
+	workerContainer.WithEnv(
 		applyconfcore.EnvVar().WithName(utils.LogLevelEnvVar).WithValue(logLevel.String()),
 		applyconfcore.EnvVar().WithName(utils.HostModeEnvVar).WithValue("1"),
 		applyconfcore.EnvVar().WithName(utils.KubesharkFilteringOptionsEnvVar).WithValue(string(kubesharkApiFilteringOptionsJsonStr)),
 	)
-	agentContainer.WithEnv(
+	workerContainer.WithEnv(
 		applyconfcore.EnvVar().WithName(utils.NodeNameEnvVar).WithValueFrom(
 			applyconfcore.EnvVarSource().WithFieldRef(
 				applyconfcore.ObjectFieldSelector().WithAPIVersion("v1").WithFieldPath("spec.nodeName"),
@@ -891,16 +891,16 @@ func (provider *Provider) ApplyKubesharkTapperDaemonSet(ctx context.Context, nam
 	if err != nil {
 		return fmt.Errorf("invalid memory request for %s container", tapperPodName)
 	}
-	agentResourceLimits := core.ResourceList{
+	workerResourceLimits := core.ResourceList{
 		"cpu": cpuLimit,
 		"memory": memLimit,
 	}
-	agentResourceRequests := core.ResourceList{
+	workerResourceRequests := core.ResourceList{
 		"cpu": cpuRequests,
 		"memory": memRequests,
 	}
-	agentResources := applyconfcore.ResourceRequirements().WithRequests(agentResourceRequests).WithLimits(agentResourceLimits)
-	agentContainer.WithResources(agentResources)
+	workerResources := applyconfcore.ResourceRequirements().WithRequests(workerResourceRequests).WithLimits(workerResourceLimits)
+	workerContainer.WithResources(workerResources)
 
 	matchFields := make([]*applyconfcore.NodeSelectorTermApplyConfiguration, 0)
 	for _, nodeName := range nodeNames {
@@ -934,14 +934,14 @@ func (provider *Provider) ApplyKubesharkTapperDaemonSet(ctx context.Context, nam
 	procfsVolume := applyconfcore.Volume()
 	procfsVolume.WithName(procfsVolumeName).WithHostPath(applyconfcore.HostPathVolumeSource().WithPath("/proc"))
 	procfsVolumeMount := applyconfcore.VolumeMount().WithName(procfsVolumeName).WithMountPath(procfsMountPath).WithReadOnly(true)
-	agentContainer.WithVolumeMounts(procfsVolumeMount)
+	workerContainer.WithVolumeMounts(procfsVolumeMount)
 
 	// We need access to /sys in order to install certain eBPF tracepoints
 	//
 	sysfsVolume := applyconfcore.Volume()
 	sysfsVolume.WithName(sysfsVolumeName).WithHostPath(applyconfcore.HostPathVolumeSource().WithPath("/sys"))
 	sysfsVolumeMount := applyconfcore.VolumeMount().WithName(sysfsVolumeName).WithMountPath(sysfsMountPath).WithReadOnly(true)
-	agentContainer.WithVolumeMounts(sysfsVolumeMount)
+	workerContainer.WithVolumeMounts(sysfsVolumeMount)
 
 	podSpec := applyconfcore.PodSpec()
 	podSpec.WithHostNetwork(true)
@@ -950,7 +950,7 @@ func (provider *Provider) ApplyKubesharkTapperDaemonSet(ctx context.Context, nam
 	if serviceAccountName != "" {
 		podSpec.WithServiceAccountName(serviceAccountName)
 	}
-	podSpec.WithContainers(agentContainer)
+	podSpec.WithContainers(workerContainer)
 	podSpec.WithAffinity(affinity)
 	podSpec.WithTolerations(noExecuteToleration, noScheduleToleration)
 	podSpec.WithVolumes(procfsVolume, sysfsVolume)
@@ -984,9 +984,9 @@ func (provider *Provider) ApplyKubesharkTapperDaemonSet(ctx context.Context, nam
 }
 
 func (provider *Provider) ResetKubesharkTapperDaemonSet(ctx context.Context, namespace string, daemonSetName string, podImage string, tapperPodName string) error {
-	agentContainer := applyconfcore.Container()
-	agentContainer.WithName(tapperPodName)
-	agentContainer.WithImage(podImage)
+	workerContainer := applyconfcore.Container()
+	workerContainer.WithName(tapperPodName)
+	workerContainer.WithImage(podImage)
 
 	nodeSelectorRequirement := applyconfcore.NodeSelectorRequirement()
 	nodeSelectorRequirement.WithKey("kubeshark-non-existing-label")
@@ -1001,7 +1001,7 @@ func (provider *Provider) ResetKubesharkTapperDaemonSet(ctx context.Context, nam
 	affinity.WithNodeAffinity(nodeAffinity)
 
 	podSpec := applyconfcore.PodSpec()
-	podSpec.WithContainers(agentContainer)
+	podSpec.WithContainers(workerContainer)
 	podSpec.WithAffinity(affinity)
 
 	podTemplate := applyconfcore.PodTemplateSpec()
@@ -14,13 +14,13 @@ import (
 
 func DumpLogs(ctx context.Context, provider *kubernetes.Provider, filePath string) error {
 	podExactRegex := regexp.MustCompile("^" + kubernetes.KubesharkResourcesPrefix)
-	pods, err := provider.ListAllPodsMatchingRegex(ctx, podExactRegex, []string{config.Config.KubesharkResourcesNamespace})
+	pods, err := provider.ListAllPodsMatchingRegex(ctx, podExactRegex, []string{config.Config.ResourcesNamespace})
 	if err != nil {
 		return err
 	}
 
 	if len(pods) == 0 {
-		return fmt.Errorf("no kubeshark pods found in namespace %s", config.Config.KubesharkResourcesNamespace)
+		return fmt.Errorf("no kubeshark pods found in namespace %s", config.Config.ResourcesNamespace)
 	}
 
 	newZipFile, err := os.Create(filePath)
@@ -49,17 +49,17 @@ func DumpLogs(ctx context.Context, provider *kubernetes.Provider, filePath strin
 		}
 	}
 
-	events, err := provider.GetNamespaceEvents(ctx, config.Config.KubesharkResourcesNamespace)
+	events, err := provider.GetNamespaceEvents(ctx, config.Config.ResourcesNamespace)
 	if err != nil {
 		log.Printf("Failed to get k8b events, %v", err)
 	} else {
-		log.Printf("Successfully read events for k8b namespace: %s", config.Config.KubesharkResourcesNamespace)
+		log.Printf("Successfully read events for k8b namespace: %s", config.Config.ResourcesNamespace)
 	}
 
-	if err := AddStrToZip(zipWriter, events, fmt.Sprintf("%s_events.log", config.Config.KubesharkResourcesNamespace)); err != nil {
+	if err := AddStrToZip(zipWriter, events, fmt.Sprintf("%s_events.log", config.Config.ResourcesNamespace)); err != nil {
 		log.Printf("Failed write logs, %v", err)
 	} else {
-		log.Printf("Successfully added events for k8b namespace: %s", config.Config.KubesharkResourcesNamespace)
+		log.Printf("Successfully added events for k8b namespace: %s", config.Config.ResourcesNamespace)
 	}
 
 	if err := AddFileToZip(zipWriter, config.Config.ConfigFilePath); err != nil {
@@ -15,7 +15,7 @@ import (
 	core "k8s.io/api/core/v1"
 )
 
-func CreateTapKubesharkResources(ctx context.Context, kubernetesProvider *kubernetes.Provider, serializedKubesharkConfig string, isNsRestrictedMode bool, kubesharkResourcesNamespace string, agentImage string, maxEntriesDBSizeBytes int64, hubResources models.Resources, imagePullPolicy core.PullPolicy, logLevel logging.Level, profiler bool) (bool, error) {
+func CreateTapKubesharkResources(ctx context.Context, kubernetesProvider *kubernetes.Provider, serializedKubesharkConfig string, isNsRestrictedMode bool, kubesharkResourcesNamespace string, maxEntriesDBSizeBytes int64, hubResources models.Resources, imagePullPolicy core.PullPolicy, logLevel logging.Level, profiler bool) (bool, error) {
 	if !isNsRestrictedMode {
 		if err := createKubesharkNamespace(ctx, kubernetesProvider, kubesharkResourcesNamespace); err != nil {
 			return false, err
@@ -41,7 +41,7 @@ func CreateTapKubesharkResources(ctx context.Context, kubernetesProvider *kubern
 	opts := &kubernetes.HubOptions{
 		Namespace: kubesharkResourcesNamespace,
 		PodName: kubernetes.HubPodName,
-		PodImage: agentImage,
+		PodImage: "kubeshark/hub:latest",
 		KratosImage: "",
 		KetoImage: "",
 		ServiceAccountName: serviceAccountName,
@@ -56,7 +56,7 @@ func CreateTapKubesharkResources(ctx context.Context, kubernetesProvider *kubern
 	frontOpts := &kubernetes.HubOptions{
 		Namespace: kubesharkResourcesNamespace,
 		PodName: kubernetes.FrontPodName,
-		PodImage: agentImage,
+		PodImage: "kubeshark/worker:latest",
 		KratosImage: "",
 		KetoImage: "",
 		ServiceAccountName: serviceAccountName,
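With the agentImage parameter removed, the Hub and front pods are created from fixed image strings. A tiny sketch of the resulting options, using a simplified stand-in for kubernetes.HubOptions; only the fields shown in the diff appear, and the pod names are illustrative:

package main

import "fmt"

// Simplified stand-in for kubernetes.HubOptions; the real type has more fields.
type HubOptions struct {
	Namespace string
	PodName   string
	PodImage  string
}

func main() {
	namespace := "kubeshark" // default resources namespace

	// After this commit the images are fixed strings instead of config.Config.AgentImage.
	hubOpts := HubOptions{Namespace: namespace, PodName: "kubeshark-hub", PodImage: "kubeshark/hub:latest"}     // pod name illustrative
	frontOpts := HubOptions{Namespace: namespace, PodName: "kubeshark-front", PodImage: "kubeshark/worker:latest"} // pod name illustrative

	fmt.Println(hubOpts.PodImage, frontOpts.PodImage)
}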