🎨 Replace the tap/tapper terminology with deploy, worker and targetted

M. Mert Yildiran
2022-11-29 07:31:36 +03:00
parent ae278526ab
commit 6ca0fe137e
18 changed files with 250 additions and 250 deletions

View File

@@ -12,7 +12,7 @@ import (
"k8s.io/client-go/kubernetes/scheme"
)
-func TapKubernetesPermissions(ctx context.Context, embedFS embed.FS, kubernetesProvider *kubernetes.Provider) bool {
+func KubernetesPermissions(ctx context.Context, embedFS embed.FS, kubernetesProvider *kubernetes.Provider) bool {
log.Info().Str("procedure", "kubernetes-permissions").Msg("Checking:")
var filePath string
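
Every check in this package follows the same shape: log the procedure being run, probe the cluster, and return pass/fail so the caller can chain the result with the other checks. A minimal sketch of that shape, with an illustrative body standing in for the real RBAC verification:

```go
package main

import "github.com/rs/zerolog/log"

// kubernetesPermissions sketches the shape shared by the checks in this
// package: log which procedure is running, probe the cluster, and return
// pass/fail. The body is illustrative; the real check loads an RBAC
// manifest from the embedded FS and verifies each rule against the cluster.
func kubernetesPermissions() bool {
	log.Info().Str("procedure", "kubernetes-permissions").Msg("Checking:")
	return true
}

func main() {
	if !kubernetesPermissions() {
		log.Error().Msg("kubernetes-permissions check failed")
	}
}
```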

View File

@@ -66,33 +66,33 @@ func checkPodResourcesExist(ctx context.Context, kubernetesProvider *kubernetes.
Str("name", kubernetes.HubPodName).
Msg("Pod is running.")
-if pods, err := kubernetesProvider.ListPodsByAppLabel(ctx, config.Config.ResourcesNamespace, kubernetes.TapperPodName); err != nil {
+if pods, err := kubernetesProvider.ListPodsByAppLabel(ctx, config.Config.ResourcesNamespace, kubernetes.WorkerPodName); err != nil {
log.Error().
-Str("name", kubernetes.TapperPodName).
+Str("name", kubernetes.WorkerPodName).
Err(err).
Msg("While checking if pods are running!")
return false
} else {
-tappers := 0
-notRunningTappers := 0
+workers := 0
+notRunningWorkers := 0
for _, pod := range pods {
-tappers += 1
+workers += 1
if !kubernetes.IsPodRunning(&pod) {
-notRunningTappers += 1
+notRunningWorkers += 1
}
}
-if notRunningTappers > 0 {
+if notRunningWorkers > 0 {
log.Error().
-Str("name", kubernetes.TapperPodName).
-Msg(fmt.Sprintf("%d/%d pods are not running!", notRunningTappers, tappers))
+Str("name", kubernetes.WorkerPodName).
+Msg(fmt.Sprintf("%d/%d pods are not running!", notRunningWorkers, workers))
return false
}
log.Info().
-Str("name", kubernetes.TapperPodName).
-Msg(fmt.Sprintf("All %d pods are running.", tappers))
+Str("name", kubernetes.WorkerPodName).
+Msg(fmt.Sprintf("All %d pods are running.", workers))
return true
}
}
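
The counting logic in this hunk reduces to a standalone pattern: tally every worker pod, tally the ones not running, and fail the check if any worker is down. A runnable sketch, where `Pod` and `checkWorkersRunning` are illustrative stand-ins for the client-go types and helpers the real code uses:

```go
package main

import "fmt"

// Pod is a minimal stand-in for a Kubernetes pod; the real code uses
// k8s.io/api/core/v1.Pod together with a kubernetes.IsPodRunning helper.
type Pod struct {
	Name    string
	Running bool
}

// checkWorkersRunning mirrors the hunk above: count all worker pods, count
// the ones that are not running, and fail if any worker is down.
func checkWorkersRunning(pods []Pod) bool {
	workers := 0
	notRunningWorkers := 0
	for _, pod := range pods {
		workers++
		if !pod.Running {
			notRunningWorkers++
		}
	}
	if notRunningWorkers > 0 {
		fmt.Printf("%d/%d pods are not running!\n", notRunningWorkers, workers)
		return false
	}
	fmt.Printf("All %d pods are running.\n", workers)
	return true
}

func main() {
	pods := []Pod{
		{Name: "kubeshark-worker-abc", Running: true},
		{Name: "kubeshark-worker-def", Running: false},
	}
	checkWorkersRunning(pods) // prints "1/2 pods are not running!"
}
```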

View File

@@ -29,7 +29,7 @@ func runKubesharkCheck() {
}
if checkPassed {
-checkPassed = check.TapKubernetesPermissions(ctx, embedFS, kubernetesProvider)
+checkPassed = check.KubernetesPermissions(ctx, embedFS, kubernetesProvider)
}
if checkPassed {
@@ -47,8 +47,8 @@ func runKubesharkCheck() {
log.Info().Msg(fmt.Sprintf(utils.Green, "All checks are passed."))
} else {
log.Error().
Str("command1", "kubeshark clean").
Str("command2", "kubeshark tap").
Str("command1", fmt.Sprintf("kubeshark %s", cleanCmd.Use)).
Str("command2", fmt.Sprintf("kubeshark %s", deployCmd.Use)).
Msg(fmt.Sprintf(utils.Red, "There are issues in your deployment! Run these commands:"))
os.Exit(1)
}

View File

@@ -26,7 +26,7 @@ func startProxyReportErrorIfAny(kubernetesProvider *kubernetes.Provider, ctx con
if err != nil {
log.Error().
Err(errormessage.FormatError(err)).
Msg(fmt.Sprintf("Error occured while running k8s proxy. Try setting different port by using --%s", configStructs.GuiPortTapName))
Msg(fmt.Sprintf("Error occured while running k8s proxy. Try setting different port by using --%s", configStructs.ProxyPortLabel))
cancel()
return
}
@@ -45,7 +45,7 @@ func startProxyReportErrorIfAny(kubernetesProvider *kubernetes.Provider, ctx con
log.Error().
Str("pod-regex", podRegex.String()).
Err(errormessage.FormatError(err)).
Msg(fmt.Sprintf("Error occured while running port forward. Try setting different port by using --%s", configStructs.GuiPortTapName))
Msg(fmt.Sprintf("Error occured while running port forward. Try setting different port by using --%s", configStructs.ProxyPortLabel))
cancel()
return
}
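
Both messages above follow the same zerolog pattern: attach the underlying error, then point the user at the flag that can fix the problem. A minimal sketch, with `ProxyPortLabel` hard-coded here as an assumed value since the real constant lives in configStructs:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/rs/zerolog/log"
)

// ProxyPortLabel is a stand-in for the renamed constant in configStructs
// (formerly GuiPortTapName); the real value is defined there.
const ProxyPortLabel = "proxy-port"

func main() {
	err := errors.New("listen tcp 127.0.0.1:8899: bind: address already in use")
	// Attach the underlying error, then name the flag the user can set to
	// retry the proxy on a different port.
	log.Error().
		Err(err).
		Msg(fmt.Sprintf("Error occurred while running k8s proxy. Try setting a different port by using --%s", ProxyPortLabel))
}
```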

View File

@@ -16,7 +16,7 @@ var deployCmd = &cobra.Command{
Short: "Deploy Kubeshark into your K8s cluster.",
Long: `Deploy Kubeshark into your K8s cluster to gain visibility.`,
RunE: func(cmd *cobra.Command, args []string) error {
-RunKubesharkTap()
+deploy()
return nil
},
PreRunE: func(cmd *cobra.Command, args []string) error {
@@ -46,15 +46,15 @@ func init() {
log.Debug().Err(err).Send()
}
-deployCmd.Flags().Uint16P(configStructs.GuiPortTapName, "p", defaultDeployConfig.GuiPort, "Provide a custom port for the web interface webserver")
-deployCmd.Flags().StringSliceP(configStructs.NamespacesTapName, "n", defaultDeployConfig.Namespaces, "Namespaces selector")
-deployCmd.Flags().BoolP(configStructs.AllNamespacesTapName, "A", defaultDeployConfig.AllNamespaces, "Tap all namespaces")
-deployCmd.Flags().Bool(configStructs.EnableRedactionTapName, defaultDeployConfig.EnableRedaction, "Enables redaction of potentially sensitive request/response headers and body values")
-deployCmd.Flags().String(configStructs.HumanMaxEntriesDBSizeTapName, defaultDeployConfig.HumanMaxEntriesDBSize, "Override the default max entries db size")
+deployCmd.Flags().Uint16P(configStructs.ProxyPortLabel, "p", defaultDeployConfig.ProxyPort, "Provide a custom port for the web interface webserver.")
+deployCmd.Flags().StringSliceP(configStructs.NamespacesLabel, "n", defaultDeployConfig.Namespaces, "Namespaces selector.")
+deployCmd.Flags().BoolP(configStructs.AllNamespacesLabel, "A", defaultDeployConfig.AllNamespaces, "Deploy to all namespaces.")
+deployCmd.Flags().Bool(configStructs.EnableRedactionLabel, defaultDeployConfig.EnableRedaction, "Enables redaction of potentially sensitive request/response headers and body values.")
+deployCmd.Flags().String(configStructs.HumanMaxEntriesDBSizeLabel, defaultDeployConfig.HumanMaxEntriesDBSize, "Override the default max entries db size.")
deployCmd.Flags().String(configStructs.InsertionFilterName, defaultDeployConfig.InsertionFilter, "Set the insertion filter. Accepts string or a file path.")
-deployCmd.Flags().Bool(configStructs.DryRunTapName, defaultDeployConfig.DryRun, "Preview of all pods matching the regex, without tapping them")
-deployCmd.Flags().Bool(configStructs.ServiceMeshName, defaultDeployConfig.ServiceMesh, "Record decrypted traffic if the cluster is configured with a service mesh and with mtls")
-deployCmd.Flags().Bool(configStructs.TlsName, defaultDeployConfig.Tls, "Record tls traffic")
-deployCmd.Flags().Bool(configStructs.ProfilerName, defaultDeployConfig.Profiler, "Run pprof server")
-deployCmd.Flags().Int(configStructs.MaxLiveStreamsName, defaultDeployConfig.MaxLiveStreams, "Maximum live tcp streams to handle concurrently")
+deployCmd.Flags().Bool(configStructs.DryRunLabel, defaultDeployConfig.DryRun, "Preview of all pods matching the regex, without deploying workers on them.")
+deployCmd.Flags().Bool(configStructs.ServiceMeshName, defaultDeployConfig.ServiceMesh, "Record decrypted traffic if the cluster is configured with a service mesh and with mtls.")
+deployCmd.Flags().Bool(configStructs.TlsName, defaultDeployConfig.Tls, "Record tls traffic.")
+deployCmd.Flags().Bool(configStructs.ProfilerName, defaultDeployConfig.Profiler, "Run pprof server.")
+deployCmd.Flags().Int(configStructs.MaxLiveStreamsName, defaultDeployConfig.MaxLiveStreams, "Maximum live tcp streams to handle concurrently.")
}
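
These registrations are standard cobra/pflag calls: a label constant, an optional shorthand, a default pulled from a defaults struct, and a usage string. A self-contained sketch of the same shape (the label constants and defaults struct here are illustrative stand-ins for the configStructs values):

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

// Illustrative stand-ins for the configStructs label constants in the diff.
const (
	ProxyPortLabel     = "proxy-port"
	AllNamespacesLabel = "all-namespaces"
)

type deployConfig struct {
	ProxyPort     uint16
	AllNamespaces bool
}

func main() {
	defaults := deployConfig{ProxyPort: 8899}
	deployCmd := &cobra.Command{
		Use:   "deploy",
		Short: "Deploy Kubeshark into your K8s cluster.",
		RunE: func(cmd *cobra.Command, args []string) error {
			port, _ := cmd.Flags().GetUint16(ProxyPortLabel)
			fmt.Println("would deploy, proxying on port", port)
			return nil
		},
	}
	// Same registration shape as the hunk above: label constant, shorthand,
	// default from the defaults struct, and a usage string.
	deployCmd.Flags().Uint16P(ProxyPortLabel, "p", defaults.ProxyPort, "Provide a custom port for the web interface webserver.")
	deployCmd.Flags().BoolP(AllNamespacesLabel, "A", defaults.AllNamespaces, "Deploy to all namespaces.")
	_ = deployCmd.Execute()
}
```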

View File

@@ -27,19 +27,19 @@ import (
const cleanupTimeout = time.Minute
-type tapState struct {
+type deployState struct {
startTime time.Time
targetNamespaces []string
kubesharkServiceAccountExists bool
}
-var state tapState
+var state deployState
var connector *connect.Connector
var hubPodReady bool
var frontPodReady bool
var proxyDone bool
-func RunKubesharkTap() {
+func deploy() {
state.startTime = time.Now()
connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(config.Config.Hub.PortForward.SrcPort), connect.DefaultRetries, connect.DefaultTimeout)
@@ -63,14 +63,14 @@ func RunKubesharkTap() {
if config.Config.IsNsRestrictedMode() {
if len(state.targetNamespaces) != 1 || !utils.Contains(state.targetNamespaces, config.Config.ResourcesNamespace) {
log.Error().Msg(fmt.Sprintf("Kubeshark can't resolve IPs in other namespaces when running in namespace restricted mode. You can use the same namespace for --%s and --%s", configStructs.NamespacesTapName, config.ResourcesNamespaceConfigName))
log.Error().Msg(fmt.Sprintf("Kubeshark can't resolve IPs in other namespaces when running in namespace restricted mode. You can use the same namespace for --%s and --%s", configStructs.NamespacesLabel, config.ResourcesNamespaceConfigName))
return
}
}
log.Info().Strs("namespaces", state.targetNamespaces).Msg("Targetting pods in:")
-if err := printTappedPodsPreview(ctx, kubernetesProvider, state.targetNamespaces); err != nil {
+if err := printTargettedPodsPreview(ctx, kubernetesProvider, state.targetNamespaces); err != nil {
log.Error().Err(errormessage.FormatError(err)).Msg("Error listing pods!")
}
@@ -79,7 +79,7 @@ func RunKubesharkTap() {
}
log.Info().Msg("Waiting for Kubeshark deployment to finish...")
-if state.kubesharkServiceAccountExists, err = resources.CreateTapKubesharkResources(ctx, kubernetesProvider, serializedKubesharkConfig, config.Config.IsNsRestrictedMode(), config.Config.ResourcesNamespace, config.Config.Deploy.MaxEntriesDBSizeBytes(), config.Config.Deploy.HubResources, config.Config.ImagePullPolicy(), config.Config.LogLevel(), config.Config.Deploy.Profiler); err != nil {
+if state.kubesharkServiceAccountExists, err = resources.CreateHubResources(ctx, kubernetesProvider, serializedKubesharkConfig, config.Config.IsNsRestrictedMode(), config.Config.ResourcesNamespace, config.Config.Deploy.MaxEntriesDBSizeBytes(), config.Config.Deploy.HubResources, config.Config.ImagePullPolicy(), config.Config.LogLevel(), config.Config.Deploy.Profiler); err != nil {
var statusError *k8serrors.StatusError
if errors.As(err, &statusError) && (statusError.ErrStatus.Reason == metav1.StatusReasonAlreadyExists) {
log.Info().Msg("Kubeshark is already running in this namespace, change the `kubeshark-resources-namespace` configuration or run `kubeshark clean` to remove the currently running Kubeshark instance")
@@ -91,7 +91,7 @@ func RunKubesharkTap() {
return
}
-defer finishTapExecution(kubernetesProvider)
+defer finishDeployExecution(kubernetesProvider)
go goUtils.HandleExcWrapper(watchHubEvents, ctx, kubernetesProvider, cancel)
go goUtils.HandleExcWrapper(watchHubPod, ctx, kubernetesProvider, cancel)
@@ -101,7 +101,7 @@ func RunKubesharkTap() {
utils.WaitForFinish(ctx, cancel)
}
-func finishTapExecution(kubernetesProvider *kubernetes.Provider) {
+func finishDeployExecution(kubernetesProvider *kubernetes.Provider) {
finishKubesharkExecution(kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.ResourcesNamespace)
}
@@ -110,7 +110,7 @@ func getDeployConfig() *models.Config {
MaxDBSizeBytes: config.Config.Deploy.MaxEntriesDBSizeBytes(),
InsertionFilter: config.Config.Deploy.GetInsertionFilter(),
PullPolicy: config.Config.ImagePullPolicyStr,
-TapperResources: config.Config.Deploy.TapperResources,
+TapperResources: config.Config.Deploy.WorkerResources,
KubesharkResourcesNamespace: config.Config.ResourcesNamespace,
AgentDatabasePath: models.DataDirPath,
ServiceMap: config.Config.ServiceMap,
@@ -121,30 +121,30 @@ func getDeployConfig() *models.Config {
}
/*
-this function is a bit problematic as it might be detached from the actual pods the Kubeshark Hub will tap.
+This function is a bit problematic as it might be detached from the actual pods that Kubeshark targets.
The alternative would be to wait for Hub to be ready and then query it for the pods it listens to, this has
the arguably worse drawback of taking a relatively very long time before the user sees which pods are targeted, if any.
*/
-func printTappedPodsPreview(ctx context.Context, kubernetesProvider *kubernetes.Provider, namespaces []string) error {
+func printTargettedPodsPreview(ctx context.Context, kubernetesProvider *kubernetes.Provider, namespaces []string) error {
if matchingPods, err := kubernetesProvider.ListAllRunningPodsMatchingRegex(ctx, config.Config.Deploy.PodRegex(), namespaces); err != nil {
return err
} else {
if len(matchingPods) == 0 {
printNoPodsFoundSuggestion(namespaces)
}
-for _, tappedPod := range matchingPods {
-log.Info().Msg(fmt.Sprintf("New pod: %s", fmt.Sprintf(utils.Green, tappedPod.Name)))
+for _, targettedPod := range matchingPods {
+log.Info().Msg(fmt.Sprintf("New pod: %s", fmt.Sprintf(utils.Green, targettedPod.Name)))
}
return nil
}
}
-func startTapperSyncer(ctx context.Context, cancel context.CancelFunc, provider *kubernetes.Provider, targetNamespaces []string, startTime time.Time) error {
-tapperSyncer, err := kubernetes.CreateAndStartKubesharkTapperSyncer(ctx, provider, kubernetes.TapperSyncerConfig{
+func startWorkerSyncer(ctx context.Context, cancel context.CancelFunc, provider *kubernetes.Provider, targetNamespaces []string, startTime time.Time) error {
+workerSyncer, err := kubernetes.CreateAndStartWorkerSyncer(ctx, provider, kubernetes.WorkerSyncerConfig{
TargetNamespaces: targetNamespaces,
PodFilterRegex: *config.Config.Deploy.PodRegex(),
KubesharkResourcesNamespace: config.Config.ResourcesNamespace,
-TapperResources: config.Config.Deploy.TapperResources,
+WorkerResources: config.Config.Deploy.WorkerResources,
ImagePullPolicy: config.Config.ImagePullPolicy(),
LogLevel: config.Config.LogLevel(),
KubesharkApiFilteringOptions: api.TrafficFilteringOptions{
@@ -163,31 +163,31 @@ func startTapperSyncer(ctx context.Context, cancel context.CancelFunc, provider
go func() {
for {
select {
-case syncerErr, ok := <-tapperSyncer.ErrorOut:
+case syncerErr, ok := <-workerSyncer.ErrorOut:
if !ok {
-log.Debug().Msg("kubesharkTapperSyncer err channel closed, ending listener loop")
+log.Debug().Msg("workerSyncer err channel closed, ending listener loop")
return
}
-log.Error().Msg(getErrorDisplayTextForK8sTapManagerError(syncerErr))
+log.Error().Msg(getK8sDeployManagerErrorText(syncerErr))
cancel()
-case _, ok := <-tapperSyncer.TapPodChangesOut:
+case _, ok := <-workerSyncer.DeployPodChangesOut:
if !ok {
-log.Debug().Msg("kubesharkTapperSyncer pod changes channel closed, ending listener loop")
+log.Debug().Msg("workerSyncer pod changes channel closed, ending listener loop")
return
}
-if err := connector.ReportTappedPods(tapperSyncer.CurrentlyTappedPods); err != nil {
-log.Error().Err(err).Msg("failed update tapped pods.")
+if err := connector.ReportTargettedPods(workerSyncer.CurrentlyTargettedPods); err != nil {
+log.Error().Err(err).Msg("failed update targetted pods.")
}
-case tapperStatus, ok := <-tapperSyncer.TapperStatusChangedOut:
+case workerStatus, ok := <-workerSyncer.WorkerStatusChangedOut:
if !ok {
-log.Debug().Msg("kubesharkTapperSyncer tapper status changed channel closed, ending listener loop")
+log.Debug().Msg("workerSyncer worker status changed channel closed, ending listener loop")
return
}
-if err := connector.ReportTapperStatus(tapperStatus); err != nil {
-log.Error().Err(err).Msg("failed update tapper status.")
+if err := connector.ReportWorkerStatus(workerStatus); err != nil {
+log.Error().Err(err).Msg("failed update worker status.")
}
case <-ctx.Done():
-log.Debug().Msg("kubesharkTapperSyncer event listener loop exiting due to context done")
+log.Debug().Msg("workerSyncer event listener loop exiting due to context done")
return
}
}
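
The listener above is a plain Go select loop over the syncer's channels: each `ok` check detects a closed channel and ends the loop, errors cancel the context, and pod or worker changes are reported back to the Hub. A runnable sketch with a toy stand-in for the syncer type:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// workerSyncer is a toy stand-in for kubernetes.WorkerSyncer, which exposes
// ErrorOut, DeployPodChangesOut and WorkerStatusChangedOut channels.
type workerSyncer struct {
	ErrorOut               chan error
	DeployPodChangesOut    chan struct{}
	WorkerStatusChangedOut chan string
}

// listen mirrors the event loop above: drain each channel until it closes
// or the context is done, reacting per event kind.
func listen(ctx context.Context, syncer *workerSyncer, cancel context.CancelFunc) {
	for {
		select {
		case err, ok := <-syncer.ErrorOut:
			if !ok {
				fmt.Println("err channel closed, ending listener loop")
				return
			}
			fmt.Println("syncer error:", err)
			cancel()
		case _, ok := <-syncer.DeployPodChangesOut:
			if !ok {
				fmt.Println("pod changes channel closed, ending listener loop")
				return
			}
			fmt.Println("targetted pods changed, reporting to Hub")
		case status, ok := <-syncer.WorkerStatusChangedOut:
			if !ok {
				fmt.Println("worker status channel closed, ending listener loop")
				return
			}
			fmt.Println("worker status:", status)
		case <-ctx.Done():
			fmt.Println("listener loop exiting due to context done")
			return
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()
	syncer := &workerSyncer{
		ErrorOut:               make(chan error),
		DeployPodChangesOut:    make(chan struct{}, 1),
		WorkerStatusChangedOut: make(chan string, 1),
	}
	syncer.DeployPodChangesOut <- struct{}{}
	syncer.WorkerStatusChangedOut <- "running"
	listen(ctx, syncer, cancel)
}
```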
@@ -199,21 +199,21 @@ func startTapperSyncer(ctx context.Context, cancel context.CancelFunc, provider
func printNoPodsFoundSuggestion(targetNamespaces []string) {
var suggestionStr string
if !utils.Contains(targetNamespaces, kubernetes.K8sAllNamespaces) {
-suggestionStr = ". You can also try selecting a different namespace with -n or tap all namespaces with -A"
+suggestionStr = ". You can also try selecting a different namespace with -n or target all namespaces with -A"
}
-log.Warn().Msg(fmt.Sprintf("Did not find any currently running pods that match the regex argument, kubeshark will automatically tap matching pods if any are created later%s", suggestionStr))
+log.Warn().Msg(fmt.Sprintf("Did not find any currently running pods that match the regex argument, kubeshark will automatically target matching pods if any are created later%s", suggestionStr))
}
-func getErrorDisplayTextForK8sTapManagerError(err kubernetes.K8sTapManagerError) string {
-switch err.TapManagerReason {
-case kubernetes.TapManagerPodListError:
-return fmt.Sprintf("Failed to update currently tapped pods: %v", err.OriginalError)
-case kubernetes.TapManagerPodWatchError:
-return fmt.Sprintf("Error occured in k8s pod watch: %v", err.OriginalError)
-case kubernetes.TapManagerTapperUpdateError:
-return fmt.Sprintf("Error updating tappers: %v", err.OriginalError)
+func getK8sDeployManagerErrorText(err kubernetes.K8sDeployManagerError) string {
+switch err.DeployManagerReason {
+case kubernetes.DeployManagerPodListError:
+return fmt.Sprintf("Failed to update currently targetted pods: %v", err.OriginalError)
+case kubernetes.DeployManagerPodWatchError:
+return fmt.Sprintf("Error occured in K8s pod watch: %v", err.OriginalError)
+case kubernetes.DeployManagerWorkerUpdateError:
+return fmt.Sprintf("Error updating worker: %v", err.OriginalError)
default:
-return fmt.Sprintf("Unknown error occured in k8s tap manager: %v", err.OriginalError)
+return fmt.Sprintf("Unknown error occured in K8s deploy manager: %v", err.OriginalError)
}
}
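
The renamed function maps a reason-tagged error to display text. A self-contained sketch of the same pattern; the reason constants and struct fields below are reconstructed from their usage in this hunk, not copied from the kubernetes package:

```go
package main

import (
	"errors"
	"fmt"
)

// DeployManagerErrorReason enumerates the failure classes the deploy
// manager can report; the names mirror the renamed constants in the diff.
type DeployManagerErrorReason string

const (
	DeployManagerPodListError      DeployManagerErrorReason = "POD_LIST_ERROR"
	DeployManagerPodWatchError     DeployManagerErrorReason = "POD_WATCH_ERROR"
	DeployManagerWorkerUpdateError DeployManagerErrorReason = "WORKER_UPDATE_ERROR"
)

// K8sDeployManagerError pairs an underlying error with its reason.
type K8sDeployManagerError struct {
	OriginalError       error
	DeployManagerReason DeployManagerErrorReason
}

// getK8sDeployManagerErrorText maps each reason to a user-facing message,
// falling back to a generic line for unknown reasons.
func getK8sDeployManagerErrorText(err K8sDeployManagerError) string {
	switch err.DeployManagerReason {
	case DeployManagerPodListError:
		return fmt.Sprintf("Failed to update currently targetted pods: %v", err.OriginalError)
	case DeployManagerPodWatchError:
		return fmt.Sprintf("Error occurred in K8s pod watch: %v", err.OriginalError)
	case DeployManagerWorkerUpdateError:
		return fmt.Sprintf("Error updating worker: %v", err.OriginalError)
	default:
		return fmt.Sprintf("Unknown error occurred in K8s deploy manager: %v", err.OriginalError)
	}
}

func main() {
	e := K8sDeployManagerError{
		OriginalError:       errors.New("connection refused"),
		DeployManagerReason: DeployManagerPodWatchError,
	}
	fmt.Println(getK8sDeployManagerErrorText(e))
}
```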
@@ -450,8 +450,8 @@ func watchHubEvents(ctx context.Context, kubernetesProvider *kubernetes.Provider
func postHubStarted(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc) {
startProxyReportErrorIfAny(kubernetesProvider, ctx, cancel, kubernetes.HubServiceName, config.Config.Hub.PortForward.SrcPort, config.Config.Hub.PortForward.DstPort, "/echo")
-if err := startTapperSyncer(ctx, cancel, kubernetesProvider, state.targetNamespaces, state.startTime); err != nil {
-log.Error().Err(errormessage.FormatError(err)).Msg("Error starting kubeshark tapper syncer")
+if err := startWorkerSyncer(ctx, cancel, kubernetesProvider, state.targetNamespaces, state.startTime); err != nil {
+log.Error().Err(errormessage.FormatError(err)).Msg("Error starting kubeshark worker syncer")
cancel()
}

View File

@@ -34,7 +34,7 @@ func runOpen() {
if !exists {
log.Error().
Str("service", kubernetes.FrontServiceName).
Str("command", "kubeshark tap").
Str("command", fmt.Sprintf("kubeshark %s", deployCmd.Use)).
Msg("Service not found! You should run the command first:")
cancel()
return
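
The `exists` flag checked above presumably comes from a Provider lookup of the front service. One plausible client-go shape for such a check, written here as a standalone helper (the function name and error handling are assumptions, not the repository's actual Provider method):

```go
package kube

import (
	"context"

	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// serviceExists asks the API server for a service by name and treats
// NotFound as a clean "does not exist" rather than a failure, which is the
// distinction the hunk above relies on before printing its suggestion.
func serviceExists(ctx context.Context, clientset kubernetes.Interface, namespace, name string) (bool, error) {
	_, err := clientset.CoreV1().Services(namespace).Get(ctx, name, metav1.GetOptions{})
	if k8serrors.IsNotFound(err) {
		return false, nil
	}
	if err != nil {
		return false, err
	}
	return true, nil
}
```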

View File

@@ -36,7 +36,7 @@ func init() {
}
// Execute adds all child commands to the root command and sets flags appropriately.
-// This is called by main.main(). It only needs to happen once to the tapCmd.
+// This is called by main.main(). It only needs to happen once to the deployCmd.
func Execute() {
cobra.CheckErr(rootCmd.Execute())
}