mirror of https://github.com/kubeshark/kubeshark.git (synced 2025-06-28 17:19:44 +00:00)

🎨 Replace the tap/tapper terminology with deploy, worker and targetted

This commit is contained in:
parent ae278526ab · commit 6ca0fe137e
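For orientation, the rename in this commit boils down to the identifier mapping below. This is a partial summary assembled from the hunks that follow, not a file in the commit itself:

    TapKubernetesPermissions                  -> KubernetesPermissions
    RunKubesharkTap / tapState                -> deploy / deployState
    GuiPortTapName ("gui-port")               -> ProxyPortLabel ("proxy-port"), and the other *TapName flag constants -> *Label
    TapperPodName / TapperDaemonSetName       -> WorkerPodName / WorkerDaemonSetName
    TapperResources                           -> WorkerResources
    KubesharkTapperSyncer / TapperSyncerConfig -> WorkerSyncer / WorkerSyncerConfig
    TappedPodChangeEvent                      -> TargettedPodChangeEvent
    K8sTapManagerError / K8sTapManagerErrorReason -> K8sDeployManagerError / K8sDeployManagerErrorReason
    ReportTappedPods / ReportTapperStatus     -> ReportTargettedPods / ReportWorkerStatus
    ApplyKubesharkTapperDaemonSet / ResetKubesharkTapperDaemonSet -> ApplyWorkerDaemonSet / ResetWorkerDaemonSet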
@@ -12,7 +12,7 @@ import (
 "k8s.io/client-go/kubernetes/scheme"
 )

-func TapKubernetesPermissions(ctx context.Context, embedFS embed.FS, kubernetesProvider *kubernetes.Provider) bool {
+func KubernetesPermissions(ctx context.Context, embedFS embed.FS, kubernetesProvider *kubernetes.Provider) bool {
 log.Info().Str("procedure", "kubernetes-permissions").Msg("Checking:")

 var filePath string
@@ -66,33 +66,33 @@ func checkPodResourcesExist(ctx context.Context, kubernetesProvider *kubernetes.
 Str("name", kubernetes.HubPodName).
 Msg("Pod is running.")

-if pods, err := kubernetesProvider.ListPodsByAppLabel(ctx, config.Config.ResourcesNamespace, kubernetes.TapperPodName); err != nil {
+if pods, err := kubernetesProvider.ListPodsByAppLabel(ctx, config.Config.ResourcesNamespace, kubernetes.WorkerPodName); err != nil {
 log.Error().
-Str("name", kubernetes.TapperPodName).
+Str("name", kubernetes.WorkerPodName).
 Err(err).
 Msg("While checking if pods are running!")
 return false
 } else {
-tappers := 0
+workers := 0
-notRunningTappers := 0
+notRunningWorkers := 0

 for _, pod := range pods {
-tappers += 1
+workers += 1
 if !kubernetes.IsPodRunning(&pod) {
-notRunningTappers += 1
+notRunningWorkers += 1
 }
 }

-if notRunningTappers > 0 {
+if notRunningWorkers > 0 {
 log.Error().
-Str("name", kubernetes.TapperPodName).
+Str("name", kubernetes.WorkerPodName).
-Msg(fmt.Sprintf("%d/%d pods are not running!", notRunningTappers, tappers))
+Msg(fmt.Sprintf("%d/%d pods are not running!", notRunningWorkers, workers))
 return false
 }

 log.Info().
-Str("name", kubernetes.TapperPodName).
+Str("name", kubernetes.WorkerPodName).
-Msg(fmt.Sprintf("All %d pods are running.", tappers))
+Msg(fmt.Sprintf("All %d pods are running.", workers))
 return true
 }
 }
@@ -29,7 +29,7 @@ func runKubesharkCheck() {
 }

 if checkPassed {
-checkPassed = check.TapKubernetesPermissions(ctx, embedFS, kubernetesProvider)
+checkPassed = check.KubernetesPermissions(ctx, embedFS, kubernetesProvider)
 }

 if checkPassed {
@@ -47,8 +47,8 @@ func runKubesharkCheck() {
 log.Info().Msg(fmt.Sprintf(utils.Green, "All checks are passed."))
 } else {
 log.Error().
-Str("command1", "kubeshark clean").
+Str("command1", fmt.Sprintf("kubeshark %s", cleanCmd.Use)).
-Str("command2", "kubeshark tap").
+Str("command2", fmt.Sprintf("kubeshark %s", deployCmd.Use)).
 Msg(fmt.Sprintf(utils.Red, "There are issues in your deployment! Run these commands:"))
 os.Exit(1)
 }
@@ -26,7 +26,7 @@ func startProxyReportErrorIfAny(kubernetesProvider *kubernetes.Provider, ctx con
 if err != nil {
 log.Error().
 Err(errormessage.FormatError(err)).
-Msg(fmt.Sprintf("Error occured while running k8s proxy. Try setting different port by using --%s", configStructs.GuiPortTapName))
+Msg(fmt.Sprintf("Error occured while running k8s proxy. Try setting different port by using --%s", configStructs.ProxyPortLabel))
 cancel()
 return
 }
@@ -45,7 +45,7 @@ func startProxyReportErrorIfAny(kubernetesProvider *kubernetes.Provider, ctx con
 log.Error().
 Str("pod-regex", podRegex.String()).
 Err(errormessage.FormatError(err)).
-Msg(fmt.Sprintf("Error occured while running port forward. Try setting different port by using --%s", configStructs.GuiPortTapName))
+Msg(fmt.Sprintf("Error occured while running port forward. Try setting different port by using --%s", configStructs.ProxyPortLabel))
 cancel()
 return
 }
@@ -16,7 +16,7 @@ var deployCmd = &cobra.Command{
 Short: "Deploy Kubeshark into your K8s cluster.",
 Long: `Deploy Kubeshark into your K8s cluster to gain visibility.`,
 RunE: func(cmd *cobra.Command, args []string) error {
-RunKubesharkTap()
+deploy()
 return nil
 },
 PreRunE: func(cmd *cobra.Command, args []string) error {
@@ -46,15 +46,15 @@ func init() {
 log.Debug().Err(err).Send()
 }

-deployCmd.Flags().Uint16P(configStructs.GuiPortTapName, "p", defaultDeployConfig.GuiPort, "Provide a custom port for the web interface webserver")
+deployCmd.Flags().Uint16P(configStructs.ProxyPortLabel, "p", defaultDeployConfig.ProxyPort, "Provide a custom port for the web interface webserver.")
-deployCmd.Flags().StringSliceP(configStructs.NamespacesTapName, "n", defaultDeployConfig.Namespaces, "Namespaces selector")
+deployCmd.Flags().StringSliceP(configStructs.NamespacesLabel, "n", defaultDeployConfig.Namespaces, "Namespaces selector.")
-deployCmd.Flags().BoolP(configStructs.AllNamespacesTapName, "A", defaultDeployConfig.AllNamespaces, "Tap all namespaces")
+deployCmd.Flags().BoolP(configStructs.AllNamespacesLabel, "A", defaultDeployConfig.AllNamespaces, "Deploy to all namespaces.")
-deployCmd.Flags().Bool(configStructs.EnableRedactionTapName, defaultDeployConfig.EnableRedaction, "Enables redaction of potentially sensitive request/response headers and body values")
+deployCmd.Flags().Bool(configStructs.EnableRedactionLabel, defaultDeployConfig.EnableRedaction, "Enables redaction of potentially sensitive request/response headers and body values.")
-deployCmd.Flags().String(configStructs.HumanMaxEntriesDBSizeTapName, defaultDeployConfig.HumanMaxEntriesDBSize, "Override the default max entries db size")
+deployCmd.Flags().String(configStructs.HumanMaxEntriesDBSizeLabel, defaultDeployConfig.HumanMaxEntriesDBSize, "Override the default max entries db size.")
 deployCmd.Flags().String(configStructs.InsertionFilterName, defaultDeployConfig.InsertionFilter, "Set the insertion filter. Accepts string or a file path.")
-deployCmd.Flags().Bool(configStructs.DryRunTapName, defaultDeployConfig.DryRun, "Preview of all pods matching the regex, without tapping them")
+deployCmd.Flags().Bool(configStructs.DryRunLabel, defaultDeployConfig.DryRun, "Preview of all pods matching the regex, without deploying workers on them.")
-deployCmd.Flags().Bool(configStructs.ServiceMeshName, defaultDeployConfig.ServiceMesh, "Record decrypted traffic if the cluster is configured with a service mesh and with mtls")
+deployCmd.Flags().Bool(configStructs.ServiceMeshName, defaultDeployConfig.ServiceMesh, "Record decrypted traffic if the cluster is configured with a service mesh and with mtls.")
-deployCmd.Flags().Bool(configStructs.TlsName, defaultDeployConfig.Tls, "Record tls traffic")
+deployCmd.Flags().Bool(configStructs.TlsName, defaultDeployConfig.Tls, "Record tls traffic.")
-deployCmd.Flags().Bool(configStructs.ProfilerName, defaultDeployConfig.Profiler, "Run pprof server")
+deployCmd.Flags().Bool(configStructs.ProfilerName, defaultDeployConfig.Profiler, "Run pprof server.")
-deployCmd.Flags().Int(configStructs.MaxLiveStreamsName, defaultDeployConfig.MaxLiveStreams, "Maximum live tcp streams to handle concurrently")
+deployCmd.Flags().Int(configStructs.MaxLiveStreamsName, defaultDeployConfig.MaxLiveStreams, "Maximum live tcp streams to handle concurrently.")
 }
@@ -27,19 +27,19 @@ import (

 const cleanupTimeout = time.Minute

-type tapState struct {
+type deployState struct {
 startTime time.Time
 targetNamespaces []string
 kubesharkServiceAccountExists bool
 }

-var state tapState
+var state deployState
 var connector *connect.Connector
 var hubPodReady bool
 var frontPodReady bool
 var proxyDone bool

-func RunKubesharkTap() {
+func deploy() {
 state.startTime = time.Now()

 connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(config.Config.Hub.PortForward.SrcPort), connect.DefaultRetries, connect.DefaultTimeout)
@@ -63,14 +63,14 @@ func RunKubesharkTap() {

 if config.Config.IsNsRestrictedMode() {
 if len(state.targetNamespaces) != 1 || !utils.Contains(state.targetNamespaces, config.Config.ResourcesNamespace) {
-log.Error().Msg(fmt.Sprintf("Kubeshark can't resolve IPs in other namespaces when running in namespace restricted mode. You can use the same namespace for --%s and --%s", configStructs.NamespacesTapName, config.ResourcesNamespaceConfigName))
+log.Error().Msg(fmt.Sprintf("Kubeshark can't resolve IPs in other namespaces when running in namespace restricted mode. You can use the same namespace for --%s and --%s", configStructs.NamespacesLabel, config.ResourcesNamespaceConfigName))
 return
 }
 }

 log.Info().Strs("namespaces", state.targetNamespaces).Msg("Targetting pods in:")

-if err := printTappedPodsPreview(ctx, kubernetesProvider, state.targetNamespaces); err != nil {
+if err := printTargettedPodsPreview(ctx, kubernetesProvider, state.targetNamespaces); err != nil {
 log.Error().Err(errormessage.FormatError(err)).Msg("Error listing pods!")
 }

@@ -79,7 +79,7 @@ func RunKubesharkTap() {
 }

 log.Info().Msg("Waiting for Kubeshark deployment to finish...")
-if state.kubesharkServiceAccountExists, err = resources.CreateTapKubesharkResources(ctx, kubernetesProvider, serializedKubesharkConfig, config.Config.IsNsRestrictedMode(), config.Config.ResourcesNamespace, config.Config.Deploy.MaxEntriesDBSizeBytes(), config.Config.Deploy.HubResources, config.Config.ImagePullPolicy(), config.Config.LogLevel(), config.Config.Deploy.Profiler); err != nil {
+if state.kubesharkServiceAccountExists, err = resources.CreateHubResources(ctx, kubernetesProvider, serializedKubesharkConfig, config.Config.IsNsRestrictedMode(), config.Config.ResourcesNamespace, config.Config.Deploy.MaxEntriesDBSizeBytes(), config.Config.Deploy.HubResources, config.Config.ImagePullPolicy(), config.Config.LogLevel(), config.Config.Deploy.Profiler); err != nil {
 var statusError *k8serrors.StatusError
 if errors.As(err, &statusError) && (statusError.ErrStatus.Reason == metav1.StatusReasonAlreadyExists) {
 log.Info().Msg("Kubeshark is already running in this namespace, change the `kubeshark-resources-namespace` configuration or run `kubeshark clean` to remove the currently running Kubeshark instance")
@@ -91,7 +91,7 @@ func RunKubesharkTap() {
 return
 }

-defer finishTapExecution(kubernetesProvider)
+defer finishDeployExecution(kubernetesProvider)

 go goUtils.HandleExcWrapper(watchHubEvents, ctx, kubernetesProvider, cancel)
 go goUtils.HandleExcWrapper(watchHubPod, ctx, kubernetesProvider, cancel)
@@ -101,7 +101,7 @@ func RunKubesharkTap() {
 utils.WaitForFinish(ctx, cancel)
 }

-func finishTapExecution(kubernetesProvider *kubernetes.Provider) {
+func finishDeployExecution(kubernetesProvider *kubernetes.Provider) {
 finishKubesharkExecution(kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.ResourcesNamespace)
 }

@@ -110,7 +110,7 @@ func getDeployConfig() *models.Config {
 MaxDBSizeBytes: config.Config.Deploy.MaxEntriesDBSizeBytes(),
 InsertionFilter: config.Config.Deploy.GetInsertionFilter(),
 PullPolicy: config.Config.ImagePullPolicyStr,
-TapperResources: config.Config.Deploy.TapperResources,
+TapperResources: config.Config.Deploy.WorkerResources,
 KubesharkResourcesNamespace: config.Config.ResourcesNamespace,
 AgentDatabasePath: models.DataDirPath,
 ServiceMap: config.Config.ServiceMap,
@@ -121,30 +121,30 @@ func getDeployConfig() *models.Config {
 }

 /*
-this function is a bit problematic as it might be detached from the actual pods the Kubeshark Hub will tap.
+This function is a bit problematic as it might be detached from the actual pods the Kubeshark that targets.
 The alternative would be to wait for Hub to be ready and then query it for the pods it listens to, this has
 the arguably worse drawback of taking a relatively very long time before the user sees which pods are targeted, if any.
 */
-func printTappedPodsPreview(ctx context.Context, kubernetesProvider *kubernetes.Provider, namespaces []string) error {
+func printTargettedPodsPreview(ctx context.Context, kubernetesProvider *kubernetes.Provider, namespaces []string) error {
 if matchingPods, err := kubernetesProvider.ListAllRunningPodsMatchingRegex(ctx, config.Config.Deploy.PodRegex(), namespaces); err != nil {
 return err
 } else {
 if len(matchingPods) == 0 {
 printNoPodsFoundSuggestion(namespaces)
 }
-for _, tappedPod := range matchingPods {
+for _, targettedPod := range matchingPods {
-log.Info().Msg(fmt.Sprintf("New pod: %s", fmt.Sprintf(utils.Green, tappedPod.Name)))
+log.Info().Msg(fmt.Sprintf("New pod: %s", fmt.Sprintf(utils.Green, targettedPod.Name)))
 }
 return nil
 }
 }

-func startTapperSyncer(ctx context.Context, cancel context.CancelFunc, provider *kubernetes.Provider, targetNamespaces []string, startTime time.Time) error {
+func startWorkerSyncer(ctx context.Context, cancel context.CancelFunc, provider *kubernetes.Provider, targetNamespaces []string, startTime time.Time) error {
-tapperSyncer, err := kubernetes.CreateAndStartKubesharkTapperSyncer(ctx, provider, kubernetes.TapperSyncerConfig{
+workerSyncer, err := kubernetes.CreateAndStartWorkerSyncer(ctx, provider, kubernetes.WorkerSyncerConfig{
 TargetNamespaces: targetNamespaces,
 PodFilterRegex: *config.Config.Deploy.PodRegex(),
 KubesharkResourcesNamespace: config.Config.ResourcesNamespace,
-TapperResources: config.Config.Deploy.TapperResources,
+WorkerResources: config.Config.Deploy.WorkerResources,
 ImagePullPolicy: config.Config.ImagePullPolicy(),
 LogLevel: config.Config.LogLevel(),
 KubesharkApiFilteringOptions: api.TrafficFilteringOptions{
@@ -163,31 +163,31 @@ func startTapperSyncer(ctx context.Context, cancel context.CancelFunc, provider
 go func() {
 for {
 select {
-case syncerErr, ok := <-tapperSyncer.ErrorOut:
+case syncerErr, ok := <-workerSyncer.ErrorOut:
 if !ok {
-log.Debug().Msg("kubesharkTapperSyncer err channel closed, ending listener loop")
+log.Debug().Msg("workerSyncer err channel closed, ending listener loop")
 return
 }
-log.Error().Msg(getErrorDisplayTextForK8sTapManagerError(syncerErr))
+log.Error().Msg(getK8sDeployManagerErrorText(syncerErr))
 cancel()
-case _, ok := <-tapperSyncer.TapPodChangesOut:
+case _, ok := <-workerSyncer.DeployPodChangesOut:
 if !ok {
-log.Debug().Msg("kubesharkTapperSyncer pod changes channel closed, ending listener loop")
+log.Debug().Msg("workerSyncer pod changes channel closed, ending listener loop")
 return
 }
-if err := connector.ReportTappedPods(tapperSyncer.CurrentlyTappedPods); err != nil {
+if err := connector.ReportTargettedPods(workerSyncer.CurrentlyTargettedPods); err != nil {
-log.Error().Err(err).Msg("failed update tapped pods.")
+log.Error().Err(err).Msg("failed update targetted pods.")
 }
-case tapperStatus, ok := <-tapperSyncer.TapperStatusChangedOut:
+case workerStatus, ok := <-workerSyncer.WorkerStatusChangedOut:
 if !ok {
-log.Debug().Msg("kubesharkTapperSyncer tapper status changed channel closed, ending listener loop")
+log.Debug().Msg("workerSyncer worker status changed channel closed, ending listener loop")
 return
 }
-if err := connector.ReportTapperStatus(tapperStatus); err != nil {
+if err := connector.ReportWorkerStatus(workerStatus); err != nil {
-log.Error().Err(err).Msg("failed update tapper status.")
+log.Error().Err(err).Msg("failed update worker status.")
 }
 case <-ctx.Done():
-log.Debug().Msg("kubesharkTapperSyncer event listener loop exiting due to context done")
+log.Debug().Msg("workerSyncer event listener loop exiting due to context done")
 return
 }
 }
@@ -199,21 +199,21 @@ func startTapperSyncer(ctx context.Context, cancel context.CancelFunc, provider
 func printNoPodsFoundSuggestion(targetNamespaces []string) {
 var suggestionStr string
 if !utils.Contains(targetNamespaces, kubernetes.K8sAllNamespaces) {
-suggestionStr = ". You can also try selecting a different namespace with -n or tap all namespaces with -A"
+suggestionStr = ". You can also try selecting a different namespace with -n or target all namespaces with -A"
 }
-log.Warn().Msg(fmt.Sprintf("Did not find any currently running pods that match the regex argument, kubeshark will automatically tap matching pods if any are created later%s", suggestionStr))
+log.Warn().Msg(fmt.Sprintf("Did not find any currently running pods that match the regex argument, kubeshark will automatically target matching pods if any are created later%s", suggestionStr))
 }

-func getErrorDisplayTextForK8sTapManagerError(err kubernetes.K8sTapManagerError) string {
+func getK8sDeployManagerErrorText(err kubernetes.K8sDeployManagerError) string {
-switch err.TapManagerReason {
+switch err.DeployManagerReason {
-case kubernetes.TapManagerPodListError:
+case kubernetes.DeployManagerPodListError:
-return fmt.Sprintf("Failed to update currently tapped pods: %v", err.OriginalError)
+return fmt.Sprintf("Failed to update currently targetted pods: %v", err.OriginalError)
-case kubernetes.TapManagerPodWatchError:
+case kubernetes.DeployManagerPodWatchError:
-return fmt.Sprintf("Error occured in k8s pod watch: %v", err.OriginalError)
+return fmt.Sprintf("Error occured in K8s pod watch: %v", err.OriginalError)
-case kubernetes.TapManagerTapperUpdateError:
+case kubernetes.DeployManagerWorkerUpdateError:
-return fmt.Sprintf("Error updating tappers: %v", err.OriginalError)
+return fmt.Sprintf("Error updating worker: %v", err.OriginalError)
 default:
-return fmt.Sprintf("Unknown error occured in k8s tap manager: %v", err.OriginalError)
+return fmt.Sprintf("Unknown error occured in K8s deploy manager: %v", err.OriginalError)
 }
 }

@@ -450,8 +450,8 @@ func watchHubEvents(ctx context.Context, kubernetesProvider *kubernetes.Provider
 func postHubStarted(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc) {
 startProxyReportErrorIfAny(kubernetesProvider, ctx, cancel, kubernetes.HubServiceName, config.Config.Hub.PortForward.SrcPort, config.Config.Hub.PortForward.DstPort, "/echo")

-if err := startTapperSyncer(ctx, cancel, kubernetesProvider, state.targetNamespaces, state.startTime); err != nil {
+if err := startWorkerSyncer(ctx, cancel, kubernetesProvider, state.targetNamespaces, state.startTime); err != nil {
-log.Error().Err(errormessage.FormatError(err)).Msg("Error starting kubeshark tapper syncer")
+log.Error().Err(errormessage.FormatError(err)).Msg("Error starting kubeshark worker syncer")
 cancel()
 }

|
@ -34,7 +34,7 @@ func runOpen() {
|
|||||||
if !exists {
|
if !exists {
|
||||||
log.Error().
|
log.Error().
|
||||||
Str("service", kubernetes.FrontServiceName).
|
Str("service", kubernetes.FrontServiceName).
|
||||||
Str("command", "kubeshark tap").
|
Str("command", fmt.Sprintf("kubeshark %s", deployCmd.Use)).
|
||||||
Msg("Service not found! You should run the command first:")
|
Msg("Service not found! You should run the command first:")
|
||||||
cancel()
|
cancel()
|
||||||
return
|
return
|
||||||
|
@@ -36,7 +36,7 @@ func init() {
 }

 // Execute adds all child commands to the root command and sets flags appropriately.
-// This is called by main.main(). It only needs to happen once to the tapCmd.
+// This is called by main.main(). It only needs to happen once to the deployCmd.
 func Execute() {
 cobra.CheckErr(rootCmd.Execute())
 }
@@ -13,22 +13,22 @@ import (
 )

 const (
-GuiPortTapName = "gui-port"
+ProxyPortLabel = "proxy-port"
-NamespacesTapName = "namespaces"
+NamespacesLabel = "namespaces"
-AllNamespacesTapName = "all-namespaces"
+AllNamespacesLabel = "all-namespaces"
-EnableRedactionTapName = "redact"
+EnableRedactionLabel = "redact"
-HumanMaxEntriesDBSizeTapName = "max-entries-db-size"
+HumanMaxEntriesDBSizeLabel = "max-entries-db-size"
 InsertionFilterName = "insertion-filter"
-DryRunTapName = "dry-run"
+DryRunLabel = "dry-run"
 ServiceMeshName = "service-mesh"
 TlsName = "tls"
 ProfilerName = "profiler"
 MaxLiveStreamsName = "max-live-streams"
 )

 type DeployConfig struct {
 PodRegexStr string `yaml:"regex" default:".*"`
-GuiPort uint16 `yaml:"gui-port" default:"8899"`
+ProxyPort uint16 `yaml:"proxy-port" default:"8899"`
 ProxyHost string `yaml:"proxy-host" default:"127.0.0.1"`
 Namespaces []string `yaml:"namespaces"`
 AllNamespaces bool `yaml:"all-namespaces" default:"false"`
@@ -45,7 +45,7 @@ type DeployConfig struct {
 InsertionFilter string `yaml:"insertion-filter" default:""`
 DryRun bool `yaml:"dry-run" default:"false"`
 HubResources models.Resources `yaml:"hub-resources"`
-TapperResources models.Resources `yaml:"tapper-resources"`
+WorkerResources models.Resources `yaml:"worker-resources"`
 ServiceMesh bool `yaml:"service-mesh" default:"false"`
 Tls bool `yaml:"tls" default:"false"`
 PacketCapture string `yaml:"packet-capture" default:"libpcap"`
@@ -126,7 +126,7 @@ func (config *DeployConfig) Validate() error {

 _, parseHumanDataSizeErr := utils.HumanReadableToBytes(config.HumanMaxEntriesDBSize)
 if parseHumanDataSizeErr != nil {
-return fmt.Errorf("Could not parse --%s value %s", HumanMaxEntriesDBSizeTapName, config.HumanMaxEntriesDBSize)
+return fmt.Errorf("Could not parse --%s value %s", HumanMaxEntriesDBSizeLabel, config.HumanMaxEntriesDBSize)
 }

 return nil
@@ -17,7 +17,7 @@ func FormatError(err error) error {
 if k8serrors.IsForbidden(err) {
 errorNew = fmt.Errorf("insufficient permissions: %w. "+
 "supply the required permission or control Kubeshark's access to namespaces by setting %s "+
-"in the config file or setting the tapped namespace with --%s %s=<NAMEPSACE>",
+"in the config file or setting the targetted namespace with --%s %s=<NAMEPSACE>",
 err,
 config.ResourcesNamespaceConfigName,
 config.SetCommandName,
@@ -62,31 +62,31 @@ func (connector *Connector) isReachable(path string) (bool, error) {
 }
 }

-func (connector *Connector) ReportTapperStatus(tapperStatus models.TapperStatus) error {
+func (connector *Connector) ReportWorkerStatus(workerStatus models.TapperStatus) error {
-tapperStatusUrl := fmt.Sprintf("%s/status/tapperStatus", connector.url)
+workerStatusUrl := fmt.Sprintf("%s/status/tapperStatus", connector.url)

-if jsonValue, err := json.Marshal(tapperStatus); err != nil {
+if jsonValue, err := json.Marshal(workerStatus); err != nil {
-return fmt.Errorf("Failed Marshal the tapper status %w", err)
+return fmt.Errorf("Failed Marshal the worker status %w", err)
 } else {
-if _, err := utils.Post(tapperStatusUrl, "application/json", bytes.NewBuffer(jsonValue), connector.client); err != nil {
+if _, err := utils.Post(workerStatusUrl, "application/json", bytes.NewBuffer(jsonValue), connector.client); err != nil {
-return fmt.Errorf("Failed sending to Hub the tapped pods %w", err)
+return fmt.Errorf("Failed sending to Hub the targetted pods %w", err)
 } else {
-log.Debug().Interface("tapper-status", tapperStatus).Msg("Reported to Hub about tapper status:")
+log.Debug().Interface("worker-status", workerStatus).Msg("Reported to Hub about Worker status:")
 return nil
 }
 }
 }

-func (connector *Connector) ReportTappedPods(pods []core.Pod) error {
+func (connector *Connector) ReportTargettedPods(pods []core.Pod) error {
-tappedPodsUrl := fmt.Sprintf("%s/status/tappedPods", connector.url)
+targettedPodsUrl := fmt.Sprintf("%s/status/tappedPods", connector.url)

 if jsonValue, err := json.Marshal(pods); err != nil {
-return fmt.Errorf("Failed Marshal the tapped pods %w", err)
+return fmt.Errorf("Failed Marshal the targetted pods %w", err)
 } else {
-if _, err := utils.Post(tappedPodsUrl, "application/json", bytes.NewBuffer(jsonValue), connector.client); err != nil {
+if _, err := utils.Post(targettedPodsUrl, "application/json", bytes.NewBuffer(jsonValue), connector.client); err != nil {
-return fmt.Errorf("Failed sending to Hub the tapped pods %w", err)
+return fmt.Errorf("Failed sending to Hub the targetted pods %w", err)
 } else {
-log.Debug().Int("pod-count", len(pods)).Msg("Reported to Hub about tapped pod count:")
+log.Debug().Int("pod-count", len(pods)).Msg("Reported to Hub about targetted pod count:")
 return nil
 }
 }
@@ -12,8 +12,8 @@ const (
 RoleBindingName = KubesharkResourcesPrefix + "role-binding"
 RoleName = KubesharkResourcesPrefix + "role"
 ServiceAccountName = KubesharkResourcesPrefix + "service-account"
-TapperDaemonSetName = KubesharkResourcesPrefix + "worker-daemon-set"
+WorkerDaemonSetName = KubesharkResourcesPrefix + "worker-daemon-set"
-TapperPodName = KubesharkResourcesPrefix + "worker"
+WorkerPodName = KubesharkResourcesPrefix + "worker"
 ConfigMapName = KubesharkResourcesPrefix + "config"
 MinKubernetesServerVersion = "1.16.0"
 )
@@ -1,20 +1,20 @@
 package kubernetes

-type K8sTapManagerErrorReason string
+type K8sDeployManagerErrorReason string

 const (
-TapManagerTapperUpdateError K8sTapManagerErrorReason = "TAPPER_UPDATE_ERROR"
+DeployManagerWorkerUpdateError K8sDeployManagerErrorReason = "TAPPER_UPDATE_ERROR"
-TapManagerPodWatchError K8sTapManagerErrorReason = "POD_WATCH_ERROR"
+DeployManagerPodWatchError K8sDeployManagerErrorReason = "POD_WATCH_ERROR"
-TapManagerPodListError K8sTapManagerErrorReason = "POD_LIST_ERROR"
+DeployManagerPodListError K8sDeployManagerErrorReason = "POD_LIST_ERROR"
 )

-type K8sTapManagerError struct {
+type K8sDeployManagerError struct {
 OriginalError error
-TapManagerReason K8sTapManagerErrorReason
+DeployManagerReason K8sDeployManagerErrorReason
 }

-// K8sTapManagerError implements the Error interface.
+// K8sDeployManagerError implements the Error interface.
-func (e *K8sTapManagerError) Error() string {
+func (e *K8sDeployManagerError) Error() string {
 return e.OriginalError.Error()
 }

@@ -15,32 +15,32 @@ import (
 core "k8s.io/api/core/v1"
 )

-const updateTappersDelay = 5 * time.Second
+const updateWorkersDelay = 5 * time.Second

-type TappedPodChangeEvent struct {
+type TargettedPodChangeEvent struct {
 Added []core.Pod
 Removed []core.Pod
 }

-// KubesharkTapperSyncer uses a k8s pod watch to update tapper daemonsets when targeted pods are removed or created
+// WorkerSyncer uses a k8s pod watch to update Worker daemonsets when targeted pods are removed or created
-type KubesharkTapperSyncer struct {
+type WorkerSyncer struct {
 startTime time.Time
 context context.Context
-CurrentlyTappedPods []core.Pod
+CurrentlyTargettedPods []core.Pod
-config TapperSyncerConfig
+config WorkerSyncerConfig
 kubernetesProvider *Provider
-TapPodChangesOut chan TappedPodChangeEvent
+DeployPodChangesOut chan TargettedPodChangeEvent
-TapperStatusChangedOut chan models.TapperStatus
+WorkerStatusChangedOut chan models.TapperStatus
-ErrorOut chan K8sTapManagerError
+ErrorOut chan K8sDeployManagerError
-nodeToTappedPodMap models.NodeToPodsMap
+nodeToTargettedPodMap models.NodeToPodsMap
-tappedNodes []string
+targettedNodes []string
 }

-type TapperSyncerConfig struct {
+type WorkerSyncerConfig struct {
 TargetNamespaces []string
 PodFilterRegex regexp.Regexp
 KubesharkResourcesNamespace string
-TapperResources models.Resources
+WorkerResources models.Resources
 ImagePullPolicy core.PullPolicy
 LogLevel zerolog.Level
 KubesharkApiFilteringOptions api.TrafficFilteringOptions
@@ -50,36 +50,36 @@ type TapperSyncerConfig struct {
 MaxLiveStreams int
 }

-func CreateAndStartKubesharkTapperSyncer(ctx context.Context, kubernetesProvider *Provider, config TapperSyncerConfig, startTime time.Time) (*KubesharkTapperSyncer, error) {
+func CreateAndStartWorkerSyncer(ctx context.Context, kubernetesProvider *Provider, config WorkerSyncerConfig, startTime time.Time) (*WorkerSyncer, error) {
-syncer := &KubesharkTapperSyncer{
+syncer := &WorkerSyncer{
 startTime: startTime.Truncate(time.Second), // Round down because k8s CreationTimestamp is given in 1 sec resolution.
 context: ctx,
-CurrentlyTappedPods: make([]core.Pod, 0),
+CurrentlyTargettedPods: make([]core.Pod, 0),
 config: config,
 kubernetesProvider: kubernetesProvider,
-TapPodChangesOut: make(chan TappedPodChangeEvent, 100),
+DeployPodChangesOut: make(chan TargettedPodChangeEvent, 100),
-TapperStatusChangedOut: make(chan models.TapperStatus, 100),
+WorkerStatusChangedOut: make(chan models.TapperStatus, 100),
-ErrorOut: make(chan K8sTapManagerError, 100),
+ErrorOut: make(chan K8sDeployManagerError, 100),
 }

-if err, _ := syncer.updateCurrentlyTappedPods(); err != nil {
+if err, _ := syncer.updateCurrentlyTargettedPods(); err != nil {
 return nil, err
 }

-if err := syncer.updateKubesharkTappers(); err != nil {
+if err := syncer.updateWorkers(); err != nil {
 return nil, err
 }

-go syncer.watchPodsForTapping()
+go syncer.watchPodsForTargetting()
-go syncer.watchTapperEvents()
+go syncer.watchWorkerEvents()
-go syncer.watchTapperPods()
+go syncer.watchWorkerPods()
 return syncer, nil
 }

-func (tapperSyncer *KubesharkTapperSyncer) watchTapperPods() {
+func (workerSyncer *WorkerSyncer) watchWorkerPods() {
-kubesharkResourceRegex := regexp.MustCompile(fmt.Sprintf("^%s.*", TapperPodName))
+kubesharkResourceRegex := regexp.MustCompile(fmt.Sprintf("^%s.*", WorkerPodName))
-podWatchHelper := NewPodWatchHelper(tapperSyncer.kubernetesProvider, kubesharkResourceRegex)
+podWatchHelper := NewPodWatchHelper(workerSyncer.kubernetesProvider, kubesharkResourceRegex)
-eventChan, errorChan := FilteredWatch(tapperSyncer.context, podWatchHelper, []string{tapperSyncer.config.KubesharkResourcesNamespace}, podWatchHelper)
+eventChan, errorChan := FilteredWatch(workerSyncer.context, podWatchHelper, []string{workerSyncer.config.KubesharkResourcesNamespace}, podWatchHelper)

 for {
 select {
@@ -91,7 +91,7 @@ func (tapperSyncer *KubesharkTapperSyncer) watchTapperPods() {

 pod, err := wEvent.ToPod()
 if err != nil {
-log.Error().Str("pod", TapperPodName).Err(err).Msg("While parsing Kubeshark resource!")
+log.Error().Str("pod", WorkerPodName).Err(err).Msg("While parsing Kubeshark resource!")
 continue
 }

@@ -101,8 +101,8 @@ func (tapperSyncer *KubesharkTapperSyncer) watchTapperPods() {
 Interface("phase", pod.Status.Phase).
 Msg("Watching pod events...")
 if pod.Spec.NodeName != "" {
-tapperStatus := models.TapperStatus{TapperName: pod.Name, NodeName: pod.Spec.NodeName, Status: string(pod.Status.Phase)}
+workerStatus := models.TapperStatus{TapperName: pod.Name, NodeName: pod.Spec.NodeName, Status: string(pod.Status.Phase)}
-tapperSyncer.TapperStatusChangedOut <- tapperStatus
+workerSyncer.WorkerStatusChangedOut <- workerStatus
 }

 case err, ok := <-errorChan:
@@ -110,21 +110,21 @@ func (tapperSyncer *KubesharkTapperSyncer) watchTapperPods() {
 errorChan = nil
 continue
 }
-log.Error().Str("pod", TapperPodName).Err(err).Msg("While watching pod!")
+log.Error().Str("pod", WorkerPodName).Err(err).Msg("While watching pod!")

-case <-tapperSyncer.context.Done():
+case <-workerSyncer.context.Done():
 log.Debug().
-Str("pod", TapperPodName).
+Str("pod", WorkerPodName).
 Msg("Watching pod, context done.")
 return
 }
 }
 }

-func (tapperSyncer *KubesharkTapperSyncer) watchTapperEvents() {
+func (workerSyncer *WorkerSyncer) watchWorkerEvents() {
-kubesharkResourceRegex := regexp.MustCompile(fmt.Sprintf("^%s.*", TapperPodName))
+kubesharkResourceRegex := regexp.MustCompile(fmt.Sprintf("^%s.*", WorkerPodName))
-eventWatchHelper := NewEventWatchHelper(tapperSyncer.kubernetesProvider, kubesharkResourceRegex, "pod")
+eventWatchHelper := NewEventWatchHelper(workerSyncer.kubernetesProvider, kubesharkResourceRegex, "pod")
-eventChan, errorChan := FilteredWatch(tapperSyncer.context, eventWatchHelper, []string{tapperSyncer.config.KubesharkResourcesNamespace}, eventWatchHelper)
+eventChan, errorChan := FilteredWatch(workerSyncer.context, eventWatchHelper, []string{workerSyncer.config.KubesharkResourcesNamespace}, eventWatchHelper)

 for {
 select {
@@ -137,14 +137,14 @@ func (tapperSyncer *KubesharkTapperSyncer) watchTapperEvents() {
 event, err := wEvent.ToEvent()
 if err != nil {
 log.Error().
-Str("pod", TapperPodName).
+Str("pod", WorkerPodName).
 Err(err).
 Msg("Parsing resource event.")
 continue
 }

 log.Debug().
-Str("pod", TapperPodName).
+Str("pod", WorkerPodName).
 Str("event", event.Name).
 Time("time", event.CreationTimestamp.Time).
 Str("name", event.Regarding.Name).
@@ -153,7 +153,7 @@ func (tapperSyncer *KubesharkTapperSyncer) watchTapperEvents() {
 Str("note", event.Note).
 Msg("Watching events.")

-pod, err1 := tapperSyncer.kubernetesProvider.GetPod(tapperSyncer.context, tapperSyncer.config.KubesharkResourcesNamespace, event.Regarding.Name)
+pod, err1 := workerSyncer.kubernetesProvider.GetPod(workerSyncer.context, workerSyncer.config.KubesharkResourcesNamespace, event.Regarding.Name)
 if err1 != nil {
 log.Error().Str("name", event.Regarding.Name).Msg("Couldn't get pod")
 continue
@@ -166,8 +166,8 @@ func (tapperSyncer *KubesharkTapperSyncer) watchTapperEvents() {
 nodeName = pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchFields[0].Values[0]
 }

-tapperStatus := models.TapperStatus{TapperName: pod.Name, NodeName: nodeName, Status: string(pod.Status.Phase)}
+workerStatus := models.TapperStatus{TapperName: pod.Name, NodeName: nodeName, Status: string(pod.Status.Phase)}
-tapperSyncer.TapperStatusChangedOut <- tapperStatus
+workerSyncer.WorkerStatusChangedOut <- workerStatus

 case err, ok := <-errorChan:
 if !ok {
@@ -176,44 +176,44 @@ func (tapperSyncer *KubesharkTapperSyncer) watchTapperEvents() {
 }

 log.Error().
-Str("pod", TapperPodName).
+Str("pod", WorkerPodName).
 Err(err).
 Msg("While watching events.")

-case <-tapperSyncer.context.Done():
+case <-workerSyncer.context.Done():
 log.Debug().
-Str("pod", TapperPodName).
+Str("pod", WorkerPodName).
 Msg("Watching pod events, context done.")
 return
 }
 }
 }

-func (tapperSyncer *KubesharkTapperSyncer) watchPodsForTapping() {
+func (workerSyncer *WorkerSyncer) watchPodsForTargetting() {
-podWatchHelper := NewPodWatchHelper(tapperSyncer.kubernetesProvider, &tapperSyncer.config.PodFilterRegex)
+podWatchHelper := NewPodWatchHelper(workerSyncer.kubernetesProvider, &workerSyncer.config.PodFilterRegex)
-eventChan, errorChan := FilteredWatch(tapperSyncer.context, podWatchHelper, tapperSyncer.config.TargetNamespaces, podWatchHelper)
+eventChan, errorChan := FilteredWatch(workerSyncer.context, podWatchHelper, workerSyncer.config.TargetNamespaces, podWatchHelper)

 handleChangeInPods := func() {
-err, changeFound := tapperSyncer.updateCurrentlyTappedPods()
+err, changeFound := workerSyncer.updateCurrentlyTargettedPods()
 if err != nil {
-tapperSyncer.ErrorOut <- K8sTapManagerError{
+workerSyncer.ErrorOut <- K8sDeployManagerError{
 OriginalError: err,
-TapManagerReason: TapManagerPodListError,
+DeployManagerReason: DeployManagerPodListError,
 }
 }

 if !changeFound {
-log.Debug().Msg("Nothing changed. Updating tappers is not needed.")
+log.Debug().Msg("Nothing changed. Updating workers is not needed.")
 return
 }
-if err := tapperSyncer.updateKubesharkTappers(); err != nil {
+if err := workerSyncer.updateWorkers(); err != nil {
-tapperSyncer.ErrorOut <- K8sTapManagerError{
+workerSyncer.ErrorOut <- K8sDeployManagerError{
 OriginalError: err,
-TapManagerReason: TapManagerTapperUpdateError,
+DeployManagerReason: DeployManagerWorkerUpdateError,
 }
 }
 }
-restartTappersDebouncer := debounce.NewDebouncer(updateTappersDelay, handleChangeInPods)
+restartWorkersDebouncer := debounce.NewDebouncer(updateWorkersDelay, handleChangeInPods)

 for {
 select {
@@ -225,7 +225,7 @@ func (tapperSyncer *KubesharkTapperSyncer) watchPodsForTapping() {

 pod, err := wEvent.ToPod()
 if err != nil {
-tapperSyncer.handleErrorInWatchLoop(err, restartTappersDebouncer)
+workerSyncer.handleErrorInWatchLoop(err, restartWorkersDebouncer)
 continue
 }

@@ -235,24 +235,24 @@ func (tapperSyncer *KubesharkTapperSyncer) watchPodsForTapping() {
 Str("pod", pod.Name).
 Str("namespace", pod.Namespace).
 Msg("Added matching pod.")
-if err := restartTappersDebouncer.SetOn(); err != nil {
+if err := restartWorkersDebouncer.SetOn(); err != nil {
 log.Error().
 Str("pod", pod.Name).
 Str("namespace", pod.Namespace).
 Err(err).
-Msg("While restarting tappers!")
+Msg("While restarting workers!")
 }
 case EventDeleted:
 log.Debug().
 Str("pod", pod.Name).
 Str("namespace", pod.Namespace).
 Msg("Removed matching pod.")
-if err := restartTappersDebouncer.SetOn(); err != nil {
+if err := restartWorkersDebouncer.SetOn(); err != nil {
 log.Error().
 Str("pod", pod.Name).
 Str("namespace", pod.Namespace).
 Err(err).
-Msg("While restarting tappers!")
+Msg("While restarting workers!")
 }
 case EventModified:
 log.Debug().
@@ -269,12 +269,12 @@ func (tapperSyncer *KubesharkTapperSyncer) watchPodsForTapping() {
 // - Pod reaches ready state
 // Ready/unready transitions might also trigger this event.
 if pod.Status.PodIP != "" {
-if err := restartTappersDebouncer.SetOn(); err != nil {
+if err := restartWorkersDebouncer.SetOn(); err != nil {
 log.Error().
 Str("pod", pod.Name).
 Str("namespace", pod.Namespace).
 Err(err).
-Msg("While restarting tappers!")
+Msg("While restarting workers!")
 }
 }
 case EventBookmark:
@@ -288,33 +288,33 @@ func (tapperSyncer *KubesharkTapperSyncer) watchPodsForTapping() {
 continue
 }

-tapperSyncer.handleErrorInWatchLoop(err, restartTappersDebouncer)
+workerSyncer.handleErrorInWatchLoop(err, restartWorkersDebouncer)
 continue

-case <-tapperSyncer.context.Done():
+case <-workerSyncer.context.Done():
-log.Debug().Msg("Watching pods, context done. Stopping \"restart tappers debouncer\"")
+log.Debug().Msg("Watching pods, context done. Stopping \"restart workers debouncer\"")
-restartTappersDebouncer.Cancel()
+restartWorkersDebouncer.Cancel()
 // TODO: Does this also perform cleanup?
 return
 }
 }
 }

-func (tapperSyncer *KubesharkTapperSyncer) handleErrorInWatchLoop(err error, restartTappersDebouncer *debounce.Debouncer) {
+func (workerSyncer *WorkerSyncer) handleErrorInWatchLoop(err error, restartWorkersDebouncer *debounce.Debouncer) {
-log.Error().Err(err).Msg("While watching pods, got an error! Stopping \"restart tappers debouncer\"")
+log.Error().Err(err).Msg("While watching pods, got an error! Stopping \"restart workers debouncer\"")
-restartTappersDebouncer.Cancel()
+restartWorkersDebouncer.Cancel()
-tapperSyncer.ErrorOut <- K8sTapManagerError{
+workerSyncer.ErrorOut <- K8sDeployManagerError{
 OriginalError: err,
-TapManagerReason: TapManagerPodWatchError,
+DeployManagerReason: DeployManagerPodWatchError,
 }
 }

-func (tapperSyncer *KubesharkTapperSyncer) updateCurrentlyTappedPods() (err error, changesFound bool) {
+func (workerSyncer *WorkerSyncer) updateCurrentlyTargettedPods() (err error, changesFound bool) {
-if matchingPods, err := tapperSyncer.kubernetesProvider.ListAllRunningPodsMatchingRegex(tapperSyncer.context, &tapperSyncer.config.PodFilterRegex, tapperSyncer.config.TargetNamespaces); err != nil {
+if matchingPods, err := workerSyncer.kubernetesProvider.ListAllRunningPodsMatchingRegex(workerSyncer.context, &workerSyncer.config.PodFilterRegex, workerSyncer.config.TargetNamespaces); err != nil {
 return err, false
 } else {
-podsToTap := excludeKubesharkPods(matchingPods)
+podsToTarget := excludeSelfPods(matchingPods)
-addedPods, removedPods := getPodArrayDiff(tapperSyncer.CurrentlyTappedPods, podsToTap)
+addedPods, removedPods := getPodArrayDiff(workerSyncer.CurrentlyTargettedPods, podsToTarget)
 for _, addedPod := range addedPods {
 log.Info().Str("pod", addedPod.Name).Msg("Currently targetting:")
 }
@@ -322,9 +322,9 @@ func (tapperSyncer *KubesharkTapperSyncer) updateCurrentlyTappedPods() (err erro
 log.Info().Str("pod", removedPod.Name).Msg("Pod is no longer running. Targetting is stopped.")
 }
 if len(addedPods) > 0 || len(removedPods) > 0 {
-tapperSyncer.CurrentlyTappedPods = podsToTap
+workerSyncer.CurrentlyTargettedPods = podsToTarget
-tapperSyncer.nodeToTappedPodMap = GetNodeHostToTappedPodsMap(tapperSyncer.CurrentlyTappedPods)
+workerSyncer.nodeToTargettedPodMap = GetNodeHostToTargettedPodsMap(workerSyncer.CurrentlyTargettedPods)
-tapperSyncer.TapPodChangesOut <- TappedPodChangeEvent{
+workerSyncer.DeployPodChangesOut <- TargettedPodChangeEvent{
 Added: addedPods,
 Removed: removedPods,
 }
@@ -334,70 +334,70 @@ func (tapperSyncer *KubesharkTapperSyncer) updateCurrentlyTappedPods() (err erro
 	}
 }

-func (tapperSyncer *KubesharkTapperSyncer) updateKubesharkTappers() error {
+func (workerSyncer *WorkerSyncer) updateWorkers() error {
-	nodesToTap := make([]string, len(tapperSyncer.nodeToTappedPodMap))
+	nodesToTarget := make([]string, len(workerSyncer.nodeToTargettedPodMap))
 	i := 0
-	for node := range tapperSyncer.nodeToTappedPodMap {
+	for node := range workerSyncer.nodeToTargettedPodMap {
-		nodesToTap[i] = node
+		nodesToTarget[i] = node
 		i++
 	}

-	if utils.EqualStringSlices(nodesToTap, tapperSyncer.tappedNodes) {
+	if utils.EqualStringSlices(nodesToTarget, workerSyncer.targettedNodes) {
 		log.Debug().Msg("Skipping apply, DaemonSet is up to date")
 		return nil
 	}

-	log.Debug().Strs("nodes", nodesToTap).Msg("Updating DaemonSet to run on nodes.")
+	log.Debug().Strs("nodes", nodesToTarget).Msg("Updating DaemonSet to run on nodes.")

 	image := "kubeshark/worker:latest"

-	if len(tapperSyncer.nodeToTappedPodMap) > 0 {
+	if len(workerSyncer.nodeToTargettedPodMap) > 0 {
 		var serviceAccountName string
-		if tapperSyncer.config.KubesharkServiceAccountExists {
+		if workerSyncer.config.KubesharkServiceAccountExists {
 			serviceAccountName = ServiceAccountName
 		} else {
 			serviceAccountName = ""
 		}

-		nodeNames := make([]string, 0, len(tapperSyncer.nodeToTappedPodMap))
+		nodeNames := make([]string, 0, len(workerSyncer.nodeToTargettedPodMap))
-		for nodeName := range tapperSyncer.nodeToTappedPodMap {
+		for nodeName := range workerSyncer.nodeToTargettedPodMap {
 			nodeNames = append(nodeNames, nodeName)
 		}

-		if err := tapperSyncer.kubernetesProvider.ApplyKubesharkTapperDaemonSet(
+		if err := workerSyncer.kubernetesProvider.ApplyWorkerDaemonSet(
-			tapperSyncer.context,
+			workerSyncer.context,
-			tapperSyncer.config.KubesharkResourcesNamespace,
+			workerSyncer.config.KubesharkResourcesNamespace,
-			TapperDaemonSetName,
+			WorkerDaemonSetName,
 			image,
-			TapperPodName,
+			WorkerPodName,
-			fmt.Sprintf("%s.%s.svc", HubPodName, tapperSyncer.config.KubesharkResourcesNamespace),
+			fmt.Sprintf("%s.%s.svc", HubPodName, workerSyncer.config.KubesharkResourcesNamespace),
 			nodeNames,
 			serviceAccountName,
-			tapperSyncer.config.TapperResources,
+			workerSyncer.config.WorkerResources,
-			tapperSyncer.config.ImagePullPolicy,
+			workerSyncer.config.ImagePullPolicy,
-			tapperSyncer.config.KubesharkApiFilteringOptions,
+			workerSyncer.config.KubesharkApiFilteringOptions,
-			tapperSyncer.config.LogLevel,
+			workerSyncer.config.LogLevel,
-			tapperSyncer.config.ServiceMesh,
+			workerSyncer.config.ServiceMesh,
-			tapperSyncer.config.Tls,
+			workerSyncer.config.Tls,
-			tapperSyncer.config.MaxLiveStreams); err != nil {
+			workerSyncer.config.MaxLiveStreams); err != nil {
			return err
		}

-		log.Debug().Int("tapper-count", len(tapperSyncer.nodeToTappedPodMap)).Msg("Successfully created tappers.")
+		log.Debug().Int("worker-count", len(workerSyncer.nodeToTargettedPodMap)).Msg("Successfully created workers.")
 	} else {
-		if err := tapperSyncer.kubernetesProvider.ResetKubesharkTapperDaemonSet(
+		if err := workerSyncer.kubernetesProvider.ResetWorkerDaemonSet(
-			tapperSyncer.context,
+			workerSyncer.context,
-			tapperSyncer.config.KubesharkResourcesNamespace,
+			workerSyncer.config.KubesharkResourcesNamespace,
-			TapperDaemonSetName,
+			WorkerDaemonSetName,
 			image,
-			TapperPodName); err != nil {
+			WorkerPodName); err != nil {
			return err
		}

-		log.Debug().Msg("Successfully reset tapper daemon set")
+		log.Debug().Msg("Successfully resetted Worker DaemonSet")
 	}

-	tapperSyncer.tappedNodes = nodesToTap
+	workerSyncer.targettedNodes = nodesToTarget

 	return nil
 }

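updateWorkers re-applies the worker DaemonSet only when the set of nodes that host targetted pods has changed. A stripped-down sketch of that guard, assuming utils.EqualStringSlices compares the two slices as sets (the sort-based comparison below is illustrative, not the library's actual implementation):

```go
package main

import (
	"fmt"
	"sort"

	core "k8s.io/api/core/v1"
)

// equalStringSlicesSketch treats the two slices as sets; the real
// utils.EqualStringSlices helper may behave differently.
func equalStringSlicesSketch(a, b []string) bool {
	if len(a) != len(b) {
		return false
	}
	ac := append([]string(nil), a...)
	bc := append([]string(nil), b...)
	sort.Strings(ac)
	sort.Strings(bc)
	for i := range ac {
		if ac[i] != bc[i] {
			return false
		}
	}
	return true
}

// nodesFromMap mirrors how updateWorkers turns the node-to-targetted-pods map
// into the list of nodes the worker DaemonSet should run on.
func nodesFromMap(nodeToTargettedPodMap map[string][]core.Pod) []string {
	nodes := make([]string, 0, len(nodeToTargettedPodMap))
	for node := range nodeToTargettedPodMap {
		nodes = append(nodes, node)
	}
	return nodes
}

func main() {
	current := map[string][]core.Pod{"node-a": nil, "node-b": nil}
	previouslyTargetted := []string{"node-b", "node-a"}

	nodes := nodesFromMap(current)
	if equalStringSlicesSketch(nodes, previouslyTargetted) {
		fmt.Println("skipping apply, DaemonSet is up to date")
		return
	}
	fmt.Println("applying DaemonSet to nodes:", nodes)
}
```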
@@ -810,17 +810,17 @@ func (provider *Provider) CreateConfigMap(ctx context.Context, namespace string,
 	return nil
 }

-func (provider *Provider) ApplyKubesharkTapperDaemonSet(ctx context.Context, namespace string, daemonSetName string, podImage string, tapperPodName string, hubPodIp string, nodeNames []string, serviceAccountName string, resources models.Resources, imagePullPolicy core.PullPolicy, kubesharkApiFilteringOptions api.TrafficFilteringOptions, logLevel zerolog.Level, serviceMesh bool, tls bool, maxLiveStreams int) error {
+func (provider *Provider) ApplyWorkerDaemonSet(ctx context.Context, namespace string, daemonSetName string, podImage string, workerPodName string, hubPodIp string, nodeNames []string, serviceAccountName string, resources models.Resources, imagePullPolicy core.PullPolicy, kubesharkApiFilteringOptions api.TrafficFilteringOptions, logLevel zerolog.Level, serviceMesh bool, tls bool, maxLiveStreams int) error {
 	log.Debug().
 		Int("node-count", len(nodeNames)).
 		Str("namespace", namespace).
 		Str("daemonset-name", daemonSetName).
 		Str("image", podImage).
-		Str("pod", tapperPodName).
+		Str("pod", workerPodName).
-		Msg("Applying tapper DaemonSets.")
+		Msg("Applying worker DaemonSets.")

 	if len(nodeNames) == 0 {
-		return fmt.Errorf("daemon set %s must tap at least 1 pod", daemonSetName)
+		return fmt.Errorf("DaemonSet %s must target at least 1 pod", daemonSetName)
 	}

 	kubesharkApiFilteringOptionsJsonStr, err := json.Marshal(kubesharkApiFilteringOptions)
@@ -849,7 +849,7 @@ func (provider *Provider) ApplyKubesharkTapperDaemonSet(ctx context.Context, nam
 	}

 	workerContainer := applyconfcore.Container()
-	workerContainer.WithName(tapperPodName)
+	workerContainer.WithName(workerPodName)
 	workerContainer.WithImage(podImage)
 	workerContainer.WithImagePullPolicy(imagePullPolicy)

@@ -887,19 +887,19 @@ func (provider *Provider) ApplyKubesharkTapperDaemonSet(ctx context.Context, nam
 	)
 	cpuLimit, err := resource.ParseQuantity(resources.CpuLimit)
 	if err != nil {
-		return fmt.Errorf("invalid cpu limit for %s container", tapperPodName)
+		return fmt.Errorf("invalid cpu limit for %s container", workerPodName)
 	}
 	memLimit, err := resource.ParseQuantity(resources.MemoryLimit)
 	if err != nil {
-		return fmt.Errorf("invalid memory limit for %s container", tapperPodName)
+		return fmt.Errorf("invalid memory limit for %s container", workerPodName)
 	}
 	cpuRequests, err := resource.ParseQuantity(resources.CpuRequests)
 	if err != nil {
-		return fmt.Errorf("invalid cpu request for %s container", tapperPodName)
+		return fmt.Errorf("invalid cpu request for %s container", workerPodName)
 	}
 	memRequests, err := resource.ParseQuantity(resources.MemoryRequests)
 	if err != nil {
-		return fmt.Errorf("invalid memory request for %s container", tapperPodName)
+		return fmt.Errorf("invalid memory request for %s container", workerPodName)
 	}
 	workerResourceLimits := core.ResourceList{
 		"cpu": cpuLimit,
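The requests and limits for the worker container arrive as plain strings and are validated with resource.ParseQuantity before being placed into core.ResourceList values, as the hunk above shows. A standalone sketch of that step; the workerResources struct and the sample quantities are illustrative stand-ins for the models.Resources argument:

```go
package main

import (
	"fmt"

	core "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// workerResources mirrors the string fields referenced in the diff
// (CpuLimit, MemoryLimit, CpuRequests, MemoryRequests).
type workerResources struct {
	CpuLimit       string
	MemoryLimit    string
	CpuRequests    string
	MemoryRequests string
}

// buildResourceLists parses the user-supplied strings and builds the limit
// and request lists for the worker container.
func buildResourceLists(podName string, res workerResources) (core.ResourceList, core.ResourceList, error) {
	cpuLimit, err := resource.ParseQuantity(res.CpuLimit)
	if err != nil {
		return nil, nil, fmt.Errorf("invalid cpu limit for %s container", podName)
	}
	memLimit, err := resource.ParseQuantity(res.MemoryLimit)
	if err != nil {
		return nil, nil, fmt.Errorf("invalid memory limit for %s container", podName)
	}
	cpuRequests, err := resource.ParseQuantity(res.CpuRequests)
	if err != nil {
		return nil, nil, fmt.Errorf("invalid cpu request for %s container", podName)
	}
	memRequests, err := resource.ParseQuantity(res.MemoryRequests)
	if err != nil {
		return nil, nil, fmt.Errorf("invalid memory request for %s container", podName)
	}

	limits := core.ResourceList{"cpu": cpuLimit, "memory": memLimit}
	requests := core.ResourceList{"cpu": cpuRequests, "memory": memRequests}
	return limits, requests, nil
}

func main() {
	// Sample quantities are illustrative, not Kubeshark defaults.
	limits, requests, err := buildResourceLists("worker", workerResources{
		CpuLimit: "750m", MemoryLimit: "1Gi", CpuRequests: "50m", MemoryRequests: "50Mi",
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(limits, requests)
}
```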
@@ -967,14 +967,14 @@ func (provider *Provider) ApplyKubesharkTapperDaemonSet(ctx context.Context, nam

 	podTemplate := applyconfcore.PodTemplateSpec()
 	podTemplate.WithLabels(map[string]string{
-		"app": tapperPodName,
+		"app": workerPodName,
 		LabelManagedBy: provider.managedBy,
 		LabelCreatedBy: provider.createdBy,
 	})
 	podTemplate.WithSpec(podSpec)

 	labelSelector := applyconfmeta.LabelSelector()
-	labelSelector.WithMatchLabels(map[string]string{"app": tapperPodName})
+	labelSelector.WithMatchLabels(map[string]string{"app": workerPodName})

 	applyOptions := metav1.ApplyOptions{
 		Force: true,
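The pod template and label selector assembled in this hunk feed a server-side apply of the worker DaemonSet. A condensed, hypothetical sketch of that client-go apply-configuration pattern; the real provider attaches a much fuller pod spec (resources, volumes, node affinity), and the field manager name below is a placeholder:

```go
package sketch

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	applyconfapp "k8s.io/client-go/applyconfigurations/apps/v1"
	applyconfcore "k8s.io/client-go/applyconfigurations/core/v1"
	applyconfmeta "k8s.io/client-go/applyconfigurations/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// applyWorkerDaemonSetSketch shows only the apply-configuration plumbing:
// build a pod template labelled app=<workerPodName>, a matching selector,
// and apply the DaemonSet with Force semantics.
func applyWorkerDaemonSetSketch(ctx context.Context, clientSet kubernetes.Interface, namespace, daemonSetName, workerPodName, image string) error {
	container := applyconfcore.Container().
		WithName(workerPodName).
		WithImage(image)

	podSpec := applyconfcore.PodSpec().WithContainers(container)

	podTemplate := applyconfcore.PodTemplateSpec().
		WithLabels(map[string]string{"app": workerPodName}).
		WithSpec(podSpec)

	labelSelector := applyconfmeta.LabelSelector().
		WithMatchLabels(map[string]string{"app": workerPodName})

	daemonSet := applyconfapp.DaemonSet(daemonSetName, namespace).
		WithSpec(applyconfapp.DaemonSetSpec().
			WithSelector(labelSelector).
			WithTemplate(podTemplate))

	// Force: true matches the applyOptions shown in the diff; the field
	// manager name here is a placeholder, not Kubeshark's.
	_, err := clientSet.AppsV1().DaemonSets(namespace).Apply(ctx, daemonSet, metav1.ApplyOptions{
		FieldManager: "kubeshark-sketch",
		Force:        true,
	})
	return err
}
```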
@@ -993,9 +993,9 @@ func (provider *Provider) ApplyKubesharkTapperDaemonSet(ctx context.Context, nam
	return err
}

-func (provider *Provider) ResetKubesharkTapperDaemonSet(ctx context.Context, namespace string, daemonSetName string, podImage string, tapperPodName string) error {
+func (provider *Provider) ResetWorkerDaemonSet(ctx context.Context, namespace string, daemonSetName string, podImage string, workerPodName string) error {
 	workerContainer := applyconfcore.Container()
-	workerContainer.WithName(tapperPodName)
+	workerContainer.WithName(workerPodName)
 	workerContainer.WithImage(podImage)

 	nodeSelectorRequirement := applyconfcore.NodeSelectorRequirement()
@@ -1016,14 +1016,14 @@ func (provider *Provider) ResetKubesharkTapperDaemonSet(ctx context.Context, nam

 	podTemplate := applyconfcore.PodTemplateSpec()
 	podTemplate.WithLabels(map[string]string{
-		"app": tapperPodName,
+		"app": workerPodName,
 		LabelManagedBy: provider.managedBy,
 		LabelCreatedBy: provider.createdBy,
 	})
 	podTemplate.WithSpec(podSpec)

 	labelSelector := applyconfmeta.LabelSelector()
-	labelSelector.WithMatchLabels(map[string]string{"app": tapperPodName})
+	labelSelector.WithMatchLabels(map[string]string{"app": workerPodName})

 	applyOptions := metav1.ApplyOptions{
 		Force: true,

@@ -8,19 +8,19 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )

-func GetNodeHostToTappedPodsMap(tappedPods []core.Pod) models.NodeToPodsMap {
+func GetNodeHostToTargettedPodsMap(targettedPods []core.Pod) models.NodeToPodsMap {
-	nodeToTappedPodMap := make(models.NodeToPodsMap)
+	nodeToTargettedPodsMap := make(models.NodeToPodsMap)
-	for _, pod := range tappedPods {
+	for _, pod := range targettedPods {
 		minimizedPod := getMinimizedPod(pod)

-		existingList := nodeToTappedPodMap[pod.Spec.NodeName]
+		existingList := nodeToTargettedPodsMap[pod.Spec.NodeName]
 		if existingList == nil {
-			nodeToTappedPodMap[pod.Spec.NodeName] = []core.Pod{minimizedPod}
+			nodeToTargettedPodsMap[pod.Spec.NodeName] = []core.Pod{minimizedPod}
 		} else {
-			nodeToTappedPodMap[pod.Spec.NodeName] = append(nodeToTappedPodMap[pod.Spec.NodeName], minimizedPod)
+			nodeToTargettedPodsMap[pod.Spec.NodeName] = append(nodeToTargettedPodsMap[pod.Spec.NodeName], minimizedPod)
 		}
 	}
-	return nodeToTappedPodMap
+	return nodeToTargettedPodsMap
 }

 func getMinimizedPod(fullPod core.Pod) core.Pod {
@@ -48,7 +48,7 @@ func getMinimizedContainerStatuses(fullPod core.Pod) []core.ContainerStatus {
 	return result
 }

-func excludeKubesharkPods(pods []core.Pod) []core.Pod {
+func excludeSelfPods(pods []core.Pod) []core.Pod {
 	kubesharkPrefixRegex := regexp.MustCompile("^" + KubesharkResourcesPrefix)

 	nonKubesharkPods := make([]core.Pod, 0)

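Only the opening lines of excludeSelfPods appear in this hunk. Presumably the filter drops any pod whose name starts with the Kubeshark resources prefix so the workers never target Kubeshark's own pods. A hedged reconstruction under that assumption; the prefix value and the extra checks the real function may apply are placeholders:

```go
package main

import (
	"fmt"
	"regexp"

	core "k8s.io/api/core/v1"
)

// kubesharkResourcesPrefix stands in for the KubesharkResourcesPrefix
// constant referenced in the diff; its real value is not shown here.
const kubesharkResourcesPrefix = "kubeshark-"

// excludeSelfPodsSketch keeps every pod whose name does not start with the
// Kubeshark resources prefix. The real function may apply extra checks.
func excludeSelfPodsSketch(pods []core.Pod) []core.Pod {
	kubesharkPrefixRegex := regexp.MustCompile("^" + kubesharkResourcesPrefix)

	nonKubesharkPods := make([]core.Pod, 0)
	for _, pod := range pods {
		if !kubesharkPrefixRegex.MatchString(pod.Name) {
			nonKubesharkPods = append(nonKubesharkPods, pod)
		}
	}
	return nonKubesharkPods
}

func main() {
	self := core.Pod{}
	self.Name = "kubeshark-worker-abcde"
	app := core.Pod{}
	app.Name = "front-end-5c64b4c9d6-xk2tq"

	for _, pod := range excludeSelfPodsSketch([]core.Pod{self, app}) {
		fmt.Println(pod.Name) // only the non-Kubeshark pod remains
	}
}
```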
@@ -107,8 +107,8 @@ func cleanUpRestrictedMode(ctx context.Context, kubernetesProvider *kubernetes.P
 		handleDeletionError(err, resourceDesc, &leftoverResources)
 	}

-	if err := kubernetesProvider.RemoveDaemonSet(ctx, kubesharkResourcesNamespace, kubernetes.TapperDaemonSetName); err != nil {
+	if err := kubernetesProvider.RemoveDaemonSet(ctx, kubesharkResourcesNamespace, kubernetes.WorkerDaemonSetName); err != nil {
-		resourceDesc := fmt.Sprintf("DaemonSet %s in namespace %s", kubernetes.TapperDaemonSetName, kubesharkResourcesNamespace)
+		resourceDesc := fmt.Sprintf("DaemonSet %s in namespace %s", kubernetes.WorkerDaemonSetName, kubesharkResourcesNamespace)
 		handleDeletionError(err, resourceDesc, &leftoverResources)
 	}

@@ -13,7 +13,7 @@ import (
 	core "k8s.io/api/core/v1"
 )

-func CreateTapKubesharkResources(ctx context.Context, kubernetesProvider *kubernetes.Provider, serializedKubesharkConfig string, isNsRestrictedMode bool, kubesharkResourcesNamespace string, maxEntriesDBSizeBytes int64, hubResources models.Resources, imagePullPolicy core.PullPolicy, logLevel zerolog.Level, profiler bool) (bool, error) {
+func CreateHubResources(ctx context.Context, kubernetesProvider *kubernetes.Provider, serializedKubesharkConfig string, isNsRestrictedMode bool, kubesharkResourcesNamespace string, maxEntriesDBSizeBytes int64, hubResources models.Resources, imagePullPolicy core.PullPolicy, logLevel zerolog.Level, profiler bool) (bool, error) {
 	if !isNsRestrictedMode {
 		if err := createKubesharkNamespace(ctx, kubernetesProvider, kubesharkResourcesNamespace); err != nil {
 			return false, err