Replace --profiler option with --debug

This commit is contained in:
M. Mert Yildiran 2022-12-26 08:15:47 +03:00
parent 0a5343299c
commit c25be876f2
No known key found for this signature in database
GPG Key ID: DA5D6DCBB758A461
6 changed files with 30 additions and 30 deletions

View File

@@ -13,8 +13,8 @@ import (
var tapCmd = &cobra.Command{ var tapCmd = &cobra.Command{
Use: "tap [POD REGEX]", Use: "tap [POD REGEX]",
Short: "Record and see the network traffic in your Kubernetes cluster.", Short: "Capture the network traffic in your Kubernetes cluster.",
Long: "Record and see the network traffic in your Kubernetes cluster.", Long: "Capture the network traffic in your Kubernetes cluster.",
RunE: func(cmd *cobra.Command, args []string) error { RunE: func(cmd *cobra.Command, args []string) error {
tap() tap()
return nil return nil
@@ -55,7 +55,7 @@ func init() {
tapCmd.Flags().BoolP(configStructs.AllNamespacesLabel, "A", defaultTapConfig.AllNamespaces, "Tap all namespaces.") tapCmd.Flags().BoolP(configStructs.AllNamespacesLabel, "A", defaultTapConfig.AllNamespaces, "Tap all namespaces.")
tapCmd.Flags().String(configStructs.HumanMaxEntriesDBSizeLabel, defaultTapConfig.HumanMaxEntriesDBSize, "Override the default max entries db size.") tapCmd.Flags().String(configStructs.HumanMaxEntriesDBSizeLabel, defaultTapConfig.HumanMaxEntriesDBSize, "Override the default max entries db size.")
tapCmd.Flags().Bool(configStructs.DryRunLabel, defaultTapConfig.DryRun, "Preview of all pods matching the regex, without tapping them.") tapCmd.Flags().Bool(configStructs.DryRunLabel, defaultTapConfig.DryRun, "Preview of all pods matching the regex, without tapping them.")
tapCmd.Flags().Bool(configStructs.ServiceMeshName, defaultTapConfig.ServiceMesh, "Record decrypted traffic if the cluster is configured with a service mesh and with mtls.") tapCmd.Flags().Bool(configStructs.ServiceMeshLabel, defaultTapConfig.ServiceMesh, "Capture the encrypted traffic if the cluster is configured with a service mesh and with mTLS.")
tapCmd.Flags().Bool(configStructs.TlsName, defaultTapConfig.Tls, "Record tls traffic.") tapCmd.Flags().Bool(configStructs.TlsLabel, defaultTapConfig.Tls, "Capture the traffic that's encrypted with OpenSSL or Go crypto/tls libraries.")
tapCmd.Flags().Bool(configStructs.ProfilerName, defaultTapConfig.Profiler, "Run pprof server.") tapCmd.Flags().Bool(configStructs.DebugLabel, defaultTapConfig.Debug, "Enable the debug mode.")
} }

View File

@@ -82,7 +82,7 @@ func tap() {
} }
log.Info().Msg("Waiting for the creation of Kubeshark resources...") log.Info().Msg("Waiting for the creation of Kubeshark resources...")
if state.kubesharkServiceAccountExists, err = resources.CreateHubResources(ctx, kubernetesProvider, serializedKubesharkConfig, config.Config.IsNsRestrictedMode(), config.Config.ResourcesNamespace, config.Config.Tap.MaxEntriesDBSizeBytes(), config.Config.Tap.HubResources, config.Config.ImagePullPolicy(), config.Config.LogLevel(), config.Config.Tap.Profiler); err != nil { if state.kubesharkServiceAccountExists, err = resources.CreateHubResources(ctx, kubernetesProvider, serializedKubesharkConfig, config.Config.IsNsRestrictedMode(), config.Config.ResourcesNamespace, config.Config.Tap.MaxEntriesDBSizeBytes(), config.Config.Tap.HubResources, config.Config.ImagePullPolicy(), config.Config.LogLevel(), config.Config.Tap.Debug); err != nil {
var statusError *k8serrors.StatusError var statusError *k8serrors.StatusError
if errors.As(err, &statusError) && (statusError.ErrStatus.Reason == metav1.StatusReasonAlreadyExists) { if errors.As(err, &statusError) && (statusError.ErrStatus.Reason == metav1.StatusReasonAlreadyExists) {
log.Info().Msg("Kubeshark is already running in this namespace, change the `kubeshark-resources-namespace` configuration or run `kubeshark clean` to remove the currently running Kubeshark instance") log.Info().Msg("Kubeshark is already running in this namespace, change the `kubeshark-resources-namespace` configuration or run `kubeshark clean` to remove the currently running Kubeshark instance")
@@ -153,6 +153,7 @@ func startWorkerSyncer(ctx context.Context, cancel context.CancelFunc, provider
KubesharkServiceAccountExists: state.kubesharkServiceAccountExists, KubesharkServiceAccountExists: state.kubesharkServiceAccountExists,
ServiceMesh: config.Config.Tap.ServiceMesh, ServiceMesh: config.Config.Tap.ServiceMesh,
Tls: config.Config.Tap.Tls, Tls: config.Config.Tap.Tls,
Debug: config.Config.Tap.Debug,
}, startTime) }, startTime)
if err != nil { if err != nil {

View File

@@ -18,9 +18,9 @@ const (
AllNamespacesLabel = "all-namespaces" AllNamespacesLabel = "all-namespaces"
HumanMaxEntriesDBSizeLabel = "max-entries-db-size" HumanMaxEntriesDBSizeLabel = "max-entries-db-size"
DryRunLabel = "dry-run" DryRunLabel = "dry-run"
ServiceMeshName = "service-mesh" ServiceMeshLabel = "service-mesh"
TlsName = "tls" TlsLabel = "tls"
ProfilerName = "profiler" DebugLabel = "debug"
) )
type HubConfig struct { type HubConfig struct {
@@ -50,7 +50,7 @@ type TapConfig struct {
ServiceMesh bool `yaml:"service-mesh" default:"true"` ServiceMesh bool `yaml:"service-mesh" default:"true"`
Tls bool `yaml:"tls" default:"true"` Tls bool `yaml:"tls" default:"true"`
PacketCapture string `yaml:"packet-capture" default:"libpcap"` PacketCapture string `yaml:"packet-capture" default:"libpcap"`
Profiler bool `yaml:"profiler" default:"false"` Debug bool `yaml:"debug" default:"false"`
} }
func (config *TapConfig) PodRegex() *regexp.Regexp { func (config *TapConfig) PodRegex() *regexp.Regexp {

View File

@@ -179,12 +179,11 @@ type PodOptions struct {
PodName string PodName string
PodImage string PodImage string
ServiceAccountName string ServiceAccountName string
IsNamespaceRestricted bool
MaxEntriesDBSizeBytes int64 MaxEntriesDBSizeBytes int64
Resources models.Resources Resources models.Resources
ImagePullPolicy core.PullPolicy ImagePullPolicy core.PullPolicy
LogLevel zerolog.Level LogLevel zerolog.Level
Profiler bool Debug bool
} }
func (provider *Provider) BuildHubPod(opts *PodOptions, mountVolumeClaim bool, volumeClaimName string) (*core.Pod, error) { func (provider *Provider) BuildHubPod(opts *PodOptions, mountVolumeClaim bool, volumeClaimName string) (*core.Pod, error) {
@@ -212,12 +211,8 @@ func (provider *Provider) BuildHubPod(opts *PodOptions, mountVolumeClaim bool, v
"./hub", "./hub",
} }
if opts.Profiler { if opts.Debug {
command = append(command, "--profiler") command = append(command, "-debug")
}
if opts.IsNamespaceRestricted {
command = append(command, "--namespace", opts.Namespace)
} }
volumeMounts := []core.VolumeMount{ volumeMounts := []core.VolumeMount{
@@ -716,7 +711,7 @@ func (provider *Provider) CreateConfigMap(ctx context.Context, namespace string,
return nil return nil
} }
func (provider *Provider) ApplyWorkerDaemonSet(ctx context.Context, namespace string, daemonSetName string, podImage string, workerPodName string, nodeNames []string, serviceAccountName string, resources models.Resources, imagePullPolicy core.PullPolicy, kubesharkApiFilteringOptions api.TrafficFilteringOptions, logLevel zerolog.Level, serviceMesh bool, tls bool) error { func (provider *Provider) ApplyWorkerDaemonSet(ctx context.Context, namespace string, daemonSetName string, podImage string, workerPodName string, nodeNames []string, serviceAccountName string, resources models.Resources, imagePullPolicy core.PullPolicy, kubesharkApiFilteringOptions api.TrafficFilteringOptions, logLevel zerolog.Level, serviceMesh bool, tls bool, debug bool) error {
log.Debug(). log.Debug().
Int("node-count", len(nodeNames)). Int("node-count", len(nodeNames)).
Str("namespace", namespace). Str("namespace", namespace).
@@ -734,18 +729,22 @@ func (provider *Provider) ApplyWorkerDaemonSet(ctx context.Context, namespace st
return err return err
} }
kubesharkCmd := []string{"./worker", "-i", "any", "-port", "8897"} command := []string{"./worker", "-i", "any", "-port", "8897"}
if debug {
command = append(command, "-debug")
}
if serviceMesh { if serviceMesh {
kubesharkCmd = append(kubesharkCmd, "--servicemesh") command = append(command, "-servicemesh")
} }
if tls { if tls {
kubesharkCmd = append(kubesharkCmd, "--tls") command = append(command, "-tls")
} }
if serviceMesh || tls { if serviceMesh || tls {
kubesharkCmd = append(kubesharkCmd, "--procfs", procfsMountPath) command = append(command, "-procfs", procfsMountPath)
} }
workerContainer := applyconfcore.Container() workerContainer := applyconfcore.Container()
@@ -772,7 +771,7 @@ func (provider *Provider) ApplyWorkerDaemonSet(ctx context.Context, namespace st
workerContainer.WithSecurityContext(applyconfcore.SecurityContext().WithCapabilities(caps)) workerContainer.WithSecurityContext(applyconfcore.SecurityContext().WithCapabilities(caps))
workerContainer.WithCommand(kubesharkCmd...) workerContainer.WithCommand(command...)
workerContainer.WithEnv( workerContainer.WithEnv(
applyconfcore.EnvVar().WithName(utils.LogLevelEnvVar).WithValue(logLevel.String()), applyconfcore.EnvVar().WithName(utils.LogLevelEnvVar).WithValue(logLevel.String()),
applyconfcore.EnvVar().WithName(utils.HostModeEnvVar).WithValue("1"), applyconfcore.EnvVar().WithName(utils.HostModeEnvVar).WithValue("1"),

View File

@@ -48,6 +48,7 @@ type WorkerSyncerConfig struct {
KubesharkServiceAccountExists bool KubesharkServiceAccountExists bool
ServiceMesh bool ServiceMesh bool
Tls bool Tls bool
Debug bool
} }
func CreateAndStartWorkerSyncer(ctx context.Context, kubernetesProvider *Provider, config WorkerSyncerConfig, startTime time.Time) (*WorkerSyncer, error) { func CreateAndStartWorkerSyncer(ctx context.Context, kubernetesProvider *Provider, config WorkerSyncerConfig, startTime time.Time) (*WorkerSyncer, error) {
@@ -368,7 +369,8 @@ func (workerSyncer *WorkerSyncer) updateWorkers() error {
workerSyncer.config.KubesharkApiFilteringOptions, workerSyncer.config.KubesharkApiFilteringOptions,
workerSyncer.config.LogLevel, workerSyncer.config.LogLevel,
workerSyncer.config.ServiceMesh, workerSyncer.config.ServiceMesh,
workerSyncer.config.Tls); err != nil { workerSyncer.config.Tls,
workerSyncer.config.Debug); err != nil {
return err return err
} }

View File

@@ -14,7 +14,7 @@ import (
core "k8s.io/api/core/v1" core "k8s.io/api/core/v1"
) )
func CreateHubResources(ctx context.Context, kubernetesProvider *kubernetes.Provider, serializedKubesharkConfig string, isNsRestrictedMode bool, kubesharkResourcesNamespace string, maxEntriesDBSizeBytes int64, hubResources models.Resources, imagePullPolicy core.PullPolicy, logLevel zerolog.Level, profiler bool) (bool, error) { func CreateHubResources(ctx context.Context, kubernetesProvider *kubernetes.Provider, serializedKubesharkConfig string, isNsRestrictedMode bool, kubesharkResourcesNamespace string, maxEntriesDBSizeBytes int64, hubResources models.Resources, imagePullPolicy core.PullPolicy, logLevel zerolog.Level, debug bool) (bool, error) {
if !isNsRestrictedMode { if !isNsRestrictedMode {
if err := createKubesharkNamespace(ctx, kubernetesProvider, kubesharkResourcesNamespace); err != nil { if err := createKubesharkNamespace(ctx, kubernetesProvider, kubesharkResourcesNamespace); err != nil {
return false, err return false, err
@@ -42,12 +42,11 @@ func CreateHubResources(ctx context.Context, kubernetesProvider *kubernetes.Prov
PodName: kubernetes.HubPodName, PodName: kubernetes.HubPodName,
PodImage: docker.GetHubImage(), PodImage: docker.GetHubImage(),
ServiceAccountName: serviceAccountName, ServiceAccountName: serviceAccountName,
IsNamespaceRestricted: isNsRestrictedMode,
MaxEntriesDBSizeBytes: maxEntriesDBSizeBytes, MaxEntriesDBSizeBytes: maxEntriesDBSizeBytes,
Resources: hubResources, Resources: hubResources,
ImagePullPolicy: imagePullPolicy, ImagePullPolicy: imagePullPolicy,
LogLevel: logLevel, LogLevel: logLevel,
Profiler: profiler, Debug: debug,
} }
frontOpts := &kubernetes.PodOptions{ frontOpts := &kubernetes.PodOptions{
@@ -55,12 +54,11 @@ func CreateHubResources(ctx context.Context, kubernetesProvider *kubernetes.Prov
PodName: kubernetes.FrontPodName, PodName: kubernetes.FrontPodName,
PodImage: docker.GetWorkerImage(), PodImage: docker.GetWorkerImage(),
ServiceAccountName: serviceAccountName, ServiceAccountName: serviceAccountName,
IsNamespaceRestricted: isNsRestrictedMode,
MaxEntriesDBSizeBytes: maxEntriesDBSizeBytes, MaxEntriesDBSizeBytes: maxEntriesDBSizeBytes,
Resources: hubResources, Resources: hubResources,
ImagePullPolicy: imagePullPolicy, ImagePullPolicy: imagePullPolicy,
LogLevel: logLevel, LogLevel: logLevel,
Profiler: profiler, Debug: debug,
} }
if err := createKubesharkHubPod(ctx, kubernetesProvider, opts); err != nil { if err := createKubesharkHubPod(ctx, kubernetesProvider, opts); err != nil {