Replace --profiler option with --debug

M. Mert Yildiran 2022-12-26 08:15:47 +03:00
parent 0a5343299c
commit c25be876f2
6 changed files with 30 additions and 30 deletions

View File

@@ -13,8 +13,8 @@ import (
 var tapCmd = &cobra.Command{
     Use:   "tap [POD REGEX]",
-    Short: "Record and see the network traffic in your Kubernetes cluster.",
-    Long:  "Record and see the network traffic in your Kubernetes cluster.",
+    Short: "Capture the network traffic in your Kubernetes cluster.",
+    Long:  "Capture the network traffic in your Kubernetes cluster.",
     RunE: func(cmd *cobra.Command, args []string) error {
         tap()
         return nil
@@ -55,7 +55,7 @@ func init() {
     tapCmd.Flags().BoolP(configStructs.AllNamespacesLabel, "A", defaultTapConfig.AllNamespaces, "Tap all namespaces.")
     tapCmd.Flags().String(configStructs.HumanMaxEntriesDBSizeLabel, defaultTapConfig.HumanMaxEntriesDBSize, "Override the default max entries db size.")
     tapCmd.Flags().Bool(configStructs.DryRunLabel, defaultTapConfig.DryRun, "Preview of all pods matching the regex, without tapping them.")
-    tapCmd.Flags().Bool(configStructs.ServiceMeshName, defaultTapConfig.ServiceMesh, "Record decrypted traffic if the cluster is configured with a service mesh and with mtls.")
-    tapCmd.Flags().Bool(configStructs.TlsName, defaultTapConfig.Tls, "Record tls traffic.")
-    tapCmd.Flags().Bool(configStructs.ProfilerName, defaultTapConfig.Profiler, "Run pprof server.")
+    tapCmd.Flags().Bool(configStructs.ServiceMeshLabel, defaultTapConfig.ServiceMesh, "Capture the encrypted traffic if the cluster is configured with a service mesh and with mTLS.")
+    tapCmd.Flags().Bool(configStructs.TlsLabel, defaultTapConfig.Tls, "Capture the traffic that's encrypted with OpenSSL or Go crypto/tls libraries.")
+    tapCmd.Flags().Bool(configStructs.DebugLabel, defaultTapConfig.Debug, "Enable the debug mode.")
 }
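
Note: the hunk above registers the new --debug boolean on the tap command. As a minimal, self-contained sketch of how such a cobra flag is typically consumed and mapped onto zerolog's global log level — the wiring below is hypothetical and only illustrates the pattern, not the actual Kubeshark code that reads the flag:

    package main

    import (
        "github.com/rs/zerolog"
        "github.com/rs/zerolog/log"
        "github.com/spf13/cobra"
    )

    func main() {
        cmd := &cobra.Command{
            Use: "tap [POD REGEX]",
            RunE: func(cmd *cobra.Command, args []string) error {
                // Read the boolean flag registered below.
                debug, err := cmd.Flags().GetBool("debug")
                if err != nil {
                    return err
                }
                if debug {
                    // Raise verbosity for every logger derived from the global level.
                    zerolog.SetGlobalLevel(zerolog.DebugLevel)
                }
                log.Debug().Msg("debug mode enabled") // printed only when --debug is set
                return nil
            },
        }
        cmd.Flags().Bool("debug", false, "Enable the debug mode.")
        if err := cmd.Execute(); err != nil {
            log.Fatal().Err(err).Send()
        }
    }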

View File

@@ -82,7 +82,7 @@ func tap() {
     }
 
     log.Info().Msg("Waiting for the creation of Kubeshark resources...")
-    if state.kubesharkServiceAccountExists, err = resources.CreateHubResources(ctx, kubernetesProvider, serializedKubesharkConfig, config.Config.IsNsRestrictedMode(), config.Config.ResourcesNamespace, config.Config.Tap.MaxEntriesDBSizeBytes(), config.Config.Tap.HubResources, config.Config.ImagePullPolicy(), config.Config.LogLevel(), config.Config.Tap.Profiler); err != nil {
+    if state.kubesharkServiceAccountExists, err = resources.CreateHubResources(ctx, kubernetesProvider, serializedKubesharkConfig, config.Config.IsNsRestrictedMode(), config.Config.ResourcesNamespace, config.Config.Tap.MaxEntriesDBSizeBytes(), config.Config.Tap.HubResources, config.Config.ImagePullPolicy(), config.Config.LogLevel(), config.Config.Tap.Debug); err != nil {
         var statusError *k8serrors.StatusError
         if errors.As(err, &statusError) && (statusError.ErrStatus.Reason == metav1.StatusReasonAlreadyExists) {
             log.Info().Msg("Kubeshark is already running in this namespace, change the `kubeshark-resources-namespace` configuration or run `kubeshark clean` to remove the currently running Kubeshark instance")
@@ -153,6 +153,7 @@ func startWorkerSyncer(ctx context.Context, cancel context.CancelFunc, provider
         KubesharkServiceAccountExists: state.kubesharkServiceAccountExists,
         ServiceMesh:                   config.Config.Tap.ServiceMesh,
         Tls:                           config.Config.Tap.Tls,
+        Debug:                         config.Config.Tap.Debug,
     }, startTime)
 
     if err != nil {

View File

@@ -18,9 +18,9 @@ const (
     AllNamespacesLabel         = "all-namespaces"
     HumanMaxEntriesDBSizeLabel = "max-entries-db-size"
     DryRunLabel                = "dry-run"
-    ServiceMeshName            = "service-mesh"
-    TlsName                    = "tls"
-    ProfilerName               = "profiler"
+    ServiceMeshLabel           = "service-mesh"
+    TlsLabel                   = "tls"
+    DebugLabel                 = "debug"
 )
 
 type HubConfig struct {
@@ -50,7 +50,7 @@ type TapConfig struct {
     ServiceMesh   bool   `yaml:"service-mesh" default:"true"`
     Tls           bool   `yaml:"tls" default:"true"`
     PacketCapture string `yaml:"packet-capture" default:"libpcap"`
-    Profiler      bool   `yaml:"profiler" default:"false"`
+    Debug         bool   `yaml:"debug" default:"false"`
 }
 
 func (config *TapConfig) PodRegex() *regexp.Regexp {
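
For context, the yaml struct tags above determine how a user's config file maps onto TapConfig. A trimmed-down sketch, assuming gopkg.in/yaml.v3 as the decoder — the project's real config loader, including how the default tags are applied, lives elsewhere and may differ:

    package main

    import (
        "fmt"

        "gopkg.in/yaml.v3"
    )

    // Trimmed-down copy of TapConfig for illustration only.
    type TapConfig struct {
        ServiceMesh bool `yaml:"service-mesh" default:"true"`
        Tls         bool `yaml:"tls" default:"true"`
        Debug       bool `yaml:"debug" default:"false"`
    }

    func main() {
        // The "debug" key matches the yaml tag on the Debug field.
        raw := []byte("service-mesh: true\ntls: false\ndebug: true\n")
        var cfg TapConfig
        if err := yaml.Unmarshal(raw, &cfg); err != nil {
            panic(err)
        }
        fmt.Printf("%+v\n", cfg) // {ServiceMesh:true Tls:false Debug:true}
    }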

View File

@@ -179,12 +179,11 @@ type PodOptions struct {
     PodName               string
     PodImage              string
     ServiceAccountName    string
-    IsNamespaceRestricted bool
     MaxEntriesDBSizeBytes int64
     Resources             models.Resources
     ImagePullPolicy       core.PullPolicy
     LogLevel              zerolog.Level
-    Profiler              bool
+    Debug                 bool
 }
 
 func (provider *Provider) BuildHubPod(opts *PodOptions, mountVolumeClaim bool, volumeClaimName string) (*core.Pod, error) {
@@ -212,12 +211,8 @@ func (provider *Provider) BuildHubPod(opts *PodOptions, mountVolumeClaim bool, volumeClaimName string) (*core.Pod, error) {
         "./hub",
     }
 
-    if opts.Profiler {
-        command = append(command, "--profiler")
-    }
-
-    if opts.IsNamespaceRestricted {
-        command = append(command, "--namespace", opts.Namespace)
+    if opts.Debug {
+        command = append(command, "-debug")
     }
 
     volumeMounts := []core.VolumeMount{
@@ -716,7 +711,7 @@ func (provider *Provider) CreateConfigMap(ctx context.Context, namespace string,
     return nil
 }
 
-func (provider *Provider) ApplyWorkerDaemonSet(ctx context.Context, namespace string, daemonSetName string, podImage string, workerPodName string, nodeNames []string, serviceAccountName string, resources models.Resources, imagePullPolicy core.PullPolicy, kubesharkApiFilteringOptions api.TrafficFilteringOptions, logLevel zerolog.Level, serviceMesh bool, tls bool) error {
+func (provider *Provider) ApplyWorkerDaemonSet(ctx context.Context, namespace string, daemonSetName string, podImage string, workerPodName string, nodeNames []string, serviceAccountName string, resources models.Resources, imagePullPolicy core.PullPolicy, kubesharkApiFilteringOptions api.TrafficFilteringOptions, logLevel zerolog.Level, serviceMesh bool, tls bool, debug bool) error {
     log.Debug().
         Int("node-count", len(nodeNames)).
         Str("namespace", namespace).
@@ -734,18 +729,22 @@ func (provider *Provider) ApplyWorkerDaemonSet(ctx context.Context, namespace string, daemonSetName string, podImage string, workerPodName string, nodeNames []string, serviceAccountName string, resources models.Resources, imagePullPolicy core.PullPolicy, kubesharkApiFilteringOptions api.TrafficFilteringOptions, logLevel zerolog.Level, serviceMesh bool, tls bool) error {
         return err
     }
 
-    kubesharkCmd := []string{"./worker", "-i", "any", "-port", "8897"}
+    command := []string{"./worker", "-i", "any", "-port", "8897"}
+
+    if debug {
+        command = append(command, "-debug")
+    }
 
     if serviceMesh {
-        kubesharkCmd = append(kubesharkCmd, "--servicemesh")
+        command = append(command, "-servicemesh")
     }
 
     if tls {
-        kubesharkCmd = append(kubesharkCmd, "--tls")
+        command = append(command, "-tls")
     }
 
     if serviceMesh || tls {
-        kubesharkCmd = append(kubesharkCmd, "--procfs", procfsMountPath)
+        command = append(command, "-procfs", procfsMountPath)
     }
 
     workerContainer := applyconfcore.Container()
@@ -772,7 +771,7 @@ func (provider *Provider) ApplyWorkerDaemonSet(ctx context.Context, namespace string, daemonSetName string, podImage string, workerPodName string, nodeNames []string, serviceAccountName string, resources models.Resources, imagePullPolicy core.PullPolicy, kubesharkApiFilteringOptions api.TrafficFilteringOptions, logLevel zerolog.Level, serviceMesh bool, tls bool) error {
     workerContainer.WithSecurityContext(applyconfcore.SecurityContext().WithCapabilities(caps))
 
-    workerContainer.WithCommand(kubesharkCmd...)
+    workerContainer.WithCommand(command...)
 
     workerContainer.WithEnv(
         applyconfcore.EnvVar().WithName(utils.LogLevelEnvVar).WithValue(logLevel.String()),
         applyconfcore.EnvVar().WithName(utils.HostModeEnvVar).WithValue("1"),
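
One detail worth noting in this file: the container commands assembled above use single-dash flags (-debug, -servicemesh, -tls, -procfs), the convention of Go's standard library flag package, which accepts -name and --name interchangeably. A hypothetical worker entrypoint parsing these flags could look like the sketch below; the real worker binary's flag definitions live in its own codebase and may differ:

    package main

    import (
        "flag"
        "fmt"
    )

    func main() {
        // Flags mirror the arguments the DaemonSet passes to ./worker.
        debug := flag.Bool("debug", false, "Enable the debug mode.")
        iface := flag.String("i", "any", "Interface to capture from.")
        port := flag.Int("port", 8897, "Port to listen on.")
        flag.Parse()

        fmt.Printf("debug=%v iface=%s port=%d\n", *debug, *iface, *port)
    }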

View File

@@ -48,6 +48,7 @@ type WorkerSyncerConfig struct {
     KubesharkServiceAccountExists bool
     ServiceMesh                   bool
     Tls                           bool
+    Debug                         bool
 }
 
 func CreateAndStartWorkerSyncer(ctx context.Context, kubernetesProvider *Provider, config WorkerSyncerConfig, startTime time.Time) (*WorkerSyncer, error) {
@@ -368,7 +369,8 @@ func (workerSyncer *WorkerSyncer) updateWorkers() error {
         workerSyncer.config.KubesharkApiFilteringOptions,
         workerSyncer.config.LogLevel,
         workerSyncer.config.ServiceMesh,
-        workerSyncer.config.Tls); err != nil {
+        workerSyncer.config.Tls,
+        workerSyncer.config.Debug); err != nil {
         return err
     }

View File

@@ -14,7 +14,7 @@ import (
     core "k8s.io/api/core/v1"
 )
 
-func CreateHubResources(ctx context.Context, kubernetesProvider *kubernetes.Provider, serializedKubesharkConfig string, isNsRestrictedMode bool, kubesharkResourcesNamespace string, maxEntriesDBSizeBytes int64, hubResources models.Resources, imagePullPolicy core.PullPolicy, logLevel zerolog.Level, profiler bool) (bool, error) {
+func CreateHubResources(ctx context.Context, kubernetesProvider *kubernetes.Provider, serializedKubesharkConfig string, isNsRestrictedMode bool, kubesharkResourcesNamespace string, maxEntriesDBSizeBytes int64, hubResources models.Resources, imagePullPolicy core.PullPolicy, logLevel zerolog.Level, debug bool) (bool, error) {
     if !isNsRestrictedMode {
         if err := createKubesharkNamespace(ctx, kubernetesProvider, kubesharkResourcesNamespace); err != nil {
             return false, err
@@ -42,12 +42,11 @@ func CreateHubResources(ctx context.Context, kubernetesProvider *kubernetes.Provider, serializedKubesharkConfig string, isNsRestrictedMode bool, kubesharkResourcesNamespace string, maxEntriesDBSizeBytes int64, hubResources models.Resources, imagePullPolicy core.PullPolicy, logLevel zerolog.Level, profiler bool) (bool, error) {
         PodName:               kubernetes.HubPodName,
         PodImage:              docker.GetHubImage(),
         ServiceAccountName:    serviceAccountName,
-        IsNamespaceRestricted: isNsRestrictedMode,
         MaxEntriesDBSizeBytes: maxEntriesDBSizeBytes,
         Resources:             hubResources,
         ImagePullPolicy:       imagePullPolicy,
         LogLevel:              logLevel,
-        Profiler:              profiler,
+        Debug:                 debug,
     }
 
     frontOpts := &kubernetes.PodOptions{
@@ -55,12 +54,11 @@ func CreateHubResources(ctx context.Context, kubernetesProvider *kubernetes.Provider, serializedKubesharkConfig string, isNsRestrictedMode bool, kubesharkResourcesNamespace string, maxEntriesDBSizeBytes int64, hubResources models.Resources, imagePullPolicy core.PullPolicy, logLevel zerolog.Level, profiler bool) (bool, error) {
         PodName:               kubernetes.FrontPodName,
         PodImage:              docker.GetWorkerImage(),
         ServiceAccountName:    serviceAccountName,
-        IsNamespaceRestricted: isNsRestrictedMode,
         MaxEntriesDBSizeBytes: maxEntriesDBSizeBytes,
         Resources:             hubResources,
         ImagePullPolicy:       imagePullPolicy,
         LogLevel:              logLevel,
-        Profiler:              profiler,
+        Debug:                 debug,
     }
 
     if err := createKubesharkHubPod(ctx, kubernetesProvider, opts); err != nil {
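
With these changes in place, debug mode is driven end to end by a single option: running kubeshark tap --debug sets Tap.Debug, which CreateHubResources and the worker syncer forward to BuildHubPod and ApplyWorkerDaemonSet, and which ultimately appends -debug to both the hub and worker container commands.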