diff --git a/cmd/check.go b/cmd/check.go index 604676f7b..16deef26a 100644 --- a/cmd/check.go +++ b/cmd/check.go @@ -1,10 +1,9 @@ package cmd import ( - "log" - "github.com/creasty/defaults" "github.com/kubeshark/kubeshark/config/configStructs" + "github.com/rs/zerolog/log" "github.com/spf13/cobra" ) @@ -22,7 +21,7 @@ func init() { defaultCheckConfig := configStructs.CheckConfig{} if err := defaults.Set(&defaultCheckConfig); err != nil { - log.Print(err) + log.Error().Err(err).Send() } checkCmd.Flags().Bool(configStructs.PreTapCheckName, defaultCheckConfig.PreTap, "Check pre-tap Kubeshark installation for potential problems") diff --git a/cmd/check/imagePullInCluster.go b/cmd/check/imagePullInCluster.go index 7757ee698..11ac4ccd7 100644 --- a/cmd/check/imagePullInCluster.go +++ b/cmd/check/imagePullInCluster.go @@ -3,39 +3,55 @@ package check import ( "context" "fmt" - "log" "regexp" "time" "github.com/kubeshark/kubeshark/kubernetes" "github.com/kubeshark/kubeshark/utils" + "github.com/rs/zerolog/log" core "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) func ImagePullInCluster(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool { - log.Printf("\nimage-pull-in-cluster\n--------------------") + log.Info().Msg("[image-pull-in-cluster]") namespace := "default" podName := "kubeshark-test" defer func() { if err := kubernetesProvider.RemovePod(ctx, namespace, podName); err != nil { - log.Printf("%v error while removing test pod in cluster, err: %v", fmt.Sprintf(utils.Red, "✗"), err) + log.Error(). + Str("namespace", namespace). + Str("pod", podName). + Err(err). + Msg("While removing test pod!") } }() if err := createImagePullInClusterPod(ctx, kubernetesProvider, namespace, podName); err != nil { - log.Printf("%v error while creating test pod in cluster, err: %v", fmt.Sprintf(utils.Red, "✗"), err) + log.Error(). + Str("namespace", namespace). + Str("pod", podName). + Err(err). 
+ Msg("While creating test pod!") return false } if err := checkImagePulled(ctx, kubernetesProvider, namespace, podName); err != nil { log.Printf("%v cluster is not able to pull kubeshark containers from docker hub, err: %v", fmt.Sprintf(utils.Red, "✗"), err) + log.Error(). + Str("namespace", namespace). + Str("pod", podName). + Err(err). + Msg("Unable to pull images from Docker Hub!") return false } - log.Printf("%v cluster is able to pull kubeshark containers from docker hub", fmt.Sprintf(utils.Green, "√")) + log.Info(). + Str("namespace", namespace). + Str("pod", podName). + Msg("Pulling images from Docker Hub is passed.") return true } diff --git a/cmd/check/kubernetesApi.go b/cmd/check/kubernetesApi.go index 03ae4a7bb..08feee2f8 100644 --- a/cmd/check/kubernetesApi.go +++ b/cmd/check/kubernetesApi.go @@ -1,31 +1,28 @@ package check import ( - "fmt" - "log" - "github.com/kubeshark/kubeshark/config" "github.com/kubeshark/kubeshark/kubernetes" "github.com/kubeshark/kubeshark/semver" - "github.com/kubeshark/kubeshark/utils" + "github.com/rs/zerolog/log" ) func KubernetesApi() (*kubernetes.Provider, *semver.SemVersion, bool) { - log.Printf("\nkubernetes-api\n--------------------") + log.Info().Msg("[kubernetes-api]") kubernetesProvider, err := kubernetes.NewProvider(config.Config.KubeConfigPath(), config.Config.KubeContext) if err != nil { - log.Printf("%v can't initialize the client, err: %v", fmt.Sprintf(utils.Red, "✗"), err) + log.Error().Err(err).Msg("Can't initialize the client!") return nil, nil, false } - log.Printf("%v can initialize the client", fmt.Sprintf(utils.Green, "√")) + log.Info().Msg("Initialization of the client is passed.") kubernetesVersion, err := kubernetesProvider.GetKubernetesVersion() if err != nil { - log.Printf("%v can't query the Kubernetes API, err: %v", fmt.Sprintf(utils.Red, "✗"), err) + log.Error().Err(err).Msg("Can't query the Kubernetes API!") return nil, nil, false } - log.Printf("%v can query the Kubernetes API", 
fmt.Sprintf(utils.Green, "√")) + log.Info().Msg("Querying the Kubernetes API is passed.") return kubernetesProvider, kubernetesVersion, true } diff --git a/cmd/check/kubernetesPermissions.go b/cmd/check/kubernetesPermissions.go index 6505b885d..c31c126b9 100644 --- a/cmd/check/kubernetesPermissions.go +++ b/cmd/check/kubernetesPermissions.go @@ -4,17 +4,16 @@ import ( "context" "embed" "fmt" - "log" "github.com/kubeshark/kubeshark/config" "github.com/kubeshark/kubeshark/kubernetes" - "github.com/kubeshark/kubeshark/utils" + "github.com/rs/zerolog/log" rbac "k8s.io/api/rbac/v1" "k8s.io/client-go/kubernetes/scheme" ) func TapKubernetesPermissions(ctx context.Context, embedFS embed.FS, kubernetesProvider *kubernetes.Provider) bool { - log.Printf("\nkubernetes-permissions\n--------------------") + log.Info().Msg("[kubernetes-permissions]") var filePath string if config.Config.IsNsRestrictedMode() { @@ -25,14 +24,14 @@ func TapKubernetesPermissions(ctx context.Context, embedFS embed.FS, kubernetesP data, err := embedFS.ReadFile(filePath) if err != nil { - log.Printf("%v error while checking kubernetes permissions, err: %v", fmt.Sprintf(utils.Red, "✗"), err) + log.Error().Err(err).Msg("While checking Kubernetes permissions!") return false } decode := scheme.Codecs.UniversalDeserializer().Decode obj, _, err := decode(data, nil, nil) if err != nil { - log.Printf("%v error while checking kubernetes permissions, err: %v", fmt.Sprintf(utils.Red, "✗"), err) + log.Error().Err(err).Msg("While checking Kubernetes permissions!") return false } @@ -43,7 +42,7 @@ func TapKubernetesPermissions(ctx context.Context, embedFS embed.FS, kubernetesP return checkRulesPermissions(ctx, kubernetesProvider, resource.Rules, "") } - log.Printf("%v error while checking kubernetes permissions, err: resource of type 'Role' or 'ClusterRole' not found in permission files", fmt.Sprintf(utils.Red, "✗")) + log.Error().Msg("While checking Kubernetes permissions! 
Resource of types 'Role' or 'ClusterRole' are not found in permission files.") return false } @@ -75,13 +74,18 @@ func checkPermissionExist(group string, resource string, verb string, namespace } if err != nil { - log.Printf("%v error checking permission for %v %v %v, err: %v", fmt.Sprintf(utils.Red, "✗"), verb, resource, groupAndNamespace, err) + log.Error(). + Str("verb", verb). + Str("resource", resource). + Str("group-and-namespace", groupAndNamespace). + Err(err). + Msg("While checking Kubernetes permissions!") return false } else if !exist { - log.Printf("%v can't %v %v %v", fmt.Sprintf(utils.Red, "✗"), verb, resource, groupAndNamespace) + log.Error().Msg(fmt.Sprintf("Can't %v %v %v", verb, resource, groupAndNamespace)) return false } - log.Printf("%v can %v %v %v", fmt.Sprintf(utils.Green, "√"), verb, resource, groupAndNamespace) + log.Info().Msg(fmt.Sprintf("Can %v %v %v", verb, resource, groupAndNamespace)) return true } diff --git a/cmd/check/kubernetesResources.go b/cmd/check/kubernetesResources.go index eb51d3051..8d258e50c 100644 --- a/cmd/check/kubernetesResources.go +++ b/cmd/check/kubernetesResources.go @@ -3,15 +3,14 @@ package check import ( "context" "fmt" - "log" "github.com/kubeshark/kubeshark/config" "github.com/kubeshark/kubeshark/kubernetes" - "github.com/kubeshark/kubeshark/utils" + "github.com/rs/zerolog/log" ) func KubernetesResources(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool { - log.Printf("\nk8s-components\n--------------------") + log.Info().Msg("[k8s-components]") exist, err := kubernetesProvider.DoesNamespaceExist(ctx, config.Config.ResourcesNamespace) allResourcesExist := checkResourceExist(config.Config.ResourcesNamespace, "namespace", exist, err) @@ -46,20 +45,32 @@ func KubernetesResources(ctx context.Context, kubernetesProvider *kubernetes.Pro func checkPodResourcesExist(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool { if pods, err := kubernetesProvider.ListPodsByAppLabel(ctx, 
config.Config.ResourcesNamespace, kubernetes.HubPodName); err != nil { - log.Printf("%v error checking if '%v' pod is running, err: %v", fmt.Sprintf(utils.Red, "✗"), kubernetes.HubPodName, err) + log.Error(). + Str("name", kubernetes.HubPodName). + Err(err). + Msg("While checking if pod is running!") return false } else if len(pods) == 0 { - log.Printf("%v '%v' pod doesn't exist", fmt.Sprintf(utils.Red, "✗"), kubernetes.HubPodName) + log.Error(). + Str("name", kubernetes.HubPodName). + Msg("Pod doesn't exist!") return false } else if !kubernetes.IsPodRunning(&pods[0]) { - log.Printf("%v '%v' pod not running", fmt.Sprintf(utils.Red, "✗"), kubernetes.HubPodName) + log.Error(). + Str("name", kubernetes.HubPodName). + Msg("Pod is not running!") return false } - log.Printf("%v '%v' pod running", fmt.Sprintf(utils.Green, "√"), kubernetes.HubPodName) + log.Info(). + Str("name", kubernetes.HubPodName). + Msg("Pod is running.") if pods, err := kubernetesProvider.ListPodsByAppLabel(ctx, config.Config.ResourcesNamespace, kubernetes.TapperPodName); err != nil { - log.Printf("%v error checking if '%v' pods are running, err: %v", fmt.Sprintf(utils.Red, "✗"), kubernetes.TapperPodName, err) + log.Error(). + Str("name", kubernetes.TapperPodName). + Err(err). + Msg("While checking if pods are running!") return false } else { tappers := 0 @@ -73,24 +84,38 @@ func checkPodResourcesExist(ctx context.Context, kubernetesProvider *kubernetes. } if notRunningTappers > 0 { - log.Printf("%v '%v' %v/%v pods are not running", fmt.Sprintf(utils.Red, "✗"), kubernetes.TapperPodName, notRunningTappers, tappers) + log.Error(). + Str("name", kubernetes.TapperPodName). + Msg(fmt.Sprintf("%d/%d pods are not running!", notRunningTappers, tappers)) return false } - log.Printf("%v '%v' %v pods running", fmt.Sprintf(utils.Green, "√"), kubernetes.TapperPodName, tappers) + log.Info(). + Str("name", kubernetes.TapperPodName). 
+ Msg(fmt.Sprintf("All %d pods are running.", tappers)) return true } } func checkResourceExist(resourceName string, resourceType string, exist bool, err error) bool { if err != nil { - log.Printf("%v error checking if '%v' %v exists, err: %v", fmt.Sprintf(utils.Red, "✗"), resourceName, resourceType, err) + log.Error(). + Str("name", resourceName). + Str("type", resourceType). + Err(err). + Msg("Checking if resource exists!") return false } else if !exist { - log.Printf("%v '%v' %v doesn't exist", fmt.Sprintf(utils.Red, "✗"), resourceName, resourceType) + log.Error(). + Str("name", resourceName). + Str("type", resourceType). + Msg("Resource doesn't exist!") return false } - log.Printf("%v '%v' %v exists", fmt.Sprintf(utils.Green, "√"), resourceName, resourceType) + log.Info(). + Str("name", resourceName). + Str("type", resourceType). + Msg("Resource exists.") return true } diff --git a/cmd/check/kubernetesVersion.go b/cmd/check/kubernetesVersion.go index 37454d3db..fa9bac7ab 100644 --- a/cmd/check/kubernetesVersion.go +++ b/cmd/check/kubernetesVersion.go @@ -1,22 +1,19 @@ package check import ( - "fmt" - "log" - "github.com/kubeshark/kubeshark/kubernetes" "github.com/kubeshark/kubeshark/semver" - "github.com/kubeshark/kubeshark/utils" + "github.com/rs/zerolog/log" ) func KubernetesVersion(kubernetesVersion *semver.SemVersion) bool { - log.Printf("\nkubernetes-version\n--------------------") + log.Info().Msg("[kubernetes-version]") if err := kubernetes.ValidateKubernetesVersion(kubernetesVersion); err != nil { - log.Printf("%v not running the minimum Kubernetes API version, err: %v", fmt.Sprintf(utils.Red, "✗"), err) + log.Error().Err(err).Msg("Not running the minimum Kubernetes API version!") return false } - log.Printf("%v is running the minimum Kubernetes API version", fmt.Sprintf(utils.Green, "√")) + log.Info().Msg("Running the minimum Kubernetes API version") return true } diff --git a/cmd/check/serverConnection.go b/cmd/check/serverConnection.go index 
3ac9182e3..877fc4f23 100644 --- a/cmd/check/serverConnection.go +++ b/cmd/check/serverConnection.go @@ -2,41 +2,39 @@ package check import ( "context" - "fmt" - "log" "regexp" "github.com/kubeshark/kubeshark/config" "github.com/kubeshark/kubeshark/internal/connect" "github.com/kubeshark/kubeshark/kubernetes" - "github.com/kubeshark/kubeshark/utils" + "github.com/rs/zerolog/log" ) func ServerConnection(kubernetesProvider *kubernetes.Provider) bool { - log.Printf("\nHub connectivity\n--------------------") + log.Info().Msg("[hub-connectivity]") serverUrl := kubernetes.GetLocalhostOnPort(config.Config.Hub.PortForward.SrcPort) connector := connect.NewConnector(serverUrl, 1, connect.DefaultTimeout) if err := connector.TestConnection(""); err == nil { - log.Printf("%v found Kubeshark server tunnel available and connected successfully to Hub", fmt.Sprintf(utils.Green, "√")) + log.Info().Msg("Found Kubeshark server tunnel available and connected successfully to Hub!") return true } connectedToHub := false if err := checkProxy(serverUrl, kubernetesProvider); err != nil { - log.Printf("%v couldn't connect to Hub using proxy, err: %v", fmt.Sprintf(utils.Red, "✗"), err) + log.Error().Err(err).Msg("Couldn't connect to Hub using proxy!") } else { connectedToHub = true - log.Printf("%v connected successfully to Hub using proxy", fmt.Sprintf(utils.Green, "√")) + log.Info().Msg("Connected successfully to Hub using proxy.") } if err := checkPortForward(serverUrl, kubernetesProvider); err != nil { - log.Printf("%v couldn't connect to Hub using port-forward, err: %v", fmt.Sprintf(utils.Red, "✗"), err) + log.Error().Err(err).Msg("Couldn't connect to Hub using port-forward!") } else { connectedToHub = true - log.Printf("%v connected successfully to Hub using port-forward", fmt.Sprintf(utils.Green, "√")) + log.Info().Msg("Connected successfully to Hub using port-forward.") } return connectedToHub @@ -57,7 +55,7 @@ func checkProxy(serverUrl string, kubernetesProvider *kubernetes.Provider) 
error } if err := httpServer.Shutdown(ctx); err != nil { - log.Printf("Error occurred while stopping proxy, err: %v", err) + log.Error().Err(err).Msg("While stopping the proxy!") } return nil diff --git a/cmd/checkRunner.go b/cmd/checkRunner.go index a336cb723..4aae8d671 100644 --- a/cmd/checkRunner.go +++ b/cmd/checkRunner.go @@ -4,11 +4,11 @@ import ( "context" "embed" "fmt" - "log" "github.com/kubeshark/kubeshark/cmd/check" "github.com/kubeshark/kubeshark/config" "github.com/kubeshark/kubeshark/utils" + "github.com/rs/zerolog/log" ) var ( @@ -17,7 +17,7 @@ var ( ) func runKubesharkCheck() { - log.Printf("Kubeshark checks\n===================") + log.Info().Msg("Kubeshark checks...") ctx, cancel := context.WithCancel(context.Background()) defer cancel() // cancel will be called when this function exits @@ -51,8 +51,8 @@ func runKubesharkCheck() { } if checkPassed { - log.Printf("\nStatus check results are %v", fmt.Sprintf(utils.Green, "√")) + log.Info().Msg(fmt.Sprintf("Status check results are %v", fmt.Sprintf(utils.Green, "√"))) } else { - log.Printf("\nStatus check results are %v", fmt.Sprintf(utils.Red, "✗")) + log.Info().Msg(fmt.Sprintf("Status check results are %v", fmt.Sprintf(utils.Red, "✗"))) } } diff --git a/cmd/common.go b/cmd/common.go index 9ce0a2faf..4eb064fa0 100644 --- a/cmd/common.go +++ b/cmd/common.go @@ -5,51 +5,57 @@ import ( "encoding/json" "errors" "fmt" - "log" "path" "regexp" "time" + "github.com/kubeshark/kubeshark/config" "github.com/kubeshark/kubeshark/config/configStructs" "github.com/kubeshark/kubeshark/errormessage" "github.com/kubeshark/kubeshark/internal/connect" + "github.com/kubeshark/kubeshark/kubernetes" "github.com/kubeshark/kubeshark/kubeshark" "github.com/kubeshark/kubeshark/kubeshark/fsUtils" "github.com/kubeshark/kubeshark/resources" - "github.com/kubeshark/kubeshark/utils" "github.com/kubeshark/worker/models" - - "github.com/kubeshark/kubeshark/config" - "github.com/kubeshark/kubeshark/kubernetes" + 
"github.com/rs/zerolog/log" ) func startProxyReportErrorIfAny(kubernetesProvider *kubernetes.Provider, ctx context.Context, cancel context.CancelFunc, serviceName string, srcPort uint16, dstPort uint16, healthCheck string) { httpServer, err := kubernetes.StartProxy(kubernetesProvider, config.Config.Tap.ProxyHost, srcPort, dstPort, config.Config.ResourcesNamespace, serviceName, cancel) if err != nil { - log.Printf(utils.Error, fmt.Sprintf("Error occured while running k8s proxy %v\n"+ - "Try setting different port by using --%s", errormessage.FormatError(err), configStructs.GuiPortTapName)) + log.Error(). + Err(errormessage.FormatError(err)). + Msg(fmt.Sprintf("Error occured while running k8s proxy. Try setting different port by using --%s", configStructs.GuiPortTapName)) cancel() return } connector := connect.NewConnector(kubernetes.GetLocalhostOnPort(srcPort), connect.DefaultRetries, connect.DefaultTimeout) if err := connector.TestConnection(healthCheck); err != nil { - log.Printf("Couldn't connect using proxy, stopping proxy and trying to create port-forward") + log.Error().Msg("Couldn't connect using proxy, stopping proxy and trying to create port-forward..") if err := httpServer.Shutdown(ctx); err != nil { - log.Printf("Error occurred while stopping proxy %v", errormessage.FormatError(err)) + log.Error(). + Err(errormessage.FormatError(err)). + Msg("Error occurred while stopping proxy.") } podRegex, _ := regexp.Compile(kubernetes.HubPodName) if _, err := kubernetes.NewPortForward(kubernetesProvider, config.Config.ResourcesNamespace, podRegex, srcPort, dstPort, ctx, cancel); err != nil { - log.Printf(utils.Error, fmt.Sprintf("Error occured while running port forward [%s] %v\n"+ - "Try setting different port by using --%s", podRegex, errormessage.FormatError(err), configStructs.GuiPortTapName)) + log.Error(). + Str("pod-regex", podRegex.String()). + Err(errormessage.FormatError(err)). + Msg(fmt.Sprintf("Error occured while running port forward. 
Try setting different port by using --%s", configStructs.GuiPortTapName)) cancel() return } connector = connect.NewConnector(kubernetes.GetLocalhostOnPort(srcPort), connect.DefaultRetries, connect.DefaultTimeout) if err := connector.TestConnection(healthCheck); err != nil { - log.Printf(utils.Error, fmt.Sprintf("Couldn't connect to [%s].", serviceName)) + log.Error(). + Str("service-name", serviceName). + Err(errormessage.FormatError(err)). + Msg("Couldn't connect to service.") cancel() return } @@ -85,9 +91,9 @@ func getKubernetesProviderForCli() (*kubernetes.Provider, error) { func handleKubernetesProviderError(err error) { var clusterBehindProxyErr *kubernetes.ClusterBehindProxyError if ok := errors.As(err, &clusterBehindProxyErr); ok { - log.Printf("cannot establish http-proxy connection to the Kubernetes cluster. If you’re using Lens or similar tool, please run kubeshark with regular kubectl config using --%v %v=$HOME/.kube/config flag", config.SetCommandName, config.KubeConfigPathConfigName) + log.Error().Msg(fmt.Sprintf("Cannot establish http-proxy connection to the Kubernetes cluster. 
If you’re using Lens or similar tool, please run kubeshark with regular kubectl config using --%v %v=$HOME/.kube/config flag", config.SetCommandName, config.KubeConfigPathConfigName)) } else { - log.Print(err) + log.Error().Err(err).Send() } } @@ -105,7 +111,7 @@ func dumpLogsIfNeeded(ctx context.Context, kubernetesProvider *kubernetes.Provid kubesharkDir := kubeshark.GetKubesharkFolderPath() filePath := path.Join(kubesharkDir, fmt.Sprintf("kubeshark_logs_%s.zip", time.Now().Format("2006_01_02__15_04_05"))) if err := fsUtils.DumpLogs(ctx, kubernetesProvider, filePath); err != nil { - log.Printf("Failed dump logs %v", err) + log.Error().Err(err).Msg("Failed to dump logs.") } } diff --git a/cmd/config.go b/cmd/config.go index 7958bd55c..c857e76de 100644 --- a/cmd/config.go +++ b/cmd/config.go @@ -2,12 +2,12 @@ package cmd import ( "fmt" - "log" "github.com/creasty/defaults" "github.com/kubeshark/kubeshark/config" "github.com/kubeshark/kubeshark/config/configStructs" "github.com/kubeshark/kubeshark/utils" + "github.com/rs/zerolog/log" "github.com/spf13/cobra" ) @@ -17,25 +17,25 @@ var configCmd = &cobra.Command{ RunE: func(cmd *cobra.Command, args []string) error { configWithDefaults, err := config.GetConfigWithDefaults() if err != nil { - log.Printf("Failed generating config with defaults, err: %v", err) + log.Error().Err(err).Msg("Failed generating config with defaults.") return nil } if config.Config.Config.Regenerate { if err := config.WriteConfig(configWithDefaults); err != nil { - log.Printf("Failed writing config with defaults, err: %v", err) + log.Error().Err(err).Msg("Failed writing config with defaults.") return nil } - log.Printf("Template File written to %s", fmt.Sprintf(utils.Purple, config.Config.ConfigFilePath)) + log.Info().Str("config-path", config.Config.ConfigFilePath).Msg("Template file written to config path.") } else { template, err := utils.PrettyYaml(configWithDefaults) if err != nil { - log.Printf("Failed converting config with defaults to yaml, 
err: %v", err) + log.Error().Err(err).Msg("Failed converting config with defaults to YAML.") return nil } - log.Printf("Writing template config.\n%v", template) + log.Debug().Str("template", template).Msg("Writing template config...") fmt.Printf("%v", template) } @@ -48,7 +48,7 @@ func init() { defaultConfig := config.CreateDefaultConfig() if err := defaults.Set(&defaultConfig); err != nil { - log.Print(err) + log.Debug().Err(err).Send() } configCmd.Flags().BoolP(configStructs.RegenerateConfigName, "r", defaultConfig.Config.Regenerate, fmt.Sprintf("Regenerate the config file with default values to path %s or to chosen path using --%s", defaultConfig.ConfigFilePath, config.ConfigFilePathCommandName)) diff --git a/cmd/goUtils/funcWrappers.go b/cmd/goUtils/funcWrappers.go index 7b2a52706..578423948 100644 --- a/cmd/goUtils/funcWrappers.go +++ b/cmd/goUtils/funcWrappers.go @@ -1,21 +1,25 @@ package goUtils import ( - "log" "reflect" "runtime/debug" + + "github.com/rs/zerolog/log" ) func HandleExcWrapper(fn interface{}, params ...interface{}) (result []reflect.Value) { defer func() { if panicMessage := recover(); panicMessage != nil { stack := debug.Stack() - log.Fatalf("Unhandled panic: %v\n stack: %s", panicMessage, stack) + log.Fatal(). + Interface("msg", panicMessage). + Interface("stack", stack). 
+ Msg("Unhandled panic!") } }() f := reflect.ValueOf(fn) if f.Type().NumIn() != len(params) { - panic("incorrect number of parameters!") + panic("Incorrect number of parameters!") } inputs := make([]reflect.Value, len(params)) for k, in := range params { diff --git a/cmd/logs.go b/cmd/logs.go index 74e2281de..0d3eeeab4 100644 --- a/cmd/logs.go +++ b/cmd/logs.go @@ -2,13 +2,13 @@ package cmd import ( "context" - "log" "github.com/creasty/defaults" "github.com/kubeshark/kubeshark/config" "github.com/kubeshark/kubeshark/config/configStructs" "github.com/kubeshark/kubeshark/errormessage" "github.com/kubeshark/kubeshark/kubeshark/fsUtils" + "github.com/rs/zerolog/log" "github.com/spf13/cobra" ) @@ -26,10 +26,10 @@ var logsCmd = &cobra.Command{ return errormessage.FormatError(validationErr) } - log.Printf("Using file path %s", config.Config.Logs.FilePath()) + log.Debug().Str("logs-path", config.Config.Logs.FilePath()).Msg("Using this logs path...") if dumpLogsErr := fsUtils.DumpLogs(ctx, kubernetesProvider, config.Config.Logs.FilePath()); dumpLogsErr != nil { - log.Printf("Failed dump logs %v", dumpLogsErr) + log.Error().Err(dumpLogsErr).Msg("Failed to dump logs.") } return nil @@ -41,7 +41,7 @@ func init() { defaultLogsConfig := configStructs.LogsConfig{} if err := defaults.Set(&defaultLogsConfig); err != nil { - log.Print(err) + log.Debug().Err(err).Send() } logsCmd.Flags().StringP(configStructs.FileLogsName, "f", defaultLogsConfig.FileStr, "Path for zip file (default current \\kubeshark_logs.zip)") diff --git a/cmd/root.go b/cmd/root.go index 739009425..095ea7cda 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -2,13 +2,11 @@ package cmd import ( "fmt" - "log" - "time" "github.com/creasty/defaults" "github.com/kubeshark/kubeshark/config" "github.com/kubeshark/kubeshark/kubeshark/version" - "github.com/kubeshark/kubeshark/utils" + "github.com/rs/zerolog/log" "github.com/spf13/cobra" ) @@ -19,7 +17,7 @@ var rootCmd = &cobra.Command{ Further info is available at 
https://github.com/kubeshark/kubeshark`, PersistentPreRunE: func(cmd *cobra.Command, args []string) error { if err := config.InitConfig(cmd); err != nil { - log.Fatal(err) + log.Fatal().Err(err).Send() } return nil @@ -29,29 +27,18 @@ Further info is available at https://github.com/kubeshark/kubeshark`, func init() { defaultConfig := config.CreateDefaultConfig() if err := defaults.Set(&defaultConfig); err != nil { - log.Print(err) + log.Debug().Err(err).Send() } rootCmd.PersistentFlags().StringSlice(config.SetCommandName, []string{}, fmt.Sprintf("Override values using --%s", config.SetCommandName)) rootCmd.PersistentFlags().String(config.ConfigFilePathCommandName, defaultConfig.ConfigFilePath, fmt.Sprintf("Override config file path using --%s", config.ConfigFilePathCommandName)) -} - -func printNewVersionIfNeeded(versionChan chan string) { - select { - case versionMsg := <-versionChan: - if versionMsg != "" { - log.Printf(utils.Yellow, versionMsg) - } - case <-time.After(2 * time.Second): - } + rootCmd.PersistentFlags().Bool("debug", false, "Enable debug mode.") } // Execute adds all child commands to the root command and sets flags appropriately. // This is called by main.main(). It only needs to happen once to the tapCmd. 
func Execute() { - versionChan := make(chan string) - defer printNewVersionIfNeeded(versionChan) - go version.CheckNewerVersion(versionChan) + go version.CheckNewerVersion() cobra.CheckErr(rootCmd.Execute()) } diff --git a/cmd/tap.go b/cmd/tap.go index 452a9abe6..d2078bbc3 100644 --- a/cmd/tap.go +++ b/cmd/tap.go @@ -2,12 +2,12 @@ package cmd import ( "errors" - "log" "github.com/creasty/defaults" "github.com/kubeshark/kubeshark/config" "github.com/kubeshark/kubeshark/config/configStructs" "github.com/kubeshark/kubeshark/errormessage" + "github.com/rs/zerolog/log" "github.com/spf13/cobra" ) @@ -31,7 +31,9 @@ Supported protocols are HTTP and gRPC.`, return errormessage.FormatError(err) } - log.Printf("Kubeshark will store up to %s of traffic, old traffic will be cleared once the limit is reached.", config.Config.Tap.HumanMaxEntriesDBSize) + log.Info(). + Str("limit", config.Config.Tap.HumanMaxEntriesDBSize). + Msg("Kubeshark will store traffic up to a limit. The old traffic will be cleared once the limit is reached.") return nil }, @@ -42,7 +44,7 @@ func init() { defaultTapConfig := configStructs.TapConfig{} if err := defaults.Set(&defaultTapConfig); err != nil { - log.Print(err) + log.Debug().Err(err).Send() } tapCmd.Flags().Uint16P(configStructs.GuiPortTapName, "p", defaultTapConfig.GuiPort, "Provide a custom port for the web interface webserver") diff --git a/cmd/tapRunner.go b/cmd/tapRunner.go index ee117f45e..05f318d57 100644 --- a/cmd/tapRunner.go +++ b/cmd/tapRunner.go @@ -4,7 +4,6 @@ import ( "context" "errors" "fmt" - "log" "regexp" "strings" "time" @@ -24,6 +23,7 @@ import ( "github.com/kubeshark/kubeshark/kubernetes" "github.com/kubeshark/worker/api" "github.com/kubeshark/worker/models" + "github.com/rs/zerolog/log" ) const cleanupTimeout = time.Minute @@ -58,14 +58,13 @@ func RunKubesharkTap() { conf := getTapConfig() serializedKubesharkConfig, err := getSerializedTapConfig(conf) if err != nil { - log.Printf(utils.Error, fmt.Sprintf("Error serializing kubeshark 
config: %v", errormessage.FormatError(err))) + log.Error().Err(errormessage.FormatError(err)).Msg("Error serializing Kubeshark config!") return } if config.Config.IsNsRestrictedMode() { if len(state.targetNamespaces) != 1 || !utils.Contains(state.targetNamespaces, config.Config.ResourcesNamespace) { - log.Printf("Not supported mode. Kubeshark can't resolve IPs in other namespaces when running in namespace restricted mode.\n"+ - "You can use the same namespace for --%s and --%s", configStructs.NamespacesTapName, config.ResourcesNamespaceConfigName) + log.Error().Msg(fmt.Sprintf("Kubeshark can't resolve IPs in other namespaces when running in namespace restricted mode. You can use the same namespace for --%s and --%s", configStructs.NamespacesTapName, config.ResourcesNamespaceConfigName)) return } } @@ -77,24 +76,24 @@ func RunKubesharkTap() { namespacesStr = "all namespaces" } - log.Printf("Tapping pods in %s", namespacesStr) + log.Info().Str("namespace", namespacesStr).Msg("Tapping pods in:") if err := printTappedPodsPreview(ctx, kubernetesProvider, state.targetNamespaces); err != nil { - log.Printf(utils.Error, fmt.Sprintf("Error listing pods: %v", errormessage.FormatError(err))) + log.Error().Err(errormessage.FormatError(err)).Msg("Error listing pods!") } if config.Config.Tap.DryRun { return } - log.Printf("Waiting for Kubeshark deployment to finish...") + log.Info().Msg("Waiting for Kubeshark deployment to finish...") if state.kubesharkServiceAccountExists, err = resources.CreateTapKubesharkResources(ctx, kubernetesProvider, serializedKubesharkConfig, config.Config.IsNsRestrictedMode(), config.Config.ResourcesNamespace, config.Config.Tap.MaxEntriesDBSizeBytes(), config.Config.Tap.HubResources, config.Config.ImagePullPolicy(), config.Config.LogLevel(), config.Config.Tap.Profiler); err != nil { var statusError *k8serrors.StatusError if errors.As(err, &statusError) && (statusError.ErrStatus.Reason == metav1.StatusReasonAlreadyExists) { - log.Print("Kubeshark is 
already running in this namespace, change the `kubeshark-resources-namespace` configuration or run `kubeshark clean` to remove the currently running Kubeshark instance") + log.Info().Msg("Kubeshark is already running in this namespace, change the `kubeshark-resources-namespace` configuration or run `kubeshark clean` to remove the currently running Kubeshark instance") } else { defer resources.CleanUpKubesharkResources(ctx, cancel, kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.ResourcesNamespace) - log.Printf(utils.Error, fmt.Sprintf("Error creating resources: %v", errormessage.FormatError(err))) + log.Error().Err(errormessage.FormatError(err)).Msg("Error creating resources!") } return @@ -119,7 +118,6 @@ func getTapConfig() *models.Config { MaxDBSizeBytes: config.Config.Tap.MaxEntriesDBSizeBytes(), InsertionFilter: config.Config.Tap.GetInsertionFilter(), PullPolicy: config.Config.ImagePullPolicyStr, - LogLevel: config.Config.LogLevel(), TapperResources: config.Config.Tap.TapperResources, KubesharkResourcesNamespace: config.Config.ResourcesNamespace, AgentDatabasePath: models.DataDirPath, @@ -143,7 +141,7 @@ func printTappedPodsPreview(ctx context.Context, kubernetesProvider *kubernetes. 
printNoPodsFoundSuggestion(namespaces) } for _, tappedPod := range matchingPods { - log.Printf(utils.Green, fmt.Sprintf("+%s", tappedPod.Name)) + log.Info().Msg(fmt.Sprintf(utils.Green, fmt.Sprintf("+%s", tappedPod.Name))) } return nil } @@ -175,29 +173,29 @@ func startTapperSyncer(ctx context.Context, cancel context.CancelFunc, provider select { case syncerErr, ok := <-tapperSyncer.ErrorOut: if !ok { - log.Print("kubesharkTapperSyncer err channel closed, ending listener loop") + log.Debug().Msg("kubesharkTapperSyncer err channel closed, ending listener loop") return } - log.Printf(utils.Error, getErrorDisplayTextForK8sTapManagerError(syncerErr)) + log.Error().Msg(getErrorDisplayTextForK8sTapManagerError(syncerErr)) cancel() case _, ok := <-tapperSyncer.TapPodChangesOut: if !ok { - log.Print("kubesharkTapperSyncer pod changes channel closed, ending listener loop") + log.Debug().Msg("kubesharkTapperSyncer pod changes channel closed, ending listener loop") return } if err := connector.ReportTappedPods(tapperSyncer.CurrentlyTappedPods); err != nil { - log.Printf("[Error] failed update tapped pods %v", err) + log.Error().Err(err).Msg("failed update tapped pods.") } case tapperStatus, ok := <-tapperSyncer.TapperStatusChangedOut: if !ok { - log.Print("kubesharkTapperSyncer tapper status changed channel closed, ending listener loop") + log.Debug().Msg("kubesharkTapperSyncer tapper status changed channel closed, ending listener loop") return } if err := connector.ReportTapperStatus(tapperStatus); err != nil { - log.Printf("[Error] failed update tapper status %v", err) + log.Error().Err(err).Msg("failed update tapper status.") } case <-ctx.Done(): - log.Print("kubesharkTapperSyncer event listener loop exiting due to context done") + log.Debug().Msg("kubesharkTapperSyncer event listener loop exiting due to context done") return } } @@ -211,7 +209,7 @@ func printNoPodsFoundSuggestion(targetNamespaces []string) { if !utils.Contains(targetNamespaces, 
kubernetes.K8sAllNamespaces) { suggestionStr = ". You can also try selecting a different namespace with -n or tap all namespaces with -A" } - log.Printf(utils.Warning, fmt.Sprintf("Did not find any currently running pods that match the regex argument, kubeshark will automatically tap matching pods if any are created later%s", suggestionStr)) + log.Warn().Msg(fmt.Sprintf("Did not find any currently running pods that match the regex argument, kubeshark will automatically tap matching pods if any are created later%s", suggestionStr)) } func getErrorDisplayTextForK8sTapManagerError(err kubernetes.K8sTapManagerError) string { @@ -245,20 +243,24 @@ func watchHubPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, c switch wEvent.Type { case kubernetes.EventAdded: - log.Printf("Watching Hub pod loop, added") + log.Info().Str("pod", kubernetes.HubPodName).Msg("Added pod.") case kubernetes.EventDeleted: - log.Printf("%s removed", kubernetes.HubPodName) + log.Info().Str("pod", kubernetes.HubPodName).Msg("Removed pod.") cancel() return case kubernetes.EventModified: modifiedPod, err := wEvent.ToPod() if err != nil { - log.Printf(utils.Error, err) + log.Error().Str("pod", kubernetes.HubPodName).Err(err).Msg("While watching pod.") cancel() continue } - log.Printf("Watching Hub pod loop, modified: %v, containers statuses: %v", modifiedPod.Status.Phase, modifiedPod.Status.ContainerStatuses) + log.Debug(). + Str("pod", kubernetes.HubPodName). + Interface("phase", modifiedPod.Status.Phase). + Interface("containers-statuses", modifiedPod.Status.ContainerStatuses). + Msg("Watching pod.") if modifiedPod.Status.Phase == core.PodRunning && !isPodReady { isPodReady = true @@ -281,16 +283,24 @@ func watchHubPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, c continue } - log.Printf("[ERROR] Hub pod creation, watching %v namespace, error: %v", config.Config.ResourcesNamespace, err) + log.Error(). + Str("pod", kubernetes.HubPodName). 
+ Str("namespace", config.Config.ResourcesNamespace). + Err(err). + Msg("Failed creating pod.") cancel() case <-timeAfter: if !isPodReady { - log.Printf(utils.Error, "Kubeshark Hub was not ready in time") + log.Error(). + Str("pod", kubernetes.HubPodName). + Msg("Pod was not ready in time.") cancel() } case <-ctx.Done(): - log.Printf("Watching Hub pod loop, ctx done") + log.Debug(). + Str("pod", kubernetes.HubPodName). + Msg("Watching pod, context done.") return } } @@ -314,20 +324,24 @@ func watchFrontPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, switch wEvent.Type { case kubernetes.EventAdded: - log.Printf("Watching Hub pod loop, added") + log.Info().Str("pod", kubernetes.FrontPodName).Msg("Added pod.") case kubernetes.EventDeleted: - log.Printf("%s removed", kubernetes.FrontPodName) + log.Info().Str("pod", kubernetes.FrontPodName).Msg("Removed pod.") cancel() return case kubernetes.EventModified: modifiedPod, err := wEvent.ToPod() if err != nil { - log.Printf(utils.Error, err) + log.Error().Str("pod", kubernetes.FrontPodName).Err(err).Msg("While watching pod.") cancel() continue } - log.Printf("Watching Hub pod loop, modified: %v, containers statuses: %v", modifiedPod.Status.Phase, modifiedPod.Status.ContainerStatuses) + log.Debug(). + Str("pod", kubernetes.FrontPodName). + Interface("phase", modifiedPod.Status.Phase). + Interface("containers-statuses", modifiedPod.Status.ContainerStatuses). + Msg("Watching pod.") if modifiedPod.Status.Phase == core.PodRunning && !isPodReady { isPodReady = true @@ -349,16 +363,24 @@ func watchFrontPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, continue } - log.Printf("[ERROR] Front pod creation, watching %v namespace, error: %v", config.Config.ResourcesNamespace, err) + log.Error(). + Str("pod", kubernetes.FrontPodName). + Str("namespace", config.Config.ResourcesNamespace). + Err(err). 
+ Msg("Failed creating pod.") cancel() case <-timeAfter: if !isPodReady { - log.Printf(utils.Error, "Kubeshark Hub was not ready in time") + log.Error(). + Str("pod", kubernetes.FrontPodName). + Msg("Pod was not ready in time.") cancel() } case <-ctx.Done(): - log.Printf("Watching Front pod loop, ctx done") + log.Debug(). + Str("pod", kubernetes.FrontPodName). + Msg("Watching pod, context done.") return } } @@ -378,7 +400,10 @@ func watchHubEvents(ctx context.Context, kubernetesProvider *kubernetes.Provider event, err := wEvent.ToEvent() if err != nil { - log.Printf("[ERROR] parsing Kubeshark resource event: %+v", err) + log.Error(). + Str("pod", kubernetes.HubPodName). + Err(err). + Msg("Parsing resource event.") continue } @@ -386,19 +411,27 @@ func watchHubEvents(ctx context.Context, kubernetesProvider *kubernetes.Provider continue } - log.Printf( - "Watching Hub events loop, event %s, time: %v, resource: %s (%s), reason: %s, note: %s", - event.Name, - event.CreationTimestamp.Time, - event.Regarding.Name, - event.Regarding.Kind, - event.Reason, - event.Note, - ) + log.Debug(). + Str("pod", kubernetes.HubPodName). + Str("event", event.Name). + Time("time", event.CreationTimestamp.Time). + Str("name", event.Regarding.Name). + Str("kind", event.Regarding.Kind). + Str("reason", event.Reason). + Str("note", event.Note). + Msg("Watching events.") switch event.Reason { case "FailedScheduling", "Failed": - log.Printf(utils.Error, fmt.Sprintf("Kubeshark Hub status: %s - %s", event.Reason, event.Note)) + log.Error(). + Str("pod", kubernetes.HubPodName). + Str("event", event.Name). + Time("time", event.CreationTimestamp.Time). + Str("name", event.Regarding.Name). + Str("kind", event.Regarding.Kind). + Str("reason", event.Reason). + Str("note", event.Note). 
+ Msg("Watching events.") cancel() } @@ -408,9 +441,15 @@ func watchHubEvents(ctx context.Context, kubernetesProvider *kubernetes.Provider continue } - log.Printf("[Error] Watching Hub events loop, error: %+v", err) + log.Error(). + Str("pod", kubernetes.HubPodName). + Err(err). + Msg("While watching events.") + case <-ctx.Done(): - log.Printf("Watching Hub events loop, ctx done") + log.Debug(). + Str("pod", kubernetes.HubPodName). + Msg("Watching pod events, context done.") return } } @@ -420,19 +459,19 @@ func postHubStarted(ctx context.Context, kubernetesProvider *kubernetes.Provider startProxyReportErrorIfAny(kubernetesProvider, ctx, cancel, kubernetes.HubServiceName, config.Config.Hub.PortForward.SrcPort, config.Config.Hub.PortForward.DstPort, "/echo") if err := startTapperSyncer(ctx, cancel, kubernetesProvider, state.targetNamespaces, state.startTime); err != nil { - log.Printf(utils.Error, fmt.Sprintf("Error starting kubeshark tapper syncer: %v", errormessage.FormatError(err))) + log.Error().Err(errormessage.FormatError(err)).Msg("Error starting kubeshark tapper syncer") cancel() } url := kubernetes.GetLocalhostOnPort(config.Config.Hub.PortForward.SrcPort) - log.Printf("Hub is available at %s", url) + log.Info().Msg(fmt.Sprintf("Hub is available at %s", url)) } func postFrontStarted(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc) { startProxyReportErrorIfAny(kubernetesProvider, ctx, cancel, kubernetes.FrontServiceName, config.Config.Front.PortForward.SrcPort, config.Config.Front.PortForward.DstPort, "") url := kubernetes.GetLocalhostOnPort(config.Config.Front.PortForward.SrcPort) - log.Printf("Kubeshark is available at %s", url) + log.Info().Msg(fmt.Sprintf("Kubeshark is available at %s", url)) if !config.Config.HeadlessMode { utils.OpenBrowser(url) } @@ -446,7 +485,7 @@ func getNamespaces(kubernetesProvider *kubernetes.Provider) []string { } else { currentNamespace, err := kubernetesProvider.CurrentNamespace() if err 
!= nil { - log.Fatalf(utils.Red, fmt.Sprintf("error getting current namespace: %+v", err)) + log.Fatal().Err(err).Msg("Error getting current namespace!") } return []string{currentNamespace} } diff --git a/cmd/version.go b/cmd/version.go index 05aacd8ff..48f3cb817 100644 --- a/cmd/version.go +++ b/cmd/version.go @@ -1,15 +1,13 @@ package cmd import ( - "log" "strconv" "time" - "github.com/kubeshark/kubeshark/config" - "github.com/kubeshark/kubeshark/config/configStructs" - "github.com/creasty/defaults" + "github.com/kubeshark/kubeshark/config/configStructs" "github.com/kubeshark/kubeshark/kubeshark" + "github.com/rs/zerolog/log" "github.com/spf13/cobra" ) @@ -17,14 +15,12 @@ var versionCmd = &cobra.Command{ Use: "version", Short: "Print version info", RunE: func(cmd *cobra.Command, args []string) error { - if config.Config.Version.DebugInfo { - timeStampInt, _ := strconv.ParseInt(kubeshark.BuildTimestamp, 10, 0) - log.Printf("Version: %s \nBranch: %s (%s)", kubeshark.Ver, kubeshark.Branch, kubeshark.GitCommitHash) - log.Printf("Build Time: %s (%s)", kubeshark.BuildTimestamp, time.Unix(timeStampInt, 0)) - - } else { - log.Printf("Version: %s (%s)", kubeshark.Ver, kubeshark.Branch) - } + timeStampInt, _ := strconv.ParseInt(kubeshark.BuildTimestamp, 10, 0) + log.Info(). + Str("version", kubeshark.Ver). + Str("branch", kubeshark.Branch). + Str("commit-hash", kubeshark.GitCommitHash). 
+ Time("build-time", time.Unix(timeStampInt, 0)).Send() return nil }, } diff --git a/cmd/view.go b/cmd/view.go index 3eedfea27..cab92f77c 100644 --- a/cmd/view.go +++ b/cmd/view.go @@ -1,10 +1,9 @@ package cmd import ( - "log" - "github.com/creasty/defaults" "github.com/kubeshark/kubeshark/config/configStructs" + "github.com/rs/zerolog/log" "github.com/spf13/cobra" ) @@ -22,13 +21,13 @@ func init() { defaultViewConfig := configStructs.ViewConfig{} if err := defaults.Set(&defaultViewConfig); err != nil { - log.Print(err) + log.Error().Err(err).Send() } viewCmd.Flags().Uint16P(configStructs.GuiPortViewName, "p", defaultViewConfig.GuiPort, "Provide a custom port for the web interface webserver") viewCmd.Flags().StringP(configStructs.UrlViewName, "u", defaultViewConfig.Url, "Provide a custom host") if err := viewCmd.Flags().MarkHidden(configStructs.UrlViewName); err != nil { - log.Print(err) + log.Error().Err(err).Send() } } diff --git a/cmd/viewRunner.go b/cmd/viewRunner.go index 12645ffcd..117910f55 100644 --- a/cmd/viewRunner.go +++ b/cmd/viewRunner.go @@ -3,14 +3,13 @@ package cmd import ( "context" "fmt" - "log" "net/http" - "github.com/kubeshark/kubeshark/internal/connect" - "github.com/kubeshark/kubeshark/utils" - "github.com/kubeshark/kubeshark/config" + "github.com/kubeshark/kubeshark/internal/connect" "github.com/kubeshark/kubeshark/kubernetes" + "github.com/kubeshark/kubeshark/utils" + "github.com/rs/zerolog/log" ) func runKubesharkView() { @@ -27,12 +26,18 @@ func runKubesharkView() { if url == "" { exists, err := kubernetesProvider.DoesServiceExist(ctx, config.Config.ResourcesNamespace, kubernetes.HubServiceName) if err != nil { - log.Printf("Failed to found kubeshark service %v", err) + log.Error(). + Str("name", "kubeshark"). + Err(err). + Msg("Failed to find service!") cancel() return } if !exists { - log.Printf("%s service not found, you should run `kubeshark tap` command first", kubernetes.HubServiceName) + log.Error(). + Str("name", kubernetes.HubServiceName). 
+ Str("tap-command", "kubeshark tap"). + Msg("Service not found! You should run the tap command first:") cancel() return } @@ -41,20 +47,23 @@ func runKubesharkView() { response, err := http.Get(fmt.Sprintf("%s/", url)) if err == nil && response.StatusCode == 200 { - log.Printf("Found a running service %s and open port %d", kubernetes.HubServiceName, config.Config.Front.PortForward.SrcPort) + log.Info(). + Str("name", kubernetes.HubServiceName). + Int("port", int(config.Config.Front.PortForward.SrcPort)). + Msg("Found a running service.") return } - log.Printf("Establishing connection to k8s cluster...") + log.Info().Msg("Establishing connection to k8s cluster...") startProxyReportErrorIfAny(kubernetesProvider, ctx, cancel, kubernetes.FrontServiceName, config.Config.Front.PortForward.SrcPort, config.Config.Front.PortForward.DstPort, "") } connector := connect.NewConnector(url, connect.DefaultRetries, connect.DefaultTimeout) if err := connector.TestConnection(""); err != nil { - log.Printf(utils.Error, "Couldn't connect to Hub.") + log.Error().Msg(fmt.Sprintf(utils.Error, "Couldn't connect to Hub.")) return } - log.Printf("Kubeshark is available at %s", url) + log.Info().Msg(fmt.Sprintf("Kubeshark is available at %s", url)) if !config.Config.HeadlessMode { utils.OpenBrowser(url) diff --git a/config/config.go b/config/config.go index 1ea36282b..c271b5127 100644 --- a/config/config.go +++ b/config/config.go @@ -4,7 +4,6 @@ import ( "errors" "fmt" "io" - "log" "os" "reflect" "strconv" @@ -12,6 +11,8 @@ import ( "github.com/creasty/defaults" "github.com/kubeshark/kubeshark/utils" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" "github.com/spf13/cobra" "github.com/spf13/pflag" "gopkg.in/yaml.v3" @@ -22,6 +23,7 @@ const ( SetCommandName = "set" FieldNameTag = "yaml" ReadonlyTag = "readonly" + DebugFlag = "debug" ) var ( @@ -30,6 +32,15 @@ var ( ) func InitConfig(cmd *cobra.Command) error { + debugMode, err := cmd.Flags().GetBool(DebugFlag) + if err != nil { + 
log.Error().Err(err).Msg(fmt.Sprintf("Can't receive '%s' flag", DebugFlag)) + } + + if debugMode { + zerolog.SetGlobalLevel(zerolog.DebugLevel) + } + Config.Hub = HubConfig{ PortForward{ 8898, @@ -64,8 +75,7 @@ func InitConfig(cmd *cobra.Command) error { return fmt.Errorf("config validation failed, err: %v", err) } - finalConfigPrettified, _ := utils.PrettyJson(Config) - log.Printf("Init config finished\n Final config: %v", finalConfigPrettified) + log.Debug().Interface("config", Config).Msg("Init config is finished.") return nil } @@ -111,7 +121,7 @@ func loadConfigFile(configFilePath string, config *ConfigStruct) error { return err } - log.Printf("Found config file, config path: %s", configFilePath) + log.Info().Str("path", configFilePath).Msg("Found config file!") return nil } @@ -129,20 +139,20 @@ func initFlag(f *pflag.Flag) { sliceValue, isSliceValue := f.Value.(pflag.SliceValue) if !isSliceValue { if err := mergeFlagValue(configElemValue, flagPath, strings.Join(flagPath, "."), f.Value.String()); err != nil { - log.Printf(utils.Warning, err) + log.Warn().Err(err).Send() } return } if f.Name == SetCommandName { if err := mergeSetFlag(configElemValue, sliceValue.GetSlice()); err != nil { - log.Printf(utils.Warning, err) + log.Warn().Err(err).Send() } return } if err := mergeFlagValues(configElemValue, flagPath, strings.Join(flagPath, "."), sliceValue.GetSlice()); err != nil { - log.Printf(utils.Warning, err) + log.Warn().Err(err).Send() } } diff --git a/config/configStruct.go b/config/configStruct.go index 2923d90c3..aff3e1dfc 100644 --- a/config/configStruct.go +++ b/config/configStruct.go @@ -9,7 +9,8 @@ import ( "github.com/kubeshark/kubeshark/config/configStructs" "github.com/kubeshark/kubeshark/kubeshark" "github.com/kubeshark/worker/models" - "github.com/op/go-logging" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" v1 "k8s.io/api/core/v1" "k8s.io/client-go/util/homedir" ) @@ -69,13 +70,13 @@ type ConfigStruct struct { KubeContext string `yaml:"kube-context"` 
ConfigFilePath string `yaml:"config-path,omitempty" readonly:""` HeadlessMode bool `yaml:"headless" default:"false"` - LogLevelStr string `yaml:"log-level,omitempty" default:"INFO" readonly:""` + LogLevelStr string `yaml:"log-level,omitempty" default:"info" readonly:""` ServiceMap bool `yaml:"service-map" default:"true"` OAS models.OASConfig `yaml:"oas"` } func (config *ConfigStruct) validate() error { - if _, err := logging.LogLevel(config.LogLevelStr); err != nil { + if _, err := zerolog.ParseLevel(config.LogLevelStr); err != nil { return fmt.Errorf("%s is not a valid log level, err: %v", config.LogLevelStr, err) } @@ -108,7 +109,10 @@ func (config *ConfigStruct) KubeConfigPath() string { return filepath.Join(home, ".kube", "config") } -func (config *ConfigStruct) LogLevel() logging.Level { - logLevel, _ := logging.LogLevel(config.LogLevelStr) +func (config *ConfigStruct) LogLevel() zerolog.Level { + logLevel, err := zerolog.ParseLevel(config.LogLevelStr) + if err != nil { + log.Error().Err(err).Str("log-level", config.LogLevelStr).Msg("Invalid log level") + } return logLevel } diff --git a/config/configStructs/tapConfig.go b/config/configStructs/tapConfig.go index fd0a2a7fd..0e9fd6d16 100644 --- a/config/configStructs/tapConfig.go +++ b/config/configStructs/tapConfig.go @@ -3,13 +3,13 @@ package configStructs import ( "fmt" "io/fs" - "log" "os" "regexp" "strings" "github.com/kubeshark/kubeshark/utils" "github.com/kubeshark/worker/models" + "github.com/rs/zerolog/log" ) const ( @@ -69,7 +69,7 @@ func (config *TapConfig) GetInsertionFilter() string { if _, err := os.Stat(insertionFilter); err == nil { b, err := os.ReadFile(insertionFilter) if err != nil { - log.Printf(utils.Warning, fmt.Sprintf("Couldn't read the file on path: %s, err: %v", insertionFilter, err)) + log.Warn().Err(err).Str("insertion-filter-path", insertionFilter).Msg("Couldn't read the file! 
Defaulting to string.") } else { insertionFilter = string(b) } @@ -78,6 +78,7 @@ func (config *TapConfig) GetInsertionFilter() string { redactFilter := getRedactFilter(config) if insertionFilter != "" && redactFilter != "" { + log.Info().Str("filter", insertionFilter).Msg("Using insertion filter:") return fmt.Sprintf("(%s) and (%s)", insertionFilter, redactFilter) } else if insertionFilter == "" && redactFilter != "" { return redactFilter diff --git a/go.mod b/go.mod index 21ad5795d..1e3b9be87 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( github.com/docker/go-units v0.4.0 github.com/google/go-github/v37 v37.0.0 github.com/kubeshark/worker v0.1.4 - github.com/op/go-logging v0.0.0-20160315200505-970db520ece7 + github.com/rs/zerolog v1.28.0 github.com/spf13/cobra v1.3.0 github.com/spf13/pflag v1.0.5 gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b @@ -56,6 +56,8 @@ require ( github.com/json-iterator/go v1.1.12 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect github.com/mailru/easyjson v0.7.7 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.16 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect github.com/moby/spdystream v0.2.0 // indirect github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect @@ -63,6 +65,7 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect + github.com/op/go-logging v0.0.0-20160315200505-970db520ece7 // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect @@ -74,7 +77,7 @@ require ( golang.org/x/crypto v0.0.0-20220208050332-20e1d8d225ab // indirect golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect golang.org/x/oauth2 
v0.0.0-20211104180415-d3ed0bb246c8 // indirect - golang.org/x/sys v0.0.0-20220207234003-57398862261d // indirect + golang.org/x/sys v0.2.0 // indirect golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect golang.org/x/text v0.3.7 // indirect golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11 // indirect diff --git a/go.sum b/go.sum index 369bfbb1d..5f94f8a6b 100644 --- a/go.sum +++ b/go.sum @@ -136,6 +136,7 @@ github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3Ee github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/go-systemd/v22 v22.3.3-0.20220203105225-a9a7ef127534/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= @@ -442,12 +443,16 @@ github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVc github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= 
github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= @@ -554,6 +559,9 @@ github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6So github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= +github.com/rs/zerolog v1.28.0 h1:MirSo27VyNi7RJYP3078AA1+Cyzd2GB66qy3aUHvsWY= +github.com/rs/zerolog v1.28.0/go.mod h1:NILgTygv/Uej1ra5XxGf82ZFSLk58MFGAUS2o6usyD0= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww= github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY= @@ -891,8 +899,10 @@ golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220207234003-57398862261d h1:Bm7BNOQt2Qv7ZqysjeLjgCBanX+88Z/OtdvsrEv1Djc= golang.org/x/sys v0.0.0-20220207234003-57398862261d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= diff --git a/internal/connect/hub.go b/internal/connect/hub.go index b52515090..1b8be6dec 100644 --- a/internal/connect/hub.go +++ b/internal/connect/hub.go @@ -4,7 +4,6 @@ import ( "bytes" "encoding/json" "fmt" - "log" "net/http" "time" @@ -12,6 +11,7 @@ import ( "github.com/kubeshark/worker/models" "github.com/kubeshark/kubeshark/config" + "github.com/rs/zerolog/log" core "k8s.io/api/core/v1" ) @@ -38,9 +38,9 @@ func (connector *Connector) TestConnection(path string) error { retriesLeft := connector.retries for retriesLeft > 0 { if isReachable, err := connector.isReachable(path); err != nil || !isReachable { - log.Printf("Hub is not ready yet %v!", err) + log.Debug().Err(err).Msg("Hub is not ready yet!") } else { - log.Printf("Connection test to Hub passed successfully!") + log.Debug().Msg("Connection test to Hub passed successfully.") break } retriesLeft -= 1 @@ -71,7 +71,7 @@ func (connector *Connector) ReportTapperStatus(tapperStatus models.TapperStatus) if _, err := 
utils.Post(tapperStatusUrl, "application/json", bytes.NewBuffer(jsonValue), connector.client); err != nil { return fmt.Errorf("Failed sending to Hub the tapped pods %w", err) } else { - log.Printf("Reported to Hub about tapper status: %v", tapperStatus) + log.Debug().Interface("tapper-status", tapperStatus).Msg("Reported to Hub about tapper status:") return nil } } @@ -86,7 +86,7 @@ func (connector *Connector) ReportTappedPods(pods []core.Pod) error { if _, err := utils.Post(tappedPodsUrl, "application/json", bytes.NewBuffer(jsonValue), connector.client); err != nil { return fmt.Errorf("Failed sending to Hub the tapped pods %w", err) } else { - log.Printf("Reported to Hub about %d taped pods successfully", len(pods)) + log.Debug().Int("pod-count", len(pods)).Msg("Reported to Hub about tapped pod count:") return nil } } diff --git a/kubernetes/kubesharkTapperSyncer.go b/kubernetes/kubesharkTapperSyncer.go index 4ecebf057..8f31e2c7e 100644 --- a/kubernetes/kubesharkTapperSyncer.go +++ b/kubernetes/kubesharkTapperSyncer.go @@ -3,7 +3,6 @@ package kubernetes import ( "context" "fmt" - "log" "regexp" "time" @@ -11,7 +10,8 @@ import ( "github.com/kubeshark/kubeshark/utils" "github.com/kubeshark/worker/api" "github.com/kubeshark/worker/models" - "github.com/op/go-logging" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" core "k8s.io/api/core/v1" ) @@ -42,7 +42,7 @@ type TapperSyncerConfig struct { KubesharkResourcesNamespace string TapperResources models.Resources ImagePullPolicy core.PullPolicy - LogLevel logging.Level + LogLevel zerolog.Level KubesharkApiFilteringOptions api.TrafficFilteringOptions KubesharkServiceAccountExists bool ServiceMesh bool @@ -91,11 +91,15 @@ func (tapperSyncer *KubesharkTapperSyncer) watchTapperPods() { pod, err := wEvent.ToPod() if err != nil { - log.Printf("[ERROR] parsing Kubeshark resource pod: %+v", err) + log.Error().Str("pod", TapperPodName).Err(err).Msg("While parsing Kubeshark resource!") continue } - log.Printf("Watching 
tapper pods loop, tapper: %v, node: %v, status: %v", pod.Name, pod.Spec.NodeName, pod.Status.Phase) + log.Debug(). + Str("pod", pod.Name). + Str("node", pod.Spec.NodeName). + Interface("phase", pod.Status.Phase). + Msg("Watching pod events...") if pod.Spec.NodeName != "" { tapperStatus := models.TapperStatus{TapperName: pod.Name, NodeName: pod.Spec.NodeName, Status: string(pod.Status.Phase)} tapperSyncer.TapperStatusChangedOut <- tapperStatus @@ -106,10 +110,12 @@ func (tapperSyncer *KubesharkTapperSyncer) watchTapperPods() { errorChan = nil continue } - log.Printf("[ERROR] Watching tapper pods loop, error: %+v", err) + log.Error().Str("pod", TapperPodName).Err(err).Msg("While watching pod!") case <-tapperSyncer.context.Done(): - log.Printf("Watching tapper pods loop, ctx done") + log.Debug(). + Str("pod", TapperPodName). + Msg("Watching pod, context done.") return } } @@ -130,23 +136,26 @@ func (tapperSyncer *KubesharkTapperSyncer) watchTapperEvents() { event, err := wEvent.ToEvent() if err != nil { - log.Printf("[ERROR] parsing Kubeshark resource event: %+v", err) + log.Error(). + Str("pod", TapperPodName). + Err(err). + Msg("Parsing resource event.") continue } - log.Printf( - "Watching tapper events loop, event %s, time: %v, resource: %s (%s), reason: %s, note: %s", - event.Name, - event.CreationTimestamp.Time, - event.Regarding.Name, - event.Regarding.Kind, - event.Reason, - event.Note, - ) + log.Debug(). + Str("pod", TapperPodName). + Str("event", event.Name). + Time("time", event.CreationTimestamp.Time). + Str("name", event.Regarding.Name). + Str("kind", event.Regarding.Kind). + Str("reason", event.Reason). + Str("note", event.Note). 
+ Msg("Watching events.") pod, err1 := tapperSyncer.kubernetesProvider.GetPod(tapperSyncer.context, tapperSyncer.config.KubesharkResourcesNamespace, event.Regarding.Name) if err1 != nil { - log.Printf("Couldn't get tapper pod %s", event.Regarding.Name) + log.Error().Str("name", event.Regarding.Name).Msg("Couldn't get pod") continue } @@ -166,10 +175,15 @@ func (tapperSyncer *KubesharkTapperSyncer) watchTapperEvents() { continue } - log.Printf("[ERROR] Watching tapper events loop, error: %+v", err) + log.Error(). + Str("pod", TapperPodName). + Err(err). + Msg("While watching events.") case <-tapperSyncer.context.Done(): - log.Printf("Watching tapper events loop, ctx done") + log.Debug(). + Str("pod", TapperPodName). + Msg("Watching pod events, context done.") return } } @@ -189,7 +203,7 @@ func (tapperSyncer *KubesharkTapperSyncer) watchPodsForTapping() { } if !changeFound { - log.Printf("Nothing changed update tappers not needed") + log.Debug().Msg("Nothing changed. Updating tappers is not needed.") return } if err := tapperSyncer.updateKubesharkTappers(); err != nil { @@ -217,17 +231,37 @@ func (tapperSyncer *KubesharkTapperSyncer) watchPodsForTapping() { switch wEvent.Type { case EventAdded: - log.Printf("Added matching pod %s, ns: %s", pod.Name, pod.Namespace) + log.Debug(). + Str("pod", pod.Name). + Str("namespace", pod.Namespace). + Msg("Added matching pod.") if err := restartTappersDebouncer.SetOn(); err != nil { - log.Print(err) + log.Error(). + Str("pod", pod.Name). + Str("namespace", pod.Namespace). + Err(err). + Msg("While restarting tappers!") } case EventDeleted: - log.Printf("Removed matching pod %s, ns: %s", pod.Name, pod.Namespace) + log.Debug(). + Str("pod", pod.Name). + Str("namespace", pod.Namespace). + Msg("Removed matching pod.") if err := restartTappersDebouncer.SetOn(); err != nil { - log.Print(err) + log.Error(). + Str("pod", pod.Name). + Str("namespace", pod.Namespace). + Err(err). 
+ Msg("While restarting tappers!") } case EventModified: - log.Printf("Modified matching pod %s, ns: %s, phase: %s, ip: %s", pod.Name, pod.Namespace, pod.Status.Phase, pod.Status.PodIP) + log.Debug(). + Str("pod", pod.Name). + Str("namespace", pod.Namespace). + Str("ip", pod.Status.PodIP). + Interface("phase", pod.Status.Phase). + Msg("Modified matching pod.") + // Act only if the modified pod has already obtained an IP address. // After filtering for IPs, on a normal pod restart this includes the following events: // - Pod deletion @@ -236,7 +270,11 @@ func (tapperSyncer *KubesharkTapperSyncer) watchPodsForTapping() { // Ready/unready transitions might also trigger this event. if pod.Status.PodIP != "" { if err := restartTappersDebouncer.SetOn(); err != nil { - log.Print(err) + log.Error(). + Str("pod", pod.Name). + Str("namespace", pod.Namespace). + Err(err). + Msg("While restarting tappers!") } } case EventBookmark: @@ -254,7 +292,7 @@ func (tapperSyncer *KubesharkTapperSyncer) watchPodsForTapping() { continue case <-tapperSyncer.context.Done(): - log.Printf("Watching pods loop, context done, stopping `restart tappers debouncer`") + log.Debug().Msg("Watching pods, context done. Stopping \"restart tappers debouncer\"") restartTappersDebouncer.Cancel() // TODO: Does this also perform cleanup? return @@ -263,7 +301,7 @@ func (tapperSyncer *KubesharkTapperSyncer) watchPodsForTapping() { } func (tapperSyncer *KubesharkTapperSyncer) handleErrorInWatchLoop(err error, restartTappersDebouncer *debounce.Debouncer) { - log.Printf("Watching pods loop, got error %v, stopping `restart tappers debouncer`", err) + log.Error().Err(err).Msg("While watching pods, got an error! 
Stopping \"restart tappers debouncer\"") restartTappersDebouncer.Cancel() tapperSyncer.ErrorOut <- K8sTapManagerError{ OriginalError: err, @@ -278,10 +316,10 @@ func (tapperSyncer *KubesharkTapperSyncer) updateCurrentlyTappedPods() (err erro podsToTap := excludeKubesharkPods(matchingPods) addedPods, removedPods := getPodArrayDiff(tapperSyncer.CurrentlyTappedPods, podsToTap) for _, addedPod := range addedPods { - log.Printf("tapping new pod %s", addedPod.Name) + log.Info().Str("pod", addedPod.Name).Msg("Tapping new pod.") } for _, removedPod := range removedPods { - log.Printf("pod %s is no longer running, tapping for it stopped", removedPod.Name) + log.Info().Str("pod", removedPod.Name).Msg("Pod is no longer running. Tapping is stopped.") } if len(addedPods) > 0 || len(removedPods) > 0 { tapperSyncer.CurrentlyTappedPods = podsToTap @@ -305,11 +343,11 @@ func (tapperSyncer *KubesharkTapperSyncer) updateKubesharkTappers() error { } if utils.EqualStringSlices(nodesToTap, tapperSyncer.tappedNodes) { - log.Print("Skipping apply, DaemonSet is up to date") + log.Debug().Msg("Skipping apply, DaemonSet is up to date") return nil } - log.Printf("Updating DaemonSet to run on nodes: %v", nodesToTap) + log.Debug().Strs("nodes", nodesToTap).Msg("Updating DaemonSet to run on nodes.") image := "kubeshark/worker:latest" @@ -345,7 +383,7 @@ func (tapperSyncer *KubesharkTapperSyncer) updateKubesharkTappers() error { return err } - log.Printf("Successfully created %v tappers", len(tapperSyncer.nodeToTappedPodMap)) + log.Debug().Int("tapper-count", len(tapperSyncer.nodeToTappedPodMap)).Msg("Successfully created tappers.") } else { if err := tapperSyncer.kubernetesProvider.ResetKubesharkTapperDaemonSet( tapperSyncer.context, @@ -356,7 +394,7 @@ func (tapperSyncer *KubesharkTapperSyncer) updateKubesharkTappers() error { return err } - log.Printf("Successfully reset tapper daemon set") + log.Debug().Msg("Successfully reset tapper daemon set") } tapperSyncer.tappedNodes = nodesToTap diff 
--git a/kubernetes/provider.go b/kubernetes/provider.go index 7031312b8..787570910 100644 --- a/kubernetes/provider.go +++ b/kubernetes/provider.go @@ -7,7 +7,6 @@ import ( "errors" "fmt" "io" - "log" "net/url" "path/filepath" "regexp" @@ -17,7 +16,8 @@ import ( "github.com/kubeshark/kubeshark/utils" "github.com/kubeshark/worker/api" "github.com/kubeshark/worker/models" - "github.com/op/go-logging" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" auth "k8s.io/api/authorization/v1" core "k8s.io/api/core/v1" rbac "k8s.io/api/rbac/v1" @@ -78,7 +78,11 @@ func NewProvider(kubeConfigPath string, contextName string) (*Provider, error) { "you can set alternative kube config file path by adding the kube-config-path field to the kubeshark config file, err: %w", kubeConfigPath, err) } - log.Printf("K8s client config, host: %s, api path: %s, user agent: %s", restClientConfig.Host, restClientConfig.APIPath, restClientConfig.UserAgent) + log.Debug(). + Str("host", restClientConfig.Host). + Str("api-path", restClientConfig.APIPath). + Str("user-agent", restClientConfig.UserAgent). 
+ Msg("K8s client config.") return &Provider{ clientSet: clientSet, @@ -181,7 +185,7 @@ type HubOptions struct { MaxEntriesDBSizeBytes int64 Resources models.Resources ImagePullPolicy core.PullPolicy - LogLevel logging.Level + LogLevel zerolog.Level Profiler bool } @@ -806,8 +810,14 @@ func (provider *Provider) CreateConfigMap(ctx context.Context, namespace string, return nil } -func (provider *Provider) ApplyKubesharkTapperDaemonSet(ctx context.Context, namespace string, daemonSetName string, podImage string, tapperPodName string, hubPodIp string, nodeNames []string, serviceAccountName string, resources models.Resources, imagePullPolicy core.PullPolicy, kubesharkApiFilteringOptions api.TrafficFilteringOptions, logLevel logging.Level, serviceMesh bool, tls bool, maxLiveStreams int) error { - log.Printf("Applying %d tapper daemon sets, ns: %s, daemonSetName: %s, podImage: %s, tapperPodName: %s", len(nodeNames), namespace, daemonSetName, podImage, tapperPodName) +func (provider *Provider) ApplyKubesharkTapperDaemonSet(ctx context.Context, namespace string, daemonSetName string, podImage string, tapperPodName string, hubPodIp string, nodeNames []string, serviceAccountName string, resources models.Resources, imagePullPolicy core.PullPolicy, kubesharkApiFilteringOptions api.TrafficFilteringOptions, logLevel zerolog.Level, serviceMesh bool, tls bool, maxLiveStreams int) error { + log.Debug(). + Int("node-count", len(nodeNames)). + Str("namespace", namespace). + Str("daemonset-name", daemonSetName). + Str("image", podImage). + Str("pod-name", tapperPodName). 
+ Msg("Applying tapper DaemonSets.") if len(nodeNames) == 0 { return fmt.Errorf("daemon set %s must tap at least 1 pod", daemonSetName) @@ -1159,7 +1169,7 @@ func (provider *Provider) ListManagedRoleBindings(ctx context.Context, namespace func (provider *Provider) ValidateNotProxy() error { kubernetesUrl, err := url.Parse(provider.clientConfig.Host) if err != nil { - log.Printf("ValidateNotProxy - error while parsing kubernetes host, err: %v", err) + log.Debug().Err(err).Msg("While parsing Kubernetes host!") return nil } @@ -1184,7 +1194,7 @@ func (provider *Provider) ValidateNotProxy() error { func (provider *Provider) GetKubernetesVersion() (*semver.SemVersion, error) { serverVersion, err := provider.clientSet.ServerVersion() if err != nil { - log.Printf("error while getting kubernetes server version, err: %v", err) + log.Debug().Err(err).Msg("While getting Kubernetes server version!") return nil, err } @@ -1211,7 +1221,7 @@ func ValidateKubernetesVersion(serverVersionSemVer *semver.SemVersion) error { } func loadKubernetesConfiguration(kubeConfigPath string, context string) clientcmd.ClientConfig { - log.Printf("Using kube config %s", kubeConfigPath) + log.Info().Str("path", kubeConfigPath).Msg("Using kubeconfig:") configPathList := filepath.SplitList(kubeConfigPath) configLoadingRules := &clientcmd.ClientConfigLoadingRules{} if len(configPathList) <= 1 { diff --git a/kubernetes/proxy.go b/kubernetes/proxy.go index 322182a2d..3a9eb902d 100644 --- a/kubernetes/proxy.go +++ b/kubernetes/proxy.go @@ -4,7 +4,6 @@ import ( "bytes" "context" "fmt" - "log" "net" "net/http" "net/url" @@ -12,10 +11,10 @@ import ( "strings" "time" + "github.com/rs/zerolog/log" "k8s.io/apimachinery/pkg/util/httpstream" "k8s.io/client-go/tools/portforward" "k8s.io/client-go/transport/spdy" - "k8s.io/kubectl/pkg/proxy" ) @@ -23,7 +22,13 @@ const k8sProxyApiPrefix = "/" const kubesharkServicePort = 80 func StartProxy(kubernetesProvider *Provider, proxyHost string, srcPort uint16, dstPort 
uint16, kubesharkNamespace string, kubesharkServiceName string, cancel context.CancelFunc) (*http.Server, error) { - log.Printf("Starting proxy - namespace: [%v], service name: [%s], port: [%d:%d]\n", kubesharkNamespace, kubesharkServiceName, srcPort, dstPort) + log.Info(). + Str("namespace", kubesharkNamespace). + Str("service-name", kubesharkServiceName). + Int("src-port", int(srcPort)). + Int("dst-port", int(dstPort)). + Msg("Starting proxy...") + filter := &proxy.FilterServer{ AcceptPaths: proxy.MakeRegexpArrayOrDie(proxy.DefaultPathAcceptRE), RejectPaths: proxy.MakeRegexpArrayOrDie(proxy.DefaultPathRejectRE), @@ -50,7 +55,7 @@ func StartProxy(kubernetesProvider *Provider, proxyHost string, srcPort uint16, go func() { if err := server.Serve(l); err != nil && err != http.ErrServerClosed { - log.Printf("Error creating proxy, %v", err) + log.Error().Err(err).Msg("While creating proxy!") cancel() } }() @@ -105,7 +110,12 @@ func NewPortForward(kubernetesProvider *Provider, namespace string, podRegex *re podName := pods[0].Name - log.Printf("Starting proxy using port-forward method. namespace: [%v], pod name: [%s], %d:%d", namespace, podName, srcPort, dstPort) + log.Info(). + Str("namespace", namespace). + Str("pod-name", podName). + Int("src-port", int(srcPort)). + Int("dst-port", int(dstPort)). 
+ Msg("Starting proxy using port-forward method...") dialer, err := getHttpDialer(kubernetesProvider, namespace, podName) if err != nil { @@ -122,7 +132,7 @@ func NewPortForward(kubernetesProvider *Provider, namespace string, podRegex *re go func() { if err = forwarder.ForwardPorts(); err != nil { - log.Printf("kubernetes port-forwarding error: %v", err) + log.Error().Err(err).Msg("While Kubernetes port-forwarding!") cancel() } }() @@ -133,7 +143,7 @@ func NewPortForward(kubernetesProvider *Provider, namespace string, podRegex *re func getHttpDialer(kubernetesProvider *Provider, namespace string, podName string) (httpstream.Dialer, error) { roundTripper, upgrader, err := spdy.RoundTripperFor(&kubernetesProvider.clientConfig) if err != nil { - log.Printf("Error creating http dialer") + log.Error().Err(err).Msg("While creating HTTP dialer!") return nil, err } @@ -144,7 +154,9 @@ func getHttpDialer(kubernetesProvider *Provider, namespace string, podName strin path := fmt.Sprintf("%s/api/v1/namespaces/%s/pods/%s/portforward", clientConfigHostUrl.Path, namespace, podName) serverURL := url.URL{Scheme: "https", Path: path, Host: clientConfigHostUrl.Host} - log.Printf("Http dialer url %v", serverURL) + log.Debug(). + Str("url", serverURL.String()). 
+ Msg("HTTP dialer URL:") return spdy.NewDialer(upgrader, &http.Client{Transport: roundTripper}, http.MethodPost, &serverURL), nil } diff --git a/kubernetes/watch.go b/kubernetes/watch.go index f7c83a663..91bfe4974 100644 --- a/kubernetes/watch.go +++ b/kubernetes/watch.go @@ -4,12 +4,11 @@ import ( "context" "errors" "fmt" - "log" "sync" "time" "github.com/kubeshark/kubeshark/debounce" - + "github.com/rs/zerolog/log" "k8s.io/apimachinery/pkg/watch" ) @@ -57,13 +56,13 @@ func FilteredWatch(ctx context.Context, watcherCreator WatchCreator, targetNames } else { if !watchRestartDebouncer.IsOn() { if err := watchRestartDebouncer.SetOn(); err != nil { - log.Print(err) + log.Error().Err(err) } - log.Print("k8s watch channel closed, restarting watcher") + log.Warn().Msg("K8s watch channel closed, restarting watcher...") time.Sleep(time.Second * 5) continue } else { - errorChan <- errors.New("k8s watch unstable, closes frequently") + errorChan <- errors.New("K8s watch unstable, closes frequently") break } } diff --git a/kubeshark.go b/kubeshark.go index e9c221519..fa0b0b3ab 100644 --- a/kubeshark.go +++ b/kubeshark.go @@ -1,10 +1,17 @@ package main import ( + "os" + "time" + "github.com/kubeshark/kubeshark/cmd" "github.com/kubeshark/kubeshark/cmd/goUtils" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" ) func main() { + zerolog.SetGlobalLevel(zerolog.InfoLevel) + log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr, TimeFormat: time.RFC3339}) goUtils.HandleExcWrapper(cmd.Execute) } diff --git a/kubeshark/fsUtils/kubesharkLogsUtils.go b/kubeshark/fsUtils/kubesharkLogsUtils.go index 1d10b3056..e5a2c0bec 100644 --- a/kubeshark/fsUtils/kubesharkLogsUtils.go +++ b/kubeshark/fsUtils/kubesharkLogsUtils.go @@ -4,12 +4,12 @@ import ( "archive/zip" "context" "fmt" - "log" "os" "regexp" "github.com/kubeshark/kubeshark/config" "github.com/kubeshark/kubeshark/kubernetes" + "github.com/rs/zerolog/log" ) func DumpLogs(ctx context.Context, provider *kubernetes.Provider, 
filePath string) error { @@ -35,39 +35,49 @@ func DumpLogs(ctx context.Context, provider *kubernetes.Provider, filePath strin for _, container := range pod.Spec.Containers { logs, err := provider.GetPodLogs(ctx, pod.Namespace, pod.Name, container.Name) if err != nil { - log.Printf("Failed to get logs, %v", err) + log.Error().Err(err).Msg("Failed to get logs!") continue } else { - log.Printf("Successfully read log length %d for pod: %s.%s.%s", len(logs), pod.Namespace, pod.Name, container.Name) + log.Debug(). + Int("length", len(logs)). + Str("namespace", pod.Namespace). + Str("pod", pod.Name). + Str("container", container.Name). + Msg("Successfully read log length.") } if err := AddStrToZip(zipWriter, logs, fmt.Sprintf("%s.%s.%s.log", pod.Namespace, pod.Name, container.Name)); err != nil { - log.Printf("Failed write logs, %v", err) + log.Error().Err(err).Msg("Failed write logs!") } else { - log.Printf("Successfully added log length %d from pod: %s.%s.%s", len(logs), pod.Namespace, pod.Name, container.Name) + log.Debug(). + Int("length", len(logs)). + Str("namespace", pod.Namespace). + Str("pod", pod.Name). + Str("container", container.Name). 
+ Msg("Successfully added log length.") } } } events, err := provider.GetNamespaceEvents(ctx, config.Config.ResourcesNamespace) if err != nil { - log.Printf("Failed to get k8b events, %v", err) + log.Error().Err(err).Msg("Failed to get k8b events!") } else { - log.Printf("Successfully read events for k8b namespace: %s", config.Config.ResourcesNamespace) + log.Debug().Str("namespace", config.Config.ResourcesNamespace).Msg("Successfully read events.") } if err := AddStrToZip(zipWriter, events, fmt.Sprintf("%s_events.log", config.Config.ResourcesNamespace)); err != nil { - log.Printf("Failed write logs, %v", err) + log.Error().Err(err).Msg("Failed write logs!") } else { - log.Printf("Successfully added events for k8b namespace: %s", config.Config.ResourcesNamespace) + log.Debug().Str("namespace", config.Config.ResourcesNamespace).Msg("Successfully added events.") } if err := AddFileToZip(zipWriter, config.Config.ConfigFilePath); err != nil { - log.Printf("Failed write file, %v", err) + log.Error().Err(err).Msg("Failed write file!") } else { - log.Printf("Successfully added file %s", config.Config.ConfigFilePath) + log.Debug().Str("file-path", config.Config.ConfigFilePath).Msg("Successfully added file.") } - log.Printf("You can find the zip file with all logs in %s", filePath) + log.Info().Str("path", filePath).Msg("You can find the ZIP file with all logs at:") return nil } diff --git a/kubeshark/fsUtils/zipUtils.go b/kubeshark/fsUtils/zipUtils.go index 296e895ad..4e671895f 100644 --- a/kubeshark/fsUtils/zipUtils.go +++ b/kubeshark/fsUtils/zipUtils.go @@ -4,10 +4,11 @@ import ( "archive/zip" "fmt" "io" - "log" "os" "path/filepath" "strings" + + "github.com/rs/zerolog/log" ) func AddFileToZip(zipWriter *zip.Writer, filename string) error { @@ -83,7 +84,7 @@ func Unzip(reader *zip.Reader, dest string) error { _ = os.MkdirAll(path, f.Mode()) } else { _ = os.MkdirAll(filepath.Dir(path), f.Mode()) - log.Printf("writing HAR file [ %v ]", path) + log.Info().Str("path", 
path).Msg("Writing HAR file...") f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode()) if err != nil { return err @@ -92,7 +93,7 @@ func Unzip(reader *zip.Reader, dest string) error { if err := f.Close(); err != nil { panic(err) } - log.Print(" done") + log.Info().Str("path", path).Msg("HAR file at:") }() _, err = io.Copy(f, rc) diff --git a/kubeshark/version/versionCheck.go b/kubeshark/version/versionCheck.go index 6476a8bf2..571e1b661 100644 --- a/kubeshark/version/versionCheck.go +++ b/kubeshark/version/versionCheck.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "io" - "log" "net/http" "runtime" "strings" @@ -12,18 +11,19 @@ import ( "github.com/kubeshark/kubeshark/kubeshark" "github.com/kubeshark/kubeshark/pkg/version" + "github.com/kubeshark/kubeshark/utils" + "github.com/rs/zerolog/log" "github.com/google/go-github/v37/github" ) -func CheckNewerVersion(versionChan chan string) { - log.Printf("Checking for newer version...") +func CheckNewerVersion() { + log.Info().Msg("Checking for newer version...") start := time.Now() client := github.NewClient(nil) latestRelease, _, err := client.Repositories.GetLatestRelease(context.Background(), "kubeshark", "kubeshark") if err != nil { - log.Printf("[ERROR] Failed to get latest release") - versionChan <- "" + log.Error().Msg("Failed to get latest release.") return } @@ -35,23 +35,20 @@ func CheckNewerVersion(versionChan chan string) { } } if versionFileUrl == "" { - log.Printf("[ERROR] Version file not found in the latest release") - versionChan <- "" + log.Error().Msg("Version file not found in the latest release.") return } res, err := http.Get(versionFileUrl) if err != nil { - log.Printf("[ERROR] Failed to get the version file %v", err) - versionChan <- "" + log.Error().Err(err).Msg("Failed to get the version file.") return } data, err := io.ReadAll(res.Body) res.Body.Close() if err != nil { - log.Printf("[ERROR] Failed to read the version file -> %v", err) - versionChan <- "" + 
log.Error().Err(err).Msg("Failed to read the version file.") return } gitHubVersion := string(data) @@ -59,12 +56,18 @@ func CheckNewerVersion(versionChan chan string) { greater, err := version.GreaterThen(gitHubVersion, kubeshark.Ver) if err != nil { - log.Printf("[ERROR] Ver version is not valid, github version %v, current version %v", gitHubVersion, kubeshark.Ver) - versionChan <- "" + log.Error(). + Str("upstream-version", gitHubVersion). + Str("local-version", kubeshark.Ver). + Msg("Version is invalid!") return } - log.Printf("Finished version validation, github version %v, current version %v, took %v", gitHubVersion, kubeshark.Ver, time.Since(start)) + log.Debug(). + Str("upstream-version", gitHubVersion). + Str("local-version", kubeshark.Ver). + Dur("elapsed-time", time.Since(start)). + Msg("Finished version validation.") if greater { var downloadCommand string @@ -73,8 +76,7 @@ func CheckNewerVersion(versionChan chan string) { } else { downloadCommand = "sh <(curl -Ls https://kubeshark.co/install)" } - versionChan <- fmt.Sprintf("Update available! %v -> %v run the command: %s", kubeshark.Ver, gitHubVersion, downloadCommand) - } else { - versionChan <- "" + msg := fmt.Sprintf("Update available! 
%v -> %v run:", kubeshark.Ver, gitHubVersion) + log.Info().Str("command", downloadCommand).Msg(fmt.Sprintf(utils.Yellow, msg)) } } diff --git a/resources/cleanResources.go b/resources/cleanResources.go index da20bbf33..676878560 100644 --- a/resources/cleanResources.go +++ b/resources/cleanResources.go @@ -3,16 +3,16 @@ package resources import ( "context" "fmt" - "log" "github.com/kubeshark/kubeshark/errormessage" "github.com/kubeshark/kubeshark/kubernetes" "github.com/kubeshark/kubeshark/utils" + "github.com/rs/zerolog/log" "k8s.io/apimachinery/pkg/util/wait" ) func CleanUpKubesharkResources(ctx context.Context, cancel context.CancelFunc, kubernetesProvider *kubernetes.Provider, isNsRestrictedMode bool, kubesharkResourcesNamespace string) { - log.Printf("\nRemoving kubeshark resources") + log.Info().Msg("Removing Kubeshark resources...") var leftoverResources []string @@ -27,7 +27,7 @@ func CleanUpKubesharkResources(ctx context.Context, cancel context.CancelFunc, k for _, resource := range leftoverResources { errMsg += "\n- " + resource } - log.Printf(utils.Error, errMsg) + log.Error().Msg(fmt.Sprintf(utils.Error, errMsg)) } } @@ -78,10 +78,18 @@ func waitUntilNamespaceDeleted(ctx context.Context, cancel context.CancelFunc, k switch { case ctx.Err() == context.Canceled: log.Printf("Do nothing. User interrupted the wait") + log.Warn(). + Str("namespace", kubesharkResourcesNamespace). + Msg("Did nothing. User interrupted the wait.") case err == wait.ErrWaitTimeout: - log.Printf(utils.Error, fmt.Sprintf("Timeout while removing Namespace %s", kubesharkResourcesNamespace)) + log.Warn(). + Str("namespace", kubesharkResourcesNamespace). + Msg("Timed out while deleting the namespace.") default: - log.Printf(utils.Error, fmt.Sprintf("Error while waiting for Namespace %s to be deleted: %v", kubesharkResourcesNamespace, errormessage.FormatError(err))) + log.Warn(). + Err(errormessage.FormatError(err)). + Str("namespace", kubesharkResourcesNamespace). 
+ Msg("Unknown error while deleting the namespace.") } } } @@ -149,6 +157,6 @@ func cleanUpRestrictedMode(ctx context.Context, kubernetesProvider *kubernetes.P } func handleDeletionError(err error, resourceDesc string, leftoverResources *[]string) { - log.Printf("Error removing %s: %v", resourceDesc, errormessage.FormatError(err)) + log.Warn().Err(errormessage.FormatError(err)).Msg(fmt.Sprintf("Error while removing %s", resourceDesc)) *leftoverResources = append(*leftoverResources, resourceDesc) } diff --git a/resources/createResources.go b/resources/createResources.go index fe9c41125..5d45f3db7 100644 --- a/resources/createResources.go +++ b/resources/createResources.go @@ -2,20 +2,18 @@ package resources import ( "context" - "fmt" - "log" "github.com/kubeshark/kubeshark/config" "github.com/kubeshark/kubeshark/errormessage" "github.com/kubeshark/kubeshark/kubernetes" "github.com/kubeshark/kubeshark/kubeshark" - "github.com/kubeshark/kubeshark/utils" "github.com/kubeshark/worker/models" - "github.com/op/go-logging" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" core "k8s.io/api/core/v1" ) -func CreateTapKubesharkResources(ctx context.Context, kubernetesProvider *kubernetes.Provider, serializedKubesharkConfig string, isNsRestrictedMode bool, kubesharkResourcesNamespace string, maxEntriesDBSizeBytes int64, hubResources models.Resources, imagePullPolicy core.PullPolicy, logLevel logging.Level, profiler bool) (bool, error) { +func CreateTapKubesharkResources(ctx context.Context, kubernetesProvider *kubernetes.Provider, serializedKubesharkConfig string, isNsRestrictedMode bool, kubesharkResourcesNamespace string, maxEntriesDBSizeBytes int64, hubResources models.Resources, imagePullPolicy core.PullPolicy, logLevel zerolog.Level, profiler bool) (bool, error) { if !isNsRestrictedMode { if err := createKubesharkNamespace(ctx, kubernetesProvider, kubesharkResourcesNamespace); err != nil { return false, err @@ -28,7 +26,7 @@ func CreateTapKubesharkResources(ctx 
context.Context, kubernetesProvider *kubern kubesharkServiceAccountExists, err := createRBACIfNecessary(ctx, kubernetesProvider, isNsRestrictedMode, kubesharkResourcesNamespace, []string{"pods", "services", "endpoints"}) if err != nil { - log.Printf(utils.Warning, fmt.Sprintf("Failed to ensure the resources required for IP resolving. Kubeshark will not resolve target IPs to names. error: %v", errormessage.FormatError(err))) + log.Warn().Err(errormessage.FormatError(err)).Msg("Failed to ensure the resources required for IP resolving. Kubeshark will not resolve target IPs to names.") } var serviceAccountName string @@ -81,14 +79,14 @@ func CreateTapKubesharkResources(ctx context.Context, kubernetesProvider *kubern return kubesharkServiceAccountExists, err } - log.Printf("Successfully created service: %s", kubernetes.HubServiceName) + log.Info().Str("service-name", kubernetes.HubServiceName).Msg("Successfully created service:") _, err = kubernetesProvider.CreateService(ctx, kubesharkResourcesNamespace, kubernetes.FrontServiceName, kubernetes.FrontServiceName, 80, int32(config.Config.Front.PortForward.DstPort), int32(config.Config.Front.PortForward.SrcPort)) if err != nil { return kubesharkServiceAccountExists, err } - log.Printf("Successfully created service: %s", kubernetes.FrontServiceName) + log.Info().Str("service-name", kubernetes.FrontServiceName).Msg("Successfully created service:") return kubesharkServiceAccountExists, nil } @@ -125,7 +123,7 @@ func createKubesharkHubPod(ctx context.Context, kubernetesProvider *kubernetes.P if _, err = kubernetesProvider.CreatePod(ctx, opts.Namespace, pod); err != nil { return err } - log.Printf("Successfully created pod: [%s]", pod.Name) + log.Info().Str("pod-name", pod.Name).Msg("Successfully created pod.") return nil } @@ -137,6 +135,6 @@ func createFrontPod(ctx context.Context, kubernetesProvider *kubernetes.Provider if _, err = kubernetesProvider.CreatePod(ctx, opts.Namespace, pod); err != nil { return err } - 
log.Printf("Successfully created pod: [%s]", pod.Name) + log.Info().Str("pod-name", pod.Name).Msg("Successfully created pod.") return nil } diff --git a/utils/browser.go b/utils/browser.go index 459ffde41..28983c0ca 100644 --- a/utils/browser.go +++ b/utils/browser.go @@ -2,9 +2,10 @@ package utils import ( "fmt" - "log" "os/exec" "runtime" + + "github.com/rs/zerolog/log" ) func OpenBrowser(url string) { @@ -22,6 +23,6 @@ func OpenBrowser(url string) { } if err != nil { - log.Printf("error while opening browser, %v", err) + log.Error().Err(err).Msg("While trying to open a browser") } } diff --git a/utils/wait.go b/utils/wait.go index 75a7d411b..5346482b4 100644 --- a/utils/wait.go +++ b/utils/wait.go @@ -2,24 +2,25 @@ package utils import ( "context" - "log" "os" "os/signal" "syscall" + + "github.com/rs/zerolog/log" ) func WaitForFinish(ctx context.Context, cancel context.CancelFunc) { - log.Printf("waiting for finish...") + log.Debug().Msg("Waiting to finish...") sigChan := make(chan os.Signal, 1) signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) // block until ctx cancel is called or termination signal is received select { case <-ctx.Done(): - log.Printf("ctx done") + log.Debug().Msg("Context done.") break case <-sigChan: - log.Printf("Got termination signal, canceling execution...") + log.Debug().Msg("Got a termination signal, canceling execution...") cancel() } }