🔨 Move cli folder contents into project root (#1253)

* Remove `logger` module

* Remove `shared` module

* Move `cli` folder contents into project root

* Fix linter

* Change the module name from `github.com/kubeshark/kubeshark/cli` to `github.com/kubeshark/kubeshark` (see the sketch after this list)

* Set the default `Makefile` rule to `build`

* Add `lint` rule

* Fix the linter errors
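
A minimal sketch of what the module rename amounts to (the old import path is inferred from the rename itself; only the new paths appear in the diff below): the `go.mod` module directive changes, and every internal import drops the `/cli` segment.

    module github.com/kubeshark/kubeshark
    // previously: module github.com/kubeshark/kubeshark/cli

    // e.g. in cmd/check.go:
    import "github.com/kubeshark/kubeshark/config/configStructs"
    // previously: import "github.com/kubeshark/kubeshark/cli/config/configStructs"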
Author: M. Mert Yildiran
Date: 2022-11-25 14:17:50 -08:00
Committed by: GitHub
Parent: 9aeb1fadea
Commit: cb60a4cc4c
92 changed files with 512 additions and 2219 deletions

cmd/check.go (new file)

@@ -0,0 +1,31 @@
package cmd
import (
"log"
"github.com/creasty/defaults"
"github.com/kubeshark/kubeshark/config/configStructs"
"github.com/spf13/cobra"
)
var checkCmd = &cobra.Command{
Use: "check",
Short: "Check the Kubeshark installation for potential problems",
RunE: func(cmd *cobra.Command, args []string) error {
runKubesharkCheck()
return nil
},
}
func init() {
rootCmd.AddCommand(checkCmd)
defaultCheckConfig := configStructs.CheckConfig{}
if err := defaults.Set(&defaultCheckConfig); err != nil {
log.Print(err)
}
checkCmd.Flags().Bool(configStructs.PreTapCheckName, defaultCheckConfig.PreTap, "Check pre-tap Kubeshark installation for potential problems")
checkCmd.Flags().Bool(configStructs.PreInstallCheckName, defaultCheckConfig.PreInstall, "Check pre-install Kubeshark installation for potential problems")
checkCmd.Flags().Bool(configStructs.ImagePullCheckName, defaultCheckConfig.ImagePull, "Test connectivity to container image registry by creating and removing a temporary pod in 'default' namespace")
}


@@ -0,0 +1,103 @@
package check
import (
"context"
"fmt"
"log"
"regexp"
"time"
"github.com/kubeshark/kubeshark/kubernetes"
"github.com/kubeshark/kubeshark/uiUtils"
core "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
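// ImagePullInCluster verifies that the cluster can pull the Kubeshark images from the registry:
// it creates a short-lived test pod in the 'default' namespace, waits for it to reach the Running
// phase, and removes the pod on exit.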
func ImagePullInCluster(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool {
log.Printf("\nimage-pull-in-cluster\n--------------------")
namespace := "default"
podName := "kubeshark-test"
defer func() {
if err := kubernetesProvider.RemovePod(ctx, namespace, podName); err != nil {
log.Printf("%v error while removing test pod in cluster, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
}
}()
if err := createImagePullInClusterPod(ctx, kubernetesProvider, namespace, podName); err != nil {
log.Printf("%v error while creating test pod in cluster, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
return false
}
if err := checkImagePulled(ctx, kubernetesProvider, namespace, podName); err != nil {
log.Printf("%v cluster is not able to pull kubeshark containers from docker hub, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
return false
}
log.Printf("%v cluster is able to pull kubeshark containers from docker hub", fmt.Sprintf(uiUtils.Green, "√"))
return true
}
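// checkImagePulled watches the test pod and returns nil once it reaches the Running phase,
// or an error if the watch fails or 30 seconds elapse first.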
func checkImagePulled(ctx context.Context, kubernetesProvider *kubernetes.Provider, namespace string, podName string) error {
podExactRegex := regexp.MustCompile(fmt.Sprintf("^%s$", podName))
podWatchHelper := kubernetes.NewPodWatchHelper(kubernetesProvider, podExactRegex)
eventChan, errorChan := kubernetes.FilteredWatch(ctx, podWatchHelper, []string{namespace}, podWatchHelper)
timeAfter := time.After(30 * time.Second)
for {
select {
case wEvent, ok := <-eventChan:
if !ok {
eventChan = nil
continue
}
pod, err := wEvent.ToPod()
if err != nil {
return err
}
if pod.Status.Phase == core.PodRunning {
return nil
}
case err, ok := <-errorChan:
if !ok {
errorChan = nil
continue
}
return err
case <-timeAfter:
return fmt.Errorf("image not pulled in time")
}
}
}
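// createImagePullInClusterPod schedules a minimal pod whose only job is to force an image pull:
// ImagePullPolicy is Always, the container blocks on stdin ('cat'), and a zero termination grace
// period keeps cleanup fast.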
func createImagePullInClusterPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, namespace string, podName string) error {
var zero int64
pod := &core.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
},
Spec: core.PodSpec{
Containers: []core.Container{
{
Name: "probe",
Image: "kubeshark/busybox",
ImagePullPolicy: "Always",
Command: []string{"cat"},
Stdin: true,
},
},
TerminationGracePeriodSeconds: &zero,
},
}
if _, err := kubernetesProvider.CreatePod(ctx, namespace, pod); err != nil {
return err
}
return nil
}


@@ -0,0 +1,31 @@
package check
import (
"fmt"
"log"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/kubernetes"
"github.com/kubeshark/kubeshark/semver"
"github.com/kubeshark/kubeshark/uiUtils"
)
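// KubernetesApi initializes a Kubernetes client from the configured kubeconfig path and context,
// then queries the API server version, reporting each step's outcome.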
func KubernetesApi() (*kubernetes.Provider, *semver.SemVersion, bool) {
log.Printf("\nkubernetes-api\n--------------------")
kubernetesProvider, err := kubernetes.NewProvider(config.Config.KubeConfigPath(), config.Config.KubeContext)
if err != nil {
log.Printf("%v can't initialize the client, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
return nil, nil, false
}
log.Printf("%v can initialize the client", fmt.Sprintf(uiUtils.Green, "√"))
kubernetesVersion, err := kubernetesProvider.GetKubernetesVersion()
if err != nil {
log.Printf("%v can't query the Kubernetes API, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
return nil, nil, false
}
log.Printf("%v can query the Kubernetes API", fmt.Sprintf(uiUtils.Green, "√"))
return kubernetesProvider, kubernetesVersion, true
}


@@ -0,0 +1,132 @@
package check
import (
"context"
"embed"
"fmt"
"log"
"strings"
"github.com/kubeshark/kubeshark/bucket"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/kubernetes"
"github.com/kubeshark/kubeshark/uiUtils"
rbac "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/scheme"
)
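// TapKubernetesPermissions decodes the embedded RBAC manifest matching the current mode
// (namespace-restricted Role vs. cluster-wide ClusterRole) and verifies each of its rules
// against the live cluster.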
func TapKubernetesPermissions(ctx context.Context, embedFS embed.FS, kubernetesProvider *kubernetes.Provider) bool {
log.Printf("\nkubernetes-permissions\n--------------------")
var filePath string
if config.Config.IsNsRestrictedMode() {
filePath = "permissionFiles/permissions-ns-tap.yaml"
} else {
filePath = "permissionFiles/permissions-all-namespaces-tap.yaml"
}
data, err := embedFS.ReadFile(filePath)
if err != nil {
log.Printf("%v error while checking kubernetes permissions, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
return false
}
decode := scheme.Codecs.UniversalDeserializer().Decode
obj, _, err := decode(data, nil, nil)
if err != nil {
log.Printf("%v error while checking kubernetes permissions, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
return false
}
switch resource := obj.(type) {
case *rbac.Role:
return checkRulesPermissions(ctx, kubernetesProvider, resource.Rules, config.Config.KubesharkResourcesNamespace)
case *rbac.ClusterRole:
return checkRulesPermissions(ctx, kubernetesProvider, resource.Rules, "")
}
log.Printf("%v error while checking kubernetes permissions, err: resource of type 'Role' or 'ClusterRole' not found in permission files", fmt.Sprintf(uiUtils.Red, "✗"))
return false
}
func InstallKubernetesPermissions(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool {
log.Printf("\nkubernetes-permissions\n--------------------")
bucketProvider := bucket.NewProvider(config.Config.Install.TemplateUrl, bucket.DefaultTimeout)
installTemplate, err := bucketProvider.GetInstallTemplate(config.Config.Install.TemplateName)
if err != nil {
log.Printf("%v error while checking kubernetes permissions, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
return false
}
resourcesTemplate := strings.Split(installTemplate, "---")[1:]
permissionsExist := true
decode := scheme.Codecs.UniversalDeserializer().Decode
for _, resourceTemplate := range resourcesTemplate {
obj, _, err := decode([]byte(resourceTemplate), nil, nil)
if err != nil {
log.Printf("%v error while checking kubernetes permissions, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
return false
}
groupVersionKind := obj.GetObjectKind().GroupVersionKind()
resource := fmt.Sprintf("%vs", strings.ToLower(groupVersionKind.Kind))
permissionsExist = checkCreatePermission(ctx, kubernetesProvider, resource, groupVersionKind.Group, obj.(metav1.Object).GetNamespace()) && permissionsExist
switch resourceObj := obj.(type) {
case *rbac.Role:
permissionsExist = checkRulesPermissions(ctx, kubernetesProvider, resourceObj.Rules, resourceObj.Namespace) && permissionsExist
case *rbac.ClusterRole:
permissionsExist = checkRulesPermissions(ctx, kubernetesProvider, resourceObj.Rules, "") && permissionsExist
}
}
return permissionsExist
}
func checkCreatePermission(ctx context.Context, kubernetesProvider *kubernetes.Provider, resource string, group string, namespace string) bool {
exist, err := kubernetesProvider.CanI(ctx, namespace, resource, "create", group)
return checkPermissionExist(group, resource, "create", namespace, exist, err)
}
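// checkRulesPermissions expands every RBAC rule into its group/resource/verb combinations and
// checks each one, aggregating the results so all missing permissions get reported.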
func checkRulesPermissions(ctx context.Context, kubernetesProvider *kubernetes.Provider, rules []rbac.PolicyRule, namespace string) bool {
permissionsExist := true
for _, rule := range rules {
for _, group := range rule.APIGroups {
for _, resource := range rule.Resources {
for _, verb := range rule.Verbs {
exist, err := kubernetesProvider.CanI(ctx, namespace, resource, verb, group)
permissionsExist = checkPermissionExist(group, resource, verb, namespace, exist, err) && permissionsExist
}
}
}
}
return permissionsExist
}
func checkPermissionExist(group string, resource string, verb string, namespace string, exist bool, err error) bool {
var groupAndNamespace string
if group != "" && namespace != "" {
groupAndNamespace = fmt.Sprintf("in api group '%v' and namespace '%v'", group, namespace)
} else if group != "" {
groupAndNamespace = fmt.Sprintf("in api group '%v'", group)
} else if namespace != "" {
groupAndNamespace = fmt.Sprintf("in namespace '%v'", namespace)
}
if err != nil {
log.Printf("%v error checking permission for %v %v %v, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), verb, resource, groupAndNamespace, err)
return false
} else if !exist {
log.Printf("%v can't %v %v %v", fmt.Sprintf(uiUtils.Red, "✗"), verb, resource, groupAndNamespace)
return false
}
log.Printf("%v can %v %v %v", fmt.Sprintf(uiUtils.Green, "√"), verb, resource, groupAndNamespace)
return true
}


@@ -0,0 +1,96 @@
package check
import (
"context"
"fmt"
"log"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/kubernetes"
"github.com/kubeshark/kubeshark/uiUtils"
)
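// KubernetesResources verifies that everything a `kubeshark tap` run should have created
// (namespace, config map, service account, RBAC objects, service, and pods) actually exists.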
func KubernetesResources(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool {
log.Printf("\nk8s-components\n--------------------")
exist, err := kubernetesProvider.DoesNamespaceExist(ctx, config.Config.KubesharkResourcesNamespace)
allResourcesExist := checkResourceExist(config.Config.KubesharkResourcesNamespace, "namespace", exist, err)
exist, err = kubernetesProvider.DoesConfigMapExist(ctx, config.Config.KubesharkResourcesNamespace, kubernetes.ConfigMapName)
allResourcesExist = checkResourceExist(kubernetes.ConfigMapName, "config map", exist, err) && allResourcesExist
exist, err = kubernetesProvider.DoesServiceAccountExist(ctx, config.Config.KubesharkResourcesNamespace, kubernetes.ServiceAccountName)
allResourcesExist = checkResourceExist(kubernetes.ServiceAccountName, "service account", exist, err) && allResourcesExist
if config.Config.IsNsRestrictedMode() {
exist, err = kubernetesProvider.DoesRoleExist(ctx, config.Config.KubesharkResourcesNamespace, kubernetes.RoleName)
allResourcesExist = checkResourceExist(kubernetes.RoleName, "role", exist, err) && allResourcesExist
exist, err = kubernetesProvider.DoesRoleBindingExist(ctx, config.Config.KubesharkResourcesNamespace, kubernetes.RoleBindingName)
allResourcesExist = checkResourceExist(kubernetes.RoleBindingName, "role binding", exist, err) && allResourcesExist
} else {
exist, err = kubernetesProvider.DoesClusterRoleExist(ctx, kubernetes.ClusterRoleName)
allResourcesExist = checkResourceExist(kubernetes.ClusterRoleName, "cluster role", exist, err) && allResourcesExist
exist, err = kubernetesProvider.DoesClusterRoleBindingExist(ctx, kubernetes.ClusterRoleBindingName)
allResourcesExist = checkResourceExist(kubernetes.ClusterRoleBindingName, "cluster role binding", exist, err) && allResourcesExist
}
exist, err = kubernetesProvider.DoesServiceExist(ctx, config.Config.KubesharkResourcesNamespace, kubernetes.ApiServerPodName)
allResourcesExist = checkResourceExist(kubernetes.ApiServerPodName, "service", exist, err) && allResourcesExist
allResourcesExist = checkPodResourcesExist(ctx, kubernetesProvider) && allResourcesExist
return allResourcesExist
}
func checkPodResourcesExist(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool {
if pods, err := kubernetesProvider.ListPodsByAppLabel(ctx, config.Config.KubesharkResourcesNamespace, kubernetes.ApiServerPodName); err != nil {
log.Printf("%v error checking if '%v' pod is running, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), kubernetes.ApiServerPodName, err)
return false
} else if len(pods) == 0 {
log.Printf("%v '%v' pod doesn't exist", fmt.Sprintf(uiUtils.Red, "✗"), kubernetes.ApiServerPodName)
return false
} else if !kubernetes.IsPodRunning(&pods[0]) {
log.Printf("%v '%v' pod not running", fmt.Sprintf(uiUtils.Red, "✗"), kubernetes.ApiServerPodName)
return false
}
log.Printf("%v '%v' pod running", fmt.Sprintf(uiUtils.Green, "√"), kubernetes.ApiServerPodName)
if pods, err := kubernetesProvider.ListPodsByAppLabel(ctx, config.Config.KubesharkResourcesNamespace, kubernetes.TapperPodName); err != nil {
log.Printf("%v error checking if '%v' pods are running, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), kubernetes.TapperPodName, err)
return false
} else {
tappers := 0
notRunningTappers := 0
for _, pod := range pods {
tappers += 1
if !kubernetes.IsPodRunning(&pod) {
notRunningTappers += 1
}
}
if notRunningTappers > 0 {
log.Printf("%v '%v' %v/%v pods are not running", fmt.Sprintf(uiUtils.Red, "✗"), kubernetes.TapperPodName, notRunningTappers, tappers)
return false
}
log.Printf("%v '%v' %v pods running", fmt.Sprintf(uiUtils.Green, "√"), kubernetes.TapperPodName, tappers)
return true
}
}
func checkResourceExist(resourceName string, resourceType string, exist bool, err error) bool {
if err != nil {
log.Printf("%v error checking if '%v' %v exists, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), resourceName, resourceType, err)
return false
} else if !exist {
log.Printf("%v '%v' %v doesn't exist", fmt.Sprintf(uiUtils.Red, "✗"), resourceName, resourceType)
return false
}
log.Printf("%v '%v' %v exists", fmt.Sprintf(uiUtils.Green, "√"), resourceName, resourceType)
return true
}


@@ -0,0 +1,22 @@
package check
import (
"fmt"
"log"
"github.com/kubeshark/kubeshark/kubernetes"
"github.com/kubeshark/kubeshark/semver"
"github.com/kubeshark/kubeshark/uiUtils"
)
func KubernetesVersion(kubernetesVersion *semver.SemVersion) bool {
log.Printf("\nkubernetes-version\n--------------------")
if err := kubernetes.ValidateKubernetesVersion(kubernetesVersion); err != nil {
log.Printf("%v not running the minimum Kubernetes API version, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
return false
}
log.Printf("%v is running the minimum Kubernetes API version", fmt.Sprintf(uiUtils.Green, "√"))
return true
}


@@ -0,0 +1,84 @@
package check
import (
"context"
"fmt"
"log"
"regexp"
"github.com/kubeshark/kubeshark/apiserver"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/kubernetes"
"github.com/kubeshark/kubeshark/uiUtils"
)
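// ServerConnection returns immediately if an existing tunnel to the API server already works;
// otherwise it attempts both a Kubernetes proxy and a port-forward, succeeding if either connects.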
func ServerConnection(kubernetesProvider *kubernetes.Provider) bool {
log.Printf("\nAPI-server-connectivity\n--------------------")
serverUrl := kubernetes.GetLocalhostOnPort(config.Config.Hub.PortForward.SrcPort)
apiServerProvider := apiserver.NewProvider(serverUrl, 1, apiserver.DefaultTimeout)
if err := apiServerProvider.TestConnection(""); err == nil {
log.Printf("%v found Kubeshark server tunnel available and connected successfully to API server", fmt.Sprintf(uiUtils.Green, "√"))
return true
}
connectedToApiServer := false
if err := checkProxy(serverUrl, kubernetesProvider); err != nil {
log.Printf("%v couldn't connect to API server using proxy, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
} else {
connectedToApiServer = true
log.Printf("%v connected successfully to API server using proxy", fmt.Sprintf(uiUtils.Green, "√"))
}
if err := checkPortForward(serverUrl, kubernetesProvider); err != nil {
log.Printf("%v couldn't connect to API server using port-forward, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
} else {
connectedToApiServer = true
log.Printf("%v connected successfully to API server using port-forward", fmt.Sprintf(uiUtils.Green, "√"))
}
return connectedToApiServer
}
func checkProxy(serverUrl string, kubernetesProvider *kubernetes.Provider) error {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
httpServer, err := kubernetes.StartProxy(kubernetesProvider, config.Config.Tap.ProxyHost, config.Config.Hub.PortForward.SrcPort, config.Config.Hub.PortForward.DstPort, config.Config.KubesharkResourcesNamespace, kubernetes.ApiServerPodName, cancel)
if err != nil {
return err
}
apiServerProvider := apiserver.NewProvider(serverUrl, apiserver.DefaultRetries, apiserver.DefaultTimeout)
if err := apiServerProvider.TestConnection(""); err != nil {
return err
}
if err := httpServer.Shutdown(ctx); err != nil {
log.Printf("Error occurred while stopping proxy, err: %v", err)
}
return nil
}
func checkPortForward(serverUrl string, kubernetesProvider *kubernetes.Provider) error {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
podRegex, _ := regexp.Compile(kubernetes.ApiServerPodName)
forwarder, err := kubernetes.NewPortForward(kubernetesProvider, config.Config.KubesharkResourcesNamespace, podRegex, config.Config.Tap.GuiPort, config.Config.Tap.GuiPort, ctx, cancel)
if err != nil {
return err
}
apiServerProvider := apiserver.NewProvider(serverUrl, apiserver.DefaultRetries, apiserver.DefaultTimeout)
if err := apiServerProvider.TestConnection(""); err != nil {
return err
}
forwarder.Close()
return nil
}

cmd/checkRunner.go (new file)

@@ -0,0 +1,62 @@
package cmd
import (
"context"
"embed"
"fmt"
"log"
"github.com/kubeshark/kubeshark/cmd/check"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/uiUtils"
)
var (
//go:embed permissionFiles
embedFS embed.FS
)
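// runKubesharkCheck chains the individual checks: API connectivity and version always run first,
// followed by the requested pre-tap/pre-install/image-pull checks, or by default the
// deployed-resources and server-connectivity checks.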
func runKubesharkCheck() {
log.Printf("Kubeshark checks\n===================")
ctx, cancel := context.WithCancel(context.Background())
defer cancel() // cancel will be called when this function exits
kubernetesProvider, kubernetesVersion, checkPassed := check.KubernetesApi()
if checkPassed {
checkPassed = check.KubernetesVersion(kubernetesVersion)
}
if config.Config.Check.PreTap || config.Config.Check.PreInstall || config.Config.Check.ImagePull {
if config.Config.Check.PreTap {
if checkPassed {
checkPassed = check.TapKubernetesPermissions(ctx, embedFS, kubernetesProvider)
}
} else if config.Config.Check.PreInstall {
if checkPassed {
checkPassed = check.InstallKubernetesPermissions(ctx, kubernetesProvider)
}
}
if config.Config.Check.ImagePull {
if checkPassed {
checkPassed = check.ImagePullInCluster(ctx, kubernetesProvider)
}
}
} else {
if checkPassed {
checkPassed = check.KubernetesResources(ctx, kubernetesProvider)
}
if checkPassed {
checkPassed = check.ServerConnection(kubernetesProvider)
}
}
if checkPassed {
log.Printf("\nStatus check results are %v", fmt.Sprintf(uiUtils.Green, "√"))
} else {
log.Printf("\nStatus check results are %v", fmt.Sprintf(uiUtils.Red, "✗"))
}
}

cmd/clean.go (new file)

@@ -0,0 +1,18 @@
package cmd
import (
"github.com/spf13/cobra"
)
var cleanCmd = &cobra.Command{
Use: "clean",
Short: "Removes all kubeshark resources",
RunE: func(cmd *cobra.Command, args []string) error {
performCleanCommand()
return nil
},
}
func init() {
rootCmd.AddCommand(cleanCmd)
}

cmd/cleanRunner.go (new file)

@@ -0,0 +1,14 @@
package cmd
import (
"github.com/kubeshark/kubeshark/config"
)
func performCleanCommand() {
kubernetesProvider, err := getKubernetesProviderForCli()
if err != nil {
return
}
finishKubesharkExecution(kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.KubesharkResourcesNamespace)
}

cmd/common.go (new file)

@@ -0,0 +1,119 @@
package cmd
import (
"context"
"encoding/json"
"errors"
"fmt"
"log"
"path"
"regexp"
"time"
"github.com/kubeshark/kubeshark/apiserver"
"github.com/kubeshark/kubeshark/config/configStructs"
"github.com/kubeshark/kubeshark/errormessage"
"github.com/kubeshark/kubeshark/kubeshark"
"github.com/kubeshark/kubeshark/kubeshark/fsUtils"
"github.com/kubeshark/kubeshark/resources"
"github.com/kubeshark/kubeshark/uiUtils"
"github.com/kubeshark/worker/models"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/kubernetes"
)
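// startProxyReportErrorIfAny exposes the given service on localhost via the Kubernetes API proxy
// and, if a test request against it fails, falls back to a port-forward.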
func startProxyReportErrorIfAny(kubernetesProvider *kubernetes.Provider, ctx context.Context, cancel context.CancelFunc, serviceName string, srcPort uint16, dstPort uint16, healthCheck string) {
httpServer, err := kubernetes.StartProxy(kubernetesProvider, config.Config.Tap.ProxyHost, srcPort, dstPort, config.Config.KubesharkResourcesNamespace, serviceName, cancel)
if err != nil {
log.Printf(uiUtils.Error, fmt.Sprintf("Error occured while running k8s proxy %v\n"+
"Try setting different port by using --%s", errormessage.FormatError(err), configStructs.GuiPortTapName))
cancel()
return
}
provider := apiserver.NewProvider(kubernetes.GetLocalhostOnPort(srcPort), apiserver.DefaultRetries, apiserver.DefaultTimeout)
if err := provider.TestConnection(healthCheck); err != nil {
log.Printf("Couldn't connect using proxy, stopping proxy and trying to create port-forward")
if err := httpServer.Shutdown(ctx); err != nil {
log.Printf("Error occurred while stopping proxy %v", errormessage.FormatError(err))
}
podRegex, _ := regexp.Compile(kubernetes.ApiServerPodName)
if _, err := kubernetes.NewPortForward(kubernetesProvider, config.Config.KubesharkResourcesNamespace, podRegex, srcPort, dstPort, ctx, cancel); err != nil {
log.Printf(uiUtils.Error, fmt.Sprintf("Error occured while running port forward [%s] %v\n"+
"Try setting different port by using --%s", podRegex, errormessage.FormatError(err), configStructs.GuiPortTapName))
cancel()
return
}
provider = apiserver.NewProvider(kubernetes.GetLocalhostOnPort(srcPort), apiserver.DefaultRetries, apiserver.DefaultTimeout)
if err := provider.TestConnection(healthCheck); err != nil {
log.Printf(uiUtils.Error, "Couldn't connect to [%s].")
// cancel()
return
}
}
}
func getKubernetesProviderForCli() (*kubernetes.Provider, error) {
kubernetesProvider, err := kubernetes.NewProvider(config.Config.KubeConfigPath(), config.Config.KubeContext)
if err != nil {
handleKubernetesProviderError(err)
return nil, err
}
if err := kubernetesProvider.ValidateNotProxy(); err != nil {
handleKubernetesProviderError(err)
return nil, err
}
kubernetesVersion, err := kubernetesProvider.GetKubernetesVersion()
if err != nil {
handleKubernetesProviderError(err)
return nil, err
}
if err := kubernetes.ValidateKubernetesVersion(kubernetesVersion); err != nil {
handleKubernetesProviderError(err)
return nil, err
}
return kubernetesProvider, nil
}
func handleKubernetesProviderError(err error) {
var clusterBehindProxyErr *kubernetes.ClusterBehindProxyError
if ok := errors.As(err, &clusterBehindProxyErr); ok {
log.Printf("cannot establish http-proxy connection to the Kubernetes cluster. If youre using Lens or similar tool, please run kubeshark with regular kubectl config using --%v %v=$HOME/.kube/config flag", config.SetCommandName, config.KubeConfigPathConfigName)
} else {
log.Print(err)
}
}
func finishKubesharkExecution(kubernetesProvider *kubernetes.Provider, isNsRestrictedMode bool, kubesharkResourcesNamespace string) {
removalCtx, cancel := context.WithTimeout(context.Background(), cleanupTimeout)
defer cancel()
dumpLogsIfNeeded(removalCtx, kubernetesProvider)
resources.CleanUpKubesharkResources(removalCtx, cancel, kubernetesProvider, isNsRestrictedMode, kubesharkResourcesNamespace)
}
func dumpLogsIfNeeded(ctx context.Context, kubernetesProvider *kubernetes.Provider) {
if !config.Config.DumpLogs {
return
}
kubesharkDir := kubeshark.GetKubesharkFolderPath()
filePath := path.Join(kubesharkDir, fmt.Sprintf("kubeshark_logs_%s.zip", time.Now().Format("2006_01_02__15_04_05")))
if err := fsUtils.DumpLogs(ctx, kubernetesProvider, filePath); err != nil {
log.Printf("Failed dump logs %v", err)
}
}
func getSerializedKubesharkAgentConfig(kubesharkAgentConfig *models.Config) (string, error) {
serializedConfig, err := json.Marshal(kubesharkAgentConfig)
if err != nil {
return "", err
}
return string(serializedConfig), nil
}

cmd/config.go (new file)

@@ -0,0 +1,55 @@
package cmd
import (
"fmt"
"log"
"github.com/creasty/defaults"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/config/configStructs"
"github.com/kubeshark/kubeshark/uiUtils"
"github.com/spf13/cobra"
)
var configCmd = &cobra.Command{
Use: "config",
Short: "Generate config with default values",
RunE: func(cmd *cobra.Command, args []string) error {
configWithDefaults, err := config.GetConfigWithDefaults()
if err != nil {
log.Printf("Failed generating config with defaults, err: %v", err)
return nil
}
if config.Config.Config.Regenerate {
if err := config.WriteConfig(configWithDefaults); err != nil {
log.Printf("Failed writing config with defaults, err: %v", err)
return nil
}
log.Printf("Template File written to %s", fmt.Sprintf(uiUtils.Purple, config.Config.ConfigFilePath))
} else {
template, err := uiUtils.PrettyYaml(configWithDefaults)
if err != nil {
log.Printf("Failed converting config with defaults to yaml, err: %v", err)
return nil
}
log.Printf("Writing template config.\n%v", template)
fmt.Printf("%v", template)
}
return nil
},
}
func init() {
rootCmd.AddCommand(configCmd)
defaultConfig := config.CreateDefaultConfig()
if err := defaults.Set(&defaultConfig); err != nil {
log.Print(err)
}
configCmd.Flags().BoolP(configStructs.RegenerateConfigName, "r", defaultConfig.Config.Regenerate, fmt.Sprintf("Regenerate the config file with default values to path %s or to chosen path using --%s", defaultConfig.ConfigFilePath, config.ConfigFilePathCommandName))
}


@@ -0,0 +1,25 @@
package goUtils
import (
"log"
"reflect"
"runtime/debug"
)
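// HandleExcWrapper invokes fn(params...) through reflection, recovering any panic and logging it
// fatally together with the stack trace.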
func HandleExcWrapper(fn interface{}, params ...interface{}) (result []reflect.Value) {
defer func() {
if panicMessage := recover(); panicMessage != nil {
stack := debug.Stack()
log.Fatalf("Unhandled panic: %v\n stack: %s", panicMessage, stack)
}
}()
f := reflect.ValueOf(fn)
if f.Type().NumIn() != len(params) {
panic("incorrect number of parameters!")
}
inputs := make([]reflect.Value, len(params))
for k, in := range params {
inputs[k] = reflect.ValueOf(in)
}
return f.Call(inputs)
}

cmd/install.go (new file)

@@ -0,0 +1,29 @@
package cmd
import (
"log"
"github.com/creasty/defaults"
"github.com/kubeshark/kubeshark/config/configStructs"
"github.com/spf13/cobra"
)
var installCmd = &cobra.Command{
Use: "install",
Short: "Installs kubeshark components",
RunE: func(cmd *cobra.Command, args []string) error {
runKubesharkInstall()
return nil
},
}
func init() {
rootCmd.AddCommand(installCmd)
defaultInstallConfig := configStructs.InstallConfig{}
if err := defaults.Set(&defaultInstallConfig); err != nil {
log.Print(err)
}
installCmd.Flags().BoolP(configStructs.OutInstallName, "o", defaultInstallConfig.Out, "Print (to stdout) the Kubernetes manifest used to install the Kubeshark Pro edition")
}

cmd/installRunner.go (new file)

@@ -0,0 +1,33 @@
package cmd
import (
"fmt"
"log"
"strings"
"github.com/kubeshark/kubeshark/bucket"
"github.com/kubeshark/kubeshark/config"
)
func runKubesharkInstall() {
// TODO: Remove this function
if config.Config.Install.Out {
bucketProvider := bucket.NewProvider(config.Config.Install.TemplateUrl, bucket.DefaultTimeout)
installTemplate, err := bucketProvider.GetInstallTemplate(config.Config.Install.TemplateName)
if err != nil {
log.Printf("Failed getting install template, err: %v", err)
return
}
fmt.Print(installTemplate)
return
}
var sb strings.Builder
sb.WriteString("Hello! This command can be used to install Kubeshark Pro edition on your Kubernetes cluster.")
sb.WriteString("\nPlease run:")
sb.WriteString("\n\tkubeshark install -o | kubectl apply -n kubeshark -f -")
sb.WriteString("\n\nor use helm chart as described in https://getkubeshark.io/docs/installing-kubeshark/centralized-installation\n")
fmt.Print(sb.String())
}

cmd/logs.go (new file)

@@ -0,0 +1,48 @@
package cmd
import (
"context"
"log"
"github.com/creasty/defaults"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/config/configStructs"
"github.com/kubeshark/kubeshark/errormessage"
"github.com/kubeshark/kubeshark/kubeshark/fsUtils"
"github.com/spf13/cobra"
)
var logsCmd = &cobra.Command{
Use: "logs",
Short: "Create a zip file with logs for Github issue or troubleshoot",
RunE: func(cmd *cobra.Command, args []string) error {
kubernetesProvider, err := getKubernetesProviderForCli()
if err != nil {
return nil
}
ctx := context.Background()
if validationErr := config.Config.Logs.Validate(); validationErr != nil {
return errormessage.FormatError(validationErr)
}
log.Printf("Using file path %s", config.Config.Logs.FilePath())
if dumpLogsErr := fsUtils.DumpLogs(ctx, kubernetesProvider, config.Config.Logs.FilePath()); dumpLogsErr != nil {
log.Printf("Failed dump logs %v", dumpLogsErr)
}
return nil
},
}
func init() {
rootCmd.AddCommand(logsCmd)
defaultLogsConfig := configStructs.LogsConfig{}
if err := defaults.Set(&defaultLogsConfig); err != nil {
log.Print(err)
}
logsCmd.Flags().StringP(configStructs.FileLogsName, "f", defaultLogsConfig.FileStr, "Path for the zip file (default: <pwd>/kubeshark_logs.zip)")
}


@@ -0,0 +1,25 @@
# This example shows permissions that enrich the logs with additional info
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubeshark-runner-debug-clusterrole
rules:
- apiGroups: ["events.k8s.io"]
resources: ["events"]
verbs: ["watch"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["get"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubeshark-runner-debug-clusterrolebindings
subjects:
- kind: User
name: user-with-clusterwide-access
apiGroup: rbac.authorization.k8s.io
roleRef:
kind: ClusterRole
name: kubeshark-runner-debug-clusterrole
apiGroup: rbac.authorization.k8s.io


@@ -0,0 +1,37 @@
# This example shows permissions that are required for Kubeshark to resolve IPs to service names
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubeshark-resolver-clusterrole
rules:
- apiGroups: [""]
resources: ["serviceaccounts"]
verbs: ["get", "create"]
- apiGroups: ["rbac.authorization.k8s.io"]
resources: ["clusterroles"]
verbs: ["get", "list", "create", "delete"]
- apiGroups: ["rbac.authorization.k8s.io"]
resources: ["clusterrolebindings"]
verbs: ["get", "list", "create", "delete"]
- apiGroups: ["", "apps", "extensions"]
resources: ["pods"]
verbs: ["get", "list", "watch"]
- apiGroups: ["", "apps", "extensions"]
resources: ["services"]
verbs: ["get", "list", "watch"]
- apiGroups: ["", "apps", "extensions"]
resources: ["endpoints"]
verbs: ["get", "list", "watch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubeshark-resolver-clusterrolebindings
subjects:
- kind: User
name: user-with-clusterwide-access
apiGroup: rbac.authorization.k8s.io
roleRef:
kind: ClusterRole
name: kubeshark-resolver-clusterrole
apiGroup: rbac.authorization.k8s.io


@@ -0,0 +1,40 @@
# This example shows the permissions that are required in order to run the `kubeshark tap` command
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubeshark-runner-clusterrole
rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["list", "watch", "create"]
- apiGroups: [""]
resources: ["services"]
verbs: ["get", "create"]
- apiGroups: ["apps"]
resources: ["daemonsets"]
verbs: ["create", "patch"]
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["services/proxy"]
verbs: ["get", "create"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["create"]
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubeshark-runner-clusterrolebindings
subjects:
- kind: User
name: user-with-clusterwide-access
apiGroup: rbac.authorization.k8s.io
roleRef:
kind: ClusterRole
name: kubeshark-runner-clusterrole
apiGroup: rbac.authorization.k8s.io


@@ -0,0 +1,25 @@
# This example shows permissions that enrich the logs with additional info in namespace-restricted mode
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubeshark-runner-debug-role
rules:
- apiGroups: ["events.k8s.io"]
resources: ["events"]
verbs: ["watch"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["get"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubeshark-runner-debug-rolebindings
subjects:
- kind: User
name: user-with-restricted-access
apiGroup: rbac.authorization.k8s.io
roleRef:
kind: Role
name: kubeshark-runner-debug-role
apiGroup: rbac.authorization.k8s.io


@@ -0,0 +1,37 @@
# This example shows permissions that are required for Kubeshark to resolve IPs to service names in namespace-restricted mode
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubeshark-resolver-role
rules:
- apiGroups: [""]
resources: ["serviceaccounts"]
verbs: ["get", "list", "create", "delete"]
- apiGroups: ["rbac.authorization.k8s.io"]
resources: ["roles"]
verbs: ["get", "list", "create", "delete"]
- apiGroups: ["rbac.authorization.k8s.io"]
resources: ["rolebindings"]
verbs: ["get", "list", "create", "delete"]
- apiGroups: ["", "apps", "extensions"]
resources: ["pods"]
verbs: ["get", "list", "watch"]
- apiGroups: ["", "apps", "extensions"]
resources: ["services"]
verbs: ["get", "list", "watch"]
- apiGroups: ["", "apps", "extensions"]
resources: ["endpoints"]
verbs: ["get", "list", "watch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubeshark-resolver-rolebindings
subjects:
- kind: User
name: user-with-restricted-access
apiGroup: rbac.authorization.k8s.io
roleRef:
kind: Role
name: kubeshark-resolver-role
apiGroup: rbac.authorization.k8s.io


@@ -0,0 +1,37 @@
# This example shows the permissions that are required in order to run the `kubeshark tap` command in namespace-restricted mode
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubeshark-runner-role
rules:
- apiGroups: [""]
resources: ["pods"]
verbs: ["list", "watch", "create"]
- apiGroups: [""]
resources: ["services"]
verbs: ["get", "create", "delete"]
- apiGroups: ["apps"]
resources: ["daemonsets"]
verbs: ["create", "patch", "delete"]
- apiGroups: [""]
resources: ["services/proxy"]
verbs: ["get", "create", "delete"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["create", "delete"]
- apiGroups: [""]
resources: ["pods/log"]
verbs: ["get"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubeshark-runner-rolebindings
subjects:
- kind: User
name: user-with-restricted-access
apiGroup: rbac.authorization.k8s.io
roleRef:
kind: Role
name: kubeshark-runner-role
apiGroup: rbac.authorization.k8s.io

cmd/root.go (new file)

@@ -0,0 +1,57 @@
package cmd
import (
"fmt"
"log"
"time"
"github.com/creasty/defaults"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/kubeshark/version"
"github.com/kubeshark/kubeshark/uiUtils"
"github.com/spf13/cobra"
)
var rootCmd = &cobra.Command{
Use: "kubeshark",
Short: "A web traffic viewer for kubernetes",
Long: `A web traffic viewer for kubernetes
Further info is available at https://github.com/kubeshark/kubeshark`,
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
if err := config.InitConfig(cmd); err != nil {
log.Fatal(err)
}
return nil
},
}
func init() {
defaultConfig := config.CreateDefaultConfig()
if err := defaults.Set(&defaultConfig); err != nil {
log.Print(err)
}
rootCmd.PersistentFlags().StringSlice(config.SetCommandName, []string{}, fmt.Sprintf("Override values using --%s", config.SetCommandName))
rootCmd.PersistentFlags().String(config.ConfigFilePathCommandName, defaultConfig.ConfigFilePath, fmt.Sprintf("Override config file path using --%s", config.ConfigFilePathCommandName))
}
func printNewVersionIfNeeded(versionChan chan string) {
select {
case versionMsg := <-versionChan:
if versionMsg != "" {
log.Printf(uiUtils.Yellow, versionMsg)
}
case <-time.After(2 * time.Second):
}
}
// Execute adds all child commands to the root command and sets flags appropriately.
// This is called by main.main(). It only needs to happen once, to the rootCmd.
func Execute() {
versionChan := make(chan string)
defer printNewVersionIfNeeded(versionChan)
go version.CheckNewerVersion(versionChan)
cobra.CheckErr(rootCmd.Execute())
}

cmd/tap.go (new file)

@@ -0,0 +1,59 @@
package cmd
import (
"errors"
"log"
"github.com/creasty/defaults"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/config/configStructs"
"github.com/kubeshark/kubeshark/errormessage"
"github.com/spf13/cobra"
)
var tapCmd = &cobra.Command{
Use: "tap [POD REGEX]",
Short: "Record ingoing traffic of a kubernetes pod",
Long: `Record the ingoing traffic of a kubernetes pod.
Supported protocols are HTTP and gRPC.`,
RunE: func(cmd *cobra.Command, args []string) error {
RunKubesharkTap()
return nil
},
PreRunE: func(cmd *cobra.Command, args []string) error {
if len(args) == 1 {
config.Config.Tap.PodRegexStr = args[0]
} else if len(args) > 1 {
return errors.New("unexpected number of arguments")
}
if err := config.Config.Tap.Validate(); err != nil {
return errormessage.FormatError(err)
}
log.Printf("Kubeshark will store up to %s of traffic, old traffic will be cleared once the limit is reached.", config.Config.Tap.HumanMaxEntriesDBSize)
return nil
},
}
func init() {
rootCmd.AddCommand(tapCmd)
defaultTapConfig := configStructs.TapConfig{}
if err := defaults.Set(&defaultTapConfig); err != nil {
log.Print(err)
}
tapCmd.Flags().Uint16P(configStructs.GuiPortTapName, "p", defaultTapConfig.GuiPort, "Provide a custom port for the web interface")
tapCmd.Flags().StringSliceP(configStructs.NamespacesTapName, "n", defaultTapConfig.Namespaces, "Namespace selector")
tapCmd.Flags().BoolP(configStructs.AllNamespacesTapName, "A", defaultTapConfig.AllNamespaces, "Tap all namespaces")
tapCmd.Flags().Bool(configStructs.EnableRedactionTapName, defaultTapConfig.EnableRedaction, "Enables redaction of potentially sensitive request/response headers and body values")
tapCmd.Flags().String(configStructs.HumanMaxEntriesDBSizeTapName, defaultTapConfig.HumanMaxEntriesDBSize, "Override the default max entries db size")
tapCmd.Flags().String(configStructs.InsertionFilterName, defaultTapConfig.InsertionFilter, "Set the insertion filter. Accepts string or a file path.")
tapCmd.Flags().Bool(configStructs.DryRunTapName, defaultTapConfig.DryRun, "Preview of all pods matching the regex, without tapping them")
tapCmd.Flags().Bool(configStructs.ServiceMeshName, defaultTapConfig.ServiceMesh, "Record decrypted traffic if the cluster is configured with a service mesh and mTLS")
tapCmd.Flags().Bool(configStructs.TlsName, defaultTapConfig.Tls, "Record TLS traffic")
tapCmd.Flags().Bool(configStructs.ProfilerName, defaultTapConfig.Profiler, "Run pprof server")
tapCmd.Flags().Int(configStructs.MaxLiveStreamsName, defaultTapConfig.MaxLiveStreams, "Maximum live tcp streams to handle concurrently")
}

cmd/tapRunner.go (new file)

@@ -0,0 +1,456 @@
package cmd
import (
"context"
"errors"
"fmt"
"log"
"regexp"
"strings"
"time"
"github.com/kubeshark/kubeshark/resources"
"github.com/kubeshark/kubeshark/utils"
core "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/kubeshark/kubeshark/apiserver"
"github.com/kubeshark/kubeshark/cmd/goUtils"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/config/configStructs"
"github.com/kubeshark/kubeshark/errormessage"
"github.com/kubeshark/kubeshark/kubernetes"
"github.com/kubeshark/kubeshark/uiUtils"
"github.com/kubeshark/worker/api"
"github.com/kubeshark/worker/models"
)
const cleanupTimeout = time.Minute
type tapState struct {
startTime time.Time
targetNamespaces []string
kubesharkServiceAccountExists bool
}
var state tapState
var apiProvider *apiserver.Provider
var apiServerPodReady bool
var frontPodReady bool
var proxyDone bool
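// RunKubesharkTap deploys the Kubeshark agent resources into the cluster, then watches the API
// server and front pods, starting the proxies and the tapper syncer once they become ready.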
func RunKubesharkTap() {
state.startTime = time.Now()
apiProvider = apiserver.NewProvider(kubernetes.GetLocalhostOnPort(config.Config.Hub.PortForward.SrcPort), apiserver.DefaultRetries, apiserver.DefaultTimeout)
kubernetesProvider, err := getKubernetesProviderForCli()
if err != nil {
return
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel() // cancel will be called when this function exits
state.targetNamespaces = getNamespaces(kubernetesProvider)
kubesharkAgentConfig := getTapKubesharkAgentConfig()
serializedKubesharkConfig, err := getSerializedKubesharkAgentConfig(kubesharkAgentConfig)
if err != nil {
log.Printf(uiUtils.Error, fmt.Sprintf("Error serializing kubeshark config: %v", errormessage.FormatError(err)))
return
}
if config.Config.IsNsRestrictedMode() {
if len(state.targetNamespaces) != 1 || !utils.Contains(state.targetNamespaces, config.Config.KubesharkResourcesNamespace) {
log.Printf("Not supported mode. Kubeshark can't resolve IPs in other namespaces when running in namespace restricted mode.\n"+
"You can use the same namespace for --%s and --%s", configStructs.NamespacesTapName, config.KubesharkResourcesNamespaceConfigName)
return
}
}
var namespacesStr string
if !utils.Contains(state.targetNamespaces, kubernetes.K8sAllNamespaces) {
namespacesStr = fmt.Sprintf("namespaces \"%s\"", strings.Join(state.targetNamespaces, "\", \""))
} else {
namespacesStr = "all namespaces"
}
log.Printf("Tapping pods in %s", namespacesStr)
if err := printTappedPodsPreview(ctx, kubernetesProvider, state.targetNamespaces); err != nil {
log.Printf(uiUtils.Error, fmt.Sprintf("Error listing pods: %v", errormessage.FormatError(err)))
}
if config.Config.Tap.DryRun {
return
}
log.Printf("Waiting for Kubeshark Agent to start...")
if state.kubesharkServiceAccountExists, err = resources.CreateTapKubesharkResources(ctx, kubernetesProvider, serializedKubesharkConfig, config.Config.IsNsRestrictedMode(), config.Config.KubesharkResourcesNamespace, config.Config.AgentImage, config.Config.Tap.MaxEntriesDBSizeBytes(), config.Config.Tap.ApiServerResources, config.Config.ImagePullPolicy(), config.Config.LogLevel(), config.Config.Tap.Profiler); err != nil {
var statusError *k8serrors.StatusError
if errors.As(err, &statusError) && (statusError.ErrStatus.Reason == metav1.StatusReasonAlreadyExists) {
log.Print("Kubeshark is already running in this namespace, change the `kubeshark-resources-namespace` configuration or run `kubeshark clean` to remove the currently running Kubeshark instance")
} else {
defer resources.CleanUpKubesharkResources(ctx, cancel, kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.KubesharkResourcesNamespace)
log.Printf(uiUtils.Error, fmt.Sprintf("Error creating resources: %v", errormessage.FormatError(err)))
}
return
}
defer finishTapExecution(kubernetesProvider)
go goUtils.HandleExcWrapper(watchApiServerEvents, ctx, kubernetesProvider, cancel)
go goUtils.HandleExcWrapper(watchApiServerPod, ctx, kubernetesProvider, cancel)
go goUtils.HandleExcWrapper(watchFrontPod, ctx, kubernetesProvider, cancel)
// block until exit signal or error
utils.WaitForFinish(ctx, cancel)
}
func finishTapExecution(kubernetesProvider *kubernetes.Provider) {
finishKubesharkExecution(kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.KubesharkResourcesNamespace)
}
func getTapKubesharkAgentConfig() *models.Config {
kubesharkAgentConfig := models.Config{
MaxDBSizeBytes: config.Config.Tap.MaxEntriesDBSizeBytes(),
InsertionFilter: config.Config.Tap.GetInsertionFilter(),
AgentImage: config.Config.AgentImage,
PullPolicy: config.Config.ImagePullPolicyStr,
LogLevel: config.Config.LogLevel(),
TapperResources: config.Config.Tap.TapperResources,
KubesharkResourcesNamespace: config.Config.KubesharkResourcesNamespace,
AgentDatabasePath: models.DataDirPath,
ServiceMap: config.Config.ServiceMap,
OAS: config.Config.OAS,
}
return &kubesharkAgentConfig
}
/*
This function is a bit problematic, as its preview may diverge from the pods the Kubeshark API server
will actually tap. The alternative, waiting for the API server to be ready and then querying it for the
pods it listens to, has the arguably worse drawback of a long delay before the user sees which pods are
targeted, if any.
*/
func printTappedPodsPreview(ctx context.Context, kubernetesProvider *kubernetes.Provider, namespaces []string) error {
if matchingPods, err := kubernetesProvider.ListAllRunningPodsMatchingRegex(ctx, config.Config.Tap.PodRegex(), namespaces); err != nil {
return err
} else {
if len(matchingPods) == 0 {
printNoPodsFoundSuggestion(namespaces)
}
for _, tappedPod := range matchingPods {
log.Printf(uiUtils.Green, fmt.Sprintf("+%s", tappedPod.Name))
}
return nil
}
}
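// startTapperSyncer launches the component that keeps the set of tapped pods in sync with the pod
// filter, and listens for its error, pod-change, and tapper-status events in a background goroutine.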
func startTapperSyncer(ctx context.Context, cancel context.CancelFunc, provider *kubernetes.Provider, targetNamespaces []string, startTime time.Time) error {
tapperSyncer, err := kubernetes.CreateAndStartKubesharkTapperSyncer(ctx, provider, kubernetes.TapperSyncerConfig{
TargetNamespaces: targetNamespaces,
PodFilterRegex: *config.Config.Tap.PodRegex(),
KubesharkResourcesNamespace: config.Config.KubesharkResourcesNamespace,
AgentImage: config.Config.AgentImage,
TapperResources: config.Config.Tap.TapperResources,
ImagePullPolicy: config.Config.ImagePullPolicy(),
LogLevel: config.Config.LogLevel(),
KubesharkApiFilteringOptions: api.TrafficFilteringOptions{
IgnoredUserAgents: config.Config.Tap.IgnoredUserAgents,
},
KubesharkServiceAccountExists: state.kubesharkServiceAccountExists,
ServiceMesh: config.Config.Tap.ServiceMesh,
Tls: config.Config.Tap.Tls,
MaxLiveStreams: config.Config.Tap.MaxLiveStreams,
}, startTime)
if err != nil {
return err
}
go func() {
for {
select {
case syncerErr, ok := <-tapperSyncer.ErrorOut:
if !ok {
log.Print("kubesharkTapperSyncer err channel closed, ending listener loop")
return
}
log.Printf(uiUtils.Error, getErrorDisplayTextForK8sTapManagerError(syncerErr))
cancel()
case _, ok := <-tapperSyncer.TapPodChangesOut:
if !ok {
log.Print("kubesharkTapperSyncer pod changes channel closed, ending listener loop")
return
}
if err := apiProvider.ReportTappedPods(tapperSyncer.CurrentlyTappedPods); err != nil {
log.Printf("[Error] failed update tapped pods %v", err)
}
case tapperStatus, ok := <-tapperSyncer.TapperStatusChangedOut:
if !ok {
log.Print("kubesharkTapperSyncer tapper status changed channel closed, ending listener loop")
return
}
if err := apiProvider.ReportTapperStatus(tapperStatus); err != nil {
log.Printf("[Error] failed update tapper status %v", err)
}
case <-ctx.Done():
log.Print("kubesharkTapperSyncer event listener loop exiting due to context done")
return
}
}
}()
return nil
}
func printNoPodsFoundSuggestion(targetNamespaces []string) {
var suggestionStr string
if !utils.Contains(targetNamespaces, kubernetes.K8sAllNamespaces) {
suggestionStr = ". You can also try selecting a different namespace with -n or tap all namespaces with -A"
}
log.Printf(uiUtils.Warning, fmt.Sprintf("Did not find any currently running pods that match the regex argument, kubeshark will automatically tap matching pods if any are created later%s", suggestionStr))
}
func getErrorDisplayTextForK8sTapManagerError(err kubernetes.K8sTapManagerError) string {
switch err.TapManagerReason {
case kubernetes.TapManagerPodListError:
return fmt.Sprintf("Failed to update currently tapped pods: %v", err.OriginalError)
case kubernetes.TapManagerPodWatchError:
return fmt.Sprintf("Error occured in k8s pod watch: %v", err.OriginalError)
case kubernetes.TapManagerTapperUpdateError:
return fmt.Sprintf("Error updating tappers: %v", err.OriginalError)
default:
return fmt.Sprintf("Unknown error occured in k8s tap manager: %v", err.OriginalError)
}
}
func watchApiServerPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc) {
podExactRegex := regexp.MustCompile(fmt.Sprintf("^%s$", kubernetes.ApiServerPodName))
podWatchHelper := kubernetes.NewPodWatchHelper(kubernetesProvider, podExactRegex)
eventChan, errorChan := kubernetes.FilteredWatch(ctx, podWatchHelper, []string{config.Config.KubesharkResourcesNamespace}, podWatchHelper)
isPodReady := false
apiServerTimeoutSec := config.GetIntEnvConfig(config.ApiServerTimeoutSec, 120)
timeAfter := time.After(time.Duration(apiServerTimeoutSec) * time.Second)
for {
select {
case wEvent, ok := <-eventChan:
if !ok {
eventChan = nil
continue
}
switch wEvent.Type {
case kubernetes.EventAdded:
log.Printf("Watching API Server pod loop, added")
case kubernetes.EventDeleted:
log.Printf("%s removed", kubernetes.ApiServerPodName)
cancel()
return
case kubernetes.EventModified:
modifiedPod, err := wEvent.ToPod()
if err != nil {
log.Printf(uiUtils.Error, err)
cancel()
continue
}
log.Printf("Watching API Server pod loop, modified: %v, containers statuses: %v", modifiedPod.Status.Phase, modifiedPod.Status.ContainerStatuses)
if modifiedPod.Status.Phase == core.PodRunning && !isPodReady {
isPodReady = true
apiServerPodReady = true
postApiServerStarted(ctx, kubernetesProvider, cancel)
}
if !proxyDone && apiServerPodReady && frontPodReady {
proxyDone = true
postFrontStarted(ctx, kubernetesProvider, cancel)
}
case kubernetes.EventBookmark:
break
case kubernetes.EventError:
break
}
case err, ok := <-errorChan:
if !ok {
errorChan = nil
continue
}
log.Printf("[ERROR] Agent creation, watching %v namespace, error: %v", config.Config.KubesharkResourcesNamespace, err)
cancel()
case <-timeAfter:
if !isPodReady {
log.Printf(uiUtils.Error, "Kubeshark API server was not ready in time")
cancel()
}
case <-ctx.Done():
log.Printf("Watching API Server pod loop, ctx done")
return
}
}
}
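// watchFrontPod mirrors watchApiServerPod for the 'front' pod: it flips frontPodReady and starts
// the front proxy once both pods are running.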
func watchFrontPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc) {
podExactRegex := regexp.MustCompile(fmt.Sprintf("^%s$", "front"))
podWatchHelper := kubernetes.NewPodWatchHelper(kubernetesProvider, podExactRegex)
eventChan, errorChan := kubernetes.FilteredWatch(ctx, podWatchHelper, []string{config.Config.KubesharkResourcesNamespace}, podWatchHelper)
isPodReady := false
apiServerTimeoutSec := config.GetIntEnvConfig(config.ApiServerTimeoutSec, 120)
timeAfter := time.After(time.Duration(apiServerTimeoutSec) * time.Second)
for {
select {
case wEvent, ok := <-eventChan:
if !ok {
eventChan = nil
continue
}
switch wEvent.Type {
case kubernetes.EventAdded:
log.Printf("Watching API Server pod loop, added")
case kubernetes.EventDeleted:
log.Printf("%s removed", "front")
cancel()
return
case kubernetes.EventModified:
modifiedPod, err := wEvent.ToPod()
if err != nil {
log.Printf(uiUtils.Error, err)
cancel()
continue
}
log.Printf("Watching API Server pod loop, modified: %v, containers statuses: %v", modifiedPod.Status.Phase, modifiedPod.Status.ContainerStatuses)
if modifiedPod.Status.Phase == core.PodRunning && !isPodReady {
isPodReady = true
frontPodReady = true
}
if !proxyDone && apiServerPodReady && frontPodReady {
proxyDone = true
postFrontStarted(ctx, kubernetesProvider, cancel)
}
case kubernetes.EventBookmark:
break
case kubernetes.EventError:
break
}
case err, ok := <-errorChan:
if !ok {
errorChan = nil
continue
}
log.Printf("[ERROR] Agent creation, watching %v namespace, error: %v", config.Config.KubesharkResourcesNamespace, err)
cancel()
case <-timeAfter:
if !isPodReady {
log.Printf(uiUtils.Error, "Kubeshark API server was not ready in time")
cancel()
}
case <-ctx.Done():
log.Printf("Watching API Server pod loop, ctx done")
return
}
}
}
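// watchApiServerEvents watches Kubernetes events for the API server pod and aborts the tap on
// FailedScheduling or Failed events.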
func watchApiServerEvents(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc) {
podExactRegex := regexp.MustCompile(fmt.Sprintf("^%s", kubernetes.ApiServerPodName))
eventWatchHelper := kubernetes.NewEventWatchHelper(kubernetesProvider, podExactRegex, "pod")
eventChan, errorChan := kubernetes.FilteredWatch(ctx, eventWatchHelper, []string{config.Config.KubesharkResourcesNamespace}, eventWatchHelper)
for {
select {
case wEvent, ok := <-eventChan:
if !ok {
eventChan = nil
continue
}
event, err := wEvent.ToEvent()
if err != nil {
log.Printf("[ERROR] parsing Kubeshark resource event: %+v", err)
continue
}
if state.startTime.After(event.CreationTimestamp.Time) {
continue
}
log.Printf(
"Watching API server events loop, event %s, time: %v, resource: %s (%s), reason: %s, note: %s",
event.Name,
event.CreationTimestamp.Time,
event.Regarding.Name,
event.Regarding.Kind,
event.Reason,
event.Note,
)
switch event.Reason {
case "FailedScheduling", "Failed":
log.Printf(uiUtils.Error, fmt.Sprintf("Kubeshark API Server status: %s - %s", event.Reason, event.Note))
cancel()
}
case err, ok := <-errorChan:
if !ok {
errorChan = nil
continue
}
log.Printf("[Error] Watching API server events loop, error: %+v", err)
case <-ctx.Done():
log.Printf("Watching API server events loop, ctx done")
return
}
}
}
func postApiServerStarted(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc) {
startProxyReportErrorIfAny(kubernetesProvider, ctx, cancel, "kubeshark-api-server", config.Config.Hub.PortForward.SrcPort, config.Config.Hub.PortForward.DstPort, "/echo")
if err := startTapperSyncer(ctx, cancel, kubernetesProvider, state.targetNamespaces, state.startTime); err != nil {
log.Printf(uiUtils.Error, fmt.Sprintf("Error starting kubeshark tapper syncer: %v", errormessage.FormatError(err)))
cancel()
}
url := kubernetes.GetLocalhostOnPort(config.Config.Hub.PortForward.SrcPort)
log.Printf("API Server is available at %s", url)
}
func postFrontStarted(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc) {
startProxyReportErrorIfAny(kubernetesProvider, ctx, cancel, "front", config.Config.Front.PortForward.SrcPort, config.Config.Front.PortForward.DstPort, "")
url := kubernetes.GetLocalhostOnPort(config.Config.Front.PortForward.SrcPort)
log.Printf("Kubeshark is available at %s", url)
if !config.Config.HeadlessMode {
uiUtils.OpenBrowser(url)
}
}
func getNamespaces(kubernetesProvider *kubernetes.Provider) []string {
if config.Config.Tap.AllNamespaces {
return []string{kubernetes.K8sAllNamespaces}
} else if len(config.Config.Tap.Namespaces) > 0 {
return utils.Unique(config.Config.Tap.Namespaces)
} else {
currentNamespace, err := kubernetesProvider.CurrentNamespace()
if err != nil {
log.Fatalf(uiUtils.Red, fmt.Sprintf("error getting current namespace: %+v", err))
}
return []string{currentNamespace}
}
}

cmd/version.go (new file)

@@ -0,0 +1,42 @@
package cmd
import (
"log"
"strconv"
"time"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/config/configStructs"
"github.com/creasty/defaults"
"github.com/kubeshark/kubeshark/kubeshark"
"github.com/spf13/cobra"
)
var versionCmd = &cobra.Command{
Use: "version",
Short: "Print version info",
RunE: func(cmd *cobra.Command, args []string) error {
if config.Config.Version.DebugInfo {
timeStampInt, _ := strconv.ParseInt(kubeshark.BuildTimestamp, 10, 0)
log.Printf("Version: %s \nBranch: %s (%s)", kubeshark.Ver, kubeshark.Branch, kubeshark.GitCommitHash)
log.Printf("Build Time: %s (%s)", kubeshark.BuildTimestamp, time.Unix(timeStampInt, 0))
} else {
log.Printf("Version: %s (%s)", kubeshark.Ver, kubeshark.Branch)
}
return nil
},
}
func init() {
rootCmd.AddCommand(versionCmd)
defaultVersionConfig := configStructs.VersionConfig{}
if err := defaults.Set(&defaultVersionConfig); err != nil {
log.Print(err)
}
versionCmd.Flags().BoolP(configStructs.DebugInfoVersionName, "d", defaultVersionConfig.DebugInfo, "Print all available version information")
}

cmd/view.go (new file)

@@ -0,0 +1,34 @@
package cmd
import (
"log"
"github.com/creasty/defaults"
"github.com/kubeshark/kubeshark/config/configStructs"
"github.com/spf13/cobra"
)
var viewCmd = &cobra.Command{
Use: "view",
Short: "Open GUI in browser",
RunE: func(cmd *cobra.Command, args []string) error {
runKubesharkView()
return nil
},
}
func init() {
rootCmd.AddCommand(viewCmd)
defaultViewConfig := configStructs.ViewConfig{}
if err := defaults.Set(&defaultViewConfig); err != nil {
log.Print(err)
}
viewCmd.Flags().Uint16P(configStructs.GuiPortViewName, "p", defaultViewConfig.GuiPort, "Provide a custom port for the web interface")
viewCmd.Flags().StringP(configStructs.UrlViewName, "u", defaultViewConfig.Url, "Provide a custom host")
if err := viewCmd.Flags().MarkHidden(configStructs.UrlViewName); err != nil {
log.Print(err)
}
}

cmd/viewRunner.go (new file)

@@ -0,0 +1,65 @@
package cmd
import (
"context"
"fmt"
"log"
"net/http"
"github.com/kubeshark/kubeshark/utils"
"github.com/kubeshark/kubeshark/apiserver"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/kubernetes"
"github.com/kubeshark/kubeshark/uiUtils"
)
func runKubesharkView() {
kubernetesProvider, err := getKubernetesProviderForCli()
if err != nil {
return
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
url := config.Config.View.Url
if url == "" {
exists, err := kubernetesProvider.DoesServiceExist(ctx, config.Config.KubesharkResourcesNamespace, kubernetes.ApiServerPodName)
if err != nil {
log.Printf("Failed to found kubeshark service %v", err)
cancel()
return
}
if !exists {
log.Printf("%s service not found, you should run `kubeshark tap` command first", kubernetes.ApiServerPodName)
cancel()
return
}
url = kubernetes.GetLocalhostOnPort(config.Config.Front.PortForward.SrcPort)
response, err := http.Get(fmt.Sprintf("%s/", url))
if err == nil && response.StatusCode == 200 {
log.Printf("Found a running service %s and open port %d", kubernetes.ApiServerPodName, config.Config.Front.PortForward.SrcPort)
return
}
log.Printf("Establishing connection to k8s cluster...")
startProxyReportErrorIfAny(kubernetesProvider, ctx, cancel, "front", config.Config.Front.PortForward.SrcPort, config.Config.Front.PortForward.DstPort, "")
}
apiServerProvider := apiserver.NewProvider(url, apiserver.DefaultRetries, apiserver.DefaultTimeout)
if err := apiServerProvider.TestConnection(""); err != nil {
log.Printf(uiUtils.Error, "Couldn't connect to API server.")
return
}
log.Printf("Kubeshark is available at %s", url)
if !config.Config.HeadlessMode {
uiUtils.OpenBrowser(url)
}
utils.WaitForFinish(ctx, cancel)
}