🐛 Fix the check command

This commit is contained in:
M. Mert Yildiran 2022-11-29 06:03:42 +03:00
parent cae3b4fe17
commit 0e31fce585
No known key found for this signature in database
GPG Key ID: DA5D6DCBB758A461
10 changed files with 45 additions and 108 deletions

View File

@@ -1,9 +1,6 @@
package cmd
import (
"github.com/creasty/defaults"
"github.com/kubeshark/kubeshark/config/configStructs"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
)
@@ -18,12 +15,4 @@ var checkCmd = &cobra.Command{
// init registers the `check` subcommand on the root command and wires up its
// flags.
// NOTE(review): this SOURCE is a rendered commit diff with the +/- markers
// stripped; the hunk header above ("-18,12 +15,4") says the new init shrank to
// 4 lines, so the defaults/flag lines below are most likely the removed
// (pre-commit) side — confirm against the repository before editing.
func init() {
rootCmd.AddCommand(checkCmd)
// Pre-commit: per-check defaults were populated via creasty/defaults, with a
// logged (but non-fatal) error on failure.
defaultCheckConfig := configStructs.CheckConfig{}
if err := defaults.Set(&defaultCheckConfig); err != nil {
log.Error().Err(err).Send()
}
// Pre-commit: boolean flags selecting which check procedures to run.
checkCmd.Flags().Bool(configStructs.PreTapCheckName, defaultCheckConfig.PreTap, "Check pre-tap Kubeshark installation for potential problems")
checkCmd.Flags().Bool(configStructs.ImagePullCheckName, defaultCheckConfig.ImagePull, "Test connectivity to container image registry by creating and removing a temporary pod in 'default' namespace")
}

View File

@ -14,7 +14,7 @@ import (
)
func ImagePullInCluster(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool {
log.Info().Msg("[image-pull-in-cluster]")
log.Info().Str("procedure", "image-pull-in-cluster").Msg("Checking:")
namespace := "default"
podName := "kubeshark-test"
@ -92,6 +92,8 @@ func checkImagePulled(ctx context.Context, kubernetesProvider *kubernetes.Provid
}
func createImagePullInClusterPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, namespace string, podName string) error {
image := "kubeshark/worker:latest"
log.Info().Str("image", image).Msg("Testing image pull:")
var zero int64
pod := &core.Pod{
ObjectMeta: metav1.ObjectMeta{
@ -101,7 +103,7 @@ func createImagePullInClusterPod(ctx context.Context, kubernetesProvider *kubern
Containers: []core.Container{
{
Name: "probe",
Image: "kubeshark/busybox",
Image: "kubeshark/worker:latest",
ImagePullPolicy: "Always",
Command: []string{"cat"},
Stdin: true,

View File

@ -8,7 +8,7 @@ import (
)
func KubernetesApi() (*kubernetes.Provider, *semver.SemVersion, bool) {
log.Info().Msg("[kubernetes-api]")
log.Info().Str("procedure", "kubernetes-api").Msg("Checking:")
kubernetesProvider, err := kubernetes.NewProvider(config.Config.KubeConfigPath(), config.Config.KubeContext)
if err != nil {

View File

@ -13,7 +13,7 @@ import (
)
func TapKubernetesPermissions(ctx context.Context, embedFS embed.FS, kubernetesProvider *kubernetes.Provider) bool {
log.Info().Msg("[kubernetes-permissions]")
log.Info().Str("procedure", "kubernetes-permissions").Msg("Checking:")
var filePath string
if config.Config.IsNsRestrictedMode() {

View File

@ -10,7 +10,7 @@ import (
)
func KubernetesResources(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool {
log.Info().Msg("[k8s-components]")
log.Info().Str("procedure", "k8s-components").Msg("Checking:")
exist, err := kubernetesProvider.DoesNamespaceExist(ctx, config.Config.ResourcesNamespace)
allResourcesExist := checkResourceExist(config.Config.ResourcesNamespace, "namespace", exist, err)

View File

@ -1,19 +1,22 @@
package check
import (
"fmt"
"github.com/kubeshark/kubeshark/kubernetes"
"github.com/kubeshark/kubeshark/semver"
"github.com/kubeshark/kubeshark/utils"
"github.com/rs/zerolog/log"
)
// KubernetesVersion reports whether the connected cluster satisfies the
// minimum Kubernetes API version required by Kubeshark (validated by
// kubernetes.ValidateKubernetesVersion). Returns false and logs an error on
// an unsupported version.
// NOTE(review): rendered diff — each pair of adjacent, near-duplicate log
// lines below is the removed (old) and added (new) side of one change, not a
// double log at runtime.
func KubernetesVersion(kubernetesVersion *semver.SemVersion) bool {
log.Info().Msg("[kubernetes-api]")
log.Info().Str("procedure", "kubernetes-version").Msg("Checking:")
if err := kubernetes.ValidateKubernetesVersion(kubernetesVersion); err != nil {
log.Error().Err(err).Msg("Not running the minimum Kubernetes API version!")
// Post-commit variant: includes the cluster version and red-colored message.
log.Error().Str("k8s-version", string(*kubernetesVersion)).Err(err).Msg(fmt.Sprintf(utils.Red, "The cluster does not have the minimum required Kubernetes API version!"))
return false
}
log.Info().Msg("Running the minimum Kubernetes API version")
log.Info().Str("k8s-version", string(*kubernetesVersion)).Msg("Minimum required Kubernetes API version is passed.")
return true
}

View File

@ -1,9 +1,6 @@
package check
import (
"context"
"regexp"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/internal/connect"
"github.com/kubeshark/kubeshark/kubernetes"
@ -11,72 +8,33 @@ import (
)
// ServerConnection checks connectivity to the deployed Kubeshark components.
// NOTE(review): rendered diff with interleaved old/new lines. Old behavior:
// probe the Hub directly, then fall back to proxy and port-forward, returning
// connectedToHub. New behavior: probe Hub (on "/echo") and Front through the
// proxy, returning true only when BOTH succeed. Do not read the lines below
// as one sequential program.
func ServerConnection(kubernetesProvider *kubernetes.Provider) bool {
log.Info().Msg("[hub-connectivity]")
log.Info().Str("procedure", "server-connectivity").Msg("Checking:")
serverUrl := kubernetes.GetLocalhostOnPort(config.Config.Hub.PortForward.SrcPort)
// New version tracks Hub and Front connectivity separately.
var connectedToHub, connectedToFront bool
connector := connect.NewConnector(serverUrl, 1, connect.DefaultTimeout)
if err := connector.TestConnection(""); err == nil {
log.Info().Msg("Found Kubeshark server tunnel available and connected successfully to Hub!")
return true
}
connectedToHub := false
if err := checkProxy(serverUrl, kubernetesProvider); err != nil {
// New version probes the Hub's "/echo" endpoint through the proxy.
if err := checkProxy(kubernetes.GetLocalhostOnPort(config.Config.Hub.PortForward.SrcPort), "/echo", kubernetesProvider); err != nil {
log.Error().Err(err).Msg("Couldn't connect to Hub using proxy!")
} else {
connectedToHub = true
log.Info().Msg("Connected successfully to Hub using proxy.")
}
// Old version fell back to a port-forward; new version probes Front instead.
if err := checkPortForward(serverUrl, kubernetesProvider); err != nil {
log.Error().Err(err).Msg("Couldn't connect to Hub using port-forward!")
if err := checkProxy(kubernetes.GetLocalhostOnPort(config.Config.Front.PortForward.SrcPort), "", kubernetesProvider); err != nil {
log.Error().Err(err).Msg("Couldn't connect to Front using proxy!")
} else {
connectedToHub = true
log.Info().Msg("Connected successfully to Hub using port-forward.")
connectedToFront = true
log.Info().Msg("Connected successfully to Front using proxy.")
}
return connectedToHub
return connectedToHub && connectedToFront
}
// NOTE(review): rendered diff — TWO versions of checkProxy are merged below.
// Old version (first header): started its own proxy with kubernetes.StartProxy
// and shut the HTTP server down after the test. New version (second header):
// takes an extra `path` argument and only probes an already-running proxy via
// connector.TestConnection(path). Neither block below is complete on its own.
func checkProxy(serverUrl string, kubernetesProvider *kubernetes.Provider) error {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
httpServer, err := kubernetes.StartProxy(kubernetesProvider, config.Config.Tap.ProxyHost, config.Config.Hub.PortForward.SrcPort, config.Config.Hub.PortForward.DstPort, config.Config.ResourcesNamespace, kubernetes.HubServiceName, cancel)
if err != nil {
return err
}
// New signature adds `path` so the same helper can probe Hub ("/echo") and
// Front ("").
func checkProxy(serverUrl string, path string, kubernetesProvider *kubernetes.Provider) error {
log.Info().Str("url", serverUrl).Msg("Connecting:")
connector := connect.NewConnector(serverUrl, connect.DefaultRetries, connect.DefaultTimeout)
if err := connector.TestConnection(path); err != nil {
return err
}
// Old version only: stop the proxy server it started above.
if err := httpServer.Shutdown(ctx); err != nil {
log.Error().Err(err).Msg("While stopping the proxy!")
}
return nil
}
// checkPortForward probes serverUrl over a temporary port-forward to the Hub
// pod. It returns a non-nil error if the forward cannot be established or the
// connection test fails; on success the forward is closed before returning.
func checkPortForward(serverUrl string, kubernetesProvider *kubernetes.Provider) error {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// The Compile error is deliberately discarded, exactly as before:
	// HubPodName is expected to be a valid pattern.
	hubPodPattern, _ := regexp.Compile(kubernetes.HubPodName)

	pf, err := kubernetes.NewPortForward(kubernetesProvider, config.Config.ResourcesNamespace, hubPodPattern, config.Config.Tap.GuiPort, config.Config.Tap.GuiPort, ctx, cancel)
	if err != nil {
		return err
	}

	probe := connect.NewConnector(serverUrl, connect.DefaultRetries, connect.DefaultTimeout)
	if err := probe.TestConnection(""); err != nil {
		return err
	}

	pf.Close()
	return nil
}

View File

@ -4,9 +4,9 @@ import (
"context"
"embed"
"fmt"
"os"
"github.com/kubeshark/kubeshark/cmd/check"
"github.com/kubeshark/kubeshark/config"
"github.com/kubeshark/kubeshark/utils"
"github.com/rs/zerolog/log"
)
@ -17,7 +17,7 @@ var (
)
// runKubesharkCheck runs the deployment health checks in sequence, short-
// circuiting on the first failure, and exits non-zero when any check fails.
// NOTE(review): rendered diff — old/new lines are interleaved, and the hunk
// header embedded mid-function marks skipped context; this span is NOT one
// contiguous function body. Confirm the post-commit version against the repo.
func runKubesharkCheck() {
log.Info().Msg("Kubeshark checks...")
log.Info().Msg("Checking the deployment...")
ctx, cancel := context.WithCancel(context.Background())
defer cancel() // cancel will be called when this function exits
@ -28,31 +28,28 @@ func runKubesharkCheck() {
checkPassed = check.KubernetesVersion(kubernetesVersion)
}
// Old version: checks were gated by the (now removed) Check.PreTap and
// Check.ImagePull config flags.
if config.Config.Check.PreTap || config.Config.Check.ImagePull {
if config.Config.Check.PreTap {
if checkPassed {
checkPassed = check.TapKubernetesPermissions(ctx, embedFS, kubernetesProvider)
}
}
if config.Config.Check.ImagePull {
if checkPassed {
checkPassed = check.ImagePullInCluster(ctx, kubernetesProvider)
}
}
} else {
if checkPassed {
checkPassed = check.KubernetesResources(ctx, kubernetesProvider)
}
if checkPassed {
checkPassed = check.ServerConnection(kubernetesProvider)
}
// New version: always run permissions, image-pull, resources, and server
// connectivity checks, each only if everything before it passed.
if checkPassed {
checkPassed = check.TapKubernetesPermissions(ctx, embedFS, kubernetesProvider)
}
if checkPassed {
log.Info().Msg(fmt.Sprintf("Status check results are %v", fmt.Sprintf(utils.Green, "√")))
checkPassed = check.ImagePullInCluster(ctx, kubernetesProvider)
}
if checkPassed {
checkPassed = check.KubernetesResources(ctx, kubernetesProvider)
}
if checkPassed {
checkPassed = check.ServerConnection(kubernetesProvider)
}
if checkPassed {
log.Info().Msg(fmt.Sprintf(utils.Green, "All checks are passed."))
} else {
log.Info().Msg(fmt.Sprintf("Status check results are %v", fmt.Sprintf(utils.Red, "✗")))
// New failure path: suggest remediation commands and exit non-zero.
log.Error().
Str("command1", "kubeshark clean").
Str("command2", "kubeshark tap").
Msg(fmt.Sprintf(utils.Red, "There are issues in your deployment! Run these commands:"))
os.Exit(1)
}
}

View File

@ -58,7 +58,6 @@ type ConfigStruct struct {
Hub HubConfig `yaml:"hub"`
Front FrontConfig `yaml:"front"`
Tap configStructs.TapConfig `yaml:"tap"`
Check configStructs.CheckConfig `yaml:"check"`
View configStructs.ViewConfig `yaml:"view"`
Logs configStructs.LogsConfig `yaml:"logs"`
Config configStructs.ConfigConfig `yaml:"config,omitempty"`

View File

@ -1,11 +0,0 @@
package configStructs
// Flag names for the `check` command's sub-check toggles.
// NOTE(review): the hunk header above ("-1,11 +0,0") shows this whole file was
// deleted by the commit; the definitions below are the removed content.
const (
PreTapCheckName = "pre-tap"
ImagePullCheckName = "image-pull"
)
// CheckConfig holds the (pre-commit) per-check enable flags, loaded from the
// "check" section of the YAML config.
type CheckConfig struct {
PreTap bool `yaml:"pre-tap"`
ImagePull bool `yaml:"image-pull"`
}