Added post install check (#630)

RoyUP9 2022-01-23 16:52:58 +02:00 committed by GitHub
parent bcea6cdc49
commit 569f8ae143
5 changed files with 323 additions and 54 deletions

cli/cmd/check.go Normal file

@@ -0,0 +1,20 @@
package cmd
import (
"github.com/spf13/cobra"
"github.com/up9inc/mizu/cli/telemetry"
)
var checkCmd = &cobra.Command{
Use: "check",
Short: "Check the Mizu installation for potential problems",
RunE: func(cmd *cobra.Command, args []string) error {
go telemetry.ReportRun("check", nil)
runMizuCheck()
return nil
},
}
func init() {
rootCmd.AddCommand(checkCmd)
}
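
The new file is standard cobra wiring: a check subcommand registered on the root command from init(). As a self-contained sketch of the same pattern (hypothetical names, not part of this commit):

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	rootCmd := &cobra.Command{Use: "mizu"}
	checkCmd := &cobra.Command{
		Use:   "check",
		Short: "Check the installation for potential problems",
		RunE: func(cmd *cobra.Command, args []string) error {
			fmt.Println("running checks...")
			return nil
		},
	}
	rootCmd.AddCommand(checkCmd)
	rootCmd.SetArgs([]string{"check"}) // stands in for the command-line arguments
	if err := rootCmd.Execute(); err != nil {
		fmt.Println(err)
	}
}

SetArgs substitutes for real command-line arguments, so Execute dispatches to the check subcommand just as running `mizu check` from a shell would.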

cli/cmd/checkRunner.go Normal file

@@ -0,0 +1,186 @@
package cmd
import (
"context"
"fmt"
"github.com/up9inc/mizu/cli/apiserver"
"github.com/up9inc/mizu/cli/config"
"github.com/up9inc/mizu/cli/uiUtils"
"github.com/up9inc/mizu/shared/kubernetes"
"github.com/up9inc/mizu/shared/logger"
"github.com/up9inc/mizu/shared/semver"
"net/http"
)
func runMizuCheck() {
logger.Log.Infof("Mizu install checks\n===================")
ctx, cancel := context.WithCancel(context.Background())
defer cancel() // cancel will be called when this function exits
kubernetesProvider, kubernetesVersion, checkPassed := checkKubernetesApi()
if checkPassed {
checkPassed = checkKubernetesVersion(kubernetesVersion)
}
var isInstallCommand bool
if checkPassed {
checkPassed, isInstallCommand = checkMizuMode(ctx, kubernetesProvider)
}
if checkPassed {
checkPassed = checkAllResourcesExist(ctx, kubernetesProvider, isInstallCommand)
}
if checkPassed {
checkPassed = checkServerConnection(kubernetesProvider, cancel)
}
if checkPassed {
logger.Log.Infof("\nStatus check results are %v", fmt.Sprintf(uiUtils.Green, "√"))
} else {
logger.Log.Errorf("\nStatus check results are %v", fmt.Sprintf(uiUtils.Red, "✗"))
}
}
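
The chain of if checkPassed guards short-circuits the remaining checks as soon as one fails. The same shape could be written as iteration over an ordered list of checks; a minimal sketch under that assumption (hypothetical names, not part of this commit):

package main

import "fmt"

func main() {
	checks := []struct {
		name string
		run  func() bool
	}{
		{"kubernetes-api", func() bool { return true }},
		{"kubernetes-version", func() bool { return true }},
		{"mizu-mode", func() bool { return false }},
		{"mizu-existence", func() bool { return true }}, // never reached
	}
	passed := true
	for _, check := range checks {
		if !check.run() {
			passed = false
			break // stop at the first failing check, like the guards above
		}
	}
	fmt.Println("all checks passed:", passed)
}

The commit keeps the explicit guards instead, which makes it easier to thread state (the provider and the server version) from one step into the next.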
func checkKubernetesApi() (*kubernetes.Provider, *semver.SemVersion, bool) {
logger.Log.Infof("\nkubernetes-api\n--------------------")
kubernetesProvider, err := kubernetes.NewProvider(config.Config.KubeConfigPath())
if err != nil {
logger.Log.Errorf("%v can't initialize the client, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
return nil, nil, false
}
logger.Log.Infof("%v can initialize the client", fmt.Sprintf(uiUtils.Green, "√"))
kubernetesVersion, err := kubernetesProvider.GetKubernetesVersion()
if err != nil {
logger.Log.Errorf("%v can't query the Kubernetes API, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
return nil, nil, false
}
logger.Log.Infof("%v can query the Kubernetes API", fmt.Sprintf(uiUtils.Green, "√"))
return kubernetesProvider, kubernetesVersion, true
}
func checkMizuMode(ctx context.Context, kubernetesProvider *kubernetes.Provider) (bool, bool) {
logger.Log.Infof("\nmizu-mode\n--------------------")
if exist, err := kubernetesProvider.DoesDeploymentExist(ctx, config.Config.MizuResourcesNamespace, kubernetes.ApiServerPodName); err != nil {
logger.Log.Errorf("%v can't check mizu command, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
return false, false
} else if exist {
logger.Log.Infof("%v mizu running with install command", fmt.Sprintf(uiUtils.Green, "√"))
return true, true
} else {
logger.Log.Infof("%v mizu running with tap command", fmt.Sprintf(uiUtils.Green, "√"))
return true, false
}
}
func checkKubernetesVersion(kubernetesVersion *semver.SemVersion) bool {
logger.Log.Infof("\nkubernetes-version\n--------------------")
	if err := kubernetes.ValidateKubernetesVersion(kubernetesVersion); err != nil {
		logger.Log.Errorf("%v not running a supported Kubernetes API version, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
		return false
	}
	logger.Log.Infof("%v is running a supported Kubernetes API version", fmt.Sprintf(uiUtils.Green, "√"))
	return true
}
func checkServerConnection(kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc) bool {
logger.Log.Infof("\nmizu-connectivity\n--------------------")
serverUrl := GetApiServerUrl()
	// Probe the API server directly first; fall back to starting a local proxy when it is unreachable.
	if response, err := http.Get(fmt.Sprintf("%s/", serverUrl)); err != nil || response.StatusCode != 200 {
		startProxyReportErrorIfAny(kubernetesProvider, cancel)
	} else {
		response.Body.Close()
	}
apiServerProvider := apiserver.NewProvider(serverUrl, apiserver.DefaultRetries, apiserver.DefaultTimeout)
if err := apiServerProvider.TestConnection(); err != nil {
logger.Log.Errorf("%v couldn't connect to API server, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), err)
return false
}
logger.Log.Infof("%v connected successfully to API server", fmt.Sprintf(uiUtils.Green, "√"))
return true
}
func checkAllResourcesExist(ctx context.Context, kubernetesProvider *kubernetes.Provider, isInstallCommand bool) bool {
logger.Log.Infof("\nmizu-existence\n--------------------")
	exist, err := kubernetesProvider.DoesNamespaceExist(ctx, config.Config.MizuResourcesNamespace)
	allResourcesExist := checkResourceExist(config.Config.MizuResourcesNamespace, "namespace", exist, err)
	// Fold each subsequent result into the aggregate after the call, so every check runs and logs even if an earlier one failed.
	exist, err = kubernetesProvider.DoesConfigMapExist(ctx, config.Config.MizuResourcesNamespace, kubernetes.ConfigMapName)
	allResourcesExist = checkResourceExist(kubernetes.ConfigMapName, "config map", exist, err) && allResourcesExist
	exist, err = kubernetesProvider.DoesServiceAccountExist(ctx, config.Config.MizuResourcesNamespace, kubernetes.ServiceAccountName)
	allResourcesExist = checkResourceExist(kubernetes.ServiceAccountName, "service account", exist, err) && allResourcesExist
	if config.Config.IsNsRestrictedMode() {
		exist, err = kubernetesProvider.DoesRoleExist(ctx, config.Config.MizuResourcesNamespace, kubernetes.RoleName)
		allResourcesExist = checkResourceExist(kubernetes.RoleName, "role", exist, err) && allResourcesExist
		exist, err = kubernetesProvider.DoesRoleBindingExist(ctx, config.Config.MizuResourcesNamespace, kubernetes.RoleBindingName)
		allResourcesExist = checkResourceExist(kubernetes.RoleBindingName, "role binding", exist, err) && allResourcesExist
	} else {
		exist, err = kubernetesProvider.DoesClusterRoleExist(ctx, kubernetes.ClusterRoleName)
		allResourcesExist = checkResourceExist(kubernetes.ClusterRoleName, "cluster role", exist, err) && allResourcesExist
		exist, err = kubernetesProvider.DoesClusterRoleBindingExist(ctx, kubernetes.ClusterRoleBindingName)
		allResourcesExist = checkResourceExist(kubernetes.ClusterRoleBindingName, "cluster role binding", exist, err) && allResourcesExist
	}
	exist, err = kubernetesProvider.DoesServiceExist(ctx, config.Config.MizuResourcesNamespace, kubernetes.ApiServerPodName)
	allResourcesExist = checkResourceExist(kubernetes.ApiServerPodName, "service", exist, err) && allResourcesExist
	if isInstallCommand {
		allResourcesExist = checkInstallResourcesExist(ctx, kubernetesProvider) && allResourcesExist
	} else {
		allResourcesExist = checkTapResourcesExist(ctx, kubernetesProvider) && allResourcesExist
	}
	return allResourcesExist
}
func checkInstallResourcesExist(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool {
	exist, err := kubernetesProvider.DoesRoleExist(ctx, config.Config.MizuResourcesNamespace, kubernetes.DaemonRoleName)
	installResourcesExist := checkResourceExist(kubernetes.DaemonRoleName, "role", exist, err)
	// As above, keep running the remaining checks and fold each result into the aggregate.
	exist, err = kubernetesProvider.DoesRoleBindingExist(ctx, config.Config.MizuResourcesNamespace, kubernetes.DaemonRoleBindingName)
	installResourcesExist = checkResourceExist(kubernetes.DaemonRoleBindingName, "role binding", exist, err) && installResourcesExist
	exist, err = kubernetesProvider.DoesPersistentVolumeClaimExist(ctx, config.Config.MizuResourcesNamespace, kubernetes.PersistentVolumeClaimName)
	installResourcesExist = checkResourceExist(kubernetes.PersistentVolumeClaimName, "persistent volume claim", exist, err) && installResourcesExist
	exist, err = kubernetesProvider.DoesDeploymentExist(ctx, config.Config.MizuResourcesNamespace, kubernetes.ApiServerPodName)
	installResourcesExist = checkResourceExist(kubernetes.ApiServerPodName, "deployment", exist, err) && installResourcesExist
	return installResourcesExist
}
func checkTapResourcesExist(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool {
exist, err := kubernetesProvider.DoesPodExist(ctx, config.Config.MizuResourcesNamespace, kubernetes.ApiServerPodName)
tapResourcesExist := checkResourceExist(kubernetes.ApiServerPodName, "pod", exist, err)
return tapResourcesExist
}
func checkResourceExist(resourceName string, resourceType string, exist bool, err error) bool {
if err != nil {
logger.Log.Errorf("%v error checking if '%v' %v exists, err: %v", fmt.Sprintf(uiUtils.Red, "✗"), resourceName, resourceType, err)
return false
} else if !exist {
logger.Log.Errorf("%v '%v' %v doesn't exist", fmt.Sprintf(uiUtils.Red, "✗"), resourceName, resourceType)
return false
} else {
logger.Log.Infof("%v '%v' %v exists", fmt.Sprintf(uiUtils.Green, "√"), resourceName, resourceType)
}
return true
}
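
A note on how results are aggregated above: each checkResourceExist call sits before the &&, so it always runs and logs its own line; writing allResourcesExist = allResourcesExist && checkResourceExist(...) would short-circuit past the remaining checks after the first failure. A tiny self-contained illustration (hypothetical names, not part of this commit):

package main

import "fmt"

func noisyCheck(name string, ok bool) bool {
	fmt.Printf("checked %v: %v\n", name, ok)
	return ok
}

func main() {
	all := true
	// Call the check first so it always runs and logs, then fold its result in.
	all = noisyCheck("namespace", false) && all
	all = noisyCheck("config map", true) && all // still runs and logs despite the earlier failure
	fmt.Println("all resources exist:", all)
}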

cli/cmd/common.go

@@ -64,6 +64,23 @@ func getKubernetesProviderForCli() (*kubernetes.Provider, error) {
handleKubernetesProviderError(err)
return nil, err
}
if err := kubernetesProvider.ValidateNotProxy(); err != nil {
handleKubernetesProviderError(err)
return nil, err
}
kubernetesVersion, err := kubernetesProvider.GetKubernetesVersion()
if err != nil {
handleKubernetesProviderError(err)
return nil, err
}
if err := kubernetes.ValidateKubernetesVersion(kubernetesVersion); err != nil {
handleKubernetesProviderError(err)
return nil, err
}
return kubernetesProvider, nil
}

cli/cmd/viewRunner.go

@@ -27,7 +27,7 @@ func runMizuView() {
url := config.Config.View.Url
if url == "" {
exists, err := kubernetesProvider.DoesServicesExist(ctx, config.Config.MizuResourcesNamespace, kubernetes.ApiServerPodName)
exists, err := kubernetesProvider.DoesServiceExist(ctx, config.Config.MizuResourcesNamespace, kubernetes.ApiServerPodName)
if err != nil {
logger.Log.Errorf("Failed to found mizu service %v", err)
cancel()

shared/kubernetes/provider.go

@@ -76,14 +76,6 @@ func NewProvider(kubeConfigPath string) (*Provider, error) {
"you can set alternative kube config file path by adding the kube-config-path field to the mizu config file, err: %w", kubeConfigPath, err)
}
if err := validateNotProxy(kubernetesConfig, restClientConfig); err != nil {
return nil, err
}
if err := validateKubernetesVersion(clientSet); err != nil {
return nil, err
}
return &Provider{
clientSet: clientSet,
kubernetesConfig: kubernetesConfig,
@@ -419,11 +411,61 @@ func (provider *Provider) CreateService(ctx context.Context, namespace string, s
return provider.clientSet.CoreV1().Services(namespace).Create(ctx, &service, metav1.CreateOptions{})
}
func (provider *Provider) DoesServicesExist(ctx context.Context, namespace string, name string) (bool, error) {
func (provider *Provider) DoesNamespaceExist(ctx context.Context, name string) (bool, error) {
namespaceResource, err := provider.clientSet.CoreV1().Namespaces().Get(ctx, name, metav1.GetOptions{})
return provider.doesResourceExist(namespaceResource, err)
}
func (provider *Provider) DoesConfigMapExist(ctx context.Context, namespace string, name string) (bool, error) {
configMapResource, err := provider.clientSet.CoreV1().ConfigMaps(namespace).Get(ctx, name, metav1.GetOptions{})
return provider.doesResourceExist(configMapResource, err)
}
func (provider *Provider) DoesServiceAccountExist(ctx context.Context, namespace string, name string) (bool, error) {
serviceAccountResource, err := provider.clientSet.CoreV1().ServiceAccounts(namespace).Get(ctx, name, metav1.GetOptions{})
return provider.doesResourceExist(serviceAccountResource, err)
}
func (provider *Provider) DoesPersistentVolumeClaimExist(ctx context.Context, namespace string, name string) (bool, error) {
persistentVolumeClaimResource, err := provider.clientSet.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, name, metav1.GetOptions{})
return provider.doesResourceExist(persistentVolumeClaimResource, err)
}
func (provider *Provider) DoesDeploymentExist(ctx context.Context, namespace string, name string) (bool, error) {
deploymentResource, err := provider.clientSet.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{})
return provider.doesResourceExist(deploymentResource, err)
}
func (provider *Provider) DoesPodExist(ctx context.Context, namespace string, name string) (bool, error) {
podResource, err := provider.clientSet.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{})
return provider.doesResourceExist(podResource, err)
}
func (provider *Provider) DoesServiceExist(ctx context.Context, namespace string, name string) (bool, error) {
serviceResource, err := provider.clientSet.CoreV1().Services(namespace).Get(ctx, name, metav1.GetOptions{})
return provider.doesResourceExist(serviceResource, err)
}
func (provider *Provider) DoesClusterRoleExist(ctx context.Context, name string) (bool, error) {
clusterRoleResource, err := provider.clientSet.RbacV1().ClusterRoles().Get(ctx, name, metav1.GetOptions{})
return provider.doesResourceExist(clusterRoleResource, err)
}
func (provider *Provider) DoesClusterRoleBindingExist(ctx context.Context, name string) (bool, error) {
clusterRoleBindingResource, err := provider.clientSet.RbacV1().ClusterRoleBindings().Get(ctx, name, metav1.GetOptions{})
return provider.doesResourceExist(clusterRoleBindingResource, err)
}
func (provider *Provider) DoesRoleExist(ctx context.Context, namespace string, name string) (bool, error) {
roleResource, err := provider.clientSet.RbacV1().Roles(namespace).Get(ctx, name, metav1.GetOptions{})
return provider.doesResourceExist(roleResource, err)
}
func (provider *Provider) DoesRoleBindingExist(ctx context.Context, namespace string, name string) (bool, error) {
roleBindingResource, err := provider.clientSet.RbacV1().RoleBindings(namespace).Get(ctx, name, metav1.GetOptions{})
return provider.doesResourceExist(roleBindingResource, err)
}
func (provider *Provider) doesResourceExist(resource interface{}, err error) (bool, error) {
// Getting NotFound error is the expected behavior when a resource does not exist.
if k8serrors.IsNotFound(err) {
@@ -1045,6 +1087,45 @@ func (provider *Provider) CreatePersistentVolumeClaim(ctx context.Context, names
return provider.clientSet.CoreV1().PersistentVolumeClaims(namespace).Create(ctx, volumeClaim, metav1.CreateOptions{})
}
// ValidateNotProxy We added this after a customer tried to run mizu from Lens, which used Lens's kube config, whose cluster server configuration points to Lens's local proxy.
// The workaround was to use the user's local default kube config.
// For now, we are blocking the option to run mizu through a proxy to the k8s server.
func (provider *Provider) ValidateNotProxy() error {
kubernetesUrl, err := url.Parse(provider.clientConfig.Host)
if err != nil {
logger.Log.Debugf("ValidateNotProxy - error while parsing kubernetes host, err: %v", err)
return nil
}
restProxyClientConfig, _ := provider.kubernetesConfig.ClientConfig()
restProxyClientConfig.Host = kubernetesUrl.Host
clientProxySet, err := getClientSet(restProxyClientConfig)
if err == nil {
proxyServerVersion, err := clientProxySet.ServerVersion()
if err != nil {
return nil
}
if *proxyServerVersion == (version.Info{}) {
return &ClusterBehindProxyError{}
}
}
return nil
}
func (provider *Provider) GetKubernetesVersion() (*semver.SemVersion, error) {
serverVersion, err := provider.clientSet.ServerVersion()
if err != nil {
logger.Log.Debugf("error while getting kubernetes server version, err: %v", err)
return nil, err
}
serverVersionSemVer := semver.SemVersion(serverVersion.GitVersion)
return &serverVersionSemVer, nil
}
func getClientSet(config *restclient.Config) (*kubernetes.Clientset, error) {
clientSet, err := kubernetes.NewForConfig(config)
if err != nil {
@@ -1054,6 +1135,15 @@ func getClientSet(config *restclient.Config) (*kubernetes.Clientset, error) {
return clientSet, nil
}
func ValidateKubernetesVersion(serverVersionSemVer *semver.SemVersion) error {
minKubernetesServerVersionSemVer := semver.SemVersion(MinKubernetesServerVersion)
if minKubernetesServerVersionSemVer.GreaterThan(*serverVersionSemVer) {
return fmt.Errorf("kubernetes server version %v is not supported, supporting only kubernetes server version of %v or higher", serverVersionSemVer, MinKubernetesServerVersion)
}
return nil
}
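
ValidateKubernetesVersion is now exported and takes the already-parsed version, so CLI-side callers such as checkKubernetesVersion above can reuse it. A hedged usage sketch, assuming SemVersion stays a string-backed type as the conversion from GitVersion above suggests (not part of this commit):

package main

import (
	"fmt"

	"github.com/up9inc/mizu/shared/kubernetes"
	"github.com/up9inc/mizu/shared/semver"
)

func main() {
	// Mirrors the conversion GetKubernetesVersion performs on serverVersion.GitVersion.
	serverVersion := semver.SemVersion("v1.15.0")
	if err := kubernetes.ValidateKubernetesVersion(&serverVersion); err != nil {
		fmt.Println(err) // rejected when below MinKubernetesServerVersion
	}
}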
func loadKubernetesConfiguration(kubeConfigPath string) clientcmd.ClientConfig {
logger.Log.Debugf("Using kube config %s", kubeConfigPath)
configPathList := filepath.SplitList(kubeConfigPath)
@@ -1075,47 +1165,3 @@ func loadKubernetesConfiguration(kubeConfigPath string) clientcmd.ClientConfig {
func isPodRunning(pod *core.Pod) bool {
return pod.Status.Phase == core.PodRunning
}
// We added this after a customer tried to run mizu from Lens, which used Lens's kube config, whose cluster server configuration points to Lens's local proxy.
// The workaround was to use the user's local default kube config.
// For now, we are blocking the option to run mizu through a proxy to the k8s server.
func validateNotProxy(kubernetesConfig clientcmd.ClientConfig, restClientConfig *restclient.Config) error {
kubernetesUrl, err := url.Parse(restClientConfig.Host)
if err != nil {
logger.Log.Debugf("validateNotProxy - error while parsing kubernetes host, err: %v", err)
return nil
}
restProxyClientConfig, _ := kubernetesConfig.ClientConfig()
restProxyClientConfig.Host = kubernetesUrl.Host
clientProxySet, err := getClientSet(restProxyClientConfig)
if err == nil {
proxyServerVersion, err := clientProxySet.ServerVersion()
if err != nil {
return nil
}
if *proxyServerVersion == (version.Info{}) {
return &ClusterBehindProxyError{}
}
}
return nil
}
func validateKubernetesVersion(clientSet *kubernetes.Clientset) error {
serverVersion, err := clientSet.ServerVersion()
if err != nil {
logger.Log.Debugf("error while getting kubernetes server version, err: %v", err)
return nil
}
serverVersionSemVer := semver.SemVersion(serverVersion.GitVersion)
minKubernetesServerVersionSemVer := semver.SemVersion(MinKubernetesServerVersion)
if minKubernetesServerVersionSemVer.GreaterThan(serverVersionSemVer) {
return fmt.Errorf("kubernetes server version %v is not supported, supporting only kubernetes server version of %v or higher", serverVersion.GitVersion, MinKubernetesServerVersion)
}
return nil
}