Fix minor bugs and remove unused dependency (#714)

Igor Gov 2022-01-30 08:51:17 +02:00 committed by GitHub
parent 3bab83754f
commit 5536e5bb44
4 changed files with 19 additions and 19 deletions


@@ -12,7 +12,6 @@ require (
 	github.com/go-playground/locales v0.13.0
 	github.com/go-playground/universal-translator v0.17.0
 	github.com/go-playground/validator/v10 v10.5.0
-	github.com/google/martian v2.1.0+incompatible
 	github.com/google/uuid v1.1.2
 	github.com/gorilla/websocket v1.4.2
 	github.com/op/go-logging v0.0.0-20160315200505-970db520ece7
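The hunk above removes github.com/google/martian from the require block; the commit title identifies it as an unused dependency, so nothing in the module imports it any longer. As an aside (standard Go tooling, not part of this commit), the toolchain can prune such leftovers automatically:

    go mod tidy    # removes require entries no package in the module imports, and adds any missing ones

Running it after deleting code keeps go.mod and go.sum in sync with the actual import graph.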


@@ -3,13 +3,14 @@ package cmd
 import (
 	"context"
 	"fmt"
+	"regexp"
+
 	"github.com/up9inc/mizu/cli/apiserver"
 	"github.com/up9inc/mizu/cli/config"
 	"github.com/up9inc/mizu/cli/uiUtils"
 	"github.com/up9inc/mizu/shared/kubernetes"
 	"github.com/up9inc/mizu/shared/logger"
 	"github.com/up9inc/mizu/shared/semver"
-	"regexp"
 )

 func runMizuCheck() {
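The import change is a grouping fix: regexp is in the standard library, so it moves from the bottom of the third-party list into the first import group, the layout goimports enforces. A small runnable sketch of the convention, reusing github.com/google/uuid from the go.mod above as the third-party example (illustrative code, not from this commit):

    package main

    import (
        // Standard-library imports form the first group.
        "fmt"
        "regexp"

        // Third-party modules sit in their own group below.
        "github.com/google/uuid"
    )

    func main() {
        id := uuid.New().String()
        // A trivial use of regexp, the import this commit regrouped.
        matched, _ := regexp.MatchString("^[0-9a-f-]+$", id)
        fmt.Println(id, "looks like a UUID:", matched)
    }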
@@ -169,32 +170,32 @@ func checkAllResourcesExist(ctx context.Context, kubernetesProvider *kubernetes.Provider
 	allResourcesExist := checkResourceExist(config.Config.MizuResourcesNamespace, "namespace", exist, err)

 	exist, err = kubernetesProvider.DoesConfigMapExist(ctx, config.Config.MizuResourcesNamespace, kubernetes.ConfigMapName)
-	allResourcesExist = checkResourceExist(kubernetes.ConfigMapName, "config map", exist, err)
+	allResourcesExist = allResourcesExist && checkResourceExist(kubernetes.ConfigMapName, "config map", exist, err)

 	exist, err = kubernetesProvider.DoesServiceAccountExist(ctx, config.Config.MizuResourcesNamespace, kubernetes.ServiceAccountName)
-	allResourcesExist = checkResourceExist(kubernetes.ServiceAccountName, "service account", exist, err)
+	allResourcesExist = allResourcesExist && checkResourceExist(kubernetes.ServiceAccountName, "service account", exist, err)

 	if config.Config.IsNsRestrictedMode() {
 		exist, err = kubernetesProvider.DoesRoleExist(ctx, config.Config.MizuResourcesNamespace, kubernetes.RoleName)
-		allResourcesExist = checkResourceExist(kubernetes.RoleName, "role", exist, err)
+		allResourcesExist = allResourcesExist && checkResourceExist(kubernetes.RoleName, "role", exist, err)

 		exist, err = kubernetesProvider.DoesRoleBindingExist(ctx, config.Config.MizuResourcesNamespace, kubernetes.RoleBindingName)
-		allResourcesExist = checkResourceExist(kubernetes.RoleBindingName, "role binding", exist, err)
+		allResourcesExist = allResourcesExist && checkResourceExist(kubernetes.RoleBindingName, "role binding", exist, err)
 	} else {
 		exist, err = kubernetesProvider.DoesClusterRoleExist(ctx, kubernetes.ClusterRoleName)
-		allResourcesExist = checkResourceExist(kubernetes.ClusterRoleName, "cluster role", exist, err)
+		allResourcesExist = allResourcesExist && checkResourceExist(kubernetes.ClusterRoleName, "cluster role", exist, err)

 		exist, err = kubernetesProvider.DoesClusterRoleBindingExist(ctx, kubernetes.ClusterRoleBindingName)
-		allResourcesExist = checkResourceExist(kubernetes.ClusterRoleBindingName, "cluster role binding", exist, err)
+		allResourcesExist = allResourcesExist && checkResourceExist(kubernetes.ClusterRoleBindingName, "cluster role binding", exist, err)
 	}

 	exist, err = kubernetesProvider.DoesServiceExist(ctx, config.Config.MizuResourcesNamespace, kubernetes.ApiServerPodName)
-	allResourcesExist = checkResourceExist(kubernetes.ApiServerPodName, "service", exist, err)
+	allResourcesExist = allResourcesExist && checkResourceExist(kubernetes.ApiServerPodName, "service", exist, err)

 	if isInstallCommand {
-		allResourcesExist = checkInstallResourcesExist(ctx, kubernetesProvider)
+		allResourcesExist = allResourcesExist && checkInstallResourcesExist(ctx, kubernetesProvider)
 	} else {
-		allResourcesExist = checkTapResourcesExist(ctx, kubernetesProvider)
+		allResourcesExist = allResourcesExist && checkTapResourcesExist(ctx, kubernetesProvider)
 	}

 	return allResourcesExist
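Every changed line in this hunk (and in checkInstallResourcesExist, next) fixes the same logic bug: a plain assignment such as allResourcesExist = checkResourceExist(...) threw away the results of all earlier checks, so a resource reported missing early on could be masked by a later check that passed. Folding each result in with && makes any single failure stick. A minimal standalone sketch of the difference, where check stands in for checkResourceExist (hypothetical names, not code from this commit):

    package main

    import "fmt"

    // check mimics checkResourceExist: it logs the result and reports
    // whether the named resource exists.
    func check(name string, exists bool) bool {
        fmt.Printf("resource %q exists: %v\n", name, exists)
        return exists
    }

    func main() {
        // Buggy pattern: each assignment overwrites the accumulator,
        // so the "config map" failure below is silently lost.
        all := check("namespace", true)
        all = check("config map", false)
        all = check("service", true)
        fmt.Println("buggy result:", all) // prints true, wrongly

        // Fixed pattern: && folds every result into the accumulator,
        // so one failure keeps it false.
        all = check("namespace", true)
        all = all && check("config map", false)
        all = all && check("service", true)
        fmt.Println("fixed result:", all) // prints false, correctly
    }

One consequence worth noting: && short-circuits in Go, so once the accumulator is false the remaining check calls, and the log lines they would print, are skipped.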
@@ -205,13 +206,13 @@ func checkInstallResourcesExist(ctx context.Context, kubernetesProvider *kubernetes.Provider
 	installResourcesExist := checkResourceExist(kubernetes.DaemonRoleName, "role", exist, err)

 	exist, err = kubernetesProvider.DoesRoleBindingExist(ctx, config.Config.MizuResourcesNamespace, kubernetes.DaemonRoleBindingName)
-	installResourcesExist = checkResourceExist(kubernetes.DaemonRoleBindingName, "role binding", exist, err)
+	installResourcesExist = installResourcesExist && checkResourceExist(kubernetes.DaemonRoleBindingName, "role binding", exist, err)

 	exist, err = kubernetesProvider.DoesPersistentVolumeClaimExist(ctx, config.Config.MizuResourcesNamespace, kubernetes.PersistentVolumeClaimName)
-	installResourcesExist = checkResourceExist(kubernetes.PersistentVolumeClaimName, "persistent volume claim", exist, err)
+	installResourcesExist = installResourcesExist && checkResourceExist(kubernetes.PersistentVolumeClaimName, "persistent volume claim", exist, err)

 	exist, err = kubernetesProvider.DoesDeploymentExist(ctx, config.Config.MizuResourcesNamespace, kubernetes.ApiServerPodName)
-	installResourcesExist = checkResourceExist(kubernetes.ApiServerPodName, "deployment", exist, err)
+	installResourcesExist = installResourcesExist && checkResourceExist(kubernetes.ApiServerPodName, "deployment", exist, err)

 	return installResourcesExist
 }


@@ -23,7 +23,7 @@ var logsCmd = &cobra.Command{
 		if err != nil {
 			return nil
 		}

-		ctx, _ := context.WithCancel(context.Background())
+		ctx := context.Background()
 		if validationErr := config.Config.Logs.Validate(); validationErr != nil {
 			return errormessage.FormatError(validationErr)

@@ -342,7 +342,7 @@ func watchApiServerPod(ctx context.Context, kubernetesProvider *kubernetes.Provider
 			if modifiedPod.Status.Phase == core.PodRunning && !isPodReady {
 				isPodReady = true
-				postApiServerStarted(ctx, kubernetesProvider, cancel, err)
+				postApiServerStarted(ctx, kubernetesProvider, cancel)
 			}
 		case kubernetes.EventBookmark:
 			break
@@ -405,7 +405,7 @@ func watchApiServerEvents(ctx context.Context, kubernetesProvider *kubernetes.Provider
 			case "FailedScheduling", "Failed":
 				logger.Log.Errorf(uiUtils.Error, fmt.Sprintf("Mizu API Server status: %s - %s", event.Reason, event.Note))
 				cancel()
-				break
 			}
 		case err, ok := <-errorChan:
 			if !ok {
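The break removed in the hunk above was dead code: Go's switch and select cases end implicitly and never fall through, so a break as the last statement of a case changes nothing. A one-screen reminder (not code from this commit):

    package main

    import "fmt"

    func main() {
        switch reason := "Failed"; reason {
        case "FailedScheduling", "Failed":
            fmt.Println("api server status:", reason)
            // no break needed: control leaves the switch here anyway
        default:
            fmt.Println("no error")
        }
    }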
@@ -421,11 +421,11 @@ func watchApiServerEvents(ctx context.Context, kubernetesProvider *kubernetes.Provider
 	}
 }

-func postApiServerStarted(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc, err error) {
+func postApiServerStarted(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc) {
 	startProxyReportErrorIfAny(kubernetesProvider, ctx, cancel)
 	options, _ := getMizuApiFilteringOptions()

-	if err = startTapperSyncer(ctx, cancel, kubernetesProvider, state.targetNamespaces, *options, state.startTime); err != nil {
+	if err := startTapperSyncer(ctx, cancel, kubernetesProvider, state.targetNamespaces, *options, state.startTime); err != nil {
 		logger.Log.Errorf(uiUtils.Error, fmt.Sprintf("Error starting mizu tapper syncer: %v", err))
 		cancel()
 	}
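This final hunk drops the err parameter that postApiServerStarted never read, only overwrote in the startTapperSyncer assignment. With it gone, the call site shrinks (see the watchApiServerPod hunk above) and the error moves to an if-scoped declaration, the idiomatic Go form: err exists only inside the if statement, so it cannot leak into, or shadow anything in, the enclosing scope. A minimal sketch of the idiom, with start standing in for startTapperSyncer (hypothetical, not code from this commit):

    package main

    import (
        "errors"
        "fmt"
    )

    // start stands in for startTapperSyncer.
    func start() error { return errors.New("tapper syncer failed") }

    func main() {
        // err is declared in the if statement's init clause, so its
        // scope is exactly the block that handles it.
        if err := start(); err != nil {
            fmt.Println("error starting mizu tapper syncer:", err)
        }
        // err is out of scope here; no stale error variable lingers.
    }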