mirror of https://github.com/kubeshark/kubeshark.git
Added post install check (#630)
@@ -76,14 +76,6 @@ func NewProvider(kubeConfigPath string) (*Provider, error) {
        "you can set alternative kube config file path by adding the kube-config-path field to the mizu config file, err: %w", kubeConfigPath, err)
    }

    if err := validateNotProxy(kubernetesConfig, restClientConfig); err != nil {
        return nil, err
    }

    if err := validateKubernetesVersion(clientSet); err != nil {
        return nil, err
    }

    return &Provider{
        clientSet:        clientSet,
        kubernetesConfig: kubernetesConfig,
@@ -419,11 +411,61 @@ func (provider *Provider) CreateService(ctx context.Context, namespace string, s
    return provider.clientSet.CoreV1().Services(namespace).Create(ctx, &service, metav1.CreateOptions{})
}

func (provider *Provider) DoesServicesExist(ctx context.Context, namespace string, name string) (bool, error) {
func (provider *Provider) DoesNamespaceExist(ctx context.Context, name string) (bool, error) {
    namespaceResource, err := provider.clientSet.CoreV1().Namespaces().Get(ctx, name, metav1.GetOptions{})
    return provider.doesResourceExist(namespaceResource, err)
}

func (provider *Provider) DoesConfigMapExist(ctx context.Context, namespace string, name string) (bool, error) {
    configMapResource, err := provider.clientSet.CoreV1().ConfigMaps(namespace).Get(ctx, name, metav1.GetOptions{})
    return provider.doesResourceExist(configMapResource, err)
}

func (provider *Provider) DoesServiceAccountExist(ctx context.Context, namespace string, name string) (bool, error) {
    serviceAccountResource, err := provider.clientSet.CoreV1().ServiceAccounts(namespace).Get(ctx, name, metav1.GetOptions{})
    return provider.doesResourceExist(serviceAccountResource, err)
}

func (provider *Provider) DoesPersistentVolumeClaimExist(ctx context.Context, namespace string, name string) (bool, error) {
    persistentVolumeClaimResource, err := provider.clientSet.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, name, metav1.GetOptions{})
    return provider.doesResourceExist(persistentVolumeClaimResource, err)
}

func (provider *Provider) DoesDeploymentExist(ctx context.Context, namespace string, name string) (bool, error) {
    deploymentResource, err := provider.clientSet.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{})
    return provider.doesResourceExist(deploymentResource, err)
}

func (provider *Provider) DoesPodExist(ctx context.Context, namespace string, name string) (bool, error) {
    podResource, err := provider.clientSet.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{})
    return provider.doesResourceExist(podResource, err)
}

func (provider *Provider) DoesServiceExist(ctx context.Context, namespace string, name string) (bool, error) {
    serviceResource, err := provider.clientSet.CoreV1().Services(namespace).Get(ctx, name, metav1.GetOptions{})
    return provider.doesResourceExist(serviceResource, err)
}

func (provider *Provider) DoesClusterRoleExist(ctx context.Context, name string) (bool, error) {
    clusterRoleResource, err := provider.clientSet.RbacV1().ClusterRoles().Get(ctx, name, metav1.GetOptions{})
    return provider.doesResourceExist(clusterRoleResource, err)
}

func (provider *Provider) DoesClusterRoleBindingExist(ctx context.Context, name string) (bool, error) {
    clusterRoleBindingResource, err := provider.clientSet.RbacV1().ClusterRoleBindings().Get(ctx, name, metav1.GetOptions{})
    return provider.doesResourceExist(clusterRoleBindingResource, err)
}

func (provider *Provider) DoesRoleExist(ctx context.Context, namespace string, name string) (bool, error) {
    roleResource, err := provider.clientSet.RbacV1().Roles(namespace).Get(ctx, name, metav1.GetOptions{})
    return provider.doesResourceExist(roleResource, err)
}

func (provider *Provider) DoesRoleBindingExist(ctx context.Context, namespace string, name string) (bool, error) {
    roleBindingResource, err := provider.clientSet.RbacV1().RoleBindings(namespace).Get(ctx, name, metav1.GetOptions{})
    return provider.doesResourceExist(roleBindingResource, err)
}

func (provider *Provider) doesResourceExist(resource interface{}, err error) (bool, error) {
    // Getting a NotFound error is the expected behavior when a resource does not exist.
    if k8serrors.IsNotFound(err) {
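The hunk ends in the middle of doesResourceExist. Below is a minimal sketch of how such a helper plausibly completes, assuming the k8serrors alias refers to k8s.io/apimachinery/pkg/api/errors and that the enclosing package is named kubernetes (both are assumptions; this is not code taken from the diff):

package kubernetes

import k8serrors "k8s.io/apimachinery/pkg/api/errors"

// doesResourceExistSketch is a hypothetical completion of the helper above.
func doesResourceExistSketch(resource interface{}, err error) (bool, error) {
    // A NotFound error is the expected outcome when the resource is absent.
    if k8serrors.IsNotFound(err) {
        return false, nil
    }
    // Any other error is a genuine failure and is propagated to the caller.
    if err != nil {
        return false, err
    }
    // No error and a non-nil object means the resource exists.
    return resource != nil, nil
}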
@@ -1045,6 +1087,45 @@ func (provider *Provider) CreatePersistentVolumeClaim(ctx context.Context, names
    return provider.clientSet.CoreV1().PersistentVolumeClaims(namespace).Create(ctx, volumeClaim, metav1.CreateOptions{})
}

// ValidateNotProxy was added after a customer tried to run mizu from Lens, which used Lens's kube config, whose cluster server configuration points to Lens's local proxy.
// The workaround was to use the user's local default kube config.
// For now, we are blocking the option to run mizu through a proxy to the k8s server.
func (provider *Provider) ValidateNotProxy() error {
    kubernetesUrl, err := url.Parse(provider.clientConfig.Host)
    if err != nil {
        logger.Log.Debugf("ValidateNotProxy - error while parsing kubernetes host, err: %v", err)
        return nil
    }

    restProxyClientConfig, _ := provider.kubernetesConfig.ClientConfig()
    restProxyClientConfig.Host = kubernetesUrl.Host

    clientProxySet, err := getClientSet(restProxyClientConfig)
    if err == nil {
        proxyServerVersion, err := clientProxySet.ServerVersion()
        if err != nil {
            return nil
        }

        if *proxyServerVersion == (version.Info{}) {
            return &ClusterBehindProxyError{}
        }
    }

    return nil
}

func (provider *Provider) GetKubernetesVersion() (*semver.SemVersion, error) {
    serverVersion, err := provider.clientSet.ServerVersion()
    if err != nil {
        logger.Log.Debugf("error while getting kubernetes server version, err: %v", err)
        return nil, err
    }

    serverVersionSemVer := semver.SemVersion(serverVersion.GitVersion)
    return &serverVersionSemVer, nil
}

func getClientSet(config *restclient.Config) (*kubernetes.Clientset, error) {
    clientSet, err := kubernetes.NewForConfig(config)
    if err != nil {
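ValidateNotProxy deliberately re-points a probe client at kubernetesUrl.Host alone, dropping any path segment a proxying kubeconfig (such as the one Lens generates) may carry, and treats an empty version.Info response as the signature of a proxy rather than a real API server. A small standalone illustration of the host extraction step, using a hypothetical proxy address:

package main

import (
    "fmt"
    "net/url"
)

func main() {
    // Hypothetical cluster server entry written by a local proxy.
    raw := "https://127.0.0.1:9443/clusters/demo"
    kubernetesUrl, err := url.Parse(raw)
    if err != nil {
        panic(err)
    }
    // The probe client is built from host:port only, so the proxy path is bypassed.
    fmt.Println(kubernetesUrl.Host) // prints: 127.0.0.1:9443
}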
@@ -1054,6 +1135,15 @@ func getClientSet(config *restclient.Config) (*kubernetes.Clientset, error) {
    return clientSet, nil
}

func ValidateKubernetesVersion(serverVersionSemVer *semver.SemVersion) error {
    minKubernetesServerVersionSemVer := semver.SemVersion(MinKubernetesServerVersion)
    if minKubernetesServerVersionSemVer.GreaterThan(*serverVersionSemVer) {
        return fmt.Errorf("kubernetes server version %v is not supported, supporting only kubernetes server version of %v or higher", serverVersionSemVer, MinKubernetesServerVersion)
    }

    return nil
}

func loadKubernetesConfiguration(kubeConfigPath string) clientcmd.ClientConfig {
    logger.Log.Debugf("Using kube config %s", kubeConfigPath)
    configPathList := filepath.SplitList(kubeConfigPath)
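loadKubernetesConfiguration is cut off after splitting the path list. filepath.SplitList breaks a KUBECONFIG-style value on the OS list separator, and client-go's clientcmd package can merge the resulting files by precedence. The sketch below shows that common pattern; it is an assumption about how the function continues, since the rest of its body is not visible in this diff:

package kubernetes

import (
    "path/filepath"

    "k8s.io/client-go/tools/clientcmd"
)

// loadKubernetesConfigurationSketch is illustrative only; the real function's
// body past filepath.SplitList is not shown in the diff.
func loadKubernetesConfigurationSketch(kubeConfigPath string) clientcmd.ClientConfig {
    // "~/.kube/config:/tmp/extra-config" becomes ["~/.kube/config", "/tmp/extra-config"] on Unix.
    configPathList := filepath.SplitList(kubeConfigPath)
    loadingRules := clientcmd.ClientConfigLoadingRules{Precedence: configPathList}
    return clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
        &loadingRules,
        &clientcmd.ConfigOverrides{},
    )
}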
@@ -1075,47 +1165,3 @@ func loadKubernetesConfiguration(kubeConfigPath string) clientcmd.ClientConfig {
func isPodRunning(pod *core.Pod) bool {
    return pod.Status.Phase == core.PodRunning
}

// We added this after a customer tried to run mizu from Lens, which used Lens's kube config, whose cluster server configuration points to Lens's local proxy.
// The workaround was to use the user's local default kube config.
// For now, we are blocking the option to run mizu through a proxy to the k8s server.
func validateNotProxy(kubernetesConfig clientcmd.ClientConfig, restClientConfig *restclient.Config) error {
    kubernetesUrl, err := url.Parse(restClientConfig.Host)
    if err != nil {
        logger.Log.Debugf("validateNotProxy - error while parsing kubernetes host, err: %v", err)
        return nil
    }

    restProxyClientConfig, _ := kubernetesConfig.ClientConfig()
    restProxyClientConfig.Host = kubernetesUrl.Host

    clientProxySet, err := getClientSet(restProxyClientConfig)
    if err == nil {
        proxyServerVersion, err := clientProxySet.ServerVersion()
        if err != nil {
            return nil
        }

        if *proxyServerVersion == (version.Info{}) {
            return &ClusterBehindProxyError{}
        }
    }

    return nil
}

func validateKubernetesVersion(clientSet *kubernetes.Clientset) error {
    serverVersion, err := clientSet.ServerVersion()
    if err != nil {
        logger.Log.Debugf("error while getting kubernetes server version, err: %v", err)
        return nil
    }

    serverVersionSemVer := semver.SemVersion(serverVersion.GitVersion)
    minKubernetesServerVersionSemVer := semver.SemVersion(MinKubernetesServerVersion)
    if minKubernetesServerVersionSemVer.GreaterThan(serverVersionSemVer) {
        return fmt.Errorf("kubernetes server version %v is not supported, supporting only kubernetes server version of %v or higher", serverVersion.GitVersion, MinKubernetesServerVersion)
    }

    return nil
}
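Taken together, the new exported helpers enable the post-install check this commit is named after. A hypothetical caller, assumed to live alongside the Provider and to import "context" and "fmt"; the function name, the "mizu" namespace, and the check order are illustrative assumptions, not part of this diff:

// checkKubernetesApi is a hypothetical post-install check built from the
// helpers added above; names and resource choices are assumptions.
func checkKubernetesApi(ctx context.Context, provider *Provider) error {
    // Refuse to run through a proxying kube config (see ValidateNotProxy above).
    if err := provider.ValidateNotProxy(); err != nil {
        return err
    }

    // Fetch the server version once and validate it against the supported minimum.
    kubernetesVersion, err := provider.GetKubernetesVersion()
    if err != nil {
        return err
    }
    if err := ValidateKubernetesVersion(kubernetesVersion); err != nil {
        return err
    }

    // Verify that an expected resource was actually installed.
    exists, err := provider.DoesNamespaceExist(ctx, "mizu") // hypothetical namespace name
    if err != nil {
        return err
    }
    if !exists {
        return fmt.Errorf("namespace %q was not found", "mizu")
    }

    return nil
}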