🔥 Remove getTapConfig method

This commit is contained in:
M. Mert Yildiran 2022-12-29 03:34:57 +03:00
parent de38ef259e
commit f21fe93664
No known key found for this signature in database
GPG Key ID: DA5D6DCBB758A461
2 changed files with 13 additions and 43 deletions
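
In short, the commit drops the getTapConfig helper, the config serialization step, and the Hub ConfigMap it fed. A sketch of the net effect on the tap() call site, reconstructed from the hunks below (surrounding code elided; this is a restatement of the diff, not an additional change):

// Before (removed by this commit): build a models.Config, serialize it, and
// hand it to CreateHubResources, which also wrote it into a ConfigMap.
conf := getTapConfig()
serializedKubesharkConfig, err := getSerializedTapConfig(conf)
if err != nil {
	log.Error().Err(errormessage.FormatError(err)).Msg("Error serializing Kubeshark config!")
	return
}
state.kubesharkServiceAccountExists, err = resources.CreateHubResources(ctx, kubernetesProvider, serializedKubesharkConfig, config.Config.IsNsRestrictedMode(), config.Config.SelfNamespace, config.Config.Tap.Resources.Hub, config.Config.ImagePullPolicy(), config.Config.Tap.Debug)

// After: the helper and the serialization step are gone; CreateHubResources
// receives the namespace and resource settings directly from config.Config.
state.kubesharkServiceAccountExists, err = resources.CreateHubResources(ctx, kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.SelfNamespace, config.Config.Tap.Resources.Hub, config.Config.ImagePullPolicy(), config.Config.Tap.Debug)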

View File

@@ -16,7 +16,6 @@ import (
	k8serrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"github.com/kubeshark/base/pkg/models"
	"github.com/kubeshark/kubeshark/config"
	"github.com/kubeshark/kubeshark/config/configStructs"
	"github.com/kubeshark/kubeshark/errormessage"
@@ -60,13 +59,6 @@ func tap() {
	state.targetNamespaces = getNamespaces(kubernetesProvider)
-	conf := getTapConfig()
-	serializedKubesharkConfig, err := getSerializedTapConfig(conf)
-	if err != nil {
-		log.Error().Err(errormessage.FormatError(err)).Msg("Error serializing Kubeshark config!")
-		return
-	}
	if config.Config.IsNsRestrictedMode() {
		if len(state.targetNamespaces) != 1 || !utils.Contains(state.targetNamespaces, config.Config.SelfNamespace) {
			log.Error().Msg(fmt.Sprintf("Kubeshark can't resolve IPs in other namespaces when running in namespace restricted mode. You can use the same namespace for --%s and --%s", configStructs.NamespacesLabel, config.SelfNamespaceConfigName))
@@ -85,7 +77,7 @@ func tap() {
	}
	log.Info().Msg("Waiting for the creation of Kubeshark resources...")
-	if state.kubesharkServiceAccountExists, err = resources.CreateHubResources(ctx, kubernetesProvider, serializedKubesharkConfig, config.Config.IsNsRestrictedMode(), config.Config.SelfNamespace, config.Config.Tap.Resources.Hub, config.Config.ImagePullPolicy(), config.Config.Tap.Debug); err != nil {
+	if state.kubesharkServiceAccountExists, err = resources.CreateHubResources(ctx, kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.SelfNamespace, config.Config.Tap.Resources.Hub, config.Config.ImagePullPolicy(), config.Config.Tap.Debug); err != nil {
		var statusError *k8serrors.StatusError
		if errors.As(err, &statusError) && (statusError.ErrStatus.Reason == metav1.StatusReasonAlreadyExists) {
			log.Warn().Msg("Kubeshark is already running in this namespace, change the `kubeshark-resources-namespace` configuration or run `kubeshark clean` to remove the currently running Kubeshark instance")
@@ -111,19 +103,6 @@ func finishTapExecution(kubernetesProvider *kubernetes.Provider) {
	finishKubesharkExecution(kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.SelfNamespace)
}
-func getTapConfig() *models.Config {
-	// TODO: Remove models.Config
-	conf := models.Config{
-		MaxDBSizeBytes:     config.Config.Tap.StorageLimitBytes(),
-		PullPolicy:         config.Config.Tap.Docker.ImagePullPolicy,
-		WorkerResources:    config.Config.Tap.Resources.Worker,
-		ResourcesNamespace: config.Config.SelfNamespace,
-		DatabasePath:       models.DataDirPath,
-	}
-	return &conf
-}
/*
This function is a bit problematic as it might be detached from the actual pods the Kubeshark that targets.
The alternative would be to wait for Hub to be ready and then query it for the pods it listens to, this has

View File

@@ -13,18 +13,14 @@ import (
	core "k8s.io/api/core/v1"
)
-func CreateHubResources(ctx context.Context, kubernetesProvider *kubernetes.Provider, serializedKubesharkConfig string, isNsRestrictedMode bool, kubesharkResourcesNamespace string, hubResources models.Resources, imagePullPolicy core.PullPolicy, debug bool) (bool, error) {
+func CreateHubResources(ctx context.Context, kubernetesProvider *kubernetes.Provider, isNsRestrictedMode bool, selfNamespace string, hubResources models.Resources, imagePullPolicy core.PullPolicy, debug bool) (bool, error) {
	if !isNsRestrictedMode {
-		if err := createKubesharkNamespace(ctx, kubernetesProvider, kubesharkResourcesNamespace); err != nil {
+		if err := createSelfNamespace(ctx, kubernetesProvider, selfNamespace); err != nil {
			return false, err
		}
	}
-	if err := createKubesharkConfigmap(ctx, kubernetesProvider, serializedKubesharkConfig, kubesharkResourcesNamespace); err != nil {
-		return false, err
-	}
-	kubesharkServiceAccountExists, err := createRBACIfNecessary(ctx, kubernetesProvider, isNsRestrictedMode, kubesharkResourcesNamespace, []string{"pods", "services", "endpoints"})
+	kubesharkServiceAccountExists, err := createRBACIfNecessary(ctx, kubernetesProvider, isNsRestrictedMode, selfNamespace, []string{"pods", "services", "endpoints"})
	if err != nil {
		log.Warn().Err(errormessage.FormatError(err)).Msg("Failed to ensure the resources required for IP resolving. Kubeshark will not resolve target IPs to names.")
	}
@@ -37,7 +33,7 @@ func CreateHubResources(ctx context.Context, kubernetesProvider *kubernetes.Prov
	}
	opts := &kubernetes.PodOptions{
-		Namespace:          kubesharkResourcesNamespace,
+		Namespace:          selfNamespace,
		PodName:            kubernetes.HubPodName,
		PodImage:           docker.GetHubImage(),
		ServiceAccountName: serviceAccountName,
@@ -47,7 +43,7 @@ func CreateHubResources(ctx context.Context, kubernetesProvider *kubernetes.Prov
	}
	frontOpts := &kubernetes.PodOptions{
-		Namespace:          kubesharkResourcesNamespace,
+		Namespace:          selfNamespace,
		PodName:            kubernetes.FrontPodName,
		PodImage:           docker.GetWorkerImage(),
		ServiceAccountName: serviceAccountName,
@@ -65,14 +61,14 @@ func CreateHubResources(ctx context.Context, kubernetesProvider *kubernetes.Prov
	}
	// TODO: Why the port values need to be 80?
-	_, err = kubernetesProvider.CreateService(ctx, kubesharkResourcesNamespace, kubernetes.HubServiceName, kubernetes.HubServiceName, 80, 80)
+	_, err = kubernetesProvider.CreateService(ctx, selfNamespace, kubernetes.HubServiceName, kubernetes.HubServiceName, 80, 80)
	if err != nil {
		return kubesharkServiceAccountExists, err
	}
	log.Info().Str("service", kubernetes.HubServiceName).Msg("Successfully created a service.")
-	_, err = kubernetesProvider.CreateService(ctx, kubesharkResourcesNamespace, kubernetes.FrontServiceName, kubernetes.FrontServiceName, 80, int32(config.Config.Tap.Proxy.Front.DstPort))
+	_, err = kubernetesProvider.CreateService(ctx, selfNamespace, kubernetes.FrontServiceName, kubernetes.FrontServiceName, 80, int32(config.Config.Tap.Proxy.Front.DstPort))
	if err != nil {
		return kubesharkServiceAccountExists, err
	}
@@ -82,23 +78,18 @@ func CreateHubResources(ctx context.Context, kubernetesProvider *kubernetes.Prov
	return kubesharkServiceAccountExists, nil
}
-func createKubesharkNamespace(ctx context.Context, kubernetesProvider *kubernetes.Provider, kubesharkResourcesNamespace string) error {
-	_, err := kubernetesProvider.CreateNamespace(ctx, kubesharkResourcesNamespace)
+func createSelfNamespace(ctx context.Context, kubernetesProvider *kubernetes.Provider, selfNamespace string) error {
+	_, err := kubernetesProvider.CreateNamespace(ctx, selfNamespace)
	return err
}
-func createKubesharkConfigmap(ctx context.Context, kubernetesProvider *kubernetes.Provider, serializedKubesharkConfig string, kubesharkResourcesNamespace string) error {
-	err := kubernetesProvider.CreateConfigMap(ctx, kubesharkResourcesNamespace, kubernetes.ConfigMapName, serializedKubesharkConfig)
-	return err
-}
-func createRBACIfNecessary(ctx context.Context, kubernetesProvider *kubernetes.Provider, isNsRestrictedMode bool, kubesharkResourcesNamespace string, resources []string) (bool, error) {
+func createRBACIfNecessary(ctx context.Context, kubernetesProvider *kubernetes.Provider, isNsRestrictedMode bool, selfNamespace string, resources []string) (bool, error) {
	if !isNsRestrictedMode {
-		if err := kubernetesProvider.CreateKubesharkRBAC(ctx, kubesharkResourcesNamespace, kubernetes.ServiceAccountName, kubernetes.ClusterRoleName, kubernetes.ClusterRoleBindingName, kubeshark.RBACVersion, resources); err != nil {
+		if err := kubernetesProvider.CreateKubesharkRBAC(ctx, selfNamespace, kubernetes.ServiceAccountName, kubernetes.ClusterRoleName, kubernetes.ClusterRoleBindingName, kubeshark.RBACVersion, resources); err != nil {
			return false, err
		}
	} else {
-		if err := kubernetesProvider.CreateKubesharkRBACNamespaceRestricted(ctx, kubesharkResourcesNamespace, kubernetes.ServiceAccountName, kubernetes.RoleName, kubernetes.RoleBindingName, kubeshark.RBACVersion); err != nil {
+		if err := kubernetesProvider.CreateKubesharkRBACNamespaceRestricted(ctx, selfNamespace, kubernetes.ServiceAccountName, kubernetes.RoleName, kubernetes.RoleBindingName, kubeshark.RBACVersion); err != nil {
			return false, err
		}
	}
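
For reference, the resulting surface of the resources file after this commit, summarized from the hunks above (a restatement of the diff, not an extra change):

func CreateHubResources(ctx context.Context, kubernetesProvider *kubernetes.Provider, isNsRestrictedMode bool, selfNamespace string, hubResources models.Resources, imagePullPolicy core.PullPolicy, debug bool) (bool, error)

// createKubesharkConfigmap is removed together with the serializedKubesharkConfig
// parameter; createKubesharkNamespace is renamed to createSelfNamespace, and every
// kubesharkResourcesNamespace parameter becomes selfNamespace.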