🔥 Remove envConfig.go

author M. Mert Yildiran  2022-12-27 09:29:17 +03:00
parent 33f23ea849
commit f6bde5fe76
GPG Key ID: DA5D6DCBB758A461
3 changed files with 5 additions and 32 deletions


@@ -84,7 +84,7 @@ func tap() {
 	if state.kubesharkServiceAccountExists, err = resources.CreateHubResources(ctx, kubernetesProvider, serializedKubesharkConfig, config.Config.IsNsRestrictedMode(), config.Config.ResourcesNamespace, config.Config.Tap.MaxEntriesDBSizeBytes(), config.Config.Tap.HubResources, config.Config.ImagePullPolicy(), config.Config.Tap.Debug); err != nil {
 		var statusError *k8serrors.StatusError
 		if errors.As(err, &statusError) && (statusError.ErrStatus.Reason == metav1.StatusReasonAlreadyExists) {
-			log.Info().Msg("Kubeshark is already running in this namespace, change the `kubeshark-resources-namespace` configuration or run `kubeshark clean` to remove the currently running Kubeshark instance")
+			log.Warn().Msg("Kubeshark is already running in this namespace, change the `kubeshark-resources-namespace` configuration or run `kubeshark clean` to remove the currently running Kubeshark instance")
 		} else {
 			defer resources.CleanUpKubesharkResources(ctx, cancel, kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.ResourcesNamespace)
 			log.Error().Err(errormessage.FormatError(err)).Msg("Error creating resources!")
@@ -214,8 +214,7 @@ func watchHubPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, c
 	eventChan, errorChan := kubernetes.FilteredWatch(ctx, podWatchHelper, []string{config.Config.ResourcesNamespace}, podWatchHelper)
 	isPodReady := false
-	hubTimeoutSec := config.GetIntEnvConfig(config.HubTimeoutSec, 120)
-	timeAfter := time.After(time.Duration(hubTimeoutSec) * time.Second)
+	timeAfter := time.After(120 * time.Second)
 	for {
 		select {
 		case wEvent, ok := <-eventChan:
@@ -295,8 +294,7 @@ func watchFrontPod(ctx context.Context, kubernetesProvider *kubernetes.Provider,
 	eventChan, errorChan := kubernetes.FilteredWatch(ctx, podWatchHelper, []string{config.Config.ResourcesNamespace}, podWatchHelper)
 	isPodReady := false
-	hubTimeoutSec := config.GetIntEnvConfig(config.HubTimeoutSec, 120)
-	timeAfter := time.After(time.Duration(hubTimeoutSec) * time.Second)
+	timeAfter := time.After(120 * time.Second)
 	for {
 		select {
 		case wEvent, ok := <-eventChan:
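
The net effect in both watchHubPod and watchFrontPod is that the pod-readiness deadline, previously overridable through the HUB_SERVER_TIMEOUT_SEC environment variable with a 120-second default, is now a fixed 120 seconds. Below is a minimal, self-contained sketch (not part of this commit; channel contents and names are illustrative only) of the select-plus-time.After watch pattern these hunks touch:

package main

import (
	"fmt"
	"time"
)

func main() {
	eventChan := make(chan string)
	// After this commit the deadline is a constant; before, it was
	// time.Duration(hubTimeoutSec) * time.Second, with hubTimeoutSec read
	// from HUB_SERVER_TIMEOUT_SEC (default 120).
	timeAfter := time.After(120 * time.Second)

	go func() { eventChan <- "MODIFIED" }() // stand-in for a pod watch event

	for {
		select {
		case wEvent, ok := <-eventChan:
			if !ok {
				return
			}
			fmt.Println("event:", wEvent) // in kubeshark this is where pod readiness is checked
			return
		case <-timeAfter:
			fmt.Println("pod was not ready in time")
			return
		}
	}
}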


@@ -1,25 +0,0 @@
-package config
-
-import (
-	"os"
-	"strconv"
-)
-
-const (
-	HubRetries    = "HUB_SERVER_RETRIES"
-	HubTimeoutSec = "HUB_SERVER_TIMEOUT_SEC"
-)
-
-func GetIntEnvConfig(key string, defaultValue int) int {
-	value := os.Getenv(key)
-	if value == "" {
-		return defaultValue
-	}
-
-	intValue, err := strconv.Atoi(value)
-	if err != nil {
-		return defaultValue
-	}
-
-	return intValue
-}
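
For reference, the deleted helper (envConfig.go, per the commit title) returned the caller's default both when the variable was unset and when its value did not parse as an integer. A standalone sketch of that behaviour, with a local copy of the function since the original is gone:

package main

import (
	"fmt"
	"os"
	"strconv"
)

// getIntEnvConfig is a local copy of the deleted helper: unset or
// unparsable values fall back to the supplied default.
func getIntEnvConfig(key string, defaultValue int) int {
	value := os.Getenv(key)
	if value == "" {
		return defaultValue
	}
	intValue, err := strconv.Atoi(value)
	if err != nil {
		return defaultValue
	}
	return intValue
}

func main() {
	const key = "HUB_SERVER_RETRIES"
	fmt.Println(getIntEnvConfig(key, 3)) // 3: variable unset
	os.Setenv(key, "10")
	fmt.Println(getIntEnvConfig(key, 3)) // 10: valid integer
	os.Setenv(key, "ten")
	fmt.Println(getIntEnvConfig(key, 3)) // 3: unparsable, default wins
}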


@@ -10,7 +10,6 @@ import (
 	"github.com/kubeshark/kubeshark/utils"
-	"github.com/kubeshark/kubeshark/config"
 	"github.com/rs/zerolog/log"
 	core "k8s.io/api/core/v1"
 	v1 "k8s.io/api/core/v1"
@@ -28,7 +27,7 @@ const DefaultTimeout = 2 * time.Second
 func NewConnector(url string, retries int, timeout time.Duration) *Connector {
 	return &Connector{
 		url:     url,
-		retries: config.GetIntEnvConfig(config.HubRetries, retries),
+		retries: retries,
 		client: &http.Client{
 			Timeout: timeout,
 		},
@@ -64,6 +63,7 @@ func (connector *Connector) isReachable(path string) (bool, error) {
 }
 
 func (connector *Connector) PostWorkerPodToHub(pod *v1.Pod) {
+	// TODO: This request is responsible for proxy_server.go:147] Error while proxying request: context canceled log
 	postWorkerUrl := fmt.Sprintf("%s/pods/worker", connector.url)
 	if podMarshalled, err := json.Marshal(pod); err != nil {
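
With the HUB_SERVER_RETRIES lookup removed, NewConnector now uses the retries argument exactly as given. The sketch below is not the repository's package; it mimics the post-commit constructor to show the call-site implication, and the URL and retry count are placeholders:

package main

import (
	"fmt"
	"net/http"
	"time"
)

const DefaultTimeout = 2 * time.Second

type Connector struct {
	url     string
	retries int
	client  *http.Client
}

// NewConnector mirrors the post-commit behaviour: the retries parameter is
// used as-is; there is no HUB_SERVER_RETRIES lookup anymore.
func NewConnector(url string, retries int, timeout time.Duration) *Connector {
	return &Connector{
		url:     url,
		retries: retries,
		client:  &http.Client{Timeout: timeout},
	}
}

func main() {
	c := NewConnector("http://localhost:8898", 5, DefaultTimeout) // placeholder URL and retry count
	fmt.Println(c.retries)                                        // always 5, regardless of environment
}

In short, callers that previously tuned behaviour at runtime through HUB_SERVER_RETRIES or HUB_SERVER_TIMEOUT_SEC no longer have an equivalent knob; the retry count and watch timeout are whatever the code passes in or hard-codes.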