🔥 Remove envConfig.go

parent 33f23ea849
commit f6bde5fe76
@@ -84,7 +84,7 @@ func tap() {
 	if state.kubesharkServiceAccountExists, err = resources.CreateHubResources(ctx, kubernetesProvider, serializedKubesharkConfig, config.Config.IsNsRestrictedMode(), config.Config.ResourcesNamespace, config.Config.Tap.MaxEntriesDBSizeBytes(), config.Config.Tap.HubResources, config.Config.ImagePullPolicy(), config.Config.Tap.Debug); err != nil {
 		var statusError *k8serrors.StatusError
 		if errors.As(err, &statusError) && (statusError.ErrStatus.Reason == metav1.StatusReasonAlreadyExists) {
-			log.Info().Msg("Kubeshark is already running in this namespace, change the `kubeshark-resources-namespace` configuration or run `kubeshark clean` to remove the currently running Kubeshark instance")
+			log.Warn().Msg("Kubeshark is already running in this namespace, change the `kubeshark-resources-namespace` configuration or run `kubeshark clean` to remove the currently running Kubeshark instance")
 		} else {
 			defer resources.CleanUpKubesharkResources(ctx, cancel, kubernetesProvider, config.Config.IsNsRestrictedMode(), config.Config.ResourcesNamespace)
 			log.Error().Err(errormessage.FormatError(err)).Msg("Error creating resources!")
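
For context, the branch above treats an "already exists" status from the Kubernetes API as a benign condition rather than a failure. A minimal, self-contained sketch of that unwrap-and-check pattern; the wrapped resource and object name below are made up for illustration:

    package main

    import (
        "errors"
        "fmt"

        k8serrors "k8s.io/apimachinery/pkg/api/errors"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/runtime/schema"
    )

    func main() {
        // Wrap an AlreadyExists status error the way a failed create call might surface it.
        err := fmt.Errorf("creating resources: %w",
            k8serrors.NewAlreadyExists(schema.GroupResource{Resource: "serviceaccounts"}, "example-service-account"))

        var statusError *k8serrors.StatusError
        if errors.As(err, &statusError) && statusError.ErrStatus.Reason == metav1.StatusReasonAlreadyExists {
            fmt.Println("already exists: warn and keep going instead of failing")
        }
    }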
@@ -214,8 +214,7 @@ func watchHubPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, c
 	eventChan, errorChan := kubernetes.FilteredWatch(ctx, podWatchHelper, []string{config.Config.ResourcesNamespace}, podWatchHelper)
 	isPodReady := false
 
-	hubTimeoutSec := config.GetIntEnvConfig(config.HubTimeoutSec, 120)
-	timeAfter := time.After(time.Duration(hubTimeoutSec) * time.Second)
+	timeAfter := time.After(120 * time.Second)
 	for {
 		select {
 		case wEvent, ok := <-eventChan:
@@ -295,8 +294,7 @@ func watchFrontPod(ctx context.Context, kubernetesProvider *kubernetes.Provider,
 	eventChan, errorChan := kubernetes.FilteredWatch(ctx, podWatchHelper, []string{config.Config.ResourcesNamespace}, podWatchHelper)
 	isPodReady := false
 
-	hubTimeoutSec := config.GetIntEnvConfig(config.HubTimeoutSec, 120)
-	timeAfter := time.After(time.Duration(hubTimeoutSec) * time.Second)
+	timeAfter := time.After(120 * time.Second)
 	for {
 		select {
 		case wEvent, ok := <-eventChan:
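
Both watch functions now wait on a fixed 120-second deadline instead of one read from HUB_SERVER_TIMEOUT_SEC. A standalone sketch of the select/time.After pattern they are built around; the channel contents and the shortened 3-second deadline are illustrative only:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        eventChan := make(chan string)

        // Simulate a pod-ready event arriving after one second.
        go func() {
            time.Sleep(1 * time.Second)
            eventChan <- "hub pod ready"
        }()

        // The deadline is now a constant; 3 seconds here keeps the demo short.
        timeAfter := time.After(3 * time.Second)

        for {
            select {
            case ev := <-eventChan:
                fmt.Println("event:", ev)
                return
            case <-timeAfter:
                fmt.Println("timed out waiting for the pod")
                return
            }
        }
    }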
@@ -1,25 +0,0 @@
-package config
-
-import (
-	"os"
-	"strconv"
-)
-
-const (
-	HubRetries    = "HUB_SERVER_RETRIES"
-	HubTimeoutSec = "HUB_SERVER_TIMEOUT_SEC"
-)
-
-func GetIntEnvConfig(key string, defaultValue int) int {
-	value := os.Getenv(key)
-	if value == "" {
-		return defaultValue
-	}
-
-	intValue, err := strconv.Atoi(value)
-	if err != nil {
-		return defaultValue
-	}
-
-	return intValue
-}
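
The deleted file above is the envConfig.go named in the commit title; it defined the only helper that read HUB_SERVER_RETRIES and HUB_SERVER_TIMEOUT_SEC, and with it gone the call sites shown elsewhere in this diff fall back to fixed values. A self-contained sketch of the behaviour being removed (the helper is lower-cased here so it does not suggest the original package layout):

    package main

    import (
        "fmt"
        "os"
        "strconv"
    )

    // getIntEnvConfig mirrors the removed helper: read an integer from the
    // environment, falling back to defaultValue when unset or unparsable.
    func getIntEnvConfig(key string, defaultValue int) int {
        value := os.Getenv(key)
        if value == "" {
            return defaultValue
        }
        intValue, err := strconv.Atoi(value)
        if err != nil {
            return defaultValue
        }
        return intValue
    }

    func main() {
        fmt.Println(getIntEnvConfig("HUB_SERVER_TIMEOUT_SEC", 120)) // 120 when unset

        _ = os.Setenv("HUB_SERVER_TIMEOUT_SEC", "300")
        fmt.Println(getIntEnvConfig("HUB_SERVER_TIMEOUT_SEC", 120)) // 300

        _ = os.Setenv("HUB_SERVER_TIMEOUT_SEC", "not-a-number")
        fmt.Println(getIntEnvConfig("HUB_SERVER_TIMEOUT_SEC", 120)) // falls back to 120
    }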
@@ -10,7 +10,6 @@ import (
 
 	"github.com/kubeshark/kubeshark/utils"
 
-	"github.com/kubeshark/kubeshark/config"
 	"github.com/rs/zerolog/log"
 	core "k8s.io/api/core/v1"
 	v1 "k8s.io/api/core/v1"
@@ -28,7 +27,7 @@ const DefaultTimeout = 2 * time.Second
 func NewConnector(url string, retries int, timeout time.Duration) *Connector {
 	return &Connector{
 		url: url,
-		retries: config.GetIntEnvConfig(config.HubRetries, retries),
+		retries: retries,
 		client: &http.Client{
 			Timeout: timeout,
 		},
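
With the environment lookup gone, the retry count is exactly what the caller passes in. A minimal sketch of the resulting constructor shape; the Connector stand-in, the URL, and the retry value are assumptions for illustration, not code from the repository:

    package main

    import (
        "fmt"
        "net/http"
        "time"
    )

    // Minimal stand-in for the Connector in the hunk above; only the fields
    // visible in the diff are reproduced.
    type Connector struct {
        url     string
        retries int
        client  *http.Client
    }

    func NewConnector(url string, retries int, timeout time.Duration) *Connector {
        return &Connector{
            url:     url,
            retries: retries, // taken directly from the caller; HUB_SERVER_RETRIES is no longer consulted
            client:  &http.Client{Timeout: timeout},
        }
    }

    func main() {
        c := NewConnector("http://localhost:8898", 3, 2*time.Second)
        fmt.Println(c.url, c.retries, c.client.Timeout)
    }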
@@ -64,6 +63,7 @@ func (connector *Connector) isReachable(path string) (bool, error) {
 }
 
 func (connector *Connector) PostWorkerPodToHub(pod *v1.Pod) {
+	// TODO: This request is responsible for proxy_server.go:147] Error while proxying request: context canceled log
 	postWorkerUrl := fmt.Sprintf("%s/pods/worker", connector.url)
 
 	if podMarshalled, err := json.Marshal(pod); err != nil {
Loading…
Reference in New Issue
Block a user