🔨 Replace ApiServer naming with Hub

Author: M. Mert Yildiran
Date: 2022-11-26 22:06:06 +03:00
parent 5bd8aea8b9
commit 671aa783c5
GPG Key ID: DA5D6DCBB758A461
15 changed files with 100 additions and 100 deletions

View File

@@ -36,8 +36,8 @@ func KubernetesResources(ctx context.Context, kubernetesProvider *kubernetes.Pro
allResourcesExist = checkResourceExist(kubernetes.ClusterRoleBindingName, "cluster role binding", exist, err) && allResourcesExist
}
-exist, err = kubernetesProvider.DoesServiceExist(ctx, config.Config.KubesharkResourcesNamespace, kubernetes.ApiServerServiceName)
+exist, err = kubernetesProvider.DoesServiceExist(ctx, config.Config.KubesharkResourcesNamespace, kubernetes.HubServiceName)
-allResourcesExist = checkResourceExist(kubernetes.ApiServerServiceName, "service", exist, err) && allResourcesExist
+allResourcesExist = checkResourceExist(kubernetes.HubServiceName, "service", exist, err) && allResourcesExist
allResourcesExist = checkPodResourcesExist(ctx, kubernetesProvider) && allResourcesExist
@@ -45,18 +45,18 @@ func KubernetesResources(ctx context.Context, kubernetesProvider *kubernetes.Pro
}
func checkPodResourcesExist(ctx context.Context, kubernetesProvider *kubernetes.Provider) bool {
-if pods, err := kubernetesProvider.ListPodsByAppLabel(ctx, config.Config.KubesharkResourcesNamespace, kubernetes.ApiServerPodName); err != nil {
+if pods, err := kubernetesProvider.ListPodsByAppLabel(ctx, config.Config.KubesharkResourcesNamespace, kubernetes.HubPodName); err != nil {
-log.Printf("%v error checking if '%v' pod is running, err: %v", fmt.Sprintf(utils.Red, "✗"), kubernetes.ApiServerPodName, err)
+log.Printf("%v error checking if '%v' pod is running, err: %v", fmt.Sprintf(utils.Red, "✗"), kubernetes.HubPodName, err)
return false
} else if len(pods) == 0 {
-log.Printf("%v '%v' pod doesn't exist", fmt.Sprintf(utils.Red, "✗"), kubernetes.ApiServerPodName)
+log.Printf("%v '%v' pod doesn't exist", fmt.Sprintf(utils.Red, "✗"), kubernetes.HubPodName)
return false
} else if !kubernetes.IsPodRunning(&pods[0]) {
-log.Printf("%v '%v' pod not running", fmt.Sprintf(utils.Red, "✗"), kubernetes.ApiServerPodName)
+log.Printf("%v '%v' pod not running", fmt.Sprintf(utils.Red, "✗"), kubernetes.HubPodName)
return false
}
-log.Printf("%v '%v' pod running", fmt.Sprintf(utils.Green, "√"), kubernetes.ApiServerPodName)
+log.Printf("%v '%v' pod running", fmt.Sprintf(utils.Green, "√"), kubernetes.HubPodName)
if pods, err := kubernetesProvider.ListPodsByAppLabel(ctx, config.Config.KubesharkResourcesNamespace, kubernetes.TapperPodName); err != nil {
log.Printf("%v error checking if '%v' pods are running, err: %v", fmt.Sprintf(utils.Red, "✗"), kubernetes.TapperPodName, err)

View File

@@ -13,40 +13,40 @@ import (
)
func ServerConnection(kubernetesProvider *kubernetes.Provider) bool {
-log.Printf("\nAPI-server-connectivity\n--------------------")
+log.Printf("\nHub connectivity\n--------------------")
serverUrl := kubernetes.GetLocalhostOnPort(config.Config.Hub.PortForward.SrcPort)
connector := connect.NewConnector(serverUrl, 1, connect.DefaultTimeout)
if err := connector.TestConnection(""); err == nil {
-log.Printf("%v found Kubeshark server tunnel available and connected successfully to API server", fmt.Sprintf(utils.Green, "√"))
+log.Printf("%v found Kubeshark server tunnel available and connected successfully to Hub", fmt.Sprintf(utils.Green, "√"))
return true
}
-connectedToApiServer := false
+connectedToHub := false
if err := checkProxy(serverUrl, kubernetesProvider); err != nil {
-log.Printf("%v couldn't connect to API server using proxy, err: %v", fmt.Sprintf(utils.Red, "✗"), err)
+log.Printf("%v couldn't connect to Hub using proxy, err: %v", fmt.Sprintf(utils.Red, "✗"), err)
} else {
-connectedToApiServer = true
+connectedToHub = true
-log.Printf("%v connected successfully to API server using proxy", fmt.Sprintf(utils.Green, "√"))
+log.Printf("%v connected successfully to Hub using proxy", fmt.Sprintf(utils.Green, "√"))
}
if err := checkPortForward(serverUrl, kubernetesProvider); err != nil {
-log.Printf("%v couldn't connect to API server using port-forward, err: %v", fmt.Sprintf(utils.Red, "✗"), err)
+log.Printf("%v couldn't connect to Hub using port-forward, err: %v", fmt.Sprintf(utils.Red, "✗"), err)
} else {
-connectedToApiServer = true
+connectedToHub = true
-log.Printf("%v connected successfully to API server using port-forward", fmt.Sprintf(utils.Green, "√"))
+log.Printf("%v connected successfully to Hub using port-forward", fmt.Sprintf(utils.Green, "√"))
}
-return connectedToApiServer
+return connectedToHub
}
func checkProxy(serverUrl string, kubernetesProvider *kubernetes.Provider) error {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
-httpServer, err := kubernetes.StartProxy(kubernetesProvider, config.Config.Tap.ProxyHost, config.Config.Hub.PortForward.SrcPort, config.Config.Hub.PortForward.DstPort, config.Config.KubesharkResourcesNamespace, kubernetes.ApiServerServiceName, cancel)
+httpServer, err := kubernetes.StartProxy(kubernetesProvider, config.Config.Tap.ProxyHost, config.Config.Hub.PortForward.SrcPort, config.Config.Hub.PortForward.DstPort, config.Config.KubesharkResourcesNamespace, kubernetes.HubServiceName, cancel)
if err != nil {
return err
}
@@ -67,7 +67,7 @@ func checkPortForward(serverUrl string, kubernetesProvider *kubernetes.Provider)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
-podRegex, _ := regexp.Compile(kubernetes.ApiServerPodName)
+podRegex, _ := regexp.Compile(kubernetes.HubPodName)
forwarder, err := kubernetes.NewPortForward(kubernetesProvider, config.Config.KubesharkResourcesNamespace, podRegex, config.Config.Tap.GuiPort, config.Config.Tap.GuiPort, ctx, cancel)
if err != nil {
return err

View File

@@ -39,7 +39,7 @@ func startProxyReportErrorIfAny(kubernetesProvider *kubernetes.Provider, ctx con
log.Printf("Error occurred while stopping proxy %v", errormessage.FormatError(err))
}
-podRegex, _ := regexp.Compile(kubernetes.ApiServerPodName)
+podRegex, _ := regexp.Compile(kubernetes.HubPodName)
if _, err := kubernetes.NewPortForward(kubernetesProvider, config.Config.KubesharkResourcesNamespace, podRegex, srcPort, dstPort, ctx, cancel); err != nil {
log.Printf(utils.Error, fmt.Sprintf("Error occured while running port forward [%s] %v\n"+
"Try setting different port by using --%s", podRegex, errormessage.FormatError(err), configStructs.GuiPortTapName))

View File

@@ -36,7 +36,7 @@ type tapState struct {
var state tapState
var connector *connect.Connector
-var apiServerPodReady bool
+var hubPodReady bool
var frontPodReady bool
var proxyDone bool
@@ -88,7 +88,7 @@ func RunKubesharkTap() {
}
log.Printf("Waiting for Kubeshark Agent to start...")
-if state.kubesharkServiceAccountExists, err = resources.CreateTapKubesharkResources(ctx, kubernetesProvider, serializedKubesharkConfig, config.Config.IsNsRestrictedMode(), config.Config.KubesharkResourcesNamespace, config.Config.AgentImage, config.Config.Tap.MaxEntriesDBSizeBytes(), config.Config.Tap.ApiServerResources, config.Config.ImagePullPolicy(), config.Config.LogLevel(), config.Config.Tap.Profiler); err != nil {
+if state.kubesharkServiceAccountExists, err = resources.CreateTapKubesharkResources(ctx, kubernetesProvider, serializedKubesharkConfig, config.Config.IsNsRestrictedMode(), config.Config.KubesharkResourcesNamespace, config.Config.AgentImage, config.Config.Tap.MaxEntriesDBSizeBytes(), config.Config.Tap.HubResources, config.Config.ImagePullPolicy(), config.Config.LogLevel(), config.Config.Tap.Profiler); err != nil {
var statusError *k8serrors.StatusError
if errors.As(err, &statusError) && (statusError.ErrStatus.Reason == metav1.StatusReasonAlreadyExists) {
log.Print("Kubeshark is already running in this namespace, change the `kubeshark-resources-namespace` configuration or run `kubeshark clean` to remove the currently running Kubeshark instance")
@@ -102,8 +102,8 @@ func RunKubesharkTap() {
defer finishTapExecution(kubernetesProvider)
-go goUtils.HandleExcWrapper(watchApiServerEvents, ctx, kubernetesProvider, cancel)
+go goUtils.HandleExcWrapper(watchHubEvents, ctx, kubernetesProvider, cancel)
-go goUtils.HandleExcWrapper(watchApiServerPod, ctx, kubernetesProvider, cancel)
+go goUtils.HandleExcWrapper(watchHubPod, ctx, kubernetesProvider, cancel)
go goUtils.HandleExcWrapper(watchFrontPod, ctx, kubernetesProvider, cancel)
// block until exit signal or error
@@ -132,8 +132,8 @@ func getTapKubesharkAgentConfig() *models.Config {
}
/*
-this function is a bit problematic as it might be detached from the actual pods the kubeshark api server will tap.
+this function is a bit problematic as it might be detached from the actual pods the Kubeshark Hub will tap.
-The alternative would be to wait for api server to be ready and then query it for the pods it listens to, this has
+The alternative would be to wait for Hub to be ready and then query it for the pods it listens to, this has
the arguably worse drawback of taking a relatively very long time before the user sees which pods are targeted, if any.
*/
func printTappedPodsPreview(ctx context.Context, kubernetesProvider *kubernetes.Provider, namespaces []string) error {
@@ -229,14 +229,14 @@ func getErrorDisplayTextForK8sTapManagerError(err kubernetes.K8sTapManagerError)
}
}
-func watchApiServerPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc) {
+func watchHubPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc) {
-podExactRegex := regexp.MustCompile(fmt.Sprintf("^%s$", kubernetes.ApiServerPodName))
+podExactRegex := regexp.MustCompile(fmt.Sprintf("^%s$", kubernetes.HubPodName))
podWatchHelper := kubernetes.NewPodWatchHelper(kubernetesProvider, podExactRegex)
eventChan, errorChan := kubernetes.FilteredWatch(ctx, podWatchHelper, []string{config.Config.KubesharkResourcesNamespace}, podWatchHelper)
isPodReady := false
-apiServerTimeoutSec := config.GetIntEnvConfig(config.ApiServerTimeoutSec, 120)
+hubTimeoutSec := config.GetIntEnvConfig(config.HubTimeoutSec, 120)
-timeAfter := time.After(time.Duration(apiServerTimeoutSec) * time.Second)
+timeAfter := time.After(time.Duration(hubTimeoutSec) * time.Second)
for {
select {
case wEvent, ok := <-eventChan:
@@ -247,9 +247,9 @@ func watchApiServerPod(ctx context.Context, kubernetesProvider *kubernetes.Provi
switch wEvent.Type {
case kubernetes.EventAdded:
-log.Printf("Watching API Server pod loop, added")
+log.Printf("Watching Hub pod loop, added")
case kubernetes.EventDeleted:
-log.Printf("%s removed", kubernetes.ApiServerPodName)
+log.Printf("%s removed", kubernetes.HubPodName)
cancel()
return
case kubernetes.EventModified:
@@ -260,15 +260,15 @@ func watchApiServerPod(ctx context.Context, kubernetesProvider *kubernetes.Provi
continue
}
-log.Printf("Watching API Server pod loop, modified: %v, containers statuses: %v", modifiedPod.Status.Phase, modifiedPod.Status.ContainerStatuses)
+log.Printf("Watching Hub pod loop, modified: %v, containers statuses: %v", modifiedPod.Status.Phase, modifiedPod.Status.ContainerStatuses)
if modifiedPod.Status.Phase == core.PodRunning && !isPodReady {
isPodReady = true
-apiServerPodReady = true
+hubPodReady = true
-postApiServerStarted(ctx, kubernetesProvider, cancel)
+postHubStarted(ctx, kubernetesProvider, cancel)
}
-if !proxyDone && apiServerPodReady && frontPodReady {
+if !proxyDone && hubPodReady && frontPodReady {
proxyDone = true
postFrontStarted(ctx, kubernetesProvider, cancel)
}
@@ -288,11 +288,11 @@ func watchApiServerPod(ctx context.Context, kubernetesProvider *kubernetes.Provi
case <-timeAfter:
if !isPodReady {
-log.Printf(utils.Error, "Kubeshark API server was not ready in time")
+log.Printf(utils.Error, "Kubeshark Hub was not ready in time")
cancel()
}
case <-ctx.Done():
-log.Printf("Watching API Server pod loop, ctx done")
+log.Printf("Watching Hub pod loop, ctx done")
return
}
}
@@ -304,8 +304,8 @@ func watchFrontPod(ctx context.Context, kubernetesProvider *kubernetes.Provider,
eventChan, errorChan := kubernetes.FilteredWatch(ctx, podWatchHelper, []string{config.Config.KubesharkResourcesNamespace}, podWatchHelper)
isPodReady := false
-apiServerTimeoutSec := config.GetIntEnvConfig(config.ApiServerTimeoutSec, 120)
+hubTimeoutSec := config.GetIntEnvConfig(config.HubTimeoutSec, 120)
-timeAfter := time.After(time.Duration(apiServerTimeoutSec) * time.Second)
+timeAfter := time.After(time.Duration(hubTimeoutSec) * time.Second)
for {
select {
case wEvent, ok := <-eventChan:
@@ -316,7 +316,7 @@ func watchFrontPod(ctx context.Context, kubernetesProvider *kubernetes.Provider,
switch wEvent.Type {
case kubernetes.EventAdded:
-log.Printf("Watching API Server pod loop, added")
+log.Printf("Watching Hub pod loop, added")
case kubernetes.EventDeleted:
log.Printf("%s removed", kubernetes.FrontPodName)
cancel()
@@ -329,14 +329,14 @@ func watchFrontPod(ctx context.Context, kubernetesProvider *kubernetes.Provider,
continue
}
-log.Printf("Watching API Server pod loop, modified: %v, containers statuses: %v", modifiedPod.Status.Phase, modifiedPod.Status.ContainerStatuses)
+log.Printf("Watching Hub pod loop, modified: %v, containers statuses: %v", modifiedPod.Status.Phase, modifiedPod.Status.ContainerStatuses)
if modifiedPod.Status.Phase == core.PodRunning && !isPodReady {
isPodReady = true
frontPodReady = true
}
-if !proxyDone && apiServerPodReady && frontPodReady {
+if !proxyDone && hubPodReady && frontPodReady {
proxyDone = true
postFrontStarted(ctx, kubernetesProvider, cancel)
}
@@ -356,18 +356,18 @@ func watchFrontPod(ctx context.Context, kubernetesProvider *kubernetes.Provider,
case <-timeAfter:
if !isPodReady {
-log.Printf(utils.Error, "Kubeshark API server was not ready in time")
+log.Printf(utils.Error, "Kubeshark Hub was not ready in time")
cancel()
}
case <-ctx.Done():
-log.Printf("Watching API Server pod loop, ctx done")
+log.Printf("Watching Hub pod loop, ctx done")
return
}
}
}
-func watchApiServerEvents(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc) {
+func watchHubEvents(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc) {
-podExactRegex := regexp.MustCompile(fmt.Sprintf("^%s", kubernetes.ApiServerPodName))
+podExactRegex := regexp.MustCompile(fmt.Sprintf("^%s", kubernetes.HubPodName))
eventWatchHelper := kubernetes.NewEventWatchHelper(kubernetesProvider, podExactRegex, "pod")
eventChan, errorChan := kubernetes.FilteredWatch(ctx, eventWatchHelper, []string{config.Config.KubesharkResourcesNamespace}, eventWatchHelper)
for {
@@ -389,7 +389,7 @@ func watchApiServerEvents(ctx context.Context, kubernetesProvider *kubernetes.Pr
}
log.Printf(
-"Watching API server events loop, event %s, time: %v, resource: %s (%s), reason: %s, note: %s",
+"Watching Hub events loop, event %s, time: %v, resource: %s (%s), reason: %s, note: %s",
event.Name,
event.CreationTimestamp.Time,
event.Regarding.Name,
@@ -400,7 +400,7 @@ func watchApiServerEvents(ctx context.Context, kubernetesProvider *kubernetes.Pr
switch event.Reason {
case "FailedScheduling", "Failed":
-log.Printf(utils.Error, fmt.Sprintf("Kubeshark API Server status: %s - %s", event.Reason, event.Note))
+log.Printf(utils.Error, fmt.Sprintf("Kubeshark Hub status: %s - %s", event.Reason, event.Note))
cancel()
}
@@ -410,16 +410,16 @@ func watchApiServerEvents(ctx context.Context, kubernetesProvider *kubernetes.Pr
continue
}
-log.Printf("[Error] Watching API server events loop, error: %+v", err)
+log.Printf("[Error] Watching Hub events loop, error: %+v", err)
case <-ctx.Done():
-log.Printf("Watching API server events loop, ctx done")
+log.Printf("Watching Hub events loop, ctx done")
return
}
}
}
-func postApiServerStarted(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc) {
+func postHubStarted(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc) {
-startProxyReportErrorIfAny(kubernetesProvider, ctx, cancel, kubernetes.ApiServerServiceName, config.Config.Hub.PortForward.SrcPort, config.Config.Hub.PortForward.DstPort, "/echo")
+startProxyReportErrorIfAny(kubernetesProvider, ctx, cancel, kubernetes.HubServiceName, config.Config.Hub.PortForward.SrcPort, config.Config.Hub.PortForward.DstPort, "/echo")
if err := startTapperSyncer(ctx, cancel, kubernetesProvider, state.targetNamespaces, state.startTime); err != nil {
log.Printf(utils.Error, fmt.Sprintf("Error starting kubeshark tapper syncer: %v", errormessage.FormatError(err)))
@@ -427,7 +427,7 @@ func postApiServerStarted(ctx context.Context, kubernetesProvider *kubernetes.Pr
}
url := kubernetes.GetLocalhostOnPort(config.Config.Hub.PortForward.SrcPort)
-log.Printf("API Server is available at %s", url)
+log.Printf("Hub is available at %s", url)
}
func postFrontStarted(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc) {
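
Aside: the proxy-start gate above is split between watchHubPod and watchFrontPod, which is easy to misread in diff form. Below is a minimal standalone sketch of the invariant using the renamed variables from this commit; the maybeStartProxy helper, the main driver, and the print are illustrative only, not code from the commit.

package main

import "fmt"

// Mirrors the gate in watchHubPod/watchFrontPod: the proxy is started
// exactly once, and only after both pods have reported Running.
var (
	hubPodReady   bool
	frontPodReady bool
	proxyDone     bool
)

func maybeStartProxy() {
	if !proxyDone && hubPodReady && frontPodReady {
		proxyDone = true
		fmt.Println("postFrontStarted: proxy started")
	}
}

func main() {
	hubPodReady = true
	maybeStartProxy() // no-op: front pod not ready yet
	frontPodReady = true
	maybeStartProxy() // fires exactly once
	maybeStartProxy() // no-op: proxyDone guards re-entry
}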

View File

@@ -25,14 +25,14 @@ func runKubesharkView() {
url := config.Config.View.Url
if url == "" {
-exists, err := kubernetesProvider.DoesServiceExist(ctx, config.Config.KubesharkResourcesNamespace, kubernetes.ApiServerServiceName)
+exists, err := kubernetesProvider.DoesServiceExist(ctx, config.Config.KubesharkResourcesNamespace, kubernetes.HubServiceName)
if err != nil {
log.Printf("Failed to found kubeshark service %v", err)
cancel()
return
}
if !exists {
-log.Printf("%s service not found, you should run `kubeshark tap` command first", kubernetes.ApiServerServiceName)
+log.Printf("%s service not found, you should run `kubeshark tap` command first", kubernetes.HubServiceName)
cancel()
return
}
@@ -41,7 +41,7 @@ func runKubesharkView() {
response, err := http.Get(fmt.Sprintf("%s/", url))
if err == nil && response.StatusCode == 200 {
-log.Printf("Found a running service %s and open port %d", kubernetes.ApiServerServiceName, config.Config.Front.PortForward.SrcPort)
+log.Printf("Found a running service %s and open port %d", kubernetes.HubServiceName, config.Config.Front.PortForward.SrcPort)
return
}
log.Printf("Establishing connection to k8s cluster...")
@@ -50,7 +50,7 @@ func runKubesharkView() {
connector := connect.NewConnector(url, connect.DefaultRetries, connect.DefaultTimeout)
if err := connector.TestConnection(""); err != nil {
-log.Printf(utils.Error, "Couldn't connect to API server.")
+log.Printf(utils.Error, "Couldn't connect to Hub.")
return
}

View File

@@ -44,7 +44,7 @@ type TapConfig struct {
HumanMaxEntriesDBSize string `yaml:"max-entries-db-size" default:"200MB"`
InsertionFilter string `yaml:"insertion-filter" default:""`
DryRun bool `yaml:"dry-run" default:"false"`
-ApiServerResources models.Resources `yaml:"api-server-resources"`
+HubResources models.Resources `yaml:"hub-resources"`
TapperResources models.Resources `yaml:"tapper-resources"`
ServiceMesh bool `yaml:"service-mesh" default:"false"`
Tls bool `yaml:"tls" default:"false"`
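
The tag change means the resource knob moves from api-server-resources to hub-resources in the user's config file. A hedged round-trip sketch using gopkg.in/yaml.v3; the Resources fields below are hypothetical stand-ins, only HubResources and its yaml tag come from the diff.

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

// Hypothetical stand-in for models.Resources; its real fields are not shown in the diff.
type Resources struct {
	CPU    string `yaml:"cpu"`
	Memory string `yaml:"memory"`
}

type TapConfig struct {
	HubResources    Resources `yaml:"hub-resources"`
	TapperResources Resources `yaml:"tapper-resources"`
}

func main() {
	cfg := TapConfig{HubResources: Resources{CPU: "750m", Memory: "1Gi"}}
	out, _ := yaml.Marshal(cfg)
	fmt.Print(string(out)) // the key is now hub-resources, no longer api-server-resources
}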

View File

@@ -6,8 +6,8 @@ import (
)
const (
-ApiServerRetries = "API_SERVER_RETRIES"
+HubRetries = "HUB_SERVER_RETRIES"
-ApiServerTimeoutSec = "API_SERVER_TIMEOUT_SEC"
+HubTimeoutSec = "HUB_SERVER_TIMEOUT_SEC"
)
func GetIntEnvConfig(key string, defaultValue int) int {
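
The renamed keys are consumed through GetIntEnvConfig, whose body is cut off above. A standalone sketch of how such a helper plausibly behaves; only the signature and the HubRetries key are from the diff, the body is an assumption.

package main

import (
	"log"
	"os"
	"strconv"
)

const HubRetries = "HUB_SERVER_RETRIES"

// GetIntEnvConfig returns the integer value of the environment variable named
// by key, falling back to defaultValue when it is unset or not a number.
// (Assumed behavior; the real body is truncated in the diff.)
func GetIntEnvConfig(key string, defaultValue int) int {
	value := os.Getenv(key)
	if value == "" {
		return defaultValue
	}
	if n, err := strconv.Atoi(value); err == nil {
		return n
	}
	return defaultValue
}

func main() {
	// e.g. HUB_SERVER_RETRIES=5 overrides the default of 1 used by NewConnector.
	log.Printf("retries: %d", GetIntEnvConfig(HubRetries, 1))
}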

View File

@@ -27,7 +27,7 @@ const DefaultTimeout = 2 * time.Second
func NewConnector(url string, retries int, timeout time.Duration) *Connector {
return &Connector{
url: url,
-retries: config.GetIntEnvConfig(config.ApiServerRetries, retries),
+retries: config.GetIntEnvConfig(config.HubRetries, retries),
client: &http.Client{
Timeout: timeout,
},
@@ -38,9 +38,9 @@ func (connector *Connector) TestConnection(path string) error {
retriesLeft := connector.retries
for retriesLeft > 0 {
if isReachable, err := connector.isReachable(path); err != nil || !isReachable {
-log.Printf("api server not ready yet %v", err)
+log.Printf("Hub is not ready yet %v!", err)
} else {
-log.Printf("connection test to api server passed successfully")
+log.Printf("Connection test to Hub passed successfully!")
break
}
retriesLeft -= 1
@@ -48,7 +48,7 @@ func (connector *Connector) TestConnection(path string) error {
}
if retriesLeft == 0 {
-return fmt.Errorf("couldn't reach the api server after %v retries", connector.retries)
+return fmt.Errorf("Couldn't reach the Hub after %d retries!", connector.retries)
}
return nil
}
@@ -66,12 +66,12 @@ func (connector *Connector) ReportTapperStatus(tapperStatus models.TapperStatus)
tapperStatusUrl := fmt.Sprintf("%s/status/tapperStatus", connector.url)
if jsonValue, err := json.Marshal(tapperStatus); err != nil {
-return fmt.Errorf("failed Marshal the tapper status %w", err)
+return fmt.Errorf("Failed Marshal the tapper status %w", err)
} else {
if _, err := utils.Post(tapperStatusUrl, "application/json", bytes.NewBuffer(jsonValue), connector.client); err != nil {
-return fmt.Errorf("failed sending to API server the tapped pods %w", err)
+return fmt.Errorf("Failed sending to Hub the tapped pods %w", err)
} else {
-log.Printf("Reported to server API about tapper status: %v", tapperStatus)
+log.Printf("Reported to Hub about tapper status: %v", tapperStatus)
return nil
}
}
@@ -81,12 +81,12 @@ func (connector *Connector) ReportTappedPods(pods []core.Pod) error {
tappedPodsUrl := fmt.Sprintf("%s/status/tappedPods", connector.url)
if jsonValue, err := json.Marshal(pods); err != nil {
-return fmt.Errorf("failed Marshal the tapped pods %w", err)
+return fmt.Errorf("Failed Marshal the tapped pods %w", err)
} else {
if _, err := utils.Post(tappedPodsUrl, "application/json", bytes.NewBuffer(jsonValue), connector.client); err != nil {
-return fmt.Errorf("failed sending to API server the tapped pods %w", err)
+return fmt.Errorf("Failed sending to Hub the tapped pods %w", err)
} else {
-log.Printf("Reported to server API about %d taped pods successfully", len(pods))
+log.Printf("Reported to Hub about %d taped pods successfully", len(pods))
return nil
}
}
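
To see the retry loop above in isolation, here is a self-contained sketch of the Connector's connection test with the renamed wording. Names mirror the diff; isReachable is replaced by a plain HTTP GET, which is an assumption about its behavior, and the URL in main is a placeholder.

package main

import (
	"fmt"
	"net/http"
	"time"
)

type Connector struct {
	url     string
	retries int
	client  *http.Client
}

func NewConnector(url string, retries int, timeout time.Duration) *Connector {
	return &Connector{url: url, retries: retries, client: &http.Client{Timeout: timeout}}
}

// TestConnection polls the Hub until it answers 200 or retries run out.
func (c *Connector) TestConnection(path string) error {
	for retriesLeft := c.retries; retriesLeft > 0; retriesLeft-- {
		if resp, err := c.client.Get(c.url + path); err == nil {
			ok := resp.StatusCode == http.StatusOK
			resp.Body.Close()
			if ok {
				return nil // connection test to Hub passed
			}
		}
		time.Sleep(time.Second)
	}
	return fmt.Errorf("couldn't reach the Hub after %d retries", c.retries)
}

func main() {
	c := NewConnector("http://localhost:8899", 3, 2*time.Second) // placeholder URL
	if err := c.TestConnection("/echo"); err != nil {
		fmt.Println(err)
	}
}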

View File

@@ -4,8 +4,8 @@ const (
KubesharkResourcesPrefix = "ks-"
FrontPodName = KubesharkResourcesPrefix + "front"
FrontServiceName = FrontPodName
-ApiServerPodName = KubesharkResourcesPrefix + "hub"
+HubPodName = KubesharkResourcesPrefix + "hub"
-ApiServerServiceName = ApiServerPodName
+HubServiceName = HubPodName
ClusterRoleBindingName = KubesharkResourcesPrefix + "cluster-role-binding"
ClusterRoleName = KubesharkResourcesPrefix + "cluster-role"
K8sAllNamespaces = ""
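
As a sanity check on what the constant rename yields at runtime, a standalone sketch; the constant values come from the diff, the namespace is a placeholder.

package main

import "fmt"

const (
	KubesharkResourcesPrefix = "ks-"
	HubPodName               = KubesharkResourcesPrefix + "hub"
	HubServiceName           = HubPodName
)

func main() {
	namespace := "kubeshark" // placeholder namespace for illustration
	fmt.Println(HubPodName, HubServiceName) // ks-hub ks-hub
	// The tapper syncer composes the Hub's in-cluster address the same way
	// as updateKubesharkTappers in the next file:
	fmt.Printf("%s.%s.svc\n", HubPodName, namespace) // ks-hub.kubeshark.svc
}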

View File

@@ -331,7 +331,7 @@ func (tapperSyncer *KubesharkTapperSyncer) updateKubesharkTappers() error {
TapperDaemonSetName,
"kubeshark/worker:latest",
TapperPodName,
-fmt.Sprintf("%s.%s.svc", ApiServerPodName, tapperSyncer.config.KubesharkResourcesNamespace),
+fmt.Sprintf("%s.%s.svc", HubPodName, tapperSyncer.config.KubesharkResourcesNamespace),
nodeNames,
serviceAccountName,
tapperSyncer.config.TapperResources,

View File

@@ -170,7 +170,7 @@ func (provider *Provider) CreateNamespace(ctx context.Context, name string) (*co
return provider.clientSet.CoreV1().Namespaces().Create(ctx, namespaceSpec, metav1.CreateOptions{})
}
-type ApiServerOptions struct {
+type HubOptions struct {
Namespace string
PodName string
PodImage string
@@ -185,7 +185,7 @@ type ApiServerOptions struct {
Profiler bool
}
-func (provider *Provider) BuildApiServerPod(opts *ApiServerOptions, mountVolumeClaim bool, volumeClaimName string, createAuthContainer bool) (*core.Pod, error) {
+func (provider *Provider) BuildHubPod(opts *HubOptions, mountVolumeClaim bool, volumeClaimName string, createAuthContainer bool) (*core.Pod, error) {
configMapVolume := &core.ConfigMapVolumeSource{}
configMapVolume.Name = ConfigMapName
@@ -400,7 +400,7 @@ func (provider *Provider) BuildApiServerPod(opts *ApiServerOptions, mountVolumeC
return pod, nil
}
-func (provider *Provider) BuildFrontPod(opts *ApiServerOptions, mountVolumeClaim bool, volumeClaimName string, createAuthContainer bool) (*core.Pod, error) {
+func (provider *Provider) BuildFrontPod(opts *HubOptions, mountVolumeClaim bool, volumeClaimName string, createAuthContainer bool) (*core.Pod, error) {
configMapVolume := &core.ConfigMapVolumeSource{}
configMapVolume.Name = ConfigMapName
@@ -806,7 +806,7 @@ func (provider *Provider) CreateConfigMap(ctx context.Context, namespace string,
return nil
}
-func (provider *Provider) ApplyKubesharkTapperDaemonSet(ctx context.Context, namespace string, daemonSetName string, podImage string, tapperPodName string, apiServerPodIp string, nodeNames []string, serviceAccountName string, resources models.Resources, imagePullPolicy core.PullPolicy, kubesharkApiFilteringOptions api.TrafficFilteringOptions, logLevel logging.Level, serviceMesh bool, tls bool, maxLiveStreams int) error {
+func (provider *Provider) ApplyKubesharkTapperDaemonSet(ctx context.Context, namespace string, daemonSetName string, podImage string, tapperPodName string, hubPodIp string, nodeNames []string, serviceAccountName string, resources models.Resources, imagePullPolicy core.PullPolicy, kubesharkApiFilteringOptions api.TrafficFilteringOptions, logLevel logging.Level, serviceMesh bool, tls bool, maxLiveStreams int) error {
log.Printf("Applying %d tapper daemon sets, ns: %s, daemonSetName: %s, podImage: %s, tapperPodName: %s", len(nodeNames), namespace, daemonSetName, podImage, tapperPodName)
if len(nodeNames) == 0 {
@@ -821,7 +821,7 @@ func (provider *Provider) ApplyKubesharkTapperDaemonSet(ctx context.Context, nam
kubesharkCmd := []string{
"./worker",
"-i", "any",
-"--api-server-address", fmt.Sprintf("ws://%s/wsTapper", apiServerPodIp),
+"--api-server-address", fmt.Sprintf("ws://%s/wsTapper", hubPodIp),
"--nodefrag",
"--max-live-streams", strconv.Itoa(maxLiveStreams),
}
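
To make the renamed parameter concrete, a sketch of the worker command line that ApplyKubesharkTapperDaemonSet assembles; the flags and their ordering are from the diff, while the hubPodIp and stream-count values are placeholders.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	hubPodIp := "ks-hub.kubeshark.svc" // placeholder; the syncer passes the real address
	maxLiveStreams := 500              // placeholder value

	kubesharkCmd := []string{
		"./worker",
		"-i", "any",
		"--api-server-address", fmt.Sprintf("ws://%s/wsTapper", hubPodIp),
		"--nodefrag",
		"--max-live-streams", strconv.Itoa(maxLiveStreams),
	}
	// Note the CLI flag keeps its old --api-server-address name; only the
	// Go variable was renamed in this commit.
	fmt.Println(strings.Join(kubesharkCmd, " "))
}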

View File

@@ -58,7 +58,7 @@ func StartProxy(kubernetesProvider *Provider, proxyHost string, srcPort uint16,
return server, nil
}
-func getKubesharkApiServerProxiedHostAndPath(kubesharkNamespace string, kubesharkServiceName string) string {
+func getKubesharkHubProxiedHostAndPath(kubesharkNamespace string, kubesharkServiceName string) string {
return fmt.Sprintf("/api/v1/namespaces/%s/services/%s:%d/proxy", kubesharkNamespace, kubesharkServiceName, kubesharkServicePort)
}
@@ -78,11 +78,11 @@ func getRerouteHttpHandlerKubesharkAPI(proxyHandler http.Handler, kubesharkNames
return
}
-proxiedPath := getKubesharkApiServerProxiedHostAndPath(kubesharkNamespace, kubesharkServiceName)
+proxiedPath := getKubesharkHubProxiedHostAndPath(kubesharkNamespace, kubesharkServiceName)
//avoid redirecting several times
if !strings.Contains(r.URL.Path, proxiedPath) {
-r.URL.Path = fmt.Sprintf("%s%s", getKubesharkApiServerProxiedHostAndPath(kubesharkNamespace, kubesharkServiceName), r.URL.Path)
+r.URL.Path = fmt.Sprintf("%s%s", getKubesharkHubProxiedHostAndPath(kubesharkNamespace, kubesharkServiceName), r.URL.Path)
}
proxyHandler.ServeHTTP(w, r)
})
@@ -90,7 +90,7 @@ func getRerouteHttpHandlerKubesharkAPI(proxyHandler http.Handler, kubesharkNames
func getRerouteHttpHandlerKubesharkStatic(proxyHandler http.Handler, kubesharkNamespace string, kubesharkServiceName string) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
r.URL.Path = strings.Replace(r.URL.Path, "/static/", fmt.Sprintf("%s/static/", getKubesharkHubProxiedHostAndPath(kubesharkNamespace, kubesharkServiceName)), 1)
proxyHandler.ServeHTTP(w, r)
})
}
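
For reference, the proxied path produced by the renamed helper; the format string is from the diff, while the port constant and the sample arguments are placeholders.

package main

import "fmt"

const kubesharkServicePort = 80 // placeholder; the real constant lives elsewhere in the package

func getKubesharkHubProxiedHostAndPath(kubesharkNamespace string, kubesharkServiceName string) string {
	return fmt.Sprintf("/api/v1/namespaces/%s/services/%s:%d/proxy", kubesharkNamespace, kubesharkServiceName, kubesharkServicePort)
}

func main() {
	// Prints: /api/v1/namespaces/kubeshark/services/ks-hub:80/proxy
	fmt.Println(getKubesharkHubProxiedHostAndPath("kubeshark", "ks-hub"))
}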

View File

@@ -89,8 +89,8 @@ func waitUntilNamespaceDeleted(ctx context.Context, cancel context.CancelFunc, k
func cleanUpRestrictedMode(ctx context.Context, kubernetesProvider *kubernetes.Provider, kubesharkResourcesNamespace string) []string {
leftoverResources := make([]string, 0)
-if err := kubernetesProvider.RemoveService(ctx, kubesharkResourcesNamespace, kubernetes.ApiServerServiceName); err != nil {
+if err := kubernetesProvider.RemoveService(ctx, kubesharkResourcesNamespace, kubernetes.HubServiceName); err != nil {
-resourceDesc := fmt.Sprintf("Service %s in namespace %s", kubernetes.ApiServerServiceName, kubesharkResourcesNamespace)
+resourceDesc := fmt.Sprintf("Service %s in namespace %s", kubernetes.HubServiceName, kubesharkResourcesNamespace)
handleDeletionError(err, resourceDesc, &leftoverResources)
}
@@ -140,8 +140,8 @@ func cleanUpRestrictedMode(ctx context.Context, kubernetesProvider *kubernetes.P
}
}
-if err := kubernetesProvider.RemovePod(ctx, kubesharkResourcesNamespace, kubernetes.ApiServerPodName); err != nil {
+if err := kubernetesProvider.RemovePod(ctx, kubesharkResourcesNamespace, kubernetes.HubPodName); err != nil {
-resourceDesc := fmt.Sprintf("Pod %s in namespace %s", kubernetes.ApiServerPodName, kubesharkResourcesNamespace)
+resourceDesc := fmt.Sprintf("Pod %s in namespace %s", kubernetes.HubPodName, kubesharkResourcesNamespace)
handleDeletionError(err, resourceDesc, &leftoverResources)
}

View File

@@ -15,7 +15,7 @@ import (
core "k8s.io/api/core/v1"
)
-func CreateTapKubesharkResources(ctx context.Context, kubernetesProvider *kubernetes.Provider, serializedKubesharkConfig string, isNsRestrictedMode bool, kubesharkResourcesNamespace string, agentImage string, maxEntriesDBSizeBytes int64, apiServerResources models.Resources, imagePullPolicy core.PullPolicy, logLevel logging.Level, profiler bool) (bool, error) {
+func CreateTapKubesharkResources(ctx context.Context, kubernetesProvider *kubernetes.Provider, serializedKubesharkConfig string, isNsRestrictedMode bool, kubesharkResourcesNamespace string, agentImage string, maxEntriesDBSizeBytes int64, hubResources models.Resources, imagePullPolicy core.PullPolicy, logLevel logging.Level, profiler bool) (bool, error) {
if !isNsRestrictedMode {
if err := createKubesharkNamespace(ctx, kubernetesProvider, kubesharkResourcesNamespace); err != nil {
return false, err
@@ -38,22 +38,22 @@ func CreateTapKubesharkResources(ctx context.Context, kubernetesProvider *kubern
serviceAccountName = ""
}
-opts := &kubernetes.ApiServerOptions{
+opts := &kubernetes.HubOptions{
Namespace: kubesharkResourcesNamespace,
-PodName: kubernetes.ApiServerPodName,
+PodName: kubernetes.HubPodName,
PodImage: agentImage,
KratosImage: "",
KetoImage: "",
ServiceAccountName: serviceAccountName,
IsNamespaceRestricted: isNsRestrictedMode,
MaxEntriesDBSizeBytes: maxEntriesDBSizeBytes,
-Resources: apiServerResources,
+Resources: hubResources,
ImagePullPolicy: imagePullPolicy,
LogLevel: logLevel,
Profiler: profiler,
}
-frontOpts := &kubernetes.ApiServerOptions{
+frontOpts := &kubernetes.HubOptions{
Namespace: kubesharkResourcesNamespace,
PodName: kubernetes.FrontPodName,
PodImage: agentImage,
@@ -62,13 +62,13 @@ func CreateTapKubesharkResources(ctx context.Context, kubernetesProvider *kubern
ServiceAccountName: serviceAccountName,
IsNamespaceRestricted: isNsRestrictedMode,
MaxEntriesDBSizeBytes: maxEntriesDBSizeBytes,
-Resources: apiServerResources,
+Resources: hubResources,
ImagePullPolicy: imagePullPolicy,
LogLevel: logLevel,
Profiler: profiler,
}
-if err := createKubesharkApiServerPod(ctx, kubernetesProvider, opts); err != nil {
+if err := createKubesharkHubPod(ctx, kubernetesProvider, opts); err != nil {
return kubesharkServiceAccountExists, err
}
@@ -76,12 +76,12 @@ func CreateTapKubesharkResources(ctx context.Context, kubernetesProvider *kubern
return kubesharkServiceAccountExists, err
}
-_, err = kubernetesProvider.CreateService(ctx, kubesharkResourcesNamespace, kubernetes.ApiServerServiceName, kubernetes.ApiServerServiceName, 80, int32(config.Config.Hub.PortForward.DstPort), int32(config.Config.Hub.PortForward.SrcPort))
+_, err = kubernetesProvider.CreateService(ctx, kubesharkResourcesNamespace, kubernetes.HubServiceName, kubernetes.HubServiceName, 80, int32(config.Config.Hub.PortForward.DstPort), int32(config.Config.Hub.PortForward.SrcPort))
if err != nil {
return kubesharkServiceAccountExists, err
}
-log.Printf("Successfully created service: %s", kubernetes.ApiServerServiceName)
+log.Printf("Successfully created service: %s", kubernetes.HubServiceName)
_, err = kubernetesProvider.CreateService(ctx, kubesharkResourcesNamespace, kubernetes.FrontServiceName, kubernetes.FrontServiceName, 80, int32(config.Config.Front.PortForward.DstPort), int32(config.Config.Front.PortForward.SrcPort))
if err != nil {
@@ -117,8 +117,8 @@ func createRBACIfNecessary(ctx context.Context, kubernetesProvider *kubernetes.P
return true, nil
}
-func createKubesharkApiServerPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, opts *kubernetes.ApiServerOptions) error {
+func createKubesharkHubPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, opts *kubernetes.HubOptions) error {
-pod, err := kubernetesProvider.BuildApiServerPod(opts, false, "", false)
+pod, err := kubernetesProvider.BuildHubPod(opts, false, "", false)
if err != nil {
return err
}
@@ -129,7 +129,7 @@ func createKubesharkApiServerPod(ctx context.Context, kubernetesProvider *kubern
return nil
}
-func createFrontPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, opts *kubernetes.ApiServerOptions) error {
+func createFrontPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, opts *kubernetes.HubOptions) error {
pod, err := kubernetesProvider.BuildFrontPod(opts, false, "", false)
if err != nil {
return err

View File

@@ -29,7 +29,7 @@ func Do(req *http.Request, client *http.Client) (*http.Response, error) {
func checkError(response *http.Response, errInOperation error) (*http.Response, error) {
if errInOperation != nil {
return response, errInOperation
-// Check only if status != 200 (and not status >= 300). Agent APIs return only 200 on success.
+// Check only if status != 200 (and not status >= 300). Hub return only 200 on success.
} else if response.StatusCode != http.StatusOK {
body, err := io.ReadAll(response.Body)
response.Body.Close()
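
The diff cuts checkError off mid-body. A hedged standalone sketch of its contract, treating anything other than 200 as an error since the Hub returns only 200 on success; the error message and the endpoint in main are illustrative, not from the commit.

package main

import (
	"fmt"
	"io"
	"net/http"
)

func checkError(response *http.Response, errInOperation error) (*http.Response, error) {
	if errInOperation != nil {
		return response, errInOperation
		// Check only if status != 200 (and not status >= 300). Hub returns only 200 on success.
	} else if response.StatusCode != http.StatusOK {
		body, _ := io.ReadAll(response.Body)
		response.Body.Close()
		return response, fmt.Errorf("got response status %d, body: %s", response.StatusCode, string(body))
	}
	return response, nil
}

func main() {
	resp, err := http.Get("http://localhost:8899/echo") // placeholder endpoint
	if _, err = checkError(resp, err); err != nil {
		fmt.Println(err)
	}
}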