Mirror of https://github.com/kubeshark/kubeshark.git (synced 2025-04-27 19:47:31 +00:00)
Add hub to the list of containers in `pprof` command and add flags to `pprof` command (#1603)

* Add hub to the list of containers in `pprof` command and add flags to `pprof` command
* Reduce duplication

Co-authored-by: Alon Girmonsky <1990761+alongir@users.noreply.github.com>
This commit is contained in:
parent 95637bfce8
commit 1c883c73e4
Makefile (2 changed lines)
@@ -165,7 +165,7 @@ helm-install-debug:
 	cd helm-chart && helm install kubeshark . --set tap.docker.tag=$(TAG) --set tap.debug=true && cd ..
 
 helm-install-profile:
-	cd helm-chart && helm install kubeshark . --set tap.docker.tag=$(TAG) --set tap.misc.profile=true && cd ..
+	cd helm-chart && helm install kubeshark . --set tap.docker.tag=$(TAG) --set tap.pprof.enabled=true && cd ..
 
 helm-uninstall:
 	helm uninstall kubeshark
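With this change, the existing helm-install-profile target toggles the chart's new pprof switch instead of the removed tap.misc.profile flag; a minimal usage sketch, assuming a locally built image whose tag is passed through the TAG variable (the tag value below is illustrative):

# Install the chart with the in-cluster pprof servers enabled
make helm-install-profile TAG=v52.3.79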
cmd/pprof.go (14 changed lines)
@@ -1,6 +1,9 @@
 package cmd
 
 import (
+	"github.com/creasty/defaults"
+	"github.com/kubeshark/kubeshark/config/configStructs"
+	"github.com/rs/zerolog/log"
 	"github.com/spf13/cobra"
 )
 
@@ -15,4 +18,15 @@ var pprofCmd = &cobra.Command{
 
 func init() {
 	rootCmd.AddCommand(pprofCmd)
+
+	defaultTapConfig := configStructs.TapConfig{}
+	if err := defaults.Set(&defaultTapConfig); err != nil {
+		log.Debug().Err(err).Send()
+	}
+
+	pprofCmd.Flags().Uint16(configStructs.ProxyFrontPortLabel, defaultTapConfig.Proxy.Front.Port, "Provide a custom port for the proxy/port-forward")
+	pprofCmd.Flags().String(configStructs.ProxyHostLabel, defaultTapConfig.Proxy.Host, "Provide a custom host for the proxy/port-forward")
+	pprofCmd.Flags().StringP(configStructs.ReleaseNamespaceLabel, "s", defaultTapConfig.Release.Namespace, "Release namespace of Kubeshark")
+	pprofCmd.Flags().Uint16(configStructs.PprofPortLabel, defaultTapConfig.Pprof.Port, "Provide a custom port for the pprof server")
+	pprofCmd.Flags().String(configStructs.PprofViewLabel, defaultTapConfig.Pprof.View, "Change the default view of the pprof web interface")
 }
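With these flags registered, the command can be pointed at a non-default release namespace, pprof port, and web view; a hypothetical invocation (the namespace and values below are illustrative; the flag names come from the pprof-port and pprof-view labels added in this commit):

# Serve the pprof web UI on port 8001 and default to the graph view
kubeshark pprof -s kubeshark-staging --pprof-port 8001 --pprof-view graph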
@@ -10,6 +10,7 @@ import (
 	"github.com/kubeshark/kubeshark/utils"
 	"github.com/rivo/tview"
 	"github.com/rs/zerolog/log"
+	v1 "k8s.io/api/core/v1"
 )
 
 func runPprof() {
@@ -23,7 +24,16 @@ func runPprof() {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 
-	pods, err := provider.ListPodsByAppLabel(ctx, config.Config.Tap.Release.Namespace, map[string]string{"app.kubeshark.co/app": "worker"})
+	hubPods, err := provider.ListPodsByAppLabel(ctx, config.Config.Tap.Release.Namespace, map[string]string{kubernetes.AppLabelKey: "hub"})
+	if err != nil {
+		log.Error().
+			Err(err).
+			Msg("Failed to list hub pods!")
+		cancel()
+		return
+	}
+
+	workerPods, err := provider.ListPodsByAppLabel(ctx, config.Config.Tap.Release.Namespace, map[string]string{kubernetes.AppLabelKey: "worker"})
 	if err != nil {
 		log.Error().
 			Err(err).
@@ -40,62 +50,39 @@ func runPprof() {
 	var currentCmd *cmd.Cmd
 
 	i := 48
-	for _, pod := range pods {
+	for _, pod := range hubPods {
+		for _, container := range pod.Spec.Containers {
+			log.Info().Str("pod", pod.Name).Str("container", container.Name).Send()
+			homeUrl := fmt.Sprintf("%s/debug/pprof/", kubernetes.GetHubUrl())
+			modal := buildNewModal(
+				pod,
+				container,
+				homeUrl,
+				app,
+				list,
+				fullscreen,
+				currentCmd,
+			)
+			list.AddItem(fmt.Sprintf("pod: %s container: %s", pod.Name, container.Name), pod.Spec.NodeName, rune(i), func() {
+				app.SetRoot(modal, fullscreen)
+			})
+			i++
+		}
+	}
+
+	for _, pod := range workerPods {
 		for _, container := range pod.Spec.Containers {
 			log.Info().Str("pod", pod.Name).Str("container", container.Name).Send()
 			homeUrl := fmt.Sprintf("%s/pprof/%s/%s/", kubernetes.GetHubUrl(), pod.Status.HostIP, container.Name)
-			modal := tview.NewModal().
-				SetText(fmt.Sprintf("pod: %s container: %s", pod.Name, container.Name)).
-				AddButtons([]string{
-					"Open Debug Home Page",
-					"Profile: CPU",
-					"Profile: Memory",
-					"Profile: Goroutine",
-					"Cancel",
-				}).
-				SetDoneFunc(func(buttonIndex int, buttonLabel string) {
-					switch buttonLabel {
-					case "Open Debug Home Page":
-						utils.OpenBrowser(homeUrl)
-					case "Profile: CPU":
-						if currentCmd != nil {
-							err = currentCmd.Stop()
-							if err != nil {
-								log.Error().Err(err).Str("name", currentCmd.Name).Msg("Failed to stop process!")
-							}
-						}
-						currentCmd = cmd.NewCmd("go", "tool", "pprof", "-http", ":8000", fmt.Sprintf("%sprofile", homeUrl))
-						currentCmd.Start()
-					case "Profile: Memory":
-						if currentCmd != nil {
-							err = currentCmd.Stop()
-							if err != nil {
-								log.Error().Err(err).Str("name", currentCmd.Name).Msg("Failed to stop process!")
-							}
-						}
-						currentCmd = cmd.NewCmd("go", "tool", "pprof", "-http", ":8000", fmt.Sprintf("%sheap", homeUrl))
-						currentCmd.Start()
-					case "Profile: Goroutine":
-						if currentCmd != nil {
-							err = currentCmd.Stop()
-							if err != nil {
-								log.Error().Err(err).Str("name", currentCmd.Name).Msg("Failed to stop process!")
-							}
-						}
-						currentCmd = cmd.NewCmd("go", "tool", "pprof", "-http", ":8000", fmt.Sprintf("%sgoroutine", homeUrl))
-						currentCmd.Start()
-					case "Cancel":
-						if currentCmd != nil {
-							err = currentCmd.Stop()
-							if err != nil {
-								log.Error().Err(err).Str("name", currentCmd.Name).Msg("Failed to stop process!")
-							}
-						}
-						fallthrough
-					default:
-						app.SetRoot(list, fullscreen)
-					}
-				})
+			modal := buildNewModal(
+				pod,
+				container,
+				homeUrl,
+				app,
+				list,
+				fullscreen,
+				currentCmd,
+			)
 			list.AddItem(fmt.Sprintf("pod: %s container: %s", pod.Name, container.Name), pod.Spec.NodeName, rune(i), func() {
 				app.SetRoot(modal, fullscreen)
 			})
@@ -117,3 +104,73 @@ func runPprof() {
 		panic(err)
 	}
 }
+
+func buildNewModal(
+	pod v1.Pod,
+	container v1.Container,
+	homeUrl string,
+	app *tview.Application,
+	list *tview.List,
+	fullscreen bool,
+	currentCmd *cmd.Cmd,
+) *tview.Modal {
+	return tview.NewModal().
+		SetText(fmt.Sprintf("pod: %s container: %s", pod.Name, container.Name)).
+		AddButtons([]string{
+			"Open Debug Home Page",
+			"Profile: CPU",
+			"Profile: Memory",
+			"Profile: Goroutine",
+			"Cancel",
+		}).
+		SetDoneFunc(func(buttonIndex int, buttonLabel string) {
+			var err error
+			port := fmt.Sprintf(":%d", config.Config.Tap.Pprof.Port)
+			view := fmt.Sprintf("http://localhost%s/ui/%s", port, config.Config.Tap.Pprof.View)
+
+			switch buttonLabel {
+			case "Open Debug Home Page":
+				utils.OpenBrowser(homeUrl)
+			case "Profile: CPU":
+				if currentCmd != nil {
+					err = currentCmd.Stop()
+					if err != nil {
+						log.Error().Err(err).Str("name", currentCmd.Name).Msg("Failed to stop process!")
+					}
+				}
+				currentCmd = cmd.NewCmd("go", "tool", "pprof", "-http", port, "-no_browser", fmt.Sprintf("%sprofile", homeUrl))
+				currentCmd.Start()
+				utils.OpenBrowser(view)
+			case "Profile: Memory":
+				if currentCmd != nil {
+					err = currentCmd.Stop()
+					if err != nil {
+						log.Error().Err(err).Str("name", currentCmd.Name).Msg("Failed to stop process!")
+					}
+				}
+				currentCmd = cmd.NewCmd("go", "tool", "pprof", "-http", port, "-no_browser", fmt.Sprintf("%sheap", homeUrl))
+				currentCmd.Start()
+				utils.OpenBrowser(view)
+			case "Profile: Goroutine":
+				if currentCmd != nil {
+					err = currentCmd.Stop()
+					if err != nil {
+						log.Error().Err(err).Str("name", currentCmd.Name).Msg("Failed to stop process!")
+					}
+				}
+				currentCmd = cmd.NewCmd("go", "tool", "pprof", "-http", port, "-no_browser", fmt.Sprintf("%sgoroutine", homeUrl))
+				currentCmd.Start()
+				utils.OpenBrowser(view)
+			case "Cancel":
+				if currentCmd != nil {
+					err = currentCmd.Stop()
+					if err != nil {
+						log.Error().Err(err).Str("name", currentCmd.Name).Msg("Failed to stop process!")
+					}
+				}
+				fallthrough
+			default:
+				app.SetRoot(list, fullscreen)
+			}
+		})
+}
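For orientation, each profile button in this modal boils down to launching go tool pprof against one of the selected pprof endpoints and then opening the configured view in the browser; a rough manual equivalent, assuming the default port 8000, the default flamegraph view, and a placeholder hub URL, would be:

# CPU profile served through the local pprof web UI (-no_browser keeps pprof from opening its own tab)
go tool pprof -http :8000 -no_browser http://<hub-url>/debug/pprof/profile &
# The command then opens the configured view itself, i.e. http://localhost:8000/ui/flamegraph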
@@ -70,6 +70,7 @@ func InitConfig(cmd *cobra.Command) error {
 		"pro",
 		"proxy",
 		"scripts",
+		"pprof",
 	}, cmdName) {
 		cmdName = "tap"
 	}
@@ -31,6 +31,8 @@ const (
 	IgnoreTaintedLabel = "ignoreTainted"
 	IngressEnabledLabel = "ingress-enabled"
 	TelemetryEnabledLabel = "telemetry-enabled"
+	PprofPortLabel = "pprof-port"
+	PprofViewLabel = "pprof-view"
 	DebugLabel = "debug"
 	ContainerPort = 80
 	ContainerPortStr = "80"
@@ -159,6 +161,12 @@ type MetricsConfig struct {
 	Port uint16 `yaml:"port" json:"port" default:"49100"`
 }
 
+type PprofConfig struct {
+	Enabled bool `yaml:"enabled" json:"enabled" default:"false"`
+	Port uint16 `yaml:"port" json:"port" default:"8000"`
+	View string `yaml:"view" json:"view" default:"flamegraph"`
+}
+
 type MiscConfig struct {
 	JsonTTL string `yaml:"jsonTTL" json:"jsonTTL" default:"5m"`
 	PcapTTL string `yaml:"pcapTTL" json:"pcapTTL" default:"10s"`
@@ -167,7 +175,6 @@ type MiscConfig struct {
 	TcpStreamChannelTimeoutMs int `yaml:"tcpStreamChannelTimeoutMs" json:"tcpStreamChannelTimeoutMs" default:"10000"`
 	TcpStreamChannelTimeoutShow bool `yaml:"tcpStreamChannelTimeoutShow" json:"tcpStreamChannelTimeoutShow" default:"false"`
 	ResolutionStrategy string `yaml:"resolutionStrategy" json:"resolutionStrategy" default:"auto"`
-	Profile bool `yaml:"profile" json:"profile" default:"false"`
 	DuplicateTimeframe string `yaml:"duplicateTimeframe" json:"duplicateTimeframe" default:"200ms"`
 	DetectDuplicates bool `yaml:"detectDuplicates" json:"detectDuplicates" default:"false"`
 }
@@ -211,6 +218,7 @@ type TapConfig struct {
 	GlobalFilter string `yaml:"globalFilter" json:"globalFilter"`
 	EnabledDissectors []string `yaml:"enabledDissectors" json:"enabledDissectors"`
 	Metrics MetricsConfig `yaml:"metrics" json:"metrics"`
+	Pprof PprofConfig `yaml:"pprof" json:"pprof"`
 	Misc MiscConfig `yaml:"misc" json:"misc"`
 }
 
@@ -26,7 +26,7 @@ spec:
       dnsPolicy: ClusterFirstWithHostNet
       serviceAccountName: {{ include "kubeshark.serviceAccountName" . }}
       containers:
-        - name: kubeshark-hub
+        - name: hub
          command:
            - ./hub
            - -port
@@ -46,7 +46,7 @@ spec:
            - name: KUBESHARK_CLOUD_API_URL
              value: 'https://api.kubeshark.co'
            - name: PROFILING_ENABLED
-             value: '{{ .Values.tap.misc.profile }}'
+             value: '{{ .Values.tap.pprof.enabled }}'
          {{- if .Values.tap.docker.overrideTag.hub }}
          image: '{{ .Values.tap.docker.registry }}/hub:{{ .Values.tap.docker.overrideTag.hub }}'
          {{ else }}
@@ -97,7 +97,7 @@ spec:
            - name: KUBESHARK_CLOUD_API_URL
              value: 'https://api.kubeshark.co'
            - name: PROFILING_ENABLED
-             value: '{{ .Values.tap.misc.profile }}'
+             value: '{{ .Values.tap.pprof.enabled }}'
          resources:
            limits:
              cpu: {{ .Values.tap.resources.sniffer.limits.cpu }}
@@ -169,7 +169,7 @@ spec:
          {{- if .Values.tap.disableTlsLog }}
            - -disable-tls-log
          {{- end }}
-         {{- if .Values.tap.misc.profile }}
+         {{- if .Values.tap.pprof.enabled }}
            - -port
            - '{{ add .Values.tap.proxy.worker.srvPort 1 }}'
          {{- end }}
@@ -190,7 +190,7 @@ spec:
                fieldRef:
                  fieldPath: metadata.namespace
            - name: PROFILING_ENABLED
-             value: '{{ .Values.tap.misc.profile }}'
+             value: '{{ .Values.tap.pprof.enabled }}'
          resources:
            limits:
              cpu: {{ .Values.tap.resources.tracer.limits.cpu }}
@@ -60,7 +60,7 @@ Escape double quotes in a string
 Define debug docker tag suffix
 */}}
 {{- define "kubeshark.dockerTagDebugVersion" -}}
-{{- .Values.tap.misc.profile | ternary "-debug" "" }}
+{{- .Values.tap.pprof.enabled | ternary "-debug" "" }}
 {{- end -}}
 
 {{/*
@ -131,6 +131,10 @@ tap:
|
||||
- ws
|
||||
metrics:
|
||||
port: 49100
|
||||
pprof:
|
||||
enabled: false
|
||||
port: 8000
|
||||
view: flamegraph
|
||||
misc:
|
||||
jsonTTL: 5m
|
||||
pcapTTL: 10s
|
||||
@@ -139,7 +143,6 @@ tap:
     tcpStreamChannelTimeoutMs: 10000
     tcpStreamChannelTimeoutShow: false
     resolutionStrategy: auto
-    profile: false
     duplicateTimeframe: 200ms
     detectDuplicates: false
   logs:
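The chart's new tap.pprof block replaces the old tap.misc.profile switch and can be overridden at install time; a hedged sketch mirroring the Makefile's install command (the values below are illustrative):

# Enable pprof and choose the port and view served by the in-cluster pprof endpoints
cd helm-chart && helm install kubeshark . \
  --set tap.pprof.enabled=true \
  --set tap.pprof.port=8000 \
  --set tap.pprof.view=flamegraph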
@@ -8,4 +8,5 @@ const (
 	HubServiceName = HubPodName
 	K8sAllNamespaces = ""
 	MinKubernetesServerVersion = "1.16.0"
+	AppLabelKey = "app.kubeshark.co/app"
 )
@@ -106,7 +106,7 @@ func getRerouteHttpHandlerSelfStatic(proxyHandler http.Handler, selfNamespace st
 }
 
 func NewPortForward(kubernetesProvider *Provider, namespace string, podRegex *regexp.Regexp, srcPort uint16, dstPort uint16, ctx context.Context) (*portforward.PortForwarder, error) {
-	pods, err := kubernetesProvider.ListPodsByAppLabel(ctx, namespace, map[string]string{"app.kubeshark.co/app": "front"})
+	pods, err := kubernetesProvider.ListPodsByAppLabel(ctx, namespace, map[string]string{AppLabelKey: "front"})
 	if err != nil {
 		return nil, err
 	} else if len(pods) == 0 {
@@ -250,6 +250,7 @@ data:
   CLOUD_LICENSE_ENABLED: 'true'
   DUPLICATE_TIMEFRAME: '200ms'
   ENABLED_DISSECTORS: 'amqp,dns,http,icmp,kafka,redis,sctp,syscall,tcp,ws'
+  DISSECTORS_UPDATING_ENABLED: 'true'
   DETECT_DUPLICATES: 'false'
 ---
 # Source: kubeshark/templates/02-cluster-role.yaml
@@ -669,7 +670,7 @@ spec:
      dnsPolicy: ClusterFirstWithHostNet
      serviceAccountName: kubeshark-service-account
      containers:
-        - name: kubeshark-hub
+        - name: hub
          command:
            - ./hub
            - -port
@@ -685,6 +686,8 @@ spec:
                  fieldPath: metadata.namespace
            - name: KUBESHARK_CLOUD_API_URL
              value: 'https://api.kubeshark.co'
+           - name: PROFILING_ENABLED
+             value: 'false'
          image: 'docker.io/kubeshark/hub:v52.3.79'
          imagePullPolicy: Always
          readinessProbe:
@@ -784,6 +787,8 @@ spec:
              value: 'true'
            - name: REACT_APP_SUPPORT_CHAT_ENABLED
              value: 'true'
+           - name: REACT_APP_DISSECTORS_UPDATING_ENABLED
+             value: 'true'
          image: 'docker.io/kubeshark/front:v52.3.79'
          imagePullPolicy: Always
          name: kubeshark-front