🔨 Move cli folder contents into project root (#1253)
* Remove `logger` module
* Remove `shared` module
* Move `cli` folder contents into project root
* Fix linter
* Change the module name from `github.com/kubeshark/kubeshark/cli` to `github.com/kubeshark/kubeshark`
* Set the default `Makefile` rule to `build`
* Add `lint` rule
* Fix the linter errors
kubernetes/consts.go (new file, 25 lines)
@@ -0,0 +1,25 @@
package kubernetes

const (
	KubesharkResourcesPrefix   = "ks-"
	ApiServerPodName           = KubesharkResourcesPrefix + "hub"
	ClusterRoleBindingName     = KubesharkResourcesPrefix + "cluster-role-binding"
	ClusterRoleName            = KubesharkResourcesPrefix + "cluster-role"
	K8sAllNamespaces           = ""
	RoleBindingName            = KubesharkResourcesPrefix + "role-binding"
	RoleName                   = KubesharkResourcesPrefix + "role"
	ServiceAccountName         = KubesharkResourcesPrefix + "service-account"
	TapperDaemonSetName        = KubesharkResourcesPrefix + "worker-daemon-set"
	TapperPodName              = KubesharkResourcesPrefix + "worker"
	ConfigMapName              = KubesharkResourcesPrefix + "config"
	MinKubernetesServerVersion = "1.16.0"
)

const (
	LabelPrefixApp           = "app.kubernetes.io/"
	LabelManagedBy           = LabelPrefixApp + "managed-by"
	LabelCreatedBy           = LabelPrefixApp + "created-by"
	LabelValueKubeshark      = "kubeshark"
	LabelValueKubesharkCLI   = "kubeshark-cli"
	LabelValueKubesharkAgent = "kubeshark-agent"
)
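For reference, a minimal sketch of how these prefixed names and labels compose. The constant values are copied from the file above for illustration; the label-selector usage is an assumption, since the real call sites live in provider.go, whose diff is suppressed below.

package main

import "fmt"

func main() {
	// Copied from kubernetes/consts.go for illustration only.
	const kubesharkResourcesPrefix = "ks-"
	const tapperDaemonSetName = kubesharkResourcesPrefix + "worker-daemon-set"
	const labelManagedBy = "app.kubernetes.io/" + "managed-by"

	fmt.Println(tapperDaemonSetName) // ks-worker-daemon-set

	// A label selector of the kind that could locate Kubeshark-managed
	// resources (hypothetical usage, not taken from this diff).
	fmt.Printf("%s=%s\n", labelManagedBy, "kubeshark") // app.kubernetes.io/managed-by=kubeshark
}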
kubernetes/errors.go (new file, 26 lines)
@@ -0,0 +1,26 @@
package kubernetes

type K8sTapManagerErrorReason string

const (
	TapManagerTapperUpdateError K8sTapManagerErrorReason = "TAPPER_UPDATE_ERROR"
	TapManagerPodWatchError     K8sTapManagerErrorReason = "POD_WATCH_ERROR"
	TapManagerPodListError      K8sTapManagerErrorReason = "POD_LIST_ERROR"
)

type K8sTapManagerError struct {
	OriginalError    error
	TapManagerReason K8sTapManagerErrorReason
}

// Error implements the error interface.
func (e *K8sTapManagerError) Error() string {
	return e.OriginalError.Error()
}

type ClusterBehindProxyError struct{}

// Error implements the error interface.
func (e *ClusterBehindProxyError) Error() string {
	return "Cluster is behind proxy"
}
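A minimal caller-side sketch (assumed code, not part of this commit) showing how a consumer can branch on the error reason with errors.As:

package main

import (
	"errors"
	"fmt"

	"github.com/kubeshark/kubeshark/kubernetes"
)

func main() {
	// Wrap a plain error the way the tapper syncer does.
	err := error(&kubernetes.K8sTapManagerError{
		OriginalError:    errors.New("pod list failed"),
		TapManagerReason: kubernetes.TapManagerPodListError,
	})

	// Recover the typed error and branch on its reason.
	var tapErr *kubernetes.K8sTapManagerError
	if errors.As(err, &tapErr) {
		fmt.Println("reason:", tapErr.TapManagerReason, "cause:", tapErr.Error())
	}
}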
kubernetes/eventWatchHelper.go (new file, 50 lines)
@@ -0,0 +1,50 @@
package kubernetes

import (
	"context"
	"regexp"
	"strings"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"
)

type EventWatchHelper struct {
	kubernetesProvider *Provider
	NameRegexFilter    *regexp.Regexp
	Kind               string
}

func NewEventWatchHelper(kubernetesProvider *Provider, nameRegexFilter *regexp.Regexp, kind string) *EventWatchHelper {
	return &EventWatchHelper{
		kubernetesProvider: kubernetesProvider,
		NameRegexFilter:    nameRegexFilter,
		Kind:               kind,
	}
}

// Filter implements the EventFilterer interface.
func (wh *EventWatchHelper) Filter(wEvent *WatchEvent) (bool, error) {
	event, err := wEvent.ToEvent()
	if err != nil {
		return false, nil // not an Event object; filter it out
	}

	if !wh.NameRegexFilter.MatchString(event.Name) {
		return false, nil
	}

	if !strings.EqualFold(event.Regarding.Kind, wh.Kind) {
		return false, nil
	}

	return true, nil
}

// NewWatcher implements the WatchCreator interface.
func (wh *EventWatchHelper) NewWatcher(ctx context.Context, namespace string) (watch.Interface, error) {
	watcher, err := wh.kubernetesProvider.clientSet.EventsV1().Events(namespace).Watch(ctx, metav1.ListOptions{Watch: true})
	if err != nil {
		return nil, err
	}

	return watcher, nil
}
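A usage sketch (assumed caller code; Provider construction is elided). The helper is passed to FilteredWatch twice, once as the WatchCreator and once as the EventFilterer, which mirrors how watchTapperEvents wires it up in kubesharkTapperSyncer.go below:

package main

import (
	"context"
	"log"
	"regexp"

	"github.com/kubeshark/kubeshark/kubernetes"
)

func watchPodEvents(ctx context.Context, provider *kubernetes.Provider) {
	helper := kubernetes.NewEventWatchHelper(provider, regexp.MustCompile("^ks-worker.*"), "pod")

	eventChan, errorChan := kubernetes.FilteredWatch(ctx, helper, []string{"default"}, helper)
	for {
		select {
		case wEvent, ok := <-eventChan:
			if !ok {
				return
			}
			event, err := wEvent.ToEvent()
			if err != nil {
				continue
			}
			log.Printf("event %s: %s", event.Name, event.Reason)
		case err, ok := <-errorChan:
			if !ok {
				return
			}
			log.Printf("watch error: %v", err)
		case <-ctx.Done():
			return
		}
	}
}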
kubernetes/kubesharkTapperSyncer.go (new file, 364 lines)
@@ -0,0 +1,364 @@
package kubernetes

import (
	"context"
	"fmt"
	"log"
	"regexp"
	"time"

	"github.com/kubeshark/kubeshark/debounce"
	"github.com/kubeshark/kubeshark/utils"
	"github.com/kubeshark/worker/api"
	"github.com/kubeshark/worker/models"
	"github.com/op/go-logging"
	core "k8s.io/api/core/v1"
)

const updateTappersDelay = 5 * time.Second

type TappedPodChangeEvent struct {
	Added   []core.Pod
	Removed []core.Pod
}

// KubesharkTapperSyncer uses a k8s pod watch to update the tapper DaemonSet when targeted pods are created or removed.
type KubesharkTapperSyncer struct {
	startTime              time.Time
	context                context.Context
	CurrentlyTappedPods    []core.Pod
	config                 TapperSyncerConfig
	kubernetesProvider     *Provider
	TapPodChangesOut       chan TappedPodChangeEvent
	TapperStatusChangedOut chan models.TapperStatus
	ErrorOut               chan K8sTapManagerError
	nodeToTappedPodMap     models.NodeToPodsMap
	tappedNodes            []string
}

type TapperSyncerConfig struct {
	TargetNamespaces              []string
	PodFilterRegex                regexp.Regexp
	KubesharkResourcesNamespace   string
	AgentImage                    string
	TapperResources               models.Resources
	ImagePullPolicy               core.PullPolicy
	LogLevel                      logging.Level
	KubesharkApiFilteringOptions  api.TrafficFilteringOptions
	KubesharkServiceAccountExists bool
	ServiceMesh                   bool
	Tls                           bool
	MaxLiveStreams                int
}

func CreateAndStartKubesharkTapperSyncer(ctx context.Context, kubernetesProvider *Provider, config TapperSyncerConfig, startTime time.Time) (*KubesharkTapperSyncer, error) {
	syncer := &KubesharkTapperSyncer{
		startTime:              startTime.Truncate(time.Second), // Round down because k8s CreationTimestamp is given in 1 sec resolution.
		context:                ctx,
		CurrentlyTappedPods:    make([]core.Pod, 0),
		config:                 config,
		kubernetesProvider:     kubernetesProvider,
		TapPodChangesOut:       make(chan TappedPodChangeEvent, 100),
		TapperStatusChangedOut: make(chan models.TapperStatus, 100),
		ErrorOut:               make(chan K8sTapManagerError, 100),
	}

	if err, _ := syncer.updateCurrentlyTappedPods(); err != nil {
		return nil, err
	}

	if err := syncer.updateKubesharkTappers(); err != nil {
		return nil, err
	}

	go syncer.watchPodsForTapping()
	go syncer.watchTapperEvents()
	go syncer.watchTapperPods()

	return syncer, nil
}

func (tapperSyncer *KubesharkTapperSyncer) watchTapperPods() {
	kubesharkResourceRegex := regexp.MustCompile(fmt.Sprintf("^%s.*", TapperPodName))
	podWatchHelper := NewPodWatchHelper(tapperSyncer.kubernetesProvider, kubesharkResourceRegex)
	eventChan, errorChan := FilteredWatch(tapperSyncer.context, podWatchHelper, []string{tapperSyncer.config.KubesharkResourcesNamespace}, podWatchHelper)

	for {
		select {
		case wEvent, ok := <-eventChan:
			if !ok {
				eventChan = nil
				continue
			}

			pod, err := wEvent.ToPod()
			if err != nil {
				log.Printf("[ERROR] parsing Kubeshark resource pod: %+v", err)
				continue
			}

			log.Printf("Watching tapper pods loop, tapper: %v, node: %v, status: %v", pod.Name, pod.Spec.NodeName, pod.Status.Phase)
			if pod.Spec.NodeName != "" {
				tapperStatus := models.TapperStatus{TapperName: pod.Name, NodeName: pod.Spec.NodeName, Status: string(pod.Status.Phase)}
				tapperSyncer.TapperStatusChangedOut <- tapperStatus
			}

		case err, ok := <-errorChan:
			if !ok {
				errorChan = nil
				continue
			}
			log.Printf("[ERROR] Watching tapper pods loop, error: %+v", err)

		case <-tapperSyncer.context.Done():
			log.Printf("Watching tapper pods loop, ctx done")
			return
		}
	}
}

func (tapperSyncer *KubesharkTapperSyncer) watchTapperEvents() {
	kubesharkResourceRegex := regexp.MustCompile(fmt.Sprintf("^%s.*", TapperPodName))
	eventWatchHelper := NewEventWatchHelper(tapperSyncer.kubernetesProvider, kubesharkResourceRegex, "pod")
	eventChan, errorChan := FilteredWatch(tapperSyncer.context, eventWatchHelper, []string{tapperSyncer.config.KubesharkResourcesNamespace}, eventWatchHelper)

	for {
		select {
		case wEvent, ok := <-eventChan:
			if !ok {
				eventChan = nil
				continue
			}

			event, err := wEvent.ToEvent()
			if err != nil {
				log.Printf("[ERROR] parsing Kubeshark resource event: %+v", err)
				continue
			}

			log.Printf(
				"Watching tapper events loop, event %s, time: %v, resource: %s (%s), reason: %s, note: %s",
				event.Name,
				event.CreationTimestamp.Time,
				event.Regarding.Name,
				event.Regarding.Kind,
				event.Reason,
				event.Note,
			)

			pod, err := tapperSyncer.kubernetesProvider.GetPod(tapperSyncer.context, tapperSyncer.config.KubesharkResourcesNamespace, event.Regarding.Name)
			if err != nil {
				log.Printf("Couldn't get tapper pod %s", event.Regarding.Name)
				continue
			}

			nodeName := ""
			if event.Reason != "FailedScheduling" {
				nodeName = pod.Spec.NodeName
			} else {
				nodeName = pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchFields[0].Values[0]
			}

			tapperStatus := models.TapperStatus{TapperName: pod.Name, NodeName: nodeName, Status: string(pod.Status.Phase)}
			tapperSyncer.TapperStatusChangedOut <- tapperStatus

		case err, ok := <-errorChan:
			if !ok {
				errorChan = nil
				continue
			}

			log.Printf("[ERROR] Watching tapper events loop, error: %+v", err)

		case <-tapperSyncer.context.Done():
			log.Printf("Watching tapper events loop, ctx done")
			return
		}
	}
}

func (tapperSyncer *KubesharkTapperSyncer) watchPodsForTapping() {
	podWatchHelper := NewPodWatchHelper(tapperSyncer.kubernetesProvider, &tapperSyncer.config.PodFilterRegex)
	eventChan, errorChan := FilteredWatch(tapperSyncer.context, podWatchHelper, tapperSyncer.config.TargetNamespaces, podWatchHelper)

	handleChangeInPods := func() {
		err, changeFound := tapperSyncer.updateCurrentlyTappedPods()
		if err != nil {
			tapperSyncer.ErrorOut <- K8sTapManagerError{
				OriginalError:    err,
				TapManagerReason: TapManagerPodListError,
			}
		}

		if !changeFound {
			log.Printf("Nothing changed, no need to update tappers")
			return
		}
		if err := tapperSyncer.updateKubesharkTappers(); err != nil {
			tapperSyncer.ErrorOut <- K8sTapManagerError{
				OriginalError:    err,
				TapManagerReason: TapManagerTapperUpdateError,
			}
		}
	}
	restartTappersDebouncer := debounce.NewDebouncer(updateTappersDelay, handleChangeInPods)

	for {
		select {
		case wEvent, ok := <-eventChan:
			if !ok {
				eventChan = nil
				continue
			}

			pod, err := wEvent.ToPod()
			if err != nil {
				tapperSyncer.handleErrorInWatchLoop(err, restartTappersDebouncer)
				continue
			}

			switch wEvent.Type {
			case EventAdded:
				log.Printf("Added matching pod %s, ns: %s", pod.Name, pod.Namespace)
				if err := restartTappersDebouncer.SetOn(); err != nil {
					log.Print(err)
				}
			case EventDeleted:
				log.Printf("Removed matching pod %s, ns: %s", pod.Name, pod.Namespace)
				if err := restartTappersDebouncer.SetOn(); err != nil {
					log.Print(err)
				}
			case EventModified:
				log.Printf("Modified matching pod %s, ns: %s, phase: %s, ip: %s", pod.Name, pod.Namespace, pod.Status.Phase, pod.Status.PodIP)
				// Act only if the modified pod has already obtained an IP address.
				// After filtering for IPs, a normal pod restart produces the following events:
				//   - Pod deletion
				//   - Pod reaches start state
				//   - Pod reaches ready state
				// Ready/unready transitions might also trigger this event.
				if pod.Status.PodIP != "" {
					if err := restartTappersDebouncer.SetOn(); err != nil {
						log.Print(err)
					}
				}
			case EventBookmark:
				break
			case EventError:
				break
			}
		case err, ok := <-errorChan:
			if !ok {
				errorChan = nil
				continue
			}

			tapperSyncer.handleErrorInWatchLoop(err, restartTappersDebouncer)
			continue

		case <-tapperSyncer.context.Done():
			log.Printf("Watching pods loop, context done, stopping `restart tappers debouncer`")
			restartTappersDebouncer.Cancel()
			// TODO: Does this also perform cleanup?
			return
		}
	}
}

func (tapperSyncer *KubesharkTapperSyncer) handleErrorInWatchLoop(err error, restartTappersDebouncer *debounce.Debouncer) {
	log.Printf("Watching pods loop, got error %v, stopping `restart tappers debouncer`", err)
	restartTappersDebouncer.Cancel()
	tapperSyncer.ErrorOut <- K8sTapManagerError{
		OriginalError:    err,
		TapManagerReason: TapManagerPodWatchError,
	}
}

func (tapperSyncer *KubesharkTapperSyncer) updateCurrentlyTappedPods() (err error, changesFound bool) {
	if matchingPods, err := tapperSyncer.kubernetesProvider.ListAllRunningPodsMatchingRegex(tapperSyncer.context, &tapperSyncer.config.PodFilterRegex, tapperSyncer.config.TargetNamespaces); err != nil {
		return err, false
	} else {
		podsToTap := excludeKubesharkPods(matchingPods)
		addedPods, removedPods := getPodArrayDiff(tapperSyncer.CurrentlyTappedPods, podsToTap)
		for _, addedPod := range addedPods {
			log.Printf("tapping new pod %s", addedPod.Name)
		}
		for _, removedPod := range removedPods {
			log.Printf("pod %s is no longer running, tapping for it stopped", removedPod.Name)
		}
		if len(addedPods) > 0 || len(removedPods) > 0 {
			tapperSyncer.CurrentlyTappedPods = podsToTap
			tapperSyncer.nodeToTappedPodMap = GetNodeHostToTappedPodsMap(tapperSyncer.CurrentlyTappedPods)
			tapperSyncer.TapPodChangesOut <- TappedPodChangeEvent{
				Added:   addedPods,
				Removed: removedPods,
			}
			return nil, true
		}
		return nil, false
	}
}

func (tapperSyncer *KubesharkTapperSyncer) updateKubesharkTappers() error {
	nodesToTap := make([]string, len(tapperSyncer.nodeToTappedPodMap))
	i := 0
	for node := range tapperSyncer.nodeToTappedPodMap {
		nodesToTap[i] = node
		i++
	}

	if utils.EqualStringSlices(nodesToTap, tapperSyncer.tappedNodes) {
		log.Print("Skipping apply, DaemonSet is up to date")
		return nil
	}

	log.Printf("Updating DaemonSet to run on nodes: %v", nodesToTap)

	if len(tapperSyncer.nodeToTappedPodMap) > 0 {
		var serviceAccountName string
		if tapperSyncer.config.KubesharkServiceAccountExists {
			serviceAccountName = ServiceAccountName
		} else {
			serviceAccountName = ""
		}

		nodeNames := make([]string, 0, len(tapperSyncer.nodeToTappedPodMap))
		for nodeName := range tapperSyncer.nodeToTappedPodMap {
			nodeNames = append(nodeNames, nodeName)
		}

		if err := tapperSyncer.kubernetesProvider.ApplyKubesharkTapperDaemonSet(
			tapperSyncer.context,
			tapperSyncer.config.KubesharkResourcesNamespace,
			TapperDaemonSetName,
			"kubeshark/worker:test-amd64",
			TapperPodName,
			fmt.Sprintf("%s.%s.svc", ApiServerPodName, tapperSyncer.config.KubesharkResourcesNamespace),
			nodeNames,
			serviceAccountName,
			tapperSyncer.config.TapperResources,
			tapperSyncer.config.ImagePullPolicy,
			tapperSyncer.config.KubesharkApiFilteringOptions,
			tapperSyncer.config.LogLevel,
			tapperSyncer.config.ServiceMesh,
			tapperSyncer.config.Tls,
			tapperSyncer.config.MaxLiveStreams); err != nil {
			return err
		}

		log.Printf("Successfully created %v tappers", len(tapperSyncer.nodeToTappedPodMap))
	} else {
		if err := tapperSyncer.kubernetesProvider.ResetKubesharkTapperDaemonSet(
			tapperSyncer.context,
			tapperSyncer.config.KubesharkResourcesNamespace,
			TapperDaemonSetName,
			tapperSyncer.config.AgentImage,
			TapperPodName); err != nil {
			return err
		}

		log.Printf("Successfully reset tapper daemon set")
	}

	tapperSyncer.tappedNodes = nodesToTap

	return nil
}
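A sketch of a consumer (assumed caller code; Provider construction and the remaining config fields are elided and left at their zero values) draining the syncer's three output channels:

package main

import (
	"context"
	"log"
	"regexp"
	"time"

	"github.com/kubeshark/kubeshark/kubernetes"
)

func runSyncer(ctx context.Context, provider *kubernetes.Provider) error {
	config := kubernetes.TapperSyncerConfig{
		TargetNamespaces:            []string{"default"},
		PodFilterRegex:              *regexp.MustCompile(".*"),
		KubesharkResourcesNamespace: "ks-namespace",
	}

	syncer, err := kubernetes.CreateAndStartKubesharkTapperSyncer(ctx, provider, config, time.Now())
	if err != nil {
		return err
	}

	// React to the syncer's events as they arrive.
	for {
		select {
		case change := <-syncer.TapPodChangesOut:
			log.Printf("tapped pods changed: +%d / -%d", len(change.Added), len(change.Removed))
		case status := <-syncer.TapperStatusChangedOut:
			log.Printf("tapper %s on %s: %s", status.TapperName, status.NodeName, status.Status)
		case tapErr := <-syncer.ErrorOut:
			log.Printf("tap manager error (%s): %v", tapErr.TapManagerReason, tapErr.OriginalError)
		case <-ctx.Done():
			return nil
		}
	}
}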
kubernetes/podWatchHelper.go (new file, 45 lines)
@@ -0,0 +1,45 @@
package kubernetes

import (
	"context"
	"regexp"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"
)

type PodWatchHelper struct {
	kubernetesProvider *Provider
	NameRegexFilter    *regexp.Regexp
}

func NewPodWatchHelper(kubernetesProvider *Provider, nameRegexFilter *regexp.Regexp) *PodWatchHelper {
	return &PodWatchHelper{
		kubernetesProvider: kubernetesProvider,
		NameRegexFilter:    nameRegexFilter,
	}
}

// Filter implements the EventFilterer interface.
func (wh *PodWatchHelper) Filter(wEvent *WatchEvent) (bool, error) {
	pod, err := wEvent.ToPod()
	if err != nil {
		return false, nil // not a Pod object; filter it out
	}

	if !wh.NameRegexFilter.MatchString(pod.Name) {
		return false, nil
	}

	return true, nil
}

// NewWatcher implements the WatchCreator interface.
func (wh *PodWatchHelper) NewWatcher(ctx context.Context, namespace string) (watch.Interface, error) {
	watcher, err := wh.kubernetesProvider.clientSet.CoreV1().Pods(namespace).Watch(ctx, metav1.ListOptions{Watch: true})
	if err != nil {
		return nil, err
	}

	return watcher, nil
}
kubernetes/provider.go (new file, 1233 lines)
File diff suppressed because it is too large.
kubernetes/proxy.go (new file, 150 lines)
@@ -0,0 +1,150 @@
package kubernetes

import (
	"bytes"
	"context"
	"fmt"
	"log"
	"net"
	"net/http"
	"net/url"
	"regexp"
	"strings"
	"time"

	"k8s.io/apimachinery/pkg/util/httpstream"
	"k8s.io/client-go/tools/portforward"
	"k8s.io/client-go/transport/spdy"

	"k8s.io/kubectl/pkg/proxy"
)

const k8sProxyApiPrefix = "/"
const kubesharkServicePort = 80

func StartProxy(kubernetesProvider *Provider, proxyHost string, srcPort uint16, dstPort uint16, kubesharkNamespace string, kubesharkServiceName string, cancel context.CancelFunc) (*http.Server, error) {
	log.Printf("Starting proxy - namespace: [%v], service name: [%s], port: [%d:%d]\n", kubesharkNamespace, kubesharkServiceName, srcPort, dstPort)
	filter := &proxy.FilterServer{
		AcceptPaths:   proxy.MakeRegexpArrayOrDie(proxy.DefaultPathAcceptRE),
		RejectPaths:   proxy.MakeRegexpArrayOrDie(proxy.DefaultPathRejectRE),
		AcceptHosts:   proxy.MakeRegexpArrayOrDie("^.*"),
		RejectMethods: proxy.MakeRegexpArrayOrDie(proxy.DefaultMethodRejectRE),
	}

	proxyHandler, err := proxy.NewProxyHandler(k8sProxyApiPrefix, filter, &kubernetesProvider.clientConfig, time.Second*2, false)
	if err != nil {
		return nil, err
	}
	mux := http.NewServeMux()
	mux.Handle(k8sProxyApiPrefix, getRerouteHttpHandlerKubesharkAPI(proxyHandler, kubesharkNamespace, kubesharkServiceName))
	mux.Handle("/static/", getRerouteHttpHandlerKubesharkStatic(proxyHandler, kubesharkNamespace, kubesharkServiceName))

	l, err := net.Listen("tcp", fmt.Sprintf("%s:%d", proxyHost, int(srcPort)))
	if err != nil {
		return nil, err
	}

	server := &http.Server{
		Handler: mux,
	}

	go func() {
		if err := server.Serve(l); err != nil && err != http.ErrServerClosed {
			log.Printf("Proxy server error: %v", err)
			cancel()
		}
	}()

	return server, nil
}

func getKubesharkApiServerProxiedHostAndPath(kubesharkNamespace string, kubesharkServiceName string) string {
	return fmt.Sprintf("/api/v1/namespaces/%s/services/%s:%d/proxy", kubesharkNamespace, kubesharkServiceName, kubesharkServicePort)
}

func GetLocalhostOnPort(port uint16) string {
	return fmt.Sprintf("http://localhost:%d", port)
}

func getRerouteHttpHandlerKubesharkAPI(proxyHandler http.Handler, kubesharkNamespace string, kubesharkServiceName string) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Access-Control-Allow-Origin", "*")
		w.Header().Set("Access-Control-Allow-Credentials", "true")
		w.Header().Set("Access-Control-Allow-Headers", "Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization, accept, origin, Cache-Control, X-Requested-With, x-session-token")
		w.Header().Set("Access-Control-Allow-Methods", "POST, OPTIONS, GET, PUT, DELETE")

		if r.Method == "OPTIONS" {
			w.WriteHeader(http.StatusNoContent)
			return
		}

		proxiedPath := getKubesharkApiServerProxiedHostAndPath(kubesharkNamespace, kubesharkServiceName)

		// Avoid rewriting the path more than once.
		if !strings.Contains(r.URL.Path, proxiedPath) {
			r.URL.Path = fmt.Sprintf("%s%s", proxiedPath, r.URL.Path)
		}
		proxyHandler.ServeHTTP(w, r)
	})
}

func getRerouteHttpHandlerKubesharkStatic(proxyHandler http.Handler, kubesharkNamespace string, kubesharkServiceName string) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		r.URL.Path = strings.Replace(r.URL.Path, "/static/", fmt.Sprintf("%s/static/", getKubesharkApiServerProxiedHostAndPath(kubesharkNamespace, kubesharkServiceName)), 1)
		proxyHandler.ServeHTTP(w, r)
	})
}

func NewPortForward(kubernetesProvider *Provider, namespace string, podRegex *regexp.Regexp, srcPort uint16, dstPort uint16, ctx context.Context, cancel context.CancelFunc) (*portforward.PortForwarder, error) {
	pods, err := kubernetesProvider.ListAllRunningPodsMatchingRegex(ctx, podRegex, []string{namespace})
	if err != nil {
		return nil, err
	} else if len(pods) == 0 {
		return nil, fmt.Errorf("didn't find pod to port-forward")
	}

	podName := pods[0].Name

	log.Printf("Starting proxy using port-forward method. namespace: [%v], pod name: [%s], %d:%d", namespace, podName, srcPort, dstPort)

	dialer, err := getHttpDialer(kubernetesProvider, namespace, podName)
	if err != nil {
		return nil, err
	}

	stopChan, readyChan := make(chan struct{}, 1), make(chan struct{}, 1)
	out, errOut := new(bytes.Buffer), new(bytes.Buffer)

	forwarder, err := portforward.New(dialer, []string{fmt.Sprintf("%d:%d", srcPort, dstPort)}, stopChan, readyChan, out, errOut)
	if err != nil {
		return nil, err
	}

	go func() {
		if err = forwarder.ForwardPorts(); err != nil {
			log.Printf("kubernetes port-forwarding error: %v", err)
			cancel()
		}
	}()

	return forwarder, nil
}

func getHttpDialer(kubernetesProvider *Provider, namespace string, podName string) (httpstream.Dialer, error) {
	roundTripper, upgrader, err := spdy.RoundTripperFor(&kubernetesProvider.clientConfig)
	if err != nil {
		log.Printf("Error creating http dialer")
		return nil, err
	}

	clientConfigHostUrl, err := url.Parse(kubernetesProvider.clientConfig.Host)
	if err != nil {
		return nil, fmt.Errorf("failed parsing client config host URL %s, error %w", kubernetesProvider.clientConfig.Host, err)
	}
	path := fmt.Sprintf("%s/api/v1/namespaces/%s/pods/%s/portforward", clientConfigHostUrl.Path, namespace, podName)

	serverURL := url.URL{Scheme: "https", Path: path, Host: clientConfigHostUrl.Host}
	log.Printf("Http dialer url %v", serverURL)

	return spdy.NewDialer(upgrader, &http.Client{Transport: roundTripper}, http.MethodPost, &serverURL), nil
}
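A caller-side sketch (assumed code; Provider construction elided, and the "ks-namespace"/"ks-hub" names are illustrative) showing how StartProxy exposes the hub service locally:

package main

import (
	"context"
	"log"

	"github.com/kubeshark/kubeshark/kubernetes"
)

func startHubProxy(provider *kubernetes.Provider) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Proxy localhost:8899 to port 80 of the ks-hub service.
	server, err := kubernetes.StartProxy(provider, "127.0.0.1", 8899, 80, "ks-namespace", "ks-hub", cancel)
	if err != nil {
		log.Fatalf("proxy failed to start: %v", err)
	}
	defer server.Close()

	log.Printf("hub available at %s", kubernetes.GetLocalhostOnPort(8899))
	<-ctx.Done()
}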
kubernetes/utils.go (new file, 95 lines)
@@ -0,0 +1,95 @@
package kubernetes

import (
	"regexp"

	"github.com/kubeshark/worker/models"
	core "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func GetNodeHostToTappedPodsMap(tappedPods []core.Pod) models.NodeToPodsMap {
	nodeToTappedPodMap := make(models.NodeToPodsMap)
	for _, pod := range tappedPods {
		minimizedPod := getMinimizedPod(pod)

		existingList := nodeToTappedPodMap[pod.Spec.NodeName]
		if existingList == nil {
			nodeToTappedPodMap[pod.Spec.NodeName] = []core.Pod{minimizedPod}
		} else {
			nodeToTappedPodMap[pod.Spec.NodeName] = append(nodeToTappedPodMap[pod.Spec.NodeName], minimizedPod)
		}
	}
	return nodeToTappedPodMap
}

func getMinimizedPod(fullPod core.Pod) core.Pod {
	return core.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      fullPod.Name,
			Namespace: fullPod.Namespace,
		},
		Status: core.PodStatus{
			PodIP:             fullPod.Status.PodIP,
			ContainerStatuses: getMinimizedContainerStatuses(fullPod),
		},
	}
}

func getMinimizedContainerStatuses(fullPod core.Pod) []core.ContainerStatus {
	result := make([]core.ContainerStatus, len(fullPod.Status.ContainerStatuses))

	for i, container := range fullPod.Status.ContainerStatuses {
		result[i] = core.ContainerStatus{
			ContainerID: container.ContainerID,
		}
	}

	return result
}

func excludeKubesharkPods(pods []core.Pod) []core.Pod {
	kubesharkPrefixRegex := regexp.MustCompile("^" + KubesharkResourcesPrefix)

	nonKubesharkPods := make([]core.Pod, 0)
	for _, pod := range pods {
		if !kubesharkPrefixRegex.MatchString(pod.Name) {
			nonKubesharkPods = append(nonKubesharkPods, pod)
		}
	}

	return nonKubesharkPods
}

func getPodArrayDiff(oldPods []core.Pod, newPods []core.Pod) (added []core.Pod, removed []core.Pod) {
	added = getMissingPods(newPods, oldPods)
	removed = getMissingPods(oldPods, newPods)

	return added, removed
}

// getMissingPods returns the pods that are present in pods1 and missing in pods2.
func getMissingPods(pods1 []core.Pod, pods2 []core.Pod) []core.Pod {
	missingPods := make([]core.Pod, 0)
	for _, pod1 := range pods1 {
		var found = false
		for _, pod2 := range pods2 {
			if pod1.UID == pod2.UID {
				found = true
				break
			}
		}
		if !found {
			missingPods = append(missingPods, pod1)
		}
	}
	return missingPods
}

func GetPodInfosForPods(pods []core.Pod) []*models.PodInfo {
	podInfos := make([]*models.PodInfo, 0)
	for _, pod := range pods {
		podInfos = append(podInfos, &models.PodInfo{Name: pod.Name, Namespace: pod.Namespace, NodeName: pod.Spec.NodeName})
	}
	return podInfos
}
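A small sketch of GetNodeHostToTappedPodsMap's grouping behavior (assumed illustration; it relies on models.NodeToPodsMap being a map keyed by node name, which matches how the function above indexes it):

package main

import (
	"fmt"

	"github.com/kubeshark/kubeshark/kubernetes"
	core "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Two pods on node-a, one on node-b.
	pods := []core.Pod{
		{ObjectMeta: metav1.ObjectMeta{Name: "p1"}, Spec: core.PodSpec{NodeName: "node-a"}},
		{ObjectMeta: metav1.ObjectMeta{Name: "p2"}, Spec: core.PodSpec{NodeName: "node-a"}},
		{ObjectMeta: metav1.ObjectMeta{Name: "p3"}, Spec: core.PodSpec{NodeName: "node-b"}},
	}

	nodeMap := kubernetes.GetNodeHostToTappedPodsMap(pods)
	for node, nodePods := range nodeMap {
		fmt.Printf("%s: %d pod(s)\n", node, len(nodePods))
	}
	// Output (map order varies): node-a: 2 pod(s), node-b: 1 pod(s)
}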
kubernetes/watch.go (new file, 110 lines)
@@ -0,0 +1,110 @@
package kubernetes

import (
	"context"
	"errors"
	"fmt"
	"log"
	"sync"
	"time"

	"github.com/kubeshark/kubeshark/debounce"

	"k8s.io/apimachinery/pkg/watch"
)

type EventFilterer interface {
	Filter(*WatchEvent) (bool, error)
}

type WatchCreator interface {
	NewWatcher(ctx context.Context, namespace string) (watch.Interface, error)
}

func FilteredWatch(ctx context.Context, watcherCreator WatchCreator, targetNamespaces []string, filterer EventFilterer) (<-chan *WatchEvent, <-chan error) {
	eventChan := make(chan *WatchEvent)
	errorChan := make(chan error)

	var wg sync.WaitGroup

	for _, targetNamespace := range targetNamespaces {
		wg.Add(1)

		go func(targetNamespace string) {
			defer wg.Done()
			watchRestartDebouncer := debounce.NewDebouncer(1*time.Minute, func() {})

			for {
				watcher, err := watcherCreator.NewWatcher(ctx, targetNamespace)
				if err != nil {
					errorChan <- fmt.Errorf("error in k8s watch: %v", err)
					break
				}

				err = startWatchLoop(ctx, watcher, filterer, eventChan) // blocking
				watcher.Stop()

				select {
				case <-ctx.Done():
					return
				default:
					break
				}

				if err != nil {
					errorChan <- fmt.Errorf("error in k8s watch: %v", err)
					break
				} else {
					if !watchRestartDebouncer.IsOn() {
						if err := watchRestartDebouncer.SetOn(); err != nil {
							log.Print(err)
						}
						log.Print("k8s watch channel closed, restarting watcher")
						time.Sleep(time.Second * 5)
						continue
					} else {
						errorChan <- errors.New("k8s watch unstable, closes frequently")
						break
					}
				}
			}
		}(targetNamespace)
	}

	go func() {
		<-ctx.Done()
		wg.Wait()
		close(eventChan)
		close(errorChan)
	}()

	return eventChan, errorChan
}

func startWatchLoop(ctx context.Context, watcher watch.Interface, filterer EventFilterer, eventChan chan<- *WatchEvent) error {
	resultChan := watcher.ResultChan()
	for {
		select {
		case e, isChannelOpen := <-resultChan:
			if !isChannelOpen {
				return nil
			}

			wEvent := WatchEvent(e)

			if wEvent.Type == watch.Error {
				return wEvent.ToError()
			}

			if pass, err := filterer.Filter(&wEvent); err != nil {
				return err
			} else if !pass {
				continue
			}

			eventChan <- &wEvent
		case <-ctx.Done():
			return nil
		}
	}
}
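The two interfaces make FilteredWatch reusable beyond the pod and event helpers above. A minimal hypothetical implementation (not part of this commit) that watches ConfigMaps and filters nothing out:

package main

import (
	"context"

	"github.com/kubeshark/kubeshark/kubernetes"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"
	k8s "k8s.io/client-go/kubernetes"
)

// configMapWatcher is a hypothetical type implementing both interfaces.
type configMapWatcher struct {
	clientSet *k8s.Clientset
}

// NewWatcher implements the WatchCreator interface.
func (c *configMapWatcher) NewWatcher(ctx context.Context, namespace string) (watch.Interface, error) {
	return c.clientSet.CoreV1().ConfigMaps(namespace).Watch(ctx, metav1.ListOptions{Watch: true})
}

// Filter implements the EventFilterer interface: let every event through.
func (c *configMapWatcher) Filter(_ *kubernetes.WatchEvent) (bool, error) {
	return true, nil
}

func watchConfigMaps(ctx context.Context, clientSet *k8s.Clientset) (<-chan *kubernetes.WatchEvent, <-chan error) {
	// As with the helpers above, the same value serves as creator and filterer.
	w := &configMapWatcher{clientSet: clientSet}
	return kubernetes.FilteredWatch(ctx, w, []string{"default"}, w)
}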
kubernetes/watchEvent.go (new file, 52 lines)
@@ -0,0 +1,52 @@
package kubernetes

import (
	"fmt"
	"reflect"

	corev1 "k8s.io/api/core/v1"
	eventsv1 "k8s.io/api/events/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/watch"
)

const (
	EventAdded    = watch.Added
	EventModified = watch.Modified
	EventDeleted  = watch.Deleted
	EventBookmark = watch.Bookmark
	EventError    = watch.Error
)

type InvalidObjectType struct {
	RequestedType reflect.Type
}

// Error implements the error interface.
func (iot *InvalidObjectType) Error() string {
	return fmt.Sprintf("Cannot convert event to type %s", iot.RequestedType)
}

type WatchEvent watch.Event

func (we *WatchEvent) ToPod() (*corev1.Pod, error) {
	pod, ok := we.Object.(*corev1.Pod)
	if !ok {
		return nil, &InvalidObjectType{RequestedType: reflect.TypeOf(pod)}
	}

	return pod, nil
}

func (we *WatchEvent) ToEvent() (*eventsv1.Event, error) {
	event, ok := we.Object.(*eventsv1.Event)
	if !ok {
		return nil, &InvalidObjectType{RequestedType: reflect.TypeOf(event)}
	}

	return event, nil
}

func (we *WatchEvent) ToError() error {
	return apierrors.FromObject(we.Object)
}
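A small sketch (illustrative only) of the conversion methods, including the *InvalidObjectType error returned when the wrapped object has a different type:

package main

import (
	"fmt"

	"github.com/kubeshark/kubeshark/kubernetes"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"
)

func main() {
	// Wrap a raw watch.Event carrying a Pod object.
	we := kubernetes.WatchEvent(watch.Event{
		Type:   watch.Added,
		Object: &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "demo"}},
	})

	if pod, err := we.ToPod(); err == nil {
		fmt.Println("converted to pod:", pod.Name)
	}

	// Converting to the wrong type fails with *InvalidObjectType.
	if _, err := we.ToEvent(); err != nil {
		fmt.Println("expected failure:", err)
	}
}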