🎨 Replace the tap/tapper terminology with deploy, worker and targetted

M. Mert Yildiran
2022-11-29 07:31:36 +03:00
parent ae278526ab
commit 6ca0fe137e
18 changed files with 250 additions and 250 deletions

View File

@@ -12,8 +12,8 @@ const (
RoleBindingName = KubesharkResourcesPrefix + "role-binding"
RoleName = KubesharkResourcesPrefix + "role"
ServiceAccountName = KubesharkResourcesPrefix + "service-account"
TapperDaemonSetName = KubesharkResourcesPrefix + "worker-daemon-set"
TapperPodName = KubesharkResourcesPrefix + "worker"
WorkerDaemonSetName = KubesharkResourcesPrefix + "worker-daemon-set"
WorkerPodName = KubesharkResourcesPrefix + "worker"
ConfigMapName = KubesharkResourcesPrefix + "config"
MinKubernetesServerVersion = "1.16.0"
)
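For orientation, a minimal sketch of how the renamed constants are consumed later in this commit: the worker pod watchers build their name filter from WorkerPodName. The KubesharkResourcesPrefix value is not part of this hunk, so the "kubeshark-" below is an assumption, and workerPodRegex is a hypothetical helper.

```go
package kubernetes

import (
	"fmt"
	"regexp"
)

const (
	KubesharkResourcesPrefix = "kubeshark-" // assumed value, not shown in this hunk
	WorkerDaemonSetName      = KubesharkResourcesPrefix + "worker-daemon-set"
	WorkerPodName            = KubesharkResourcesPrefix + "worker"
)

// workerPodRegex mirrors the pattern the watchers use below: match any pod
// whose name starts with the worker pod name, e.g. "kubeshark-worker-x7k2p".
func workerPodRegex() *regexp.Regexp {
	return regexp.MustCompile(fmt.Sprintf("^%s.*", WorkerPodName))
}
```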

View File

@@ -1,20 +1,20 @@
package kubernetes
type K8sTapManagerErrorReason string
type K8sDeployManagerErrorReason string
const (
TapManagerTapperUpdateError K8sTapManagerErrorReason = "TAPPER_UPDATE_ERROR"
TapManagerPodWatchError K8sTapManagerErrorReason = "POD_WATCH_ERROR"
TapManagerPodListError K8sTapManagerErrorReason = "POD_LIST_ERROR"
DeployManagerWorkerUpdateError K8sDeployManagerErrorReason = "TAPPER_UPDATE_ERROR"
DeployManagerPodWatchError K8sDeployManagerErrorReason = "POD_WATCH_ERROR"
DeployManagerPodListError K8sDeployManagerErrorReason = "POD_LIST_ERROR"
)
type K8sTapManagerError struct {
OriginalError error
TapManagerReason K8sTapManagerErrorReason
type K8sDeployManagerError struct {
OriginalError error
DeployManagerReason K8sDeployManagerErrorReason
}
// K8sTapManagerError implements the Error interface.
func (e *K8sTapManagerError) Error() string {
// K8sDeployManagerError implements the Error interface.
func (e *K8sDeployManagerError) Error() string {
return e.OriginalError.Error()
}
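As a hedged illustration of the renamed error type in use, a caller draining the syncer's ErrorOut channel might branch on DeployManagerReason like this. Only the type, field names, and reason constants come from the diff above; handleDeployManagerError itself is hypothetical and not part of the commit.

```go
package kubernetes

import (
	"github.com/rs/zerolog/log"
)

// handleDeployManagerError is an illustrative consumer of the renamed error
// type; the reason constants and field names come from this commit.
func handleDeployManagerError(e K8sDeployManagerError) {
	switch e.DeployManagerReason {
	case DeployManagerWorkerUpdateError:
		log.Error().Err(e.OriginalError).Msg("Updating the worker DaemonSet failed")
	case DeployManagerPodWatchError:
		log.Error().Err(e.OriginalError).Msg("Watching pods failed")
	case DeployManagerPodListError:
		log.Error().Err(e.OriginalError).Msg("Listing pods failed")
	}
}
```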

View File

@@ -15,32 +15,32 @@ import (
core "k8s.io/api/core/v1"
)
const updateTappersDelay = 5 * time.Second
const updateWorkersDelay = 5 * time.Second
type TappedPodChangeEvent struct {
type TargettedPodChangeEvent struct {
Added []core.Pod
Removed []core.Pod
}
// KubesharkTapperSyncer uses a k8s pod watch to update tapper daemonsets when targeted pods are removed or created
type KubesharkTapperSyncer struct {
// WorkerSyncer uses a k8s pod watch to update Worker daemonsets when targeted pods are removed or created
type WorkerSyncer struct {
startTime time.Time
context context.Context
CurrentlyTappedPods []core.Pod
config TapperSyncerConfig
CurrentlyTargettedPods []core.Pod
config WorkerSyncerConfig
kubernetesProvider *Provider
TapPodChangesOut chan TappedPodChangeEvent
TapperStatusChangedOut chan models.TapperStatus
ErrorOut chan K8sTapManagerError
nodeToTappedPodMap models.NodeToPodsMap
tappedNodes []string
DeployPodChangesOut chan TargettedPodChangeEvent
WorkerStatusChangedOut chan models.TapperStatus
ErrorOut chan K8sDeployManagerError
nodeToTargettedPodMap models.NodeToPodsMap
targettedNodes []string
}
type TapperSyncerConfig struct {
type WorkerSyncerConfig struct {
TargetNamespaces []string
PodFilterRegex regexp.Regexp
KubesharkResourcesNamespace string
TapperResources models.Resources
WorkerResources models.Resources
ImagePullPolicy core.PullPolicy
LogLevel zerolog.Level
KubesharkApiFilteringOptions api.TrafficFilteringOptions
@@ -50,36 +50,36 @@ type TapperSyncerConfig struct {
MaxLiveStreams int
}
func CreateAndStartKubesharkTapperSyncer(ctx context.Context, kubernetesProvider *Provider, config TapperSyncerConfig, startTime time.Time) (*KubesharkTapperSyncer, error) {
syncer := &KubesharkTapperSyncer{
func CreateAndStartWorkerSyncer(ctx context.Context, kubernetesProvider *Provider, config WorkerSyncerConfig, startTime time.Time) (*WorkerSyncer, error) {
syncer := &WorkerSyncer{
startTime: startTime.Truncate(time.Second), // Round down because k8s CreationTimestamp is given in 1 sec resolution.
context: ctx,
CurrentlyTappedPods: make([]core.Pod, 0),
CurrentlyTargettedPods: make([]core.Pod, 0),
config: config,
kubernetesProvider: kubernetesProvider,
TapPodChangesOut: make(chan TappedPodChangeEvent, 100),
TapperStatusChangedOut: make(chan models.TapperStatus, 100),
ErrorOut: make(chan K8sTapManagerError, 100),
DeployPodChangesOut: make(chan TargettedPodChangeEvent, 100),
WorkerStatusChangedOut: make(chan models.TapperStatus, 100),
ErrorOut: make(chan K8sDeployManagerError, 100),
}
if err, _ := syncer.updateCurrentlyTappedPods(); err != nil {
if err, _ := syncer.updateCurrentlyTargettedPods(); err != nil {
return nil, err
}
if err := syncer.updateKubesharkTappers(); err != nil {
if err := syncer.updateWorkers(); err != nil {
return nil, err
}
go syncer.watchPodsForTapping()
go syncer.watchTapperEvents()
go syncer.watchTapperPods()
go syncer.watchPodsForTargetting()
go syncer.watchWorkerEvents()
go syncer.watchWorkerPods()
return syncer, nil
}
func (tapperSyncer *KubesharkTapperSyncer) watchTapperPods() {
kubesharkResourceRegex := regexp.MustCompile(fmt.Sprintf("^%s.*", TapperPodName))
podWatchHelper := NewPodWatchHelper(tapperSyncer.kubernetesProvider, kubesharkResourceRegex)
eventChan, errorChan := FilteredWatch(tapperSyncer.context, podWatchHelper, []string{tapperSyncer.config.KubesharkResourcesNamespace}, podWatchHelper)
func (workerSyncer *WorkerSyncer) watchWorkerPods() {
kubesharkResourceRegex := regexp.MustCompile(fmt.Sprintf("^%s.*", WorkerPodName))
podWatchHelper := NewPodWatchHelper(workerSyncer.kubernetesProvider, kubesharkResourceRegex)
eventChan, errorChan := FilteredWatch(workerSyncer.context, podWatchHelper, []string{workerSyncer.config.KubesharkResourcesNamespace}, podWatchHelper)
for {
select {
@@ -91,7 +91,7 @@ func (tapperSyncer *KubesharkTapperSyncer) watchTapperPods() {
pod, err := wEvent.ToPod()
if err != nil {
log.Error().Str("pod", TapperPodName).Err(err).Msg("While parsing Kubeshark resource!")
log.Error().Str("pod", WorkerPodName).Err(err).Msg("While parsing Kubeshark resource!")
continue
}
@@ -101,8 +101,8 @@ func (tapperSyncer *KubesharkTapperSyncer) watchTapperPods() {
Interface("phase", pod.Status.Phase).
Msg("Watching pod events...")
if pod.Spec.NodeName != "" {
tapperStatus := models.TapperStatus{TapperName: pod.Name, NodeName: pod.Spec.NodeName, Status: string(pod.Status.Phase)}
tapperSyncer.TapperStatusChangedOut <- tapperStatus
workerStatus := models.TapperStatus{TapperName: pod.Name, NodeName: pod.Spec.NodeName, Status: string(pod.Status.Phase)}
workerSyncer.WorkerStatusChangedOut <- workerStatus
}
case err, ok := <-errorChan:
@@ -110,21 +110,21 @@ func (tapperSyncer *KubesharkTapperSyncer) watchTapperPods() {
errorChan = nil
continue
}
log.Error().Str("pod", TapperPodName).Err(err).Msg("While watching pod!")
log.Error().Str("pod", WorkerPodName).Err(err).Msg("While watching pod!")
case <-tapperSyncer.context.Done():
case <-workerSyncer.context.Done():
log.Debug().
Str("pod", TapperPodName).
Str("pod", WorkerPodName).
Msg("Watching pod, context done.")
return
}
}
}
func (tapperSyncer *KubesharkTapperSyncer) watchTapperEvents() {
kubesharkResourceRegex := regexp.MustCompile(fmt.Sprintf("^%s.*", TapperPodName))
eventWatchHelper := NewEventWatchHelper(tapperSyncer.kubernetesProvider, kubesharkResourceRegex, "pod")
eventChan, errorChan := FilteredWatch(tapperSyncer.context, eventWatchHelper, []string{tapperSyncer.config.KubesharkResourcesNamespace}, eventWatchHelper)
func (workerSyncer *WorkerSyncer) watchWorkerEvents() {
kubesharkResourceRegex := regexp.MustCompile(fmt.Sprintf("^%s.*", WorkerPodName))
eventWatchHelper := NewEventWatchHelper(workerSyncer.kubernetesProvider, kubesharkResourceRegex, "pod")
eventChan, errorChan := FilteredWatch(workerSyncer.context, eventWatchHelper, []string{workerSyncer.config.KubesharkResourcesNamespace}, eventWatchHelper)
for {
select {
@@ -137,14 +137,14 @@ func (tapperSyncer *KubesharkTapperSyncer) watchTapperEvents() {
event, err := wEvent.ToEvent()
if err != nil {
log.Error().
Str("pod", TapperPodName).
Str("pod", WorkerPodName).
Err(err).
Msg("Parsing resource event.")
continue
}
log.Debug().
Str("pod", TapperPodName).
Str("pod", WorkerPodName).
Str("event", event.Name).
Time("time", event.CreationTimestamp.Time).
Str("name", event.Regarding.Name).
@@ -153,7 +153,7 @@ func (tapperSyncer *KubesharkTapperSyncer) watchTapperEvents() {
Str("note", event.Note).
Msg("Watching events.")
pod, err1 := tapperSyncer.kubernetesProvider.GetPod(tapperSyncer.context, tapperSyncer.config.KubesharkResourcesNamespace, event.Regarding.Name)
pod, err1 := workerSyncer.kubernetesProvider.GetPod(workerSyncer.context, workerSyncer.config.KubesharkResourcesNamespace, event.Regarding.Name)
if err1 != nil {
log.Error().Str("name", event.Regarding.Name).Msg("Couldn't get pod")
continue
@@ -166,8 +166,8 @@ func (tapperSyncer *KubesharkTapperSyncer) watchTapperEvents() {
nodeName = pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchFields[0].Values[0]
}
tapperStatus := models.TapperStatus{TapperName: pod.Name, NodeName: nodeName, Status: string(pod.Status.Phase)}
tapperSyncer.TapperStatusChangedOut <- tapperStatus
workerStatus := models.TapperStatus{TapperName: pod.Name, NodeName: nodeName, Status: string(pod.Status.Phase)}
workerSyncer.WorkerStatusChangedOut <- workerStatus
case err, ok := <-errorChan:
if !ok {
@@ -176,44 +176,44 @@ func (tapperSyncer *KubesharkTapperSyncer) watchTapperEvents() {
}
log.Error().
Str("pod", TapperPodName).
Str("pod", WorkerPodName).
Err(err).
Msg("While watching events.")
case <-tapperSyncer.context.Done():
case <-workerSyncer.context.Done():
log.Debug().
Str("pod", TapperPodName).
Str("pod", WorkerPodName).
Msg("Watching pod events, context done.")
return
}
}
}
func (tapperSyncer *KubesharkTapperSyncer) watchPodsForTapping() {
podWatchHelper := NewPodWatchHelper(tapperSyncer.kubernetesProvider, &tapperSyncer.config.PodFilterRegex)
eventChan, errorChan := FilteredWatch(tapperSyncer.context, podWatchHelper, tapperSyncer.config.TargetNamespaces, podWatchHelper)
func (workerSyncer *WorkerSyncer) watchPodsForTargetting() {
podWatchHelper := NewPodWatchHelper(workerSyncer.kubernetesProvider, &workerSyncer.config.PodFilterRegex)
eventChan, errorChan := FilteredWatch(workerSyncer.context, podWatchHelper, workerSyncer.config.TargetNamespaces, podWatchHelper)
handleChangeInPods := func() {
err, changeFound := tapperSyncer.updateCurrentlyTappedPods()
err, changeFound := workerSyncer.updateCurrentlyTargettedPods()
if err != nil {
tapperSyncer.ErrorOut <- K8sTapManagerError{
OriginalError: err,
TapManagerReason: TapManagerPodListError,
workerSyncer.ErrorOut <- K8sDeployManagerError{
OriginalError: err,
DeployManagerReason: DeployManagerPodListError,
}
}
if !changeFound {
log.Debug().Msg("Nothing changed. Updating tappers is not needed.")
log.Debug().Msg("Nothing changed. Updating workers is not needed.")
return
}
if err := tapperSyncer.updateKubesharkTappers(); err != nil {
tapperSyncer.ErrorOut <- K8sTapManagerError{
OriginalError: err,
TapManagerReason: TapManagerTapperUpdateError,
if err := workerSyncer.updateWorkers(); err != nil {
workerSyncer.ErrorOut <- K8sDeployManagerError{
OriginalError: err,
DeployManagerReason: DeployManagerWorkerUpdateError,
}
}
}
restartTappersDebouncer := debounce.NewDebouncer(updateTappersDelay, handleChangeInPods)
restartWorkersDebouncer := debounce.NewDebouncer(updateWorkersDelay, handleChangeInPods)
for {
select {
@@ -225,7 +225,7 @@ func (tapperSyncer *KubesharkTapperSyncer) watchPodsForTapping() {
pod, err := wEvent.ToPod()
if err != nil {
tapperSyncer.handleErrorInWatchLoop(err, restartTappersDebouncer)
workerSyncer.handleErrorInWatchLoop(err, restartWorkersDebouncer)
continue
}
@@ -235,24 +235,24 @@ func (tapperSyncer *KubesharkTapperSyncer) watchPodsForTapping() {
Str("pod", pod.Name).
Str("namespace", pod.Namespace).
Msg("Added matching pod.")
if err := restartTappersDebouncer.SetOn(); err != nil {
if err := restartWorkersDebouncer.SetOn(); err != nil {
log.Error().
Str("pod", pod.Name).
Str("namespace", pod.Namespace).
Err(err).
Msg("While restarting tappers!")
Msg("While restarting workers!")
}
case EventDeleted:
log.Debug().
Str("pod", pod.Name).
Str("namespace", pod.Namespace).
Msg("Removed matching pod.")
if err := restartTappersDebouncer.SetOn(); err != nil {
if err := restartWorkersDebouncer.SetOn(); err != nil {
log.Error().
Str("pod", pod.Name).
Str("namespace", pod.Namespace).
Err(err).
Msg("While restarting tappers!")
Msg("While restarting workers!")
}
case EventModified:
log.Debug().
@@ -269,12 +269,12 @@ func (tapperSyncer *KubesharkTapperSyncer) watchPodsForTapping() {
// - Pod reaches ready state
// Ready/unready transitions might also trigger this event.
if pod.Status.PodIP != "" {
if err := restartTappersDebouncer.SetOn(); err != nil {
if err := restartWorkersDebouncer.SetOn(); err != nil {
log.Error().
Str("pod", pod.Name).
Str("namespace", pod.Namespace).
Err(err).
Msg("While restarting tappers!")
Msg("While restarting workers!")
}
}
case EventBookmark:
@@ -288,33 +288,33 @@ func (tapperSyncer *KubesharkTapperSyncer) watchPodsForTapping() {
continue
}
tapperSyncer.handleErrorInWatchLoop(err, restartTappersDebouncer)
workerSyncer.handleErrorInWatchLoop(err, restartWorkersDebouncer)
continue
case <-tapperSyncer.context.Done():
log.Debug().Msg("Watching pods, context done. Stopping \"restart tappers debouncer\"")
restartTappersDebouncer.Cancel()
case <-workerSyncer.context.Done():
log.Debug().Msg("Watching pods, context done. Stopping \"restart workers debouncer\"")
restartWorkersDebouncer.Cancel()
// TODO: Does this also perform cleanup?
return
}
}
}
func (tapperSyncer *KubesharkTapperSyncer) handleErrorInWatchLoop(err error, restartTappersDebouncer *debounce.Debouncer) {
log.Error().Err(err).Msg("While watching pods, got an error! Stopping \"restart tappers debouncer\"")
restartTappersDebouncer.Cancel()
tapperSyncer.ErrorOut <- K8sTapManagerError{
OriginalError: err,
TapManagerReason: TapManagerPodWatchError,
func (workerSyncer *WorkerSyncer) handleErrorInWatchLoop(err error, restartWorkersDebouncer *debounce.Debouncer) {
log.Error().Err(err).Msg("While watching pods, got an error! Stopping \"restart workers debouncer\"")
restartWorkersDebouncer.Cancel()
workerSyncer.ErrorOut <- K8sDeployManagerError{
OriginalError: err,
DeployManagerReason: DeployManagerPodWatchError,
}
}
func (tapperSyncer *KubesharkTapperSyncer) updateCurrentlyTappedPods() (err error, changesFound bool) {
if matchingPods, err := tapperSyncer.kubernetesProvider.ListAllRunningPodsMatchingRegex(tapperSyncer.context, &tapperSyncer.config.PodFilterRegex, tapperSyncer.config.TargetNamespaces); err != nil {
func (workerSyncer *WorkerSyncer) updateCurrentlyTargettedPods() (err error, changesFound bool) {
if matchingPods, err := workerSyncer.kubernetesProvider.ListAllRunningPodsMatchingRegex(workerSyncer.context, &workerSyncer.config.PodFilterRegex, workerSyncer.config.TargetNamespaces); err != nil {
return err, false
} else {
podsToTap := excludeKubesharkPods(matchingPods)
addedPods, removedPods := getPodArrayDiff(tapperSyncer.CurrentlyTappedPods, podsToTap)
podsToTarget := excludeSelfPods(matchingPods)
addedPods, removedPods := getPodArrayDiff(workerSyncer.CurrentlyTargettedPods, podsToTarget)
for _, addedPod := range addedPods {
log.Info().Str("pod", addedPod.Name).Msg("Currently targetting:")
}
@@ -322,9 +322,9 @@ func (tapperSyncer *KubesharkTapperSyncer) updateCurrentlyTappedPods() (err erro
log.Info().Str("pod", removedPod.Name).Msg("Pod is no longer running. Targetting is stopped.")
}
if len(addedPods) > 0 || len(removedPods) > 0 {
tapperSyncer.CurrentlyTappedPods = podsToTap
tapperSyncer.nodeToTappedPodMap = GetNodeHostToTappedPodsMap(tapperSyncer.CurrentlyTappedPods)
tapperSyncer.TapPodChangesOut <- TappedPodChangeEvent{
workerSyncer.CurrentlyTargettedPods = podsToTarget
workerSyncer.nodeToTargettedPodMap = GetNodeHostToTargettedPodsMap(workerSyncer.CurrentlyTargettedPods)
workerSyncer.DeployPodChangesOut <- TargettedPodChangeEvent{
Added: addedPods,
Removed: removedPods,
}
@@ -334,70 +334,70 @@ func (tapperSyncer *KubesharkTapperSyncer) updateCurrentlyTappedPods() (err erro
}
}
func (tapperSyncer *KubesharkTapperSyncer) updateKubesharkTappers() error {
nodesToTap := make([]string, len(tapperSyncer.nodeToTappedPodMap))
func (workerSyncer *WorkerSyncer) updateWorkers() error {
nodesToTarget := make([]string, len(workerSyncer.nodeToTargettedPodMap))
i := 0
for node := range tapperSyncer.nodeToTappedPodMap {
nodesToTap[i] = node
for node := range workerSyncer.nodeToTargettedPodMap {
nodesToTarget[i] = node
i++
}
if utils.EqualStringSlices(nodesToTap, tapperSyncer.tappedNodes) {
if utils.EqualStringSlices(nodesToTarget, workerSyncer.targettedNodes) {
log.Debug().Msg("Skipping apply, DaemonSet is up to date")
return nil
}
log.Debug().Strs("nodes", nodesToTap).Msg("Updating DaemonSet to run on nodes.")
log.Debug().Strs("nodes", nodesToTarget).Msg("Updating DaemonSet to run on nodes.")
image := "kubeshark/worker:latest"
if len(tapperSyncer.nodeToTappedPodMap) > 0 {
if len(workerSyncer.nodeToTargettedPodMap) > 0 {
var serviceAccountName string
if tapperSyncer.config.KubesharkServiceAccountExists {
if workerSyncer.config.KubesharkServiceAccountExists {
serviceAccountName = ServiceAccountName
} else {
serviceAccountName = ""
}
nodeNames := make([]string, 0, len(tapperSyncer.nodeToTappedPodMap))
for nodeName := range tapperSyncer.nodeToTappedPodMap {
nodeNames := make([]string, 0, len(workerSyncer.nodeToTargettedPodMap))
for nodeName := range workerSyncer.nodeToTargettedPodMap {
nodeNames = append(nodeNames, nodeName)
}
if err := tapperSyncer.kubernetesProvider.ApplyKubesharkTapperDaemonSet(
tapperSyncer.context,
tapperSyncer.config.KubesharkResourcesNamespace,
TapperDaemonSetName,
if err := workerSyncer.kubernetesProvider.ApplyWorkerDaemonSet(
workerSyncer.context,
workerSyncer.config.KubesharkResourcesNamespace,
WorkerDaemonSetName,
image,
TapperPodName,
fmt.Sprintf("%s.%s.svc", HubPodName, tapperSyncer.config.KubesharkResourcesNamespace),
WorkerPodName,
fmt.Sprintf("%s.%s.svc", HubPodName, workerSyncer.config.KubesharkResourcesNamespace),
nodeNames,
serviceAccountName,
tapperSyncer.config.TapperResources,
tapperSyncer.config.ImagePullPolicy,
tapperSyncer.config.KubesharkApiFilteringOptions,
tapperSyncer.config.LogLevel,
tapperSyncer.config.ServiceMesh,
tapperSyncer.config.Tls,
tapperSyncer.config.MaxLiveStreams); err != nil {
workerSyncer.config.WorkerResources,
workerSyncer.config.ImagePullPolicy,
workerSyncer.config.KubesharkApiFilteringOptions,
workerSyncer.config.LogLevel,
workerSyncer.config.ServiceMesh,
workerSyncer.config.Tls,
workerSyncer.config.MaxLiveStreams); err != nil {
return err
}
log.Debug().Int("tapper-count", len(tapperSyncer.nodeToTappedPodMap)).Msg("Successfully created tappers.")
log.Debug().Int("worker-count", len(workerSyncer.nodeToTargettedPodMap)).Msg("Successfully created workers.")
} else {
if err := tapperSyncer.kubernetesProvider.ResetKubesharkTapperDaemonSet(
tapperSyncer.context,
tapperSyncer.config.KubesharkResourcesNamespace,
TapperDaemonSetName,
if err := workerSyncer.kubernetesProvider.ResetWorkerDaemonSet(
workerSyncer.context,
workerSyncer.config.KubesharkResourcesNamespace,
WorkerDaemonSetName,
image,
TapperPodName); err != nil {
WorkerPodName); err != nil {
return err
}
log.Debug().Msg("Successfully reset tapper daemon set")
log.Debug().Msg("Successfully resetted Worker DaemonSet")
}
tapperSyncer.tappedNodes = nodesToTap
workerSyncer.targettedNodes = nodesToTarget
return nil
}
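To make the renamed surface easier to follow, here is a sketch, under assumptions, of how a caller could start the syncer and drain its three output channels. CreateAndStartWorkerSyncer, WorkerSyncerConfig, and the channel and field names come from this diff; ctx, kubernetesProvider, and workerSyncerConfig are assumed to be in scope, and the wiring itself is hypothetical rather than the CLI's actual call site.

```go
syncer, err := CreateAndStartWorkerSyncer(ctx, kubernetesProvider, workerSyncerConfig, time.Now())
if err != nil {
	log.Fatal().Err(err).Msg("Failed to start the worker syncer")
}

go func() {
	for {
		select {
		case change := <-syncer.DeployPodChangesOut:
			// Added/Removed come from TargettedPodChangeEvent above.
			log.Info().
				Int("added", len(change.Added)).
				Int("removed", len(change.Removed)).
				Msg("Targetted pod set changed")
		case status := <-syncer.WorkerStatusChangedOut:
			// models.TapperStatus still carries the old TapperName field name.
			log.Info().
				Str("worker", status.TapperName).
				Str("node", status.NodeName).
				Str("status", status.Status).
				Msg("Worker status changed")
		case deployErr := <-syncer.ErrorOut:
			log.Error().Err(deployErr.OriginalError).Msg("Worker syncer reported an error")
		case <-ctx.Done():
			return
		}
	}
}()
```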

View File

@@ -810,17 +810,17 @@ func (provider *Provider) CreateConfigMap(ctx context.Context, namespace string,
return nil
}
func (provider *Provider) ApplyKubesharkTapperDaemonSet(ctx context.Context, namespace string, daemonSetName string, podImage string, tapperPodName string, hubPodIp string, nodeNames []string, serviceAccountName string, resources models.Resources, imagePullPolicy core.PullPolicy, kubesharkApiFilteringOptions api.TrafficFilteringOptions, logLevel zerolog.Level, serviceMesh bool, tls bool, maxLiveStreams int) error {
func (provider *Provider) ApplyWorkerDaemonSet(ctx context.Context, namespace string, daemonSetName string, podImage string, workerPodName string, hubPodIp string, nodeNames []string, serviceAccountName string, resources models.Resources, imagePullPolicy core.PullPolicy, kubesharkApiFilteringOptions api.TrafficFilteringOptions, logLevel zerolog.Level, serviceMesh bool, tls bool, maxLiveStreams int) error {
log.Debug().
Int("node-count", len(nodeNames)).
Str("namespace", namespace).
Str("daemonset-name", daemonSetName).
Str("image", podImage).
Str("pod", tapperPodName).
Msg("Applying tapper DaemonSets.")
Str("pod", workerPodName).
Msg("Applying worker DaemonSets.")
if len(nodeNames) == 0 {
return fmt.Errorf("daemon set %s must tap at least 1 pod", daemonSetName)
return fmt.Errorf("DaemonSet %s must target at least 1 pod", daemonSetName)
}
kubesharkApiFilteringOptionsJsonStr, err := json.Marshal(kubesharkApiFilteringOptions)
@@ -849,7 +849,7 @@ func (provider *Provider) ApplyKubesharkTapperDaemonSet(ctx context.Context, nam
}
workerContainer := applyconfcore.Container()
workerContainer.WithName(tapperPodName)
workerContainer.WithName(workerPodName)
workerContainer.WithImage(podImage)
workerContainer.WithImagePullPolicy(imagePullPolicy)
@@ -887,19 +887,19 @@ func (provider *Provider) ApplyKubesharkTapperDaemonSet(ctx context.Context, nam
)
cpuLimit, err := resource.ParseQuantity(resources.CpuLimit)
if err != nil {
return fmt.Errorf("invalid cpu limit for %s container", tapperPodName)
return fmt.Errorf("invalid cpu limit for %s container", workerPodName)
}
memLimit, err := resource.ParseQuantity(resources.MemoryLimit)
if err != nil {
return fmt.Errorf("invalid memory limit for %s container", tapperPodName)
return fmt.Errorf("invalid memory limit for %s container", workerPodName)
}
cpuRequests, err := resource.ParseQuantity(resources.CpuRequests)
if err != nil {
return fmt.Errorf("invalid cpu request for %s container", tapperPodName)
return fmt.Errorf("invalid cpu request for %s container", workerPodName)
}
memRequests, err := resource.ParseQuantity(resources.MemoryRequests)
if err != nil {
return fmt.Errorf("invalid memory request for %s container", tapperPodName)
return fmt.Errorf("invalid memory request for %s container", workerPodName)
}
workerResourceLimits := core.ResourceList{
"cpu": cpuLimit,
@@ -967,14 +967,14 @@ func (provider *Provider) ApplyKubesharkTapperDaemonSet(ctx context.Context, nam
podTemplate := applyconfcore.PodTemplateSpec()
podTemplate.WithLabels(map[string]string{
"app": tapperPodName,
"app": workerPodName,
LabelManagedBy: provider.managedBy,
LabelCreatedBy: provider.createdBy,
})
podTemplate.WithSpec(podSpec)
labelSelector := applyconfmeta.LabelSelector()
labelSelector.WithMatchLabels(map[string]string{"app": tapperPodName})
labelSelector.WithMatchLabels(map[string]string{"app": workerPodName})
applyOptions := metav1.ApplyOptions{
Force: true,
@@ -993,9 +993,9 @@ func (provider *Provider) ApplyKubesharkTapperDaemonSet(ctx context.Context, nam
return err
}
func (provider *Provider) ResetKubesharkTapperDaemonSet(ctx context.Context, namespace string, daemonSetName string, podImage string, tapperPodName string) error {
func (provider *Provider) ResetWorkerDaemonSet(ctx context.Context, namespace string, daemonSetName string, podImage string, workerPodName string) error {
workerContainer := applyconfcore.Container()
workerContainer.WithName(tapperPodName)
workerContainer.WithName(workerPodName)
workerContainer.WithImage(podImage)
nodeSelectorRequirement := applyconfcore.NodeSelectorRequirement()
@@ -1016,14 +1016,14 @@ func (provider *Provider) ResetKubesharkTapperDaemonSet(ctx context.Context, nam
podTemplate := applyconfcore.PodTemplateSpec()
podTemplate.WithLabels(map[string]string{
"app": tapperPodName,
"app": workerPodName,
LabelManagedBy: provider.managedBy,
LabelCreatedBy: provider.createdBy,
})
podTemplate.WithSpec(podSpec)
labelSelector := applyconfmeta.LabelSelector()
labelSelector.WithMatchLabels(map[string]string{"app": tapperPodName})
labelSelector.WithMatchLabels(map[string]string{"app": workerPodName})
applyOptions := metav1.ApplyOptions{
Force: true,

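The quantity-parsing step in ApplyWorkerDaemonSet repeats four times; a minimal sketch of that pattern in isolation, using the same k8s.io/apimachinery resource API, is shown below. buildWorkerResources is a hypothetical helper, not part of the commit; it takes the raw strings directly so no assumption about the models.Resources import path is needed.

```go
package kubernetes

import (
	"fmt"

	core "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// buildWorkerResources parses the configured CPU/memory strings and returns
// the limits and requests lists handed to the worker container.
func buildWorkerResources(workerPodName, cpuLimit, memoryLimit, cpuRequests, memoryRequests string) (core.ResourceList, core.ResourceList, error) {
	cpuLim, err := resource.ParseQuantity(cpuLimit)
	if err != nil {
		return nil, nil, fmt.Errorf("invalid cpu limit for %s container", workerPodName)
	}
	memLim, err := resource.ParseQuantity(memoryLimit)
	if err != nil {
		return nil, nil, fmt.Errorf("invalid memory limit for %s container", workerPodName)
	}
	cpuReq, err := resource.ParseQuantity(cpuRequests)
	if err != nil {
		return nil, nil, fmt.Errorf("invalid cpu request for %s container", workerPodName)
	}
	memReq, err := resource.ParseQuantity(memoryRequests)
	if err != nil {
		return nil, nil, fmt.Errorf("invalid memory request for %s container", workerPodName)
	}

	limits := core.ResourceList{"cpu": cpuLim, "memory": memLim}
	requests := core.ResourceList{"cpu": cpuReq, "memory": memReq}
	return limits, requests, nil
}
```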
View File

@@ -8,19 +8,19 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func GetNodeHostToTappedPodsMap(tappedPods []core.Pod) models.NodeToPodsMap {
nodeToTappedPodMap := make(models.NodeToPodsMap)
for _, pod := range tappedPods {
func GetNodeHostToTargettedPodsMap(targettedPods []core.Pod) models.NodeToPodsMap {
nodeToTargettedPodsMap := make(models.NodeToPodsMap)
for _, pod := range targettedPods {
minimizedPod := getMinimizedPod(pod)
existingList := nodeToTappedPodMap[pod.Spec.NodeName]
existingList := nodeToTargettedPodsMap[pod.Spec.NodeName]
if existingList == nil {
nodeToTappedPodMap[pod.Spec.NodeName] = []core.Pod{minimizedPod}
nodeToTargettedPodsMap[pod.Spec.NodeName] = []core.Pod{minimizedPod}
} else {
nodeToTappedPodMap[pod.Spec.NodeName] = append(nodeToTappedPodMap[pod.Spec.NodeName], minimizedPod)
nodeToTargettedPodsMap[pod.Spec.NodeName] = append(nodeToTargettedPodsMap[pod.Spec.NodeName], minimizedPod)
}
}
return nodeToTappedPodMap
return nodeToTargettedPodsMap
}
func getMinimizedPod(fullPod core.Pod) core.Pod {
@@ -48,7 +48,7 @@ func getMinimizedContainerStatuses(fullPod core.Pod) []core.ContainerStatus {
return result
}
func excludeKubesharkPods(pods []core.Pod) []core.Pod {
func excludeSelfPods(pods []core.Pod) []core.Pod {
kubesharkPrefixRegex := regexp.MustCompile("^" + KubesharkResourcesPrefix)
nonKubesharkPods := make([]core.Pod, 0)
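One small observation on GetNodeHostToTargettedPodsMap above: the nil check on the per-node slice is not strictly needed, since append allocates a new slice when the existing value is nil. An equivalent, slightly tighter sketch of the same mapping (behaviour unchanged; this is a possible simplification, not what the commit ships):

```go
func getNodeHostToTargettedPodsMap(targettedPods []core.Pod) models.NodeToPodsMap {
	nodeToTargettedPodsMap := make(models.NodeToPodsMap)
	for _, pod := range targettedPods {
		// append on a nil map value allocates the per-node slice on first use.
		nodeToTargettedPodsMap[pod.Spec.NodeName] = append(
			nodeToTargettedPodsMap[pod.Spec.NodeName],
			getMinimizedPod(pod),
		)
	}
	return nodeToTargettedPodsMap
}
```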