⬆️ github.com/kubeshark/base@v0.1.3
@@ -3,7 +3,7 @@ package kubernetes
 type K8sDeployManagerErrorReason string
 
 const (
-	DeployManagerWorkerUpdateError K8sDeployManagerErrorReason = "TAPPER_UPDATE_ERROR"
+	DeployManagerWorkerUpdateError K8sDeployManagerErrorReason = "WORKER_UPDATE_ERROR"
 	DeployManagerPodWatchError     K8sDeployManagerErrorReason = "POD_WATCH_ERROR"
 	DeployManagerPodListError      K8sDeployManagerErrorReason = "POD_LIST_ERROR"
 )
@@ -30,7 +30,7 @@ type WorkerSyncer struct {
 	config                 WorkerSyncerConfig
 	kubernetesProvider     *Provider
 	DeployPodChangesOut    chan TargettedPodChangeEvent
-	WorkerStatusChangedOut chan models.TapperStatus
+	WorkerStatusChangedOut chan models.WorkerStatus
 	ErrorOut               chan K8sDeployManagerError
 	nodeToTargettedPodMap  models.NodeToPodsMap
 	targettedNodes         []string
@@ -58,7 +58,7 @@ func CreateAndStartWorkerSyncer(ctx context.Context, kubernetesProvider *Provide
 		config:              config,
 		kubernetesProvider:  kubernetesProvider,
 		DeployPodChangesOut: make(chan TargettedPodChangeEvent, 100),
-		WorkerStatusChangedOut: make(chan models.TapperStatus, 100),
+		WorkerStatusChangedOut: make(chan models.WorkerStatus, 100),
 		ErrorOut:            make(chan K8sDeployManagerError, 100),
 	}
 
@@ -101,7 +101,7 @@ func (workerSyncer *WorkerSyncer) watchWorkerPods() {
 				Interface("phase", pod.Status.Phase).
 				Msg("Watching pod events...")
 			if pod.Spec.NodeName != "" {
-				workerStatus := models.TapperStatus{TapperName: pod.Name, NodeName: pod.Spec.NodeName, Status: string(pod.Status.Phase)}
+				workerStatus := models.WorkerStatus{Name: pod.Name, NodeName: pod.Spec.NodeName, Status: string(pod.Status.Phase)}
 				workerSyncer.WorkerStatusChangedOut <- workerStatus
 			}
 
@@ -166,7 +166,7 @@ func (workerSyncer *WorkerSyncer) watchWorkerEvents() {
 			nodeName = pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchFields[0].Values[0]
 		}
 
-		workerStatus := models.TapperStatus{TapperName: pod.Name, NodeName: nodeName, Status: string(pod.Status.Phase)}
+		workerStatus := models.WorkerStatus{Name: pod.Name, NodeName: nodeName, Status: string(pod.Status.Phase)}
 		workerSyncer.WorkerStatusChangedOut <- workerStatus
 
 	case err, ok := <-errorChan:
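
The hunks above only swap models.TapperStatus for models.WorkerStatus; the composite literals show the fields the new type must expose (Name, NodeName, Status) and that values still travel over the buffered WorkerStatusChangedOut channel. Below is a minimal sketch of the assumed type plus a hypothetical consumer, for orientation only — the JSON tags, package layout, and sample values are assumptions, not taken from kubeshark/base@v0.1.3:

package main

import "log"

// Assumed shape of models.WorkerStatus: the three fields are exactly the ones
// set in the hunks above; the JSON tags are guesses.
type WorkerStatus struct {
	Name     string `json:"name"`
	NodeName string `json:"nodeName"`
	Status   string `json:"status"`
}

// drainWorkerStatuses is a hypothetical consumer of
// WorkerSyncer.WorkerStatusChangedOut (chan models.WorkerStatus, buffer 100).
func drainWorkerStatuses(statuses <-chan WorkerStatus) {
	for status := range statuses {
		log.Printf("worker %s on node %s is %s", status.Name, status.NodeName, status.Status)
	}
}

func main() {
	statuses := make(chan WorkerStatus, 100)
	// Sample values, purely illustrative.
	statuses <- WorkerStatus{Name: "kubeshark-worker-x7k2p", NodeName: "node-1", Status: "Running"}
	close(statuses)
	drainWorkerStatuses(statuses)
}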
@@ -831,7 +831,7 @@ func (provider *Provider) ApplyWorkerDaemonSet(ctx context.Context, namespace st
 	kubesharkCmd := []string{
 		"./worker",
 		"-i", "any",
-		"--api-server-address", fmt.Sprintf("ws://%s/wsTapper", hubPodIp),
+		"--api-server-address", fmt.Sprintf("ws://%s/wsWorker", hubPodIp),
 		"--nodefrag",
 		"--max-live-streams", strconv.Itoa(maxLiveStreams),
 	}
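
The final hunk only changes the WebSocket path the worker dials on the Hub, from /wsTapper to /wsWorker. A small sketch of the command line that results, assuming the slice is used verbatim as the worker container's command; the helper name, sample Hub IP, and stream limit below are illustrative, not the provider's actual code:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// buildWorkerCommand mirrors the kubesharkCmd literal from the hunk above;
// it is a standalone helper written for this sketch only.
func buildWorkerCommand(hubPodIp string, maxLiveStreams int) []string {
	return []string{
		"./worker",
		"-i", "any",
		"--api-server-address", fmt.Sprintf("ws://%s/wsWorker", hubPodIp),
		"--nodefrag",
		"--max-live-streams", strconv.Itoa(maxLiveStreams),
	}
}

func main() {
	// Sample inputs; the real hubPodIp and maxLiveStreams come from the caller.
	cmd := buildWorkerCommand("10.0.0.12", 500)
	fmt.Println(strings.Join(cmd, " "))
	// Prints: ./worker -i any --api-server-address ws://10.0.0.12/wsWorker --nodefrag --max-live-streams 500
}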