API server stores tappers status (#531)
@@ -22,7 +22,6 @@ import (
 "path"
 "path/filepath"
 "plugin"
-"regexp"
 "sort"
 "syscall"
 "time"
@@ -273,8 +272,6 @@ func hostApi(socketHarOutputChannel chan<- *tapApi.OutputChannelItem) {
 if _, err := startMizuTapperSyncer(ctx, kubernetesProvider); err != nil {
 logger.Log.Fatalf("error initializing tapper syncer: %+v", err)
 }
-
-go watchMizuEvents(ctx, kubernetesProvider, cancel)
 }

 utils.StartServer(app)
@@ -447,7 +444,7 @@ func startMizuTapperSyncer(ctx context.Context, provider *kubernetes.Provider) (
 MizuApiFilteringOptions: config.Config.MizuApiFilteringOptions,
 MizuServiceAccountExists: true, //assume service account exists since daemon mode will not function without it anyway
 Istio: config.Config.Istio,
-})
+}, time.Now())

 if err != nil {
 return nil, err
@@ -477,6 +474,16 @@ func startMizuTapperSyncer(ctx context.Context, provider *kubernetes.Provider) (
 api.BroadcastToBrowserClients(serializedTapStatus)
 providers.TapStatus.Pods = tapStatus.Pods
 providers.ExpectedTapperAmount = tapPodChangeEvent.ExpectedTapperAmount
+case tapperStatus, ok := <-tapperSyncer.TapperStatusChangedOut:
+if !ok {
+logger.Log.Debug("mizuTapperSyncer tapper status changed channel closed, ending listener loop")
+return
+}
+if providers.TappersStatus == nil {
+providers.TappersStatus = make(map[string]shared.TapperStatus)
+}
+providers.TappersStatus[tapperStatus.NodeName] = tapperStatus
+
 case <-ctx.Done():
 logger.Log.Debug("mizuTapperSyncer event listener loop exiting due to context done")
 return
@@ -486,48 +493,3 @@ func startMizuTapperSyncer(ctx context.Context, provider *kubernetes.Provider) (

 return tapperSyncer, nil
 }
-
-func watchMizuEvents(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc) {
-// Round down because k8s CreationTimestamp is given in 1 sec resolution.
-startTime := time.Now().Truncate(time.Second)
-
-mizuResourceRegex := regexp.MustCompile(fmt.Sprintf("^%s.*", kubernetes.MizuResourcesPrefix))
-eventWatchHelper := kubernetes.NewEventWatchHelper(kubernetesProvider, mizuResourceRegex)
-eventChan, errorChan := kubernetes.FilteredWatch(ctx, eventWatchHelper, []string{config.Config.MizuResourcesNamespace}, eventWatchHelper)
-
-for {
-select {
-case wEvent, ok := <-eventChan:
-if !ok {
-eventChan = nil
-continue
-}
-
-event, err := wEvent.ToEvent()
-if err != nil {
-logger.Log.Errorf("error parsing Mizu resource event: %+v", err)
-cancel()
-}
-
-if startTime.After(event.CreationTimestamp.Time) {
-continue
-}
-
-if event.Type == v1.EventTypeWarning {
-logger.Log.Warningf("resource %s in state %s - %s", event.Regarding.Name, event.Reason, event.Note)
-}
-case err, ok := <-errorChan:
-if !ok {
-errorChan = nil
-continue
-}
-
-logger.Log.Errorf("error in watch mizu resource events loop: %+v", err)
-cancel()
-
-case <-ctx.Done():
-logger.Log.Debugf("watching Mizu resource events loop, ctx done")
-return
-}
-}
-}
@@ -24,14 +24,19 @@ func HealthCheck(c *gin.Context) {
 }
 }
-
+tappers := make([]shared.TapperStatus, len(providers.TappersStatus))
+for _, value := range providers.TappersStatus {
+tappers = append(tappers, value)
+}
+
 response := shared.HealthResponse{
 TapStatus: providers.TapStatus,
 TappersCount: providers.TappersCount,
+TappersStatus: tappers,
 }
 c.JSON(http.StatusOK, response)
 }


 func PostTappedPods(c *gin.Context) {
 tapStatus := &shared.TapStatus{}
 if err := c.Bind(tapStatus); err != nil {
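The HealthCheck change above flattens the providers.TappersStatus map into a slice before it is serialized. Below is a minimal standalone Go sketch of that map-to-slice step, with a locally re-declared TapperStatus type and illustrative values; allocating the slice with zero length and a capacity hint is the usual way to keep append from leaving zero-valued entries.

// Standalone sketch (not part of this commit): collecting map values into a slice.
package main

import "fmt"

type TapperStatus struct {
	TapperName string `json:"tapperName"`
	NodeName   string `json:"nodeName"`
	Status     string `json:"status"`
}

func main() {
	tappersStatus := map[string]TapperStatus{
		"node-1": {TapperName: "mizu-tapper-x", NodeName: "node-1", Status: "Running"},
		"node-2": {TapperName: "mizu-tapper-y", NodeName: "node-2", Status: "Started"},
	}

	// Zero length plus a capacity hint: append fills exactly the reported entries.
	tappers := make([]TapperStatus, 0, len(tappersStatus))
	for _, value := range tappersStatus {
		tappers = append(tappers, value)
	}

	fmt.Println(len(tappers)) // 2
}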
@@ -52,6 +57,23 @@ func PostTappedPods(c *gin.Context) {
 }
 }

+func PostTapperStatus(c *gin.Context) {
+tapperStatus := &shared.TapperStatus{}
+if err := c.Bind(tapperStatus); err != nil {
+c.JSON(http.StatusBadRequest, err)
+return
+}
+if err := validation.Validate(tapperStatus); err != nil {
+c.JSON(http.StatusBadRequest, err)
+return
+}
+logger.Log.Infof("[Status] POST request, tapper status: %v", tapperStatus)
+if providers.TappersStatus == nil {
+providers.TappersStatus = make(map[string]shared.TapperStatus)
+}
+providers.TappersStatus[tapperStatus.NodeName] = *tapperStatus
+}
+
 func GetTappersCount(c *gin.Context) {
 c.JSON(http.StatusOK, providers.TappersCount)
 }
@@ -15,12 +15,13 @@ import (
 const tlsLinkRetainmentTime = time.Minute * 15

 var (
 TappersCount int
 TapStatus shared.TapStatus
-authStatus *models.AuthStatus
-RecentTLSLinks = cache.New(tlsLinkRetainmentTime, tlsLinkRetainmentTime)
+TappersStatus map[string]shared.TapperStatus
+authStatus *models.AuthStatus
+RecentTLSLinks = cache.New(tlsLinkRetainmentTime, tlsLinkRetainmentTime)
 ExpectedTapperAmount = -1 //only relevant in daemon mode as cli manages tappers otherwise
 tappersCountLock = sync.Mutex{}
 )

 func GetAuthStatus() (*models.AuthStatus, error) {
@@ -11,6 +11,7 @@ func StatusRoutes(ginApp *gin.Engine) {
 routeGroup.GET("/health", controllers.HealthCheck)

 routeGroup.POST("/tappedPods", controllers.PostTappedPods)
+routeGroup.POST("/tapperStatus", controllers.PostTapperStatus)
 routeGroup.GET("/tappersCount", controllers.GetTappersCount)
 routeGroup.GET("/tap", controllers.GetTappingStatus)
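Taken together, the controller and the route above expose a new POST /status/tapperStatus endpoint on the API server. A minimal standalone Go sketch of the request body it expects follows; the host and port are assumptions for illustration, and the field names mirror the shared.TapperStatus struct introduced later in this diff.

// Standalone sketch (not part of this commit): the JSON shape PostTapperStatus accepts.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

type TapperStatus struct {
	TapperName string `json:"tapperName"`
	NodeName   string `json:"nodeName"`
	Status     string `json:"status"`
}

func main() {
	status := TapperStatus{TapperName: "mizu-tapper-abcde", NodeName: "node-1", Status: "Started"}
	body, _ := json.Marshal(status) // {"tapperName":"mizu-tapper-abcde","nodeName":"node-1","status":"Started"}

	// The address is an assumption for the sketch; use whatever the API server is reachable at.
	resp, err := http.Post("http://localhost:8899/status/tapperStatus", "application/json", bytes.NewBuffer(body))
	if err != nil {
		fmt.Println("post failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status code:", resp.StatusCode)
}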
@@ -42,7 +42,7 @@ func (provider *Provider) TestConnection() error {
 retriesLeft := provider.retries
 for retriesLeft > 0 {
 if _, err := provider.GetHealthStatus(); err != nil {
-logger.Log.Debugf("[ERROR] api server not ready yet %v", err)
+logger.Log.Debugf("api server not ready yet %v", err)
 } else {
 logger.Log.Debugf("connection test to api server passed successfully")
 break
@@ -81,6 +81,23 @@ func (provider *Provider) GetHealthStatus() (*shared.HealthResponse, error) {
 }
 }

+func (provider *Provider) ReportTapperStatus(tapperStatus shared.TapperStatus) error {
+tapperStatusUrl := fmt.Sprintf("%s/status/tapperStatus", provider.url)
+
+if jsonValue, err := json.Marshal(tapperStatus); err != nil {
+return fmt.Errorf("failed Marshal the tapper status %w", err)
+} else {
+if response, err := provider.client.Post(tapperStatusUrl, "application/json", bytes.NewBuffer(jsonValue)); err != nil {
+return fmt.Errorf("failed sending to API server the tapped pods %w", err)
+} else if response.StatusCode != 200 {
+return fmt.Errorf("failed sending to API server the tapper status, response status code %v", response.StatusCode)
+} else {
+logger.Log.Debugf("Reported to server API about tapper status: %v", tapperStatus)
+return nil
+}
+}
+}
+
 func (provider *Provider) ReportTappedPods(pods []core.Pod) error {
 tappedPodsUrl := fmt.Sprintf("%s/status/tappedPods", provider.url)
@@ -33,6 +33,9 @@ import (
 const cleanupTimeout = time.Minute

 type tapState struct {
+startTime time.Time
+targetNamespaces []string
+
 apiServerService *core.Service
 tapperSyncer *kubernetes.MizuTapperSyncer
 mizuServiceAccountExists bool
@@ -42,7 +45,7 @@ var state tapState
 var apiProvider *apiserver.Provider

 func RunMizuTap() {
-startTime := time.Now()
+state.startTime = time.Now()

 mizuApiFilteringOptions, err := getMizuApiFilteringOptions()
 apiProvider = apiserver.NewProvider(GetApiServerUrl(), apiserver.DefaultRetries, apiserver.DefaultTimeout)
@@ -92,16 +95,16 @@ func RunMizuTap() {
 ctx, cancel := context.WithCancel(context.Background())
 defer cancel() // cancel will be called when this function exits

-targetNamespaces := getNamespaces(kubernetesProvider)
+state.targetNamespaces = getNamespaces(kubernetesProvider)

-serializedMizuConfig, err := config.GetSerializedMizuAgentConfig(targetNamespaces, mizuApiFilteringOptions)
+serializedMizuConfig, err := config.GetSerializedMizuAgentConfig(state.targetNamespaces, mizuApiFilteringOptions)
 if err != nil {
 logger.Log.Errorf(uiUtils.Error, fmt.Sprintf("Error composing mizu config: %v", errormessage.FormatError(err)))
 return
 }

 if config.Config.IsNsRestrictedMode() {
-if len(targetNamespaces) != 1 || !shared.Contains(targetNamespaces, config.Config.MizuResourcesNamespace) {
+if len(state.targetNamespaces) != 1 || !shared.Contains(state.targetNamespaces, config.Config.MizuResourcesNamespace) {
 logger.Log.Errorf("Not supported mode. Mizu can't resolve IPs in other namespaces when running in namespace restricted mode.\n"+
 "You can use the same namespace for --%s and --%s", configStructs.NamespacesTapName, config.MizuResourcesNamespaceConfigName)
 return
@@ -109,18 +112,19 @@ func RunMizuTap() {
 }

 var namespacesStr string
-if !shared.Contains(targetNamespaces, kubernetes.K8sAllNamespaces) {
-namespacesStr = fmt.Sprintf("namespaces \"%s\"", strings.Join(targetNamespaces, "\", \""))
+if !shared.Contains(state.targetNamespaces, kubernetes.K8sAllNamespaces) {
+namespacesStr = fmt.Sprintf("namespaces \"%s\"", strings.Join(state.targetNamespaces, "\", \""))
 } else {
 namespacesStr = "all namespaces"
 }

 logger.Log.Infof("Tapping pods in %s", namespacesStr)

+if err := printTappedPodsPreview(ctx, kubernetesProvider, state.targetNamespaces); err != nil {
+logger.Log.Errorf(uiUtils.Error, fmt.Sprintf("Error listing pods: %v", errormessage.FormatError(err)))
+}
+
 if config.Config.Tap.DryRun {
-if err := printTappedPodsPreview(ctx, kubernetesProvider, targetNamespaces); err != nil {
-logger.Log.Errorf(uiUtils.Error, fmt.Sprintf("Error listing pods: %v", errormessage.FormatError(err)))
-}
 return
 }
@@ -136,7 +140,7 @@ func RunMizuTap() {
 return
 }
 if config.Config.Tap.DaemonMode {
-if err := handleDaemonModePostCreation(ctx, cancel, kubernetesProvider, targetNamespaces); err != nil {
+if err := handleDaemonModePostCreation(ctx, cancel, kubernetesProvider, state.targetNamespaces); err != nil {
 defer finishMizuExecution(kubernetesProvider, apiProvider)
 cancel()
 } else {
@@ -145,14 +149,7 @@ func RunMizuTap() {
 } else {
 defer finishMizuExecution(kubernetesProvider, apiProvider)

-if err = startTapperSyncer(ctx, cancel, kubernetesProvider, targetNamespaces, *mizuApiFilteringOptions); err != nil {
-logger.Log.Errorf(uiUtils.Error, fmt.Sprintf("Error starting mizu tapper syncer: %v", err))
-cancel()
-}
-
 go goUtils.HandleExcWrapper(watchApiServerPod, ctx, kubernetesProvider, cancel)
-go goUtils.HandleExcWrapper(watchTapperPod, ctx, kubernetesProvider, cancel)
-go goUtils.HandleExcWrapper(watchMizuEvents, ctx, kubernetesProvider, cancel, startTime)

 // block until exit signal or error
 waitForFinish(ctx, cancel)
@@ -185,7 +182,6 @@ func printTappedPodsPreview(ctx context.Context, kubernetesProvider *kubernetes.
 if len(matchingPods) == 0 {
 printNoPodsFoundSuggestion(namespaces)
 }
-logger.Log.Info("Pods that match the provided criteria at this instant:")
 for _, tappedPod := range matchingPods {
 logger.Log.Infof(uiUtils.Green, fmt.Sprintf("+%s", tappedPod.Name))
 }
@@ -205,7 +201,7 @@ func waitForDaemonModeToBeReady(cancel context.CancelFunc, kubernetesProvider *k
 return nil
 }

-func startTapperSyncer(ctx context.Context, cancel context.CancelFunc, provider *kubernetes.Provider, targetNamespaces []string, mizuApiFilteringOptions api.TrafficFilteringOptions) error {
+func startTapperSyncer(ctx context.Context, cancel context.CancelFunc, provider *kubernetes.Provider, targetNamespaces []string, mizuApiFilteringOptions api.TrafficFilteringOptions, startTime time.Time) error {
 tapperSyncer, err := kubernetes.CreateAndStartMizuTapperSyncer(ctx, provider, kubernetes.TapperSyncerConfig{
 TargetNamespaces: targetNamespaces,
 PodFilterRegex: *config.Config.Tap.PodRegex(),
@@ -218,20 +214,12 @@ func startTapperSyncer(ctx context.Context, cancel context.CancelFunc, provider
 MizuApiFilteringOptions: mizuApiFilteringOptions,
 MizuServiceAccountExists: state.mizuServiceAccountExists,
 Istio: config.Config.Tap.Istio,
-})
+}, startTime)

 if err != nil {
 return err
 }

-for _, tappedPod := range tapperSyncer.CurrentlyTappedPods {
-logger.Log.Infof(uiUtils.Green, fmt.Sprintf("+%s", tappedPod.Name))
-}
-
-if len(tapperSyncer.CurrentlyTappedPods) == 0 {
-printNoPodsFoundSuggestion(targetNamespaces)
-}
-
 go func() {
 for {
 select {
@@ -250,6 +238,14 @@ func startTapperSyncer(ctx context.Context, cancel context.CancelFunc, provider
 if err := apiProvider.ReportTappedPods(tapperSyncer.CurrentlyTappedPods); err != nil {
 logger.Log.Debugf("[Error] failed update tapped pods %v", err)
 }
+case tapperStatus, ok := <-tapperSyncer.TapperStatusChangedOut:
+if !ok {
+logger.Log.Debug("mizuTapperSyncer tapper status changed channel closed, ending listener loop")
+return
+}
+if err := apiProvider.ReportTapperStatus(tapperStatus); err != nil {
+logger.Log.Debugf("[Error] failed update tapper status %v", err)
+}
 case <-ctx.Done():
 logger.Log.Debug("mizuTapperSyncer event listener loop exiting due to context done")
 return
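The new case above drains tapperSyncer.TapperStatusChangedOut and forwards each update to the API server. A standalone Go sketch of the same select pattern follows: a receive with the ok flag so a closed channel ends the listener loop cleanly, plus a context case for cancellation. Types and values are illustrative.

// Standalone sketch (not part of this commit) of the channel-consumer pattern.
package main

import (
	"context"
	"fmt"
	"time"
)

type TapperStatus struct {
	TapperName string
	NodeName   string
	Status     string
}

func listen(ctx context.Context, statusChanged <-chan TapperStatus) {
	for {
		select {
		case tapperStatus, ok := <-statusChanged:
			if !ok {
				fmt.Println("status channel closed, ending listener loop")
				return
			}
			fmt.Printf("tapper %s on %s is %s\n", tapperStatus.TapperName, tapperStatus.NodeName, tapperStatus.Status)
		case <-ctx.Done():
			fmt.Println("context done, ending listener loop")
			return
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	ch := make(chan TapperStatus, 10)
	ch <- TapperStatus{TapperName: "mizu-tapper-x", NodeName: "node-1", Status: "Started"}
	close(ch) // buffered value is still delivered before the closed state is observed

	listen(ctx, ch)
}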
@@ -557,171 +553,9 @@ func waitUntilNamespaceDeleted(ctx context.Context, cancel context.CancelFunc, k
 }

 func watchApiServerPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc) {
-podExactRegex := regexp.MustCompile(fmt.Sprintf("^%s$", kubernetes.ApiServerPodName))
-podWatchHelper := kubernetes.NewPodWatchHelper(kubernetesProvider, podExactRegex)
+podExactRegex := regexp.MustCompile(fmt.Sprintf("^%s", kubernetes.ApiServerPodName))
+eventWatchHelper := kubernetes.NewEventWatchHelper(kubernetesProvider, podExactRegex, "pod")
-eventChan, errorChan := kubernetes.FilteredWatch(ctx, podWatchHelper, []string{config.Config.MizuResourcesNamespace}, podWatchHelper)
-isPodReady := false
-timeAfter := time.After(25 * time.Second)
-for {
-select {
-case wEvent, ok := <-eventChan:
-if !ok {
-eventChan = nil
-continue
-}
-
-switch wEvent.Type {
-case kubernetes.EventAdded:
-logger.Log.Debugf("Watching API Server pod loop, added")
-case kubernetes.EventDeleted:
-logger.Log.Infof("%s removed", kubernetes.ApiServerPodName)
-cancel()
-return
-case kubernetes.EventModified:
-modifiedPod, err := wEvent.ToPod()
-if err != nil {
-logger.Log.Errorf(uiUtils.Error, err)
-cancel()
-continue
-}
-
-logger.Log.Debugf("Watching API Server pod loop, modified: %v", modifiedPod.Status.Phase)
-
-if modifiedPod.Status.Phase == core.PodPending {
-if modifiedPod.Status.Conditions[0].Type == core.PodScheduled && modifiedPod.Status.Conditions[0].Status != core.ConditionTrue {
-logger.Log.Debugf("Wasn't able to deploy the API server. Reason: \"%s\"", modifiedPod.Status.Conditions[0].Message)
-logger.Log.Errorf(uiUtils.Error, fmt.Sprintf("Wasn't able to deploy the API server, for more info check logs at %s", fsUtils.GetLogFilePath()))
-cancel()
-break
-}
-
-if len(modifiedPod.Status.ContainerStatuses) > 0 && modifiedPod.Status.ContainerStatuses[0].State.Waiting != nil && modifiedPod.Status.ContainerStatuses[0].State.Waiting.Reason == "ErrImagePull" {
-logger.Log.Debugf("Wasn't able to deploy the API server. (ErrImagePull) Reason: \"%s\"", modifiedPod.Status.ContainerStatuses[0].State.Waiting.Message)
-logger.Log.Errorf(uiUtils.Error, fmt.Sprintf("Wasn't able to deploy the API server: failed to pull the image, for more info check logs at %v", fsUtils.GetLogFilePath()))
-cancel()
-break
-}
-}
-
-if modifiedPod.Status.Phase == core.PodRunning && !isPodReady {
-isPodReady = true
-go startProxyReportErrorIfAny(kubernetesProvider, cancel)
-
-url := GetApiServerUrl()
-if err := apiProvider.TestConnection(); err != nil {
-logger.Log.Errorf(uiUtils.Error, fmt.Sprintf("Couldn't connect to API server, for more info check logs at %s", fsUtils.GetLogFilePath()))
-cancel()
-break
-}
-
-logger.Log.Infof("Mizu is available at %s", url)
-if !config.Config.HeadlessMode {
-uiUtils.OpenBrowser(url)
-}
-if err := apiProvider.ReportTappedPods(state.tapperSyncer.CurrentlyTappedPods); err != nil {
-logger.Log.Debugf("[Error] failed update tapped pods %v", err)
-}
-}
-case kubernetes.EventBookmark:
-break
-case kubernetes.EventError:
-break
-}
-case err, ok := <-errorChan:
-if !ok {
-errorChan = nil
-continue
-}
-
-logger.Log.Errorf("[ERROR] Agent creation, watching %v namespace, error: %v", config.Config.MizuResourcesNamespace, err)
-cancel()
-
-case <-timeAfter:
-if !isPodReady {
-logger.Log.Errorf(uiUtils.Error, "Mizu API server was not ready in time")
-cancel()
-}
-case <-ctx.Done():
-logger.Log.Debugf("Watching API Server pod loop, ctx done")
-return
-}
-}
-}
-
-func watchTapperPod(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc) {
-podExactRegex := regexp.MustCompile(fmt.Sprintf("^%s.*", kubernetes.TapperDaemonSetName))
-podWatchHelper := kubernetes.NewPodWatchHelper(kubernetesProvider, podExactRegex)
-eventChan, errorChan := kubernetes.FilteredWatch(ctx, podWatchHelper, []string{config.Config.MizuResourcesNamespace}, podWatchHelper)
-
-for {
-select {
-case wEvent, ok := <-eventChan:
-if !ok {
-eventChan = nil
-continue
-}
-
-pod, err := wEvent.ToPod()
-if err != nil {
-logger.Log.Errorf(uiUtils.Error, err)
-cancel()
-continue
-}
-
-switch wEvent.Type {
-case kubernetes.EventAdded:
-logger.Log.Debugf("Tapper is created [%s]", pod.Name)
-case kubernetes.EventDeleted:
-logger.Log.Debugf("Tapper is removed [%s]", pod.Name)
-case kubernetes.EventModified:
-if pod.Status.Phase == core.PodPending && pod.Status.Conditions[0].Type == core.PodScheduled && pod.Status.Conditions[0].Status != core.ConditionTrue {
-logger.Log.Infof(uiUtils.Red, fmt.Sprintf("Wasn't able to deploy the tapper %s. Reason: \"%s\"", pod.Name, pod.Status.Conditions[0].Message))
-cancel()
-continue
-}
-
-podStatus := pod.Status
-
-if podStatus.Phase == core.PodRunning {
-state := podStatus.ContainerStatuses[0].State
-if state.Terminated != nil {
-switch state.Terminated.Reason {
-case "OOMKilled":
-logger.Log.Infof(uiUtils.Red, fmt.Sprintf("Tapper %s was terminated (reason: OOMKilled). You should consider increasing machine resources.", pod.Name))
-}
-}
-}
-
-logger.Log.Debugf("Tapper %s is %s", pod.Name, strings.ToLower(string(podStatus.Phase)))
-case kubernetes.EventBookmark:
-break
-case kubernetes.EventError:
-break
-}
-case err, ok := <-errorChan:
-if !ok {
-errorChan = nil
-continue
-}
-
-logger.Log.Errorf("[Error] Error in mizu tapper pod watch, err: %v", err)
-cancel()
-
-case <-ctx.Done():
-logger.Log.Debugf("Watching tapper pod loop, ctx done")
-return
-}
-}
-}
-
-func watchMizuEvents(ctx context.Context, kubernetesProvider *kubernetes.Provider, cancel context.CancelFunc, startTime time.Time) {
-// Round down because k8s CreationTimestamp is given in 1 sec resolution.
-startTime = startTime.Truncate(time.Second)
-
-mizuResourceRegex := regexp.MustCompile(fmt.Sprintf("^%s.*", kubernetes.MizuResourcesPrefix))
-eventWatchHelper := kubernetes.NewEventWatchHelper(kubernetesProvider, mizuResourceRegex)
 eventChan, errorChan := kubernetes.FilteredWatch(ctx, eventWatchHelper, []string{config.Config.MizuResourcesNamespace}, eventWatchHelper)
-
 for {
 select {
 case wEvent, ok := <-eventChan:
@@ -732,16 +566,46 @@ func watchMizuEvents(ctx context.Context, kubernetesProvider *kubernetes.Provide

 event, err := wEvent.ToEvent()
 if err != nil {
-logger.Log.Errorf(uiUtils.Error, fmt.Sprintf("error parsing Mizu resource event: %+v", err))
-cancel()
+logger.Log.Errorf(fmt.Sprintf("Error parsing Mizu resource event: %+v", err))
 }

-if startTime.After(event.CreationTimestamp.Time) {
+if state.startTime.After(event.CreationTimestamp.Time) {
 continue
 }

-if event.Type == core.EventTypeWarning {
-logger.Log.Warningf(uiUtils.Warning, fmt.Sprintf("Resource %s in state %s - %s", event.Regarding.Name, event.Reason, event.Note))
+logger.Log.Debugf(
+fmt.Sprintf("Watching API server events loop, event %s, time: %v, resource: %s (%s), reason: %s, note: %s",
+event.Name,
+event.CreationTimestamp.Time,
+event.Regarding.Name,
+event.Regarding.Kind,
+event.Reason,
+event.Note))
+
+switch event.Reason {
+case "Started":
+go startProxyReportErrorIfAny(kubernetesProvider, cancel)
+
+url := GetApiServerUrl()
+if err := apiProvider.TestConnection(); err != nil {
+logger.Log.Errorf(uiUtils.Error, fmt.Sprintf("Couldn't connect to API server, for more info check logs at %s", fsUtils.GetLogFilePath()))
+cancel()
+break
+}
+options, _ := getMizuApiFilteringOptions()
+if err = startTapperSyncer(ctx, cancel, kubernetesProvider, state.targetNamespaces, *options, state.startTime); err != nil {
+logger.Log.Errorf(uiUtils.Error, fmt.Sprintf("Error starting mizu tapper syncer: %v", err))
+cancel()
+}
+
+logger.Log.Infof("Mizu is available at %s", url)
+if !config.Config.HeadlessMode {
+uiUtils.OpenBrowser(url)
+}
+case "FailedScheduling", "Failed", "Killing":
+logger.Log.Errorf(uiUtils.Error, fmt.Sprintf("Mizu API Server status: %s - %s", event.Reason, event.Note))
+cancel()
+break
 }
 case err, ok := <-errorChan:
 if !ok {
@@ -749,11 +613,9 @@ func watchMizuEvents(ctx context.Context, kubernetesProvider *kubernetes.Provide
 continue
 }

-logger.Log.Errorf("error in watch mizu resource events loop: %+v", err)
-cancel()
-
+logger.Log.Errorf("Watching API server events loop, error: %+v", err)
 case <-ctx.Done():
-logger.Log.Debugf("watching Mizu resource events loop, ctx done")
+logger.Log.Debugf("Watching API server events loop, ctx done")
 return
 }
 }
@@ -3,6 +3,7 @@ package kubernetes
 import (
 "context"
 "regexp"
+"strings"

 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/watch"
@@ -10,13 +11,15 @@ import (

 type EventWatchHelper struct {
 kubernetesProvider *Provider
 NameRegexFilter *regexp.Regexp
+Kind string
 }

-func NewEventWatchHelper(kubernetesProvider *Provider, NameRegexFilter *regexp.Regexp) *EventWatchHelper {
+func NewEventWatchHelper(kubernetesProvider *Provider, NameRegexFilter *regexp.Regexp, kind string) *EventWatchHelper {
 return &EventWatchHelper{
 kubernetesProvider: kubernetesProvider,
 NameRegexFilter: NameRegexFilter,
+Kind: kind,
 }
 }
@@ -31,6 +34,10 @@ func (wh *EventWatchHelper) Filter(wEvent *WatchEvent) (bool, error) {
 return false, nil
 }

+if strings.ToLower(event.Regarding.Kind) != strings.ToLower(wh.Kind) {
+return false, nil
+}
+
 return true, nil
 }
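The Filter change above additionally drops events whose Regarding.Kind does not match the helper's new Kind field, compared case-insensitively. A standalone Go sketch of an equivalent check follows; strings.EqualFold is shorthand for the ToLower comparison used in the diff.

// Standalone sketch (not part of this commit) of the case-insensitive kind check.
package main

import (
	"fmt"
	"strings"
)

func kindMatches(regardingKind, wantKind string) bool {
	// Equivalent to strings.ToLower(regardingKind) == strings.ToLower(wantKind).
	return strings.EqualFold(regardingKind, wantKind)
}

func main() {
	fmt.Println(kindMatches("Pod", "pod"))       // true
	fmt.Println(kindMatches("DaemonSet", "pod")) // false
}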
@@ -23,13 +23,15 @@ type TappedPodChangeEvent struct {

 // MizuTapperSyncer uses a k8s pod watch to update tapper daemonsets when targeted pods are removed or created
 type MizuTapperSyncer struct {
-context context.Context
-CurrentlyTappedPods []core.Pod
-config TapperSyncerConfig
-kubernetesProvider *Provider
-TapPodChangesOut chan TappedPodChangeEvent
-ErrorOut chan K8sTapManagerError
-nodeToTappedPodIPMap map[string][]string
+startTime time.Time
+context context.Context
+CurrentlyTappedPods []core.Pod
+config TapperSyncerConfig
+kubernetesProvider *Provider
+TapPodChangesOut chan TappedPodChangeEvent
+TapperStatusChangedOut chan shared.TapperStatus
+ErrorOut chan K8sTapManagerError
+nodeToTappedPodIPMap map[string][]string
 }

 type TapperSyncerConfig struct {
@@ -46,14 +48,16 @@ type TapperSyncerConfig struct {
 Istio bool
 }

-func CreateAndStartMizuTapperSyncer(ctx context.Context, kubernetesProvider *Provider, config TapperSyncerConfig) (*MizuTapperSyncer, error) {
+func CreateAndStartMizuTapperSyncer(ctx context.Context, kubernetesProvider *Provider, config TapperSyncerConfig, startTime time.Time) (*MizuTapperSyncer, error) {
 syncer := &MizuTapperSyncer{
-context: ctx,
-CurrentlyTappedPods: make([]core.Pod, 0),
-config: config,
-kubernetesProvider: kubernetesProvider,
-TapPodChangesOut: make(chan TappedPodChangeEvent, 100),
-ErrorOut: make(chan K8sTapManagerError, 100),
+startTime: startTime.Truncate(time.Second), // Round down because k8s CreationTimestamp is given in 1 sec resolution.
+context: ctx,
+CurrentlyTappedPods: make([]core.Pod, 0),
+config: config,
+kubernetesProvider: kubernetesProvider,
+TapPodChangesOut: make(chan TappedPodChangeEvent, 100),
+TapperStatusChangedOut: make(chan shared.TapperStatus, 100),
+ErrorOut: make(chan K8sTapManagerError, 100),
 }

 if err, _ := syncer.updateCurrentlyTappedPods(); err != nil {
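CreateAndStartMizuTapperSyncer now records a startTime truncated to whole seconds, because Kubernetes CreationTimestamps carry one-second resolution; the syncer later skips events older than that mark. A standalone Go sketch of why the truncation matters, with illustrative timestamps:

// Standalone sketch (not part of this commit): comparing against 1s-resolution timestamps.
package main

import (
	"fmt"
	"time"
)

func main() {
	startTime := time.Date(2021, 12, 1, 10, 0, 5, 700_000_000, time.UTC)
	eventCreation := time.Date(2021, 12, 1, 10, 0, 5, 0, time.UTC) // k8s drops sub-second precision

	// Without truncation, an event created in the same second looks "older" than startTime.
	fmt.Println(startTime.After(eventCreation)) // true

	// Truncating startTime to the second keeps same-second events.
	fmt.Println(startTime.Truncate(time.Second).After(eventCreation)) // false
}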
@@ -65,9 +69,72 @@ func CreateAndStartMizuTapperSyncer(ctx context.Context, kubernetesProvider *Pro
 }

 go syncer.watchPodsForTapping()
+go syncer.watchTapperEvents()
 return syncer, nil
 }

+func (tapperSyncer *MizuTapperSyncer) watchTapperEvents() {
+mizuResourceRegex := regexp.MustCompile(fmt.Sprintf("^%s.*", TapperPodName))
+eventWatchHelper := NewEventWatchHelper(tapperSyncer.kubernetesProvider, mizuResourceRegex, "pod")
+eventChan, errorChan := FilteredWatch(tapperSyncer.context, eventWatchHelper, []string{tapperSyncer.config.MizuResourcesNamespace}, eventWatchHelper)
+
+for {
+select {
+case wEvent, ok := <-eventChan:
+if !ok {
+eventChan = nil
+continue
+}
+
+event, err := wEvent.ToEvent()
+if err != nil {
+logger.Log.Errorf(fmt.Sprintf("Error parsing Mizu resource event: %+v", err))
+}
+
+if tapperSyncer.startTime.After(event.CreationTimestamp.Time) {
+continue
+}
+
+logger.Log.Debugf(
+fmt.Sprintf("Watching tapper events loop, event %s, time: %v, resource: %s (%s), reason: %s, note: %s",
+event.Name,
+event.CreationTimestamp.Time,
+event.Regarding.Name,
+event.Regarding.Kind,
+event.Reason,
+event.Note))
+
+pod, err1 := tapperSyncer.kubernetesProvider.GetPod(tapperSyncer.context, tapperSyncer.config.MizuResourcesNamespace, event.Regarding.Name)
+if err1 != nil {
+logger.Log.Debugf(fmt.Sprintf("Failed to get tapper pod %s", event.Regarding.Name))
+continue
+}
+
+nodeName := ""
+if event.Reason != "FailedScheduling" {
+nodeName = pod.Spec.NodeName
+} else {
+nodeName = pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchFields[0].Values[0]
+}
+
+taperStatus := shared.TapperStatus{TapperName: pod.Name, NodeName: nodeName, Status: event.Reason}
+tapperSyncer.TapperStatusChangedOut <- taperStatus
+
+case err, ok := <-errorChan:
+if !ok {
+errorChan = nil
+continue
+}
+
+logger.Log.Errorf("Watching tapper events loop, error: %+v", err)
+
+case <-tapperSyncer.context.Done():
+logger.Log.Debugf("Watching tapper events loop, ctx done")
+return
+}
+}
+}
+
 func (tapperSyncer *MizuTapperSyncer) watchPodsForTapping() {
 podWatchHelper := NewPodWatchHelper(tapperSyncer.kubernetesProvider, &tapperSyncer.config.PodFilterRegex)
 eventChan, errorChan := FilteredWatch(tapperSyncer.context, podWatchHelper, tapperSyncer.config.TargetNamespaces, podWatchHelper)
@@ -108,7 +175,6 @@ func (tapperSyncer *MizuTapperSyncer) watchPodsForTapping() {
 continue
 }

-
 switch wEvent.Type {
 case EventAdded:
 logger.Log.Debugf("Added matching pod %s, ns: %s", pod.Name, pod.Namespace)
@@ -351,8 +351,8 @@ func (provider *Provider) CreateService(ctx context.Context, namespace string, s
 }

 func (provider *Provider) DoesServicesExist(ctx context.Context, namespace string, name string) (bool, error) {
-resource, err := provider.clientSet.CoreV1().Services(namespace).Get(ctx, name, metav1.GetOptions{})
-return provider.doesResourceExist(resource, err)
+serviceResource, err := provider.clientSet.CoreV1().Services(namespace).Get(ctx, name, metav1.GetOptions{})
+return provider.doesResourceExist(serviceResource, err)
 }

 func (provider *Provider) doesResourceExist(resource interface{}, err error) (bool, error) {
@@ -642,7 +642,7 @@ func (provider *Provider) ApplyMizuTapperDaemonSet(ctx context.Context, namespac
 "--api-server-address", fmt.Sprintf("ws://%s/wsTapper", apiServerPodIp),
 "--nodefrag",
 }

 if istio {
 mizuCmd = append(mizuCmd, "--procfs", procfsMountPath, "--istio")
 }
@@ -653,13 +653,13 @@ func (provider *Provider) ApplyMizuTapperDaemonSet(ctx context.Context, namespac
 agentContainer.WithImagePullPolicy(imagePullPolicy)

 caps := applyconfcore.Capabilities().WithDrop("ALL").WithAdd("NET_RAW").WithAdd("NET_ADMIN")

 if istio {
 caps = caps.WithAdd("SYS_ADMIN") // for reading /proc/PID/net/ns
 caps = caps.WithAdd("SYS_PTRACE") // for setting netns to other process
 caps = caps.WithAdd("DAC_OVERRIDE") // for reading /proc/PID/environ
 }

 agentContainer.WithSecurityContext(applyconfcore.SecurityContext().WithCapabilities(caps))

 agentContainer.WithCommand(mizuCmd...)
@@ -780,10 +780,10 @@ func (provider *Provider) ApplyMizuTapperDaemonSet(ctx context.Context, namespac
 return err
 }

-func (provider *Provider) ListAllPodsMatchingRegex(ctx context.Context, regex *regexp.Regexp, namespaces []string) ([]core.Pod, error) {
+func (provider *Provider) listPodsImpl(ctx context.Context, regex *regexp.Regexp, namespaces []string, listOptions metav1.ListOptions) ([]core.Pod, error) {
 var pods []core.Pod
 for _, namespace := range namespaces {
-namespacePods, err := provider.clientSet.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{})
+namespacePods, err := provider.clientSet.CoreV1().Pods(namespace).List(ctx, listOptions)
 if err != nil {
 return nil, fmt.Errorf("failed to get pods in ns: [%s], %w", namespace, err)
 }
@@ -800,6 +800,14 @@ func (provider *Provider) ListAllPodsMatchingRegex(ctx context.Context, regex *r
 return matchingPods, nil
 }

+func (provider *Provider) ListAllPodsMatchingRegex(ctx context.Context, regex *regexp.Regexp, namespaces []string) ([]core.Pod, error) {
+return provider.listPodsImpl(ctx, regex, namespaces, metav1.ListOptions{})
+}
+
+func (provider *Provider) GetPod(ctx context.Context, namespaces string, podName string) (*core.Pod, error) {
+return provider.clientSet.CoreV1().Pods(namespaces).Get(ctx, podName, metav1.GetOptions{})
+}
+
 func (provider *Provider) ListAllRunningPodsMatchingRegex(ctx context.Context, regex *regexp.Regexp, namespaces []string) ([]core.Pod, error) {
 pods, err := provider.ListAllPodsMatchingRegex(ctx, regex, namespaces)
 if err != nil {
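The provider refactor above folds the pod listing into listPodsImpl (which now accepts ListOptions) and adds a GetPod helper. A hedged standalone client-go sketch of the underlying Get call follows; the kubeconfig loading, namespace, and pod name are illustrative assumptions, not part of this commit.

// Standalone client-go sketch (not from this commit): the Get call that GetPod wraps.
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumes a local kubeconfig; the real provider builds its clientSet differently.
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	clientSet, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	// Namespace and pod name are placeholders.
	pod, err := clientSet.CoreV1().Pods("mizu").Get(context.Background(), "mizu-api-server", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println(pod.Spec.NodeName, pod.Status.Phase)
}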
@@ -57,11 +57,10 @@ func getMissingPods(pods1 []core.Pod, pods2 []core.Pod) []core.Pod {
 return missingPods
 }

-
 func GetPodInfosForPods(pods []core.Pod) []shared.PodInfo {
 podInfos := make([]shared.PodInfo, 0)
 for _, pod := range pods {
-podInfos = append(podInfos, shared.PodInfo{Name: pod.Name, Namespace: pod.Namespace})
+podInfos = append(podInfos, shared.PodInfo{Name: pod.Name, Namespace: pod.Namespace, NodeName: pod.Spec.NodeName})
 }
 return podInfos
 }
@@ -67,6 +67,12 @@ type WebSocketStatusMessage struct {
 TappingStatus TapStatus `json:"tappingStatus"`
 }

+type TapperStatus struct {
+TapperName string `json:"tapperName"`
+NodeName string `json:"nodeName"`
+Status string `json:"status"`
+}
+
 type TapStatus struct {
 Pods []PodInfo `json:"pods"`
 TLSLinks []TLSLinkInfo `json:"tlsLinks"`
@@ -75,6 +81,7 @@ type TapStatus struct {
 type PodInfo struct {
 Namespace string `json:"namespace"`
 Name string `json:"name"`
+NodeName string `json:"nodeName"`
 }

 type TLSLinkInfo struct {
@@ -110,8 +117,9 @@ func CreateWebSocketMessageTypeAnalyzeStatus(analyzeStatus AnalyzeStatus) WebSoc
 }

 type HealthResponse struct {
 TapStatus TapStatus `json:"tapStatus"`
 TappersCount int `json:"tappersCount"`
+TappersStatus []TapperStatus `json:"tappersStatus"`
 }

 type VersionResponse struct {
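With the model changes above, the /status/health payload gains per-node tapper statuses and each tapped pod's node name. A standalone Go sketch of the resulting JSON shape follows; the structs are re-declared locally to mirror the ones in this diff (TLSLinks omitted for brevity) and the values are illustrative.

// Standalone sketch (not part of this commit) of the enriched health payload.
package main

import (
	"encoding/json"
	"fmt"
)

type PodInfo struct {
	Namespace string `json:"namespace"`
	Name      string `json:"name"`
	NodeName  string `json:"nodeName"`
}

type TapStatus struct {
	Pods []PodInfo `json:"pods"` // TLSLinks field omitted for brevity
}

type TapperStatus struct {
	TapperName string `json:"tapperName"`
	NodeName   string `json:"nodeName"`
	Status     string `json:"status"`
}

type HealthResponse struct {
	TapStatus     TapStatus      `json:"tapStatus"`
	TappersCount  int            `json:"tappersCount"`
	TappersStatus []TapperStatus `json:"tappersStatus"`
}

func main() {
	response := HealthResponse{
		TapStatus:     TapStatus{Pods: []PodInfo{{Namespace: "sock-shop", Name: "carts-55d7d5956f-abcde", NodeName: "node-1"}}},
		TappersCount:  1,
		TappersStatus: []TapperStatus{{TapperName: "mizu-tapper-x", NodeName: "node-1", Status: "Running"}},
	}

	out, _ := json.MarshalIndent(response, "", "  ")
	fmt.Println(string(out))
}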