mirror of https://github.com/kubeshark/kubeshark.git
synced 2025-08-16 15:38:40 +00:00

commit 8886590ea2 (parent 8400e9e903)
Update main.go, main.go, and 5 more files...
@@ -131,7 +131,7 @@ func filterHarItems(inChannel <-chan *tap.OutputChannelItem, outChannel chan *ta
 		if message.ConnectionInfo.IsOutgoing && api.CheckIsServiceIP(message.ConnectionInfo.ServerIP) {
 			continue
 		}
-		// TODO: move this to tappers
+		// TODO: move this to tappers https://up9.atlassian.net/browse/TRA-3441
 		if filterOptions.HideHealthChecks && isHealthCheckByUserAgent(message) {
 			continue
 		}
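The isHealthCheckByUserAgent helper filtered on above is not shown in this diff. For illustration only, a minimal sketch of user-agent based health-check detection; the helper name isHealthCheckUA and the marker list are hypothetical, not mizu's actual implementation:

package main

import (
	"fmt"
	"strings"
)

// Hypothetical probe markers; the real set used by isHealthCheckByUserAgent
// is not part of this diff.
var healthCheckUserAgents = []string{"kube-probe", "elb-healthchecker"}

// isHealthCheckUA reports whether a User-Agent header looks like a health probe.
func isHealthCheckUA(userAgent string) bool {
	ua := strings.ToLower(userAgent)
	for _, marker := range healthCheckUserAgents {
		if strings.Contains(ua, marker) {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isHealthCheckUA("kube-probe/1.21")) // true
	fmt.Println(isHealthCheckUA("curl/7.68.0"))     // false
}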
@@ -7,6 +7,7 @@ import (
 	"gorm.io/gorm/logger"
 	"mizuserver/pkg/models"
+	"mizuserver/pkg/utils"
 	"time"
 )

 const (
@@ -44,7 +45,7 @@ func GetEntriesTable() *gorm.DB {

 func initDataBase(databasePath string) *gorm.DB {
 	temp, _ := gorm.Open(sqlite.Open(databasePath), &gorm.Config{
-		Logger: logger.Default.LogMode(logger.Silent),
+		Logger: &utils.TruncatingLogger{LogLevel: logger.Warn, SlowThreshold: 500 * time.Millisecond},
 	})
 	_ = temp.AutoMigrate(&models.MizuEntry{}) // this will ensure table is created
 	return temp
@@ -5,6 +5,7 @@ import (
 	"github.com/fsnotify/fsnotify"
 	"github.com/up9inc/mizu/shared"
 	"github.com/up9inc/mizu/shared/debounce"
+	"github.com/up9inc/mizu/shared/units"
 	"log"
 	"mizuserver/pkg/models"
 	"os"
@@ -13,19 +14,18 @@ import (
 )

 const percentageOfMaxSizeBytesToPrune = 15
-const defaultMaxDatabaseSizeBytes = 200 * 1000 * 1000
+const defaultMaxDatabaseSizeBytes int64 = 200 * 1000 * 1000

 func StartEnforcingDatabaseSize() {
 	watcher, err := fsnotify.NewWatcher()
 	if err != nil {
-		log.Printf("Error creating filesystem watcher for db size enforcement: %v\n", err) // TODO: make fatal
+		log.Fatalf("Error creating filesystem watcher for db size enforcement: %v\n", err)
 		return
 	}
 	defer watcher.Close()

 	maxEntriesDBByteSize, err := getMaxEntriesDBByteSize()
 	if err != nil {
-		log.Printf("Error parsing max db size: %v\n", err) // TODO: make fatal
+		log.Fatalf("Error parsing max db size: %v\n", err)
 		return
 	}
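One behavioral detail of the Printf-to-Fatalf swap above (which resolves the removed TODOs): log.Fatalf prints its message and then calls os.Exit(1), so the return statements that follow it become unreachable. The compiler accepts them, but they never run. Annotated:

	if err != nil {
		log.Fatalf("Error parsing max db size: %v\n", err)
		return // unreachable: Fatalf has already terminated the process via os.Exit(1)
	}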
@@ -33,7 +33,6 @@ func StartEnforcingDatabaseSize() {
 		checkFileSize(maxEntriesDBByteSize)
 	})
-
 	done := make(chan bool)
 	go func() {
 		for {
 			select {
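The shared/debounce package itself is not shown in this commit; only the SetOn call below is visible. As a sketch of the pattern under that assumption (the Debouncer type and its Interval/Callback fields are hypothetical names), a debouncer of roughly this shape coalesces a burst of filesystem write events into a single size check:

package debounce

import (
	"sync"
	"time"
)

// Debouncer collapses bursts of triggers into one callback run per interval.
type Debouncer struct {
	mu       sync.Mutex
	armed    bool
	Interval time.Duration
	Callback func()
}

// SetOn schedules the callback unless a run is already pending.
func (d *Debouncer) SetOn() {
	d.mu.Lock()
	defer d.mu.Unlock()
	if d.armed {
		return // coalesce: a run is already scheduled
	}
	d.armed = true
	time.AfterFunc(d.Interval, func() {
		d.mu.Lock()
		d.armed = false
		d.mu.Unlock()
		d.Callback()
	})
}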
@@ -41,7 +40,7 @@ func StartEnforcingDatabaseSize() {
 			if !ok {
 				return // closed channel
 			}
-			if event.Op&fsnotify.Write == fsnotify.Write {
+			if event.Op == fsnotify.Write {
 				checkFileSizeDebouncer.SetOn()
 			}
 		case err, ok := <-watcher.Errors:
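This condition change is behavioral, not cosmetic: fsnotify.Op is a bit flag set, and a single event can carry several flags at once. The old bitmask test matched any event whose Write bit was set; the new equality test matches only events that are exactly Write and nothing else. Whether mixed-flag events actually occur for this watcher is not shown in the diff. A standalone illustration:

package main

import (
	"fmt"

	"github.com/fsnotify/fsnotify"
)

func main() {
	op := fsnotify.Write | fsnotify.Chmod // one event carrying two flags

	fmt.Println(op&fsnotify.Write == fsnotify.Write) // true: the Write bit is set
	fmt.Println(op == fsnotify.Write)                // false: the Chmod bit is also set
}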
@@ -55,13 +54,12 @@ func StartEnforcingDatabaseSize() {

 	err = watcher.Add(DBPath)
 	if err != nil {
-		log.Printf("Error adding %s to filesystem watcher for db size enforcement: %v\n", DBPath, err) //TODO: make fatal
+		log.Fatalf("Error adding %s to filesystem watcher for db size enforcement: %v\n", DBPath, err)
 	}
 	<-done
 }

 func getMaxEntriesDBByteSize() (int64, error) {
-	maxEntriesDBByteSize := int64(defaultMaxDatabaseSizeBytes)
+	maxEntriesDBByteSize := defaultMaxDatabaseSizeBytes
 	var err error

 	maxEntriesDBSizeByteSEnvVarValue := os.Getenv(shared.MaxEntriesDBSizeByteSEnvVar)
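The dropped int64(...) conversion here is enabled by the const change earlier in this commit: with an untyped constant, := infers plain int, so the explicit conversion was required; typing the constant as int64 lets := infer int64 directly. A standalone illustration:

package main

import "fmt"

const untypedSize = 200 * 1000 * 1000     // untyped constant, defaults to int with :=
const typedSize int64 = 200 * 1000 * 1000 // typed constant

func main() {
	a := untypedSize // a is int
	b := typedSize   // b is int64, no int64(...) conversion needed
	fmt.Printf("%T %T\n", a, b) // prints: int int64
}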
@@ -112,7 +110,7 @@ func pruneOldEntries(currentFileSize int64) {
 		GetEntriesTable().Where(entryIdsToRemove).Delete(models.MizuEntry{})
 		// VACUUM causes sqlite to shrink the db file after rows have been deleted, the db file will not shrink without this
 		DB.Exec("VACUUM")
-		fmt.Printf("Removed %d rows and cleared %s bytes\n", len(entryIdsToRemove), shared.BytesToHumanReadable(bytesToBeRemoved))
+		fmt.Printf("Removed %d rows and cleared %s\n", len(entryIdsToRemove), units.BytesToHumanReadable(bytesToBeRemoved))
 	} else {
 		fmt.Println("Found no rows to remove when pruning")
 	}
api/pkg/utils/truncating_logger.go (new file, 58 lines)
@@ -0,0 +1,58 @@
+package utils
+
+import (
+	"context"
+	"fmt"
+	"gorm.io/gorm/logger"
+	"gorm.io/gorm/utils"
+	"time"
+)
+
+// TruncatingLogger implements the gorm logger.Interface interface. Its purpose is to act as gorm's logger while truncating logs to a max of 50 characters to minimise the performance impact
+type TruncatingLogger struct {
+	LogLevel      logger.LogLevel
+	SlowThreshold time.Duration
+}
+
+func (truncatingLogger *TruncatingLogger) LogMode(logLevel logger.LogLevel) logger.Interface {
+	truncatingLogger.LogLevel = logLevel
+	return truncatingLogger
+}
+
+func (truncatingLogger *TruncatingLogger) Info(_ context.Context, message string, __ ...interface{}) {
+	if truncatingLogger.LogLevel < logger.Info {
+		return
+	}
+	fmt.Printf("gorm info: %.50s\n", message)
+}
+
+func (truncatingLogger *TruncatingLogger) Warn(_ context.Context, message string, __ ...interface{}) {
+	if truncatingLogger.LogLevel < logger.Warn {
+		return
+	}
+	fmt.Printf("gorm warning: %.50s\n", message)
+}
+
+func (truncatingLogger *TruncatingLogger) Error(_ context.Context, message string, __ ...interface{}) {
+	if truncatingLogger.LogLevel < logger.Error {
+		return
+	}
+	fmt.Printf("gorm error: %.50s\n", message)
+}
+
+func (truncatingLogger *TruncatingLogger) Trace(ctx context.Context, begin time.Time, fc func() (string, int64), err error) {
+	if truncatingLogger.LogLevel == logger.Silent {
+		return
+	}
+	elapsed := time.Since(begin)
+	if err != nil {
+		sql, rows := fc() // copied into every condition as this is a potentially heavy operation best done only when necessary
+		truncatingLogger.Error(ctx, fmt.Sprintf("Error in %s: %v - elapsed: %fs affected rows: %d, sql: %s", utils.FileWithLineNum(), err, elapsed.Seconds(), rows, sql))
+	} else if truncatingLogger.LogLevel >= logger.Warn && elapsed > truncatingLogger.SlowThreshold {
+		sql, rows := fc()
+		truncatingLogger.Warn(ctx, fmt.Sprintf("Slow sql query - elapse: %fs rows: %d, sql: %s", elapsed.Seconds(), rows, sql))
+	} else if truncatingLogger.LogLevel >= logger.Info {
+		sql, rows := fc()
+		truncatingLogger.Info(ctx, fmt.Sprintf("Sql query - elapse: %fs rows: %d, sql: %s", elapsed.Seconds(), rows, sql))
+	}
+}
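The truncation in this logger is done by the precision verb in each Printf: %.50s prints at most the first 50 characters of its string argument, which is what keeps long SQL statements out of the log output. A standalone illustration:

package main

import "fmt"

func main() {
	query := "SELECT id, entry FROM mizu_entries WHERE entry_id IN (1, 2, 3, 4, 5, 6)"
	fmt.Printf("gorm info: %.50s\n", query) // at most 50 characters are printed
}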
@@ -3,7 +3,7 @@ package cmd
 import (
 	"errors"
 	"fmt"
-	"github.com/up9inc/mizu/shared"
+	"github.com/up9inc/mizu/shared/units"
 	"regexp"
 	"strings"
@@ -50,12 +50,13 @@ Supported protocols are HTTP and gRPC.`,
 			return errors.New(fmt.Sprintf("%s is not a valid regex %s", args[0], err))
 		}

-		mizuTapOptions.MaxEntriesDBSizeBytes, err = shared.HumanReadableToBytes(humanMaxEntriesDBSize)
+		mizuTapOptions.MaxEntriesDBSizeBytes = 200 * 1000 * 1000
+		mizuTapOptions.MaxEntriesDBSizeBytes, err = units.HumanReadableToBytes(humanMaxEntriesDBSize)
 		if err != nil {
 			return errors.New(fmt.Sprintf("Could not parse --max-entries-db-size value %s", humanMaxEntriesDBSize))
 		} else if cmd.Flags().Changed(maxEntriesDBSizeFlagName) {
 			// We're parsing human readable file sizes here so its best to be unambiguous
-			fmt.Printf("Setting max entries db size to %s\n", shared.BytesToHumanReadable(mizuTapOptions.MaxEntriesDBSizeBytes))
+			fmt.Printf("Setting max entries db size to %s\n", units.BytesToHumanReadable(mizuTapOptions.MaxEntriesDBSizeBytes))
 		}

 		directionLowerCase := strings.ToLower(direction)
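cmd.Flags().Changed reports whether the user explicitly set the flag on the command line, as opposed to the flag merely holding its default, which is why the confirmation message above prints only for an explicit --max-entries-db-size. A minimal cobra sketch; the command setup and default value here are illustrative, not the actual mizu wiring:

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use: "demo",
		Run: func(cmd *cobra.Command, args []string) {
			if cmd.Flags().Changed("max-entries-db-size") {
				fmt.Println("size was set explicitly by the user")
			}
		},
	}
	cmd.Flags().String("max-entries-db-size", "200MB", "maximum entries db size")
	cmd.Execute()
}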
@@ -1,4 +1,4 @@
-package shared
+package units

 import "github.com/docker/go-units"
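This hunk shows only the move of the package from shared to shared/units; the bodies of BytesToHumanReadable and HumanReadableToBytes are not part of the diff. Given the docker/go-units import, they plausibly wrap its decimal-size conversion helpers. A hedged sketch under that assumption:

package units

import "github.com/docker/go-units"

// BytesToHumanReadable formats a byte count as a human readable size,
// e.g. 200000000 -> "200MB". Sketch only; the real body is not in this diff.
func BytesToHumanReadable(bytes int64) string {
	return units.HumanSize(float64(bytes))
}

// HumanReadableToBytes parses sizes like "200MB" into a byte count.
// Sketch only; the real body is not in this diff.
func HumanReadableToBytes(size string) (int64, error) {
	return units.FromHumanSize(size)
}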