Update main.go, main.go, and 5 more files...

This commit is contained in:
RamiBerm
2021-07-14 17:32:55 +03:00
parent 8400e9e903
commit 8886590ea2
6 changed files with 74 additions and 16 deletions

View File

@@ -131,7 +131,7 @@ func filterHarItems(inChannel <-chan *tap.OutputChannelItem, outChannel chan *ta
if message.ConnectionInfo.IsOutgoing && api.CheckIsServiceIP(message.ConnectionInfo.ServerIP) { if message.ConnectionInfo.IsOutgoing && api.CheckIsServiceIP(message.ConnectionInfo.ServerIP) {
continue continue
} }
// TODO: move this to tappers // TODO: move this to tappers https://up9.atlassian.net/browse/TRA-3441
if filterOptions.HideHealthChecks && isHealthCheckByUserAgent(message) { if filterOptions.HideHealthChecks && isHealthCheckByUserAgent(message) {
continue continue
} }

View File

@@ -7,6 +7,7 @@ import (
"gorm.io/gorm/logger" "gorm.io/gorm/logger"
"mizuserver/pkg/models" "mizuserver/pkg/models"
"mizuserver/pkg/utils" "mizuserver/pkg/utils"
"time"
) )
const ( const (
@@ -44,7 +45,7 @@ func GetEntriesTable() *gorm.DB {
func initDataBase(databasePath string) *gorm.DB { func initDataBase(databasePath string) *gorm.DB {
temp, _ := gorm.Open(sqlite.Open(databasePath), &gorm.Config{ temp, _ := gorm.Open(sqlite.Open(databasePath), &gorm.Config{
Logger: logger.Default.LogMode(logger.Silent), Logger: &utils.TruncatingLogger{LogLevel: logger.Warn, SlowThreshold: 500 * time.Millisecond},
}) })
_ = temp.AutoMigrate(&models.MizuEntry{}) // this will ensure table is created _ = temp.AutoMigrate(&models.MizuEntry{}) // this will ensure table is created
return temp return temp

View File

@@ -5,6 +5,7 @@ import (
"github.com/fsnotify/fsnotify" "github.com/fsnotify/fsnotify"
"github.com/up9inc/mizu/shared" "github.com/up9inc/mizu/shared"
"github.com/up9inc/mizu/shared/debounce" "github.com/up9inc/mizu/shared/debounce"
"github.com/up9inc/mizu/shared/units"
"log" "log"
"mizuserver/pkg/models" "mizuserver/pkg/models"
"os" "os"
@@ -13,19 +14,18 @@ import (
) )
const percentageOfMaxSizeBytesToPrune = 15 const percentageOfMaxSizeBytesToPrune = 15
const defaultMaxDatabaseSizeBytes = 200 * 1000 * 1000 const defaultMaxDatabaseSizeBytes int64 = 200 * 1000 * 1000
func StartEnforcingDatabaseSize() { func StartEnforcingDatabaseSize() {
watcher, err := fsnotify.NewWatcher() watcher, err := fsnotify.NewWatcher()
if err != nil { if err != nil {
log.Printf("Error creating filesystem watcher for db size enforcement: %v\n", err) // TODO: make fatal log.Fatalf("Error creating filesystem watcher for db size enforcement: %v\n", err)
return return
} }
defer watcher.Close()
maxEntriesDBByteSize, err := getMaxEntriesDBByteSize() maxEntriesDBByteSize, err := getMaxEntriesDBByteSize()
if err != nil { if err != nil {
log.Printf("Error parsing max db size: %v\n", err) // TODO: make fatal log.Fatalf("Error parsing max db size: %v\n", err)
return return
} }
@@ -33,7 +33,6 @@ func StartEnforcingDatabaseSize() {
checkFileSize(maxEntriesDBByteSize) checkFileSize(maxEntriesDBByteSize)
}) })
done := make(chan bool)
go func() { go func() {
for { for {
select { select {
@@ -41,7 +40,7 @@ func StartEnforcingDatabaseSize() {
if !ok { if !ok {
return // closed channel return // closed channel
} }
if event.Op&fsnotify.Write == fsnotify.Write { if event.Op == fsnotify.Write {
checkFileSizeDebouncer.SetOn() checkFileSizeDebouncer.SetOn()
} }
case err, ok := <-watcher.Errors: case err, ok := <-watcher.Errors:
@@ -55,13 +54,12 @@ func StartEnforcingDatabaseSize() {
err = watcher.Add(DBPath) err = watcher.Add(DBPath)
if err != nil { if err != nil {
log.Printf("Error adding %s to filesystem watcher for db size enforcement: %v\n", DBPath, err) //TODO: make fatal log.Fatalf("Error adding %s to filesystem watcher for db size enforcement: %v\n", DBPath, err)
} }
<-done
} }
func getMaxEntriesDBByteSize() (int64, error) { func getMaxEntriesDBByteSize() (int64, error) {
maxEntriesDBByteSize := int64(defaultMaxDatabaseSizeBytes) maxEntriesDBByteSize := defaultMaxDatabaseSizeBytes
var err error var err error
maxEntriesDBSizeByteSEnvVarValue := os.Getenv(shared.MaxEntriesDBSizeByteSEnvVar) maxEntriesDBSizeByteSEnvVarValue := os.Getenv(shared.MaxEntriesDBSizeByteSEnvVar)
@@ -112,7 +110,7 @@ func pruneOldEntries(currentFileSize int64) {
GetEntriesTable().Where(entryIdsToRemove).Delete(models.MizuEntry{}) GetEntriesTable().Where(entryIdsToRemove).Delete(models.MizuEntry{})
// VACUUM causes sqlite to shrink the db file after rows have been deleted, the db file will not shrink without this // VACUUM causes sqlite to shrink the db file after rows have been deleted, the db file will not shrink without this
DB.Exec("VACUUM") DB.Exec("VACUUM")
fmt.Printf("Removed %d rows and cleared %s bytes\n", len(entryIdsToRemove), shared.BytesToHumanReadable(bytesToBeRemoved)) fmt.Printf("Removed %d rows and cleared %s\n", len(entryIdsToRemove), units.BytesToHumanReadable(bytesToBeRemoved))
} else { } else {
fmt.Println("Found no rows to remove when pruning") fmt.Println("Found no rows to remove when pruning")
} }

View File

@@ -0,0 +1,58 @@
package utils
import (
"context"
"fmt"
"gorm.io/gorm/logger"
"gorm.io/gorm/utils"
"time"
)
// TruncatingLogger implements the gorm logger.Interface interface. Its purpose is to act as gorm's logger while truncating logs to a max of 50 characters to minimise the performance impact
type TruncatingLogger struct {
	LogLevel      logger.LogLevel // minimum severity that will be printed; mutated by LogMode
	SlowThreshold time.Duration   // queries slower than this are reported via Warn in Trace
}
// LogMode sets the minimum log level and returns the logger itself,
// satisfying gorm's logger.Interface chaining convention.
func (l *TruncatingLogger) LogMode(level logger.LogLevel) logger.Interface {
	l.LogLevel = level
	return l
}
// Info prints an info-level message, truncated to 50 characters.
// gorm passes printf-style messages with variadic data; previously the
// data args were discarded (parameter named `__`, never used), so format
// verbs printed literally and the logged values were lost.
func (truncatingLogger *TruncatingLogger) Info(_ context.Context, message string, params ...interface{}) {
	if truncatingLogger.LogLevel < logger.Info {
		return
	}
	// Only format when args were supplied, so a bare message containing
	// a literal '%' is not mangled by Sprintf.
	if len(params) > 0 {
		message = fmt.Sprintf(message, params...)
	}
	fmt.Printf("gorm info: %.50s\n", message)
}
// Warn prints a warning-level message, truncated to 50 characters.
// gorm passes printf-style messages with variadic data; previously the
// data args were discarded (parameter named `__`, never used), so format
// verbs printed literally and the logged values were lost.
func (truncatingLogger *TruncatingLogger) Warn(_ context.Context, message string, params ...interface{}) {
	if truncatingLogger.LogLevel < logger.Warn {
		return
	}
	// Only format when args were supplied, so a bare message containing
	// a literal '%' is not mangled by Sprintf.
	if len(params) > 0 {
		message = fmt.Sprintf(message, params...)
	}
	fmt.Printf("gorm warning: %.50s\n", message)
}
// Error prints an error-level message, truncated to 50 characters.
// gorm passes printf-style messages with variadic data; previously the
// data args were discarded (parameter named `__`, never used), so format
// verbs printed literally and the logged values were lost.
func (truncatingLogger *TruncatingLogger) Error(_ context.Context, message string, params ...interface{}) {
	if truncatingLogger.LogLevel < logger.Error {
		return
	}
	// Only format when args were supplied, so a bare message containing
	// a literal '%' is not mangled by Sprintf.
	if len(params) > 0 {
		message = fmt.Sprintf(message, params...)
	}
	fmt.Printf("gorm error: %.50s\n", message)
}
// Trace reports a finished SQL statement: errors via Error, slow queries
// via Warn, and everything else via Info (each subject to the current
// LogLevel and the 50-character truncation applied by those methods).
//
// Fixes vs. the previous version: the "elapse" typo in the slow/info
// messages is corrected, and the slow-query branch now requires
// SlowThreshold > 0 — matching gorm's default logger — so a zero-value
// threshold no longer classifies every query as slow.
func (truncatingLogger *TruncatingLogger) Trace(ctx context.Context, begin time.Time, fc func() (string, int64), err error) {
	if truncatingLogger.LogLevel == logger.Silent {
		return
	}
	elapsed := time.Since(begin)
	switch {
	case err != nil:
		// fc is called lazily in each branch as it is a potentially heavy
		// operation best done only when the message will actually be built.
		sql, rows := fc()
		truncatingLogger.Error(ctx, fmt.Sprintf("Error in %s: %v - elapsed: %fs affected rows: %d, sql: %s", utils.FileWithLineNum(), err, elapsed.Seconds(), rows, sql))
	case truncatingLogger.LogLevel >= logger.Warn && truncatingLogger.SlowThreshold > 0 && elapsed > truncatingLogger.SlowThreshold:
		sql, rows := fc()
		truncatingLogger.Warn(ctx, fmt.Sprintf("Slow sql query - elapsed: %fs rows: %d, sql: %s", elapsed.Seconds(), rows, sql))
	case truncatingLogger.LogLevel >= logger.Info:
		sql, rows := fc()
		truncatingLogger.Info(ctx, fmt.Sprintf("Sql query - elapsed: %fs rows: %d, sql: %s", elapsed.Seconds(), rows, sql))
	}
}

View File

@@ -3,7 +3,7 @@ package cmd
import ( import (
"errors" "errors"
"fmt" "fmt"
"github.com/up9inc/mizu/shared" "github.com/up9inc/mizu/shared/units"
"regexp" "regexp"
"strings" "strings"
@@ -50,12 +50,13 @@ Supported protocols are HTTP and gRPC.`,
return errors.New(fmt.Sprintf("%s is not a valid regex %s", args[0], err)) return errors.New(fmt.Sprintf("%s is not a valid regex %s", args[0], err))
} }
mizuTapOptions.MaxEntriesDBSizeBytes, err = shared.HumanReadableToBytes(humanMaxEntriesDBSize) mizuTapOptions.MaxEntriesDBSizeBytes = 200 * 1000 * 1000
mizuTapOptions.MaxEntriesDBSizeBytes, err = units.HumanReadableToBytes(humanMaxEntriesDBSize)
if err != nil { if err != nil {
return errors.New(fmt.Sprintf("Could not parse --max-entries-db-size value %s", humanMaxEntriesDBSize)) return errors.New(fmt.Sprintf("Could not parse --max-entries-db-size value %s", humanMaxEntriesDBSize))
} else if cmd.Flags().Changed(maxEntriesDBSizeFlagName) { } else if cmd.Flags().Changed(maxEntriesDBSizeFlagName) {
// We're parsing human readable file sizes here so its best to be unambiguous // We're parsing human readable file sizes here so its best to be unambiguous
fmt.Printf("Setting max entries db size to %s\n", shared.BytesToHumanReadable(mizuTapOptions.MaxEntriesDBSizeBytes)) fmt.Printf("Setting max entries db size to %s\n", units.BytesToHumanReadable(mizuTapOptions.MaxEntriesDBSizeBytes))
} }
directionLowerCase := strings.ToLower(direction) directionLowerCase := strings.ToLower(direction)

View File

@@ -1,4 +1,4 @@
package shared package units
import "github.com/docker/go-units" import "github.com/docker/go-units"