diff --git a/agent/pkg/providers/stats_provider.go b/agent/pkg/providers/stats_provider.go
index 1e0340cd3..f701c6392 100644
--- a/agent/pkg/providers/stats_provider.go
+++ b/agent/pkg/providers/stats_provider.go
@@ -69,72 +69,15 @@ func GetGeneralStats() GeneralStats {
     return generalStats
 }
 
-func getBucketStatsCopy() BucketStats {
-    bucketStatsCopy := BucketStats{}
-    bucketStatsLocker.Lock()
-    if err := copier.Copy(&bucketStatsCopy, bucketsStats); err != nil {
-        logger.Log.Errorf("Error while copying src stats into temporary copied object")
-        return nil
-    }
-    bucketStatsLocker.Unlock()
-    return bucketStatsCopy
-}
-
 func GetAccumulativeStats() []*AccumulativeStatsProtocol {
     bucketStatsCopy := getBucketStatsCopy()
-    if bucketStatsCopy == nil {
+    if len(bucketStatsCopy) == 0 {
         return make([]*AccumulativeStatsProtocol, 0)
     }
 
-    protocolToColor := make(map[string]string, 0)
-    methodsPerProtocolAggregated := make(map[string]map[string]*AccumulativeStatsCounter, 0)
-    for _, countersOfTimeFrame := range bucketStatsCopy {
-        for protocolName, value := range countersOfTimeFrame.ProtocolStats {
-            if _, ok := protocolToColor[protocolName]; !ok {
-                protocolToColor[protocolName] = value.Color
-            }
-            for method, countersValue := range value.MethodsStats {
-                if _, found := methodsPerProtocolAggregated[protocolName]; !found {
-                    methodsPerProtocolAggregated[protocolName] = map[string]*AccumulativeStatsCounter{}
-                }
-                if _, found := methodsPerProtocolAggregated[protocolName][method]; !found {
-                    methodsPerProtocolAggregated[protocolName][method] = &AccumulativeStatsCounter{
-                        Name:            method,
-                        EntriesCount:    0,
-                        VolumeSizeBytes: 0,
-                    }
-                }
-                methodsPerProtocolAggregated[protocolName][method].EntriesCount += countersValue.EntriesCount
-                methodsPerProtocolAggregated[protocolName][method].VolumeSizeBytes += countersValue.VolumeInBytes
-            }
-        }
-    }
+    methodsPerProtocolAggregated, protocolToColor := getAggregatedStatsAllTime(bucketStatsCopy)
 
-    return ConvertToPieData(methodsPerProtocolAggregated, protocolToColor)
-}
-
-func ConvertToPieData(methodsPerProtocolAggregated map[string]map[string]*AccumulativeStatsCounter, protocolToColor map[string]string) []*AccumulativeStatsProtocol {
-    protocolsData := make([]*AccumulativeStatsProtocol, 0)
-    for protocolName, value := range methodsPerProtocolAggregated {
-        entriesCount := 0
-        volumeSizeBytes := 0
-        methods := make([]*AccumulativeStatsCounter, 0)
-        for _, methodAccData := range value {
-            entriesCount += methodAccData.EntriesCount
-            volumeSizeBytes += methodAccData.VolumeSizeBytes
-            methods = append(methods, methodAccData)
-        }
-        protocolsData = append(protocolsData, &AccumulativeStatsProtocol{
-            AccumulativeStatsCounter: AccumulativeStatsCounter{
-                Name:            protocolName,
-                EntriesCount:    entriesCount,
-                VolumeSizeBytes: volumeSizeBytes,
-            },
-            Color:   protocolToColor[protocolName],
-            Methods: methods,
-        })
-    }
-    return protocolsData
+    return convertAccumulativeStatsDictToArray(methodsPerProtocolAggregated, protocolToColor)
 }
 
 func GetAccumulativeStatsTiming(intervalSeconds int, numberOfBars int) []*AccumulativeStatsProtocolTime {
@@ -143,79 +86,11 @@ func GetAccumulativeStatsTiming(intervalSeconds int, numberOfBars int) []*Accumu
         return make([]*AccumulativeStatsProtocolTime, 0)
     }
 
-    protocolToColor := make(map[string]string, 0)
-    methodsPerProtocolPerTimeAggregated := make(map[time.Time]map[string]map[string]*AccumulativeStatsCounter, 0)
+    firstBucketTime := getFirstBucketTime(time.Now().UTC(), intervalSeconds, numberOfBars)
 
-    // TODO: Extract to function and add tests for those values
-    lastBucketTime := time.Now().UTC().Add(-1 * InternalBucketThreshold / 2).Round(InternalBucketThreshold)
-    firstBucketTime := lastBucketTime.Add(-1 * time.Second * time.Duration(intervalSeconds*(numberOfBars-1)))
-    bucketStatsIndex := len(bucketStatsCopy) - 1
+    methodsPerProtocolPerTimeAggregated, protocolToColor := getAggregatedResultTimingFromSpecificTime(intervalSeconds, bucketStatsCopy, firstBucketTime)
 
-    for bucketStatsIndex >= 0 {
-        currentBucketTime := bucketStatsCopy[bucketStatsIndex].BucketTime
-        if currentBucketTime.After(firstBucketTime) || currentBucketTime.Equal(firstBucketTime) {
-            resultBucketRoundedKey := currentBucketTime.Add(-1 * time.Second * time.Duration(intervalSeconds) / 2).Round(time.Second * time.Duration(intervalSeconds))
-
-            for protocolName, data := range bucketStatsCopy[bucketStatsIndex].ProtocolStats {
-                if _, ok := protocolToColor[protocolName]; !ok {
-                    protocolToColor[protocolName] = data.Color
-                }
-
-                for methodName, dataOfMethod := range data.MethodsStats {
-
-                    if _, ok := methodsPerProtocolPerTimeAggregated[resultBucketRoundedKey]; !ok {
-                        methodsPerProtocolPerTimeAggregated[resultBucketRoundedKey] = map[string]map[string]*AccumulativeStatsCounter{}
-                    }
-                    if _, ok := methodsPerProtocolPerTimeAggregated[resultBucketRoundedKey][protocolName]; !ok {
-                        methodsPerProtocolPerTimeAggregated[resultBucketRoundedKey][protocolName] = map[string]*AccumulativeStatsCounter{}
-                    }
-                    if _, ok := methodsPerProtocolPerTimeAggregated[resultBucketRoundedKey][protocolName][methodName]; !ok {
-                        methodsPerProtocolPerTimeAggregated[resultBucketRoundedKey][protocolName][methodName] = &AccumulativeStatsCounter{
-                            Name:            methodName,
-                            EntriesCount:    0,
-                            VolumeSizeBytes: 0,
-                        }
-                    }
-                    methodsPerProtocolPerTimeAggregated[resultBucketRoundedKey][protocolName][methodName].EntriesCount += dataOfMethod.EntriesCount
-                    methodsPerProtocolPerTimeAggregated[resultBucketRoundedKey][protocolName][methodName].VolumeSizeBytes += dataOfMethod.VolumeInBytes
-                }
-            }
-        }
-        bucketStatsIndex--
-    }
-
-    return ConvertToTimelineData(methodsPerProtocolPerTimeAggregated, protocolToColor)
-}
-
-func ConvertToTimelineData(methodsPerProtocolPerTimeAggregated map[time.Time]map[string]map[string]*AccumulativeStatsCounter, protocolToColor map[string]string) []*AccumulativeStatsProtocolTime {
-    finalResult := make([]*AccumulativeStatsProtocolTime, 0)
-    for timeKey, item := range methodsPerProtocolPerTimeAggregated {
-        protocolsData := make([]*AccumulativeStatsProtocol, 0)
-        for protocolName := range item {
-            entriesCount := 0
-            volumeSizeBytes := 0
-            methods := make([]*AccumulativeStatsCounter, 0)
-            for _, methodAccData := range methodsPerProtocolPerTimeAggregated[timeKey][protocolName] {
-                entriesCount += methodAccData.EntriesCount
-                volumeSizeBytes += methodAccData.VolumeSizeBytes
-                methods = append(methods, methodAccData)
-            }
-            protocolsData = append(protocolsData, &AccumulativeStatsProtocol{
-                AccumulativeStatsCounter: AccumulativeStatsCounter{
-                    Name:            protocolName,
-                    EntriesCount:    entriesCount,
-                    VolumeSizeBytes: volumeSizeBytes,
-                },
-                Color:   protocolToColor[protocolName],
-                Methods: methods,
-            })
-        }
-        finalResult = append(finalResult, &AccumulativeStatsProtocolTime{
-            Time:          timeKey.UnixMilli(),
-            ProtocolsData: protocolsData,
-        })
-    }
-    return finalResult
+    return convertAccumulativeStatsTimelineDictToArray(methodsPerProtocolPerTimeAggregated, protocolToColor)
 }
 
 func EntryAdded(size int, summery *api.BaseEntry) {
@@ -233,14 +108,8 @@ func EntryAdded(size int, summery *api.BaseEntry) {
     generalStats.LastEntryTimestamp = currentTimestamp
 }
 
-//GetBucketOfTimeStamp Round the entry to the nearest threshold (one minute) floored (e.g: 15:31:45 -> 15:31:00)
-func GetBucketOfTimeStamp(timestamp int64) time.Time {
-    entryTimeStampAsTime := time.UnixMilli(timestamp)
-    return entryTimeStampAsTime.Add(-1 * InternalBucketThreshold / 2).Round(InternalBucketThreshold)
-}
-
 func addToBucketStats(size int, summery *api.BaseEntry) {
-    entryTimeBucketRounded := GetBucketOfTimeStamp(summery.Timestamp)
+    entryTimeBucketRounded := getBucketFromTimeStamp(summery.Timestamp)
 
     if len(bucketsStats) == 0 {
         bucketsStats = append(bucketsStats, &TimeFrameStatsValue{
@@ -272,3 +141,148 @@ func addToBucketStats(size int, summery *api.BaseEntry) {
     bucketOfEntry.ProtocolStats[summery.Protocol.Abbreviation].MethodsStats[summery.Method].EntriesCount += 1
     bucketOfEntry.ProtocolStats[summery.Protocol.Abbreviation].MethodsStats[summery.Method].VolumeInBytes += size
 }
+
+func getBucketFromTimeStamp(timestamp int64) time.Time {
+    entryTimeStampAsTime := time.UnixMilli(timestamp)
+    return entryTimeStampAsTime.Add(-1 * InternalBucketThreshold / 2).Round(InternalBucketThreshold)
+}
+
+func getFirstBucketTime(endTime time.Time, intervalSeconds int, numberOfBars int) time.Time {
+    lastBucketTime := endTime.Add(-1 * time.Second * time.Duration(intervalSeconds) / 2).Round(time.Second * time.Duration(intervalSeconds))
+    firstBucketTime := lastBucketTime.Add(-1 * time.Second * time.Duration(intervalSeconds*(numberOfBars-1)))
+    return firstBucketTime
+}
+
+func convertAccumulativeStatsTimelineDictToArray(methodsPerProtocolPerTimeAggregated map[time.Time]map[string]map[string]*AccumulativeStatsCounter, protocolToColor map[string]string) []*AccumulativeStatsProtocolTime {
+    finalResult := make([]*AccumulativeStatsProtocolTime, 0)
+    for timeKey, item := range methodsPerProtocolPerTimeAggregated {
+        protocolsData := make([]*AccumulativeStatsProtocol, 0)
+        for protocolName := range item {
+            entriesCount := 0
+            volumeSizeBytes := 0
+            methods := make([]*AccumulativeStatsCounter, 0)
+            for _, methodAccData := range methodsPerProtocolPerTimeAggregated[timeKey][protocolName] {
+                entriesCount += methodAccData.EntriesCount
+                volumeSizeBytes += methodAccData.VolumeSizeBytes
+                methods = append(methods, methodAccData)
+            }
+            protocolsData = append(protocolsData, &AccumulativeStatsProtocol{
+                AccumulativeStatsCounter: AccumulativeStatsCounter{
+                    Name:            protocolName,
+                    EntriesCount:    entriesCount,
+                    VolumeSizeBytes: volumeSizeBytes,
+                },
+                Color:   protocolToColor[protocolName],
+                Methods: methods,
+            })
+        }
+        finalResult = append(finalResult, &AccumulativeStatsProtocolTime{
+            Time:          timeKey.UnixMilli(),
+            ProtocolsData: protocolsData,
+        })
+    }
+    return finalResult
+}
+
+func convertAccumulativeStatsDictToArray(methodsPerProtocolAggregated map[string]map[string]*AccumulativeStatsCounter, protocolToColor map[string]string) []*AccumulativeStatsProtocol {
+    protocolsData := make([]*AccumulativeStatsProtocol, 0)
+    for protocolName, value := range methodsPerProtocolAggregated {
+        entriesCount := 0
+        volumeSizeBytes := 0
+        methods := make([]*AccumulativeStatsCounter, 0)
+        for _, methodAccData := range value {
+            entriesCount += methodAccData.EntriesCount
+            volumeSizeBytes += methodAccData.VolumeSizeBytes
+            methods = append(methods, methodAccData)
+        }
+        protocolsData = append(protocolsData, &AccumulativeStatsProtocol{
+            AccumulativeStatsCounter: AccumulativeStatsCounter{
+                Name:            protocolName,
+                EntriesCount:    entriesCount,
+                VolumeSizeBytes: volumeSizeBytes,
+            },
+            Color:   protocolToColor[protocolName],
+            Methods: methods,
+        })
+    }
+    return protocolsData
+}
+
+func getBucketStatsCopy() BucketStats {
+    bucketStatsCopy := BucketStats{}
+    bucketStatsLocker.Lock()
+    if err := copier.Copy(&bucketStatsCopy, bucketsStats); err != nil {
+        logger.Log.Errorf("Error while copying src stats into temporary copied object")
+        return nil
+    }
+    bucketStatsLocker.Unlock()
+    return bucketStatsCopy
+}
+
+func getAggregatedResultTimingFromSpecificTime(intervalSeconds int, bucketStats BucketStats, firstBucketTime time.Time) (map[time.Time]map[string]map[string]*AccumulativeStatsCounter, map[string]string) {
+    protocolToColor := map[string]string{}
+    methodsPerProtocolPerTimeAggregated := map[time.Time]map[string]map[string]*AccumulativeStatsCounter{}
+
+    bucketStatsIndex := len(bucketStats) - 1
+    for bucketStatsIndex >= 0 {
+        currentBucketTime := bucketStats[bucketStatsIndex].BucketTime
+        if currentBucketTime.After(firstBucketTime) || currentBucketTime.Equal(firstBucketTime) {
+            resultBucketRoundedKey := currentBucketTime.Add(-1 * time.Second * time.Duration(intervalSeconds) / 2).Round(time.Second * time.Duration(intervalSeconds))
+
+            for protocolName, data := range bucketStats[bucketStatsIndex].ProtocolStats {
+                if _, ok := protocolToColor[protocolName]; !ok {
+                    protocolToColor[protocolName] = data.Color
+                }
+
+                for methodName, dataOfMethod := range data.MethodsStats {
+
+                    if _, ok := methodsPerProtocolPerTimeAggregated[resultBucketRoundedKey]; !ok {
+                        methodsPerProtocolPerTimeAggregated[resultBucketRoundedKey] = map[string]map[string]*AccumulativeStatsCounter{}
+                    }
+                    if _, ok := methodsPerProtocolPerTimeAggregated[resultBucketRoundedKey][protocolName]; !ok {
+                        methodsPerProtocolPerTimeAggregated[resultBucketRoundedKey][protocolName] = map[string]*AccumulativeStatsCounter{}
+                    }
+                    if _, ok := methodsPerProtocolPerTimeAggregated[resultBucketRoundedKey][protocolName][methodName]; !ok {
+                        methodsPerProtocolPerTimeAggregated[resultBucketRoundedKey][protocolName][methodName] = &AccumulativeStatsCounter{
+                            Name:            methodName,
+                            EntriesCount:    0,
+                            VolumeSizeBytes: 0,
+                        }
+                    }
+                    methodsPerProtocolPerTimeAggregated[resultBucketRoundedKey][protocolName][methodName].EntriesCount += dataOfMethod.EntriesCount
+                    methodsPerProtocolPerTimeAggregated[resultBucketRoundedKey][protocolName][methodName].VolumeSizeBytes += dataOfMethod.VolumeInBytes
+                }
+            }
+        }
+        bucketStatsIndex--
+    }
+    return methodsPerProtocolPerTimeAggregated, protocolToColor
+}
+
+func getAggregatedStatsAllTime(bucketStatsCopy BucketStats) (map[string]map[string]*AccumulativeStatsCounter, map[string]string) {
+    protocolToColor := make(map[string]string, 0)
+    methodsPerProtocolAggregated := make(map[string]map[string]*AccumulativeStatsCounter, 0)
+    for _, countersOfTimeFrame := range bucketStatsCopy {
+        for protocolName, value := range countersOfTimeFrame.ProtocolStats {
+            if _, ok := protocolToColor[protocolName]; !ok {
+                protocolToColor[protocolName] = value.Color
+            }
+
+            for method, countersValue := range value.MethodsStats {
+                if _, found := methodsPerProtocolAggregated[protocolName]; !found {
+                    methodsPerProtocolAggregated[protocolName] = map[string]*AccumulativeStatsCounter{}
+                }
+                if _, found := methodsPerProtocolAggregated[protocolName][method]; !found {
+                    methodsPerProtocolAggregated[protocolName][method] = &AccumulativeStatsCounter{
+                        Name:            method,
+                        EntriesCount:    0,
+                        VolumeSizeBytes: 0,
+                    }
+                }
+                methodsPerProtocolAggregated[protocolName][method].EntriesCount += countersValue.EntriesCount
+                methodsPerProtocolAggregated[protocolName][method].VolumeSizeBytes += countersValue.VolumeInBytes
+            }
+        }
+    }
+    return methodsPerProtocolAggregated, protocolToColor
+}
diff --git a/agent/pkg/providers/stats_provider_internal_test.go b/agent/pkg/providers/stats_provider_internal_test.go
new file mode 100644
index 000000000..9626b7d93
--- /dev/null
+++ b/agent/pkg/providers/stats_provider_internal_test.go
@@ -0,0 +1,331 @@
+package providers
+
+import (
+    "fmt"
+    "reflect"
+    "testing"
+    "time"
+)
+
+func TestGetBucketOfTimeStamp(t *testing.T) {
+    tests := map[int64]time.Time{
+        time.Date(2022, time.Month(1), 1, 10, 34, 45, 0, time.Local).UnixMilli(): time.Date(2022, time.Month(1), 1, 10, 34, 00, 0, time.Local),
+        time.Date(2022, time.Month(1), 1, 10, 34, 00, 0, time.Local).UnixMilli(): time.Date(2022, time.Month(1), 1, 10, 34, 00, 0, time.Local),
+        time.Date(2022, time.Month(1), 1, 10, 59, 01, 0, time.Local).UnixMilli(): time.Date(2022, time.Month(1), 1, 10, 59, 00, 0, time.Local),
+    }
+
+    for key, value := range tests {
+        t.Run(fmt.Sprintf("%v", key), func(t *testing.T) {
+
+            actual := getBucketFromTimeStamp(key)
+
+            if actual != value {
+                t.Errorf("unexpected result - expected: %v, actual: %v", value, actual)
+            }
+        })
+    }
+}
+
+type DataForBucketBorderFunction struct {
+    EndTime           time.Time
+    IntervalInSeconds int
+    NumberOfBars      int
+}
+
+func TestGetBucketBorders(t *testing.T) {
+    tests := map[DataForBucketBorderFunction]time.Time{
+        DataForBucketBorderFunction{
+            time.Date(2022, time.Month(1), 1, 10, 34, 45, 0, time.UTC),
+            300,
+            10,
+        }: time.Date(2022, time.Month(1), 1, 9, 45, 0, 0, time.UTC),
+        DataForBucketBorderFunction{
+            time.Date(2022, time.Month(1), 1, 10, 35, 45, 0, time.UTC),
+            60,
+            5,
+        }: time.Date(2022, time.Month(1), 1, 10, 31, 00, 0, time.UTC),
+    }
+
+    for key, value := range tests {
+        t.Run(fmt.Sprintf("%v", key), func(t *testing.T) {
+
+            actual := getFirstBucketTime(key.EndTime, key.IntervalInSeconds, key.NumberOfBars)
+
+            if actual != value {
+                t.Errorf("unexpected result - expected: %v, actual: %v", value, actual)
+            }
+        })
+    }
+}
+
+func TestGetAggregatedStatsAllTime(t *testing.T) {
+    bucketStatsForTest := BucketStats{
+        &TimeFrameStatsValue{
+            BucketTime: time.Date(2022, time.Month(1), 1, 10, 00, 00, 0, time.UTC),
+            ProtocolStats: map[string]ProtocolStats{
+                "http": {
+                    MethodsStats: map[string]*SizeAndEntriesCount{
+                        "get": {
+                            EntriesCount:  1,
+                            VolumeInBytes: 2,
+                        },
+                        "post": {
+                            EntriesCount:  2,
+                            VolumeInBytes: 3,
+                        },
+                    },
+                },
+                "kafka": {
+                    MethodsStats: map[string]*SizeAndEntriesCount{
+                        "listTopics": {
+                            EntriesCount:  5,
+                            VolumeInBytes: 6,
+                        },
+                    },
+                },
+            },
+        },
+        &TimeFrameStatsValue{
+            BucketTime: time.Date(2022, time.Month(1), 1, 10, 01, 00, 0, time.UTC),
+            ProtocolStats: map[string]ProtocolStats{
+                "http": {
+                    MethodsStats: map[string]*SizeAndEntriesCount{
+                        "get": {
+                            EntriesCount:  1,
+                            VolumeInBytes: 2,
+                        },
+                        "post": {
+                            EntriesCount:  2,
+                            VolumeInBytes: 3,
+                        },
+                    },
+                },
+                "redis": {
+                    MethodsStats: map[string]*SizeAndEntriesCount{
+                        "set": {
+                            EntriesCount:  5,
+                            VolumeInBytes: 6,
+                        },
+                    },
+                },
+            },
+        },
+    }
+
+    expected := map[string]map[string]*AccumulativeStatsCounter{
+        "http": {
+            "post": {
+                Name:            "post",
+                EntriesCount:    4,
+                VolumeSizeBytes: 6,
+            },
+            "get": {
+                Name:            "get",
+                EntriesCount:    2,
+                VolumeSizeBytes: 4,
+            },
+        },
+        "kafka": {
+            "listTopics": {
+                Name:            "listTopics",
+                EntriesCount:    5,
+                VolumeSizeBytes: 6,
+            },
+        },
+        "redis": {
+            "set": {
+                Name:            "set",
+                EntriesCount:    5,
+                VolumeSizeBytes: 6,
+            },
+        },
+    }
+    actual, _ := getAggregatedStatsAllTime(bucketStatsForTest)
+
+    if !reflect.DeepEqual(actual, expected) {
+        t.Errorf("unexpected result - expected: %v, actual: %v", 3, len(actual))
+    }
+}
+
+func TestGetAggregatedStatsFromSpecificTime(t *testing.T) {
+    bucketStatsForTest := BucketStats{
+        &TimeFrameStatsValue{
+            BucketTime: time.Date(2022, time.Month(1), 1, 10, 00, 00, 0, time.UTC),
+            ProtocolStats: map[string]ProtocolStats{
+                "http": {
+                    MethodsStats: map[string]*SizeAndEntriesCount{
+                        "get": {
+                            EntriesCount:  1,
+                            VolumeInBytes: 2,
+                        },
+                    },
+                },
+                "kafka": {
+                    MethodsStats: map[string]*SizeAndEntriesCount{
+                        "listTopics": {
+                            EntriesCount:  5,
+                            VolumeInBytes: 6,
+                        },
+                    },
+                },
+            },
+        },
+        &TimeFrameStatsValue{
+            BucketTime: time.Date(2022, time.Month(1), 1, 10, 01, 00, 0, time.UTC),
+            ProtocolStats: map[string]ProtocolStats{
+                "http": {
+                    MethodsStats: map[string]*SizeAndEntriesCount{
+                        "get": {
+                            EntriesCount:  1,
+                            VolumeInBytes: 2,
+                        },
+                        "post": {
+                            EntriesCount:  2,
+                            VolumeInBytes: 3,
+                        },
+                    },
+                },
+                "redis": {
+                    MethodsStats: map[string]*SizeAndEntriesCount{
+                        "set": {
+                            EntriesCount:  5,
+                            VolumeInBytes: 6,
+                        },
+                    },
+                },
+            },
+        },
+    }
+
+    expected := map[time.Time]map[string]map[string]*AccumulativeStatsCounter{
+        time.Date(2022, time.Month(1), 1, 10, 00, 00, 0, time.UTC): {
+            "http": {
+                "post": {
+                    Name:            "post",
+                    EntriesCount:    2,
+                    VolumeSizeBytes: 3,
+                },
+                "get": {
+                    Name:            "get",
+                    EntriesCount:    2,
+                    VolumeSizeBytes: 4,
+                },
+            },
+            "kafka": {
+                "listTopics": {
+                    Name:            "listTopics",
+                    EntriesCount:    5,
+                    VolumeSizeBytes: 6,
+                },
+            },
+            "redis": {
+                "set": {
+                    Name:            "set",
+                    EntriesCount:    5,
+                    VolumeSizeBytes: 6,
+                },
+            },
+        },
+    }
+    actual, _ := getAggregatedResultTimingFromSpecificTime(300, bucketStatsForTest, time.Date(2022, time.Month(1), 1, 10, 00, 00, 0, time.UTC))
+
+    if !reflect.DeepEqual(actual, expected) {
+        t.Errorf("unexpected result - expected: %v, actual: %v", 3, len(actual))
+    }
+}
+
+func TestGetAggregatedStatsFromSpecificTimeMultipleBuckets(t *testing.T) {
+    bucketStatsForTest := BucketStats{
+        &TimeFrameStatsValue{
+            BucketTime: time.Date(2022, time.Month(1), 1, 10, 00, 00, 0, time.UTC),
+            ProtocolStats: map[string]ProtocolStats{
+                "http": {
+                    MethodsStats: map[string]*SizeAndEntriesCount{
+                        "get": {
+                            EntriesCount:  1,
+                            VolumeInBytes: 2,
+                        },
+                    },
+                },
+                "kafka": {
+                    MethodsStats: map[string]*SizeAndEntriesCount{
+                        "listTopics": {
+                            EntriesCount:  5,
+                            VolumeInBytes: 6,
+                        },
+                    },
+                },
+            },
+        },
+        &TimeFrameStatsValue{
+            BucketTime: time.Date(2022, time.Month(1), 1, 10, 01, 00, 0, time.UTC),
+            ProtocolStats: map[string]ProtocolStats{
+                "http": {
+                    MethodsStats: map[string]*SizeAndEntriesCount{
+                        "get": {
+                            EntriesCount:  1,
+                            VolumeInBytes: 2,
+                        },
+                        "post": {
+                            EntriesCount:  2,
+                            VolumeInBytes: 3,
+                        },
+                    },
+                },
+                "redis": {
+                    MethodsStats: map[string]*SizeAndEntriesCount{
+                        "set": {
+                            EntriesCount:  5,
+                            VolumeInBytes: 6,
+                        },
+                    },
+                },
+            },
+        },
+    }
+
+    expected := map[time.Time]map[string]map[string]*AccumulativeStatsCounter{
+        time.Date(2022, time.Month(1), 1, 10, 00, 00, 0, time.UTC): {
+            "http": {
+                "get": {
+                    Name:            "get",
+                    EntriesCount:    1,
+                    VolumeSizeBytes: 2,
+                },
+            },
+            "kafka": {
+                "listTopics": {
+                    Name:            "listTopics",
+                    EntriesCount:    5,
+                    VolumeSizeBytes: 6,
+                },
+            },
+        },
+        time.Date(2022, time.Month(1), 1, 10, 01, 00, 0, time.UTC): {
+            "http": {
+                "post": {
+                    Name:            "post",
+                    EntriesCount:    2,
+                    VolumeSizeBytes: 3,
+                },
+                "get": {
+                    Name:            "get",
+                    EntriesCount:    1,
+                    VolumeSizeBytes: 2,
+                },
+            },
+            "redis": {
+                "set": {
+                    Name:            "set",
+                    EntriesCount:    5,
+                    VolumeSizeBytes: 6,
+                },
+            },
+        },
+    }
+    actual, _ := getAggregatedResultTimingFromSpecificTime(60, bucketStatsForTest, time.Date(2022, time.Month(1), 1, 10, 00, 00, 0, time.UTC))
+
+    if !reflect.DeepEqual(actual, expected) {
+        t.Errorf("unexpected result - expected: %v, actual: %v", 3, len(actual))
+    }
+}
diff --git a/agent/pkg/providers/stats_provider_test.go b/agent/pkg/providers/stats_provider_test.go
index 8392dc7db..e8890a3a1 100644
--- a/agent/pkg/providers/stats_provider_test.go
+++ b/agent/pkg/providers/stats_provider_test.go
@@ -81,24 +81,4 @@ func TestEntryAddedVolume(t *testing.T) {
             }
         })
     }
-
-}
-
-func TestGetBucketOfTimeStamp(t *testing.T) {
-    tests := map[int64]time.Time{
-        time.Date(2022, time.Month(1), 1, 10, 34, 45, 0, time.Local).UnixMilli(): time.Date(2022, time.Month(1), 1, 10, 34, 00, 0, time.Local),
-        time.Date(2022, time.Month(1), 1, 10, 34, 00, 0, time.Local).UnixMilli(): time.Date(2022, time.Month(1), 1, 10, 34, 00, 0, time.Local),
-        time.Date(2022, time.Month(1), 1, 10, 59, 01, 0, time.Local).UnixMilli(): time.Date(2022, time.Month(1), 1, 10, 59, 00, 0, time.Local),
-    }
-
-    for key, value := range tests {
-        t.Run(fmt.Sprintf("%v", key), func(t *testing.T) {
-
-            actual := providers.GetBucketOfTimeStamp(key)
-
-            if actual != value {
-                t.Errorf("unexpected result - expected: %v, actual: %v", value, actual)
-            }
-        })
-    }
-}
+}