VictoriaMetrics/lib/streamaggr/dedup_test.go
Andrii Chubatiuk 15e33d56f1
lib/streamaggr: pick sample with bigger timestamp or value on deduplicator (#5939)
Apply the same deduplication logic as in https://docs.victoriametrics.com/#deduplication.
This requires more memory for deduplication, since the timestamp must now be tracked
for each record. However, deduplication should become more consistent; see the sketch below.

https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5643
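
For illustration, the selection rule referenced above can be sketched roughly as follows.
This is a minimal sketch, not the actual lib/streamaggr implementation; the dedupSample
type and isBetterSample helper are hypothetical names used only for this example:

package main

import "fmt"

// dedupSample is a hypothetical stand-in for the per-series state kept by the
// deduplicator. Tracking the timestamp alongside the value is the source of
// the extra memory usage mentioned in the commit message.
type dedupSample struct {
	timestamp int64 // sample timestamp, e.g. unix milliseconds
	value     float64
}

// isBetterSample reports whether candidate should replace current:
// the sample with the bigger timestamp wins; on equal timestamps,
// the sample with the bigger value wins.
func isBetterSample(current, candidate dedupSample) bool {
	if candidate.timestamp != current.timestamp {
		return candidate.timestamp > current.timestamp
	}
	return candidate.value > current.value
}

func main() {
	cur := dedupSample{timestamp: 100, value: 1}
	fmt.Println(isBetterSample(cur, dedupSample{timestamp: 100, value: 2})) // true: same timestamp, bigger value
	fmt.Println(isBetterSample(cur, dedupSample{timestamp: 99, value: 5}))  // false: older timestamp loses
}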

---------

Co-authored-by: Roman Khavronenko <roman@victoriametrics.com>
2024-03-12 22:47:29 +01:00


package streamaggr

import (
	"fmt"
	"reflect"
	"sync"
	"testing"
)
func TestDedupAggrSerial(t *testing.T) {
	da := newDedupAggr()

	const seriesCount = 100_000

	// Push two batches for the same set of keys. All samples carry the same
	// (zero) timestamp, and the second batch has bigger values for every key,
	// so under the "bigger value wins on equal timestamps" rule the second
	// batch is the one expected to survive deduplication.
	expectedSamplesMap := make(map[string]pushSample)
	for i := 0; i < 2; i++ {
		samples := make([]pushSample, seriesCount)
		for j := range samples {
			sample := &samples[j]
			sample.key = fmt.Sprintf("key_%d", j)
			sample.value = float64(i + j)
			expectedSamplesMap[sample.key] = *sample
		}
		da.pushSamples(samples)
	}

	if n := da.sizeBytes(); n > 4_200_000 {
		t.Fatalf("too big dedupAggr state before flush: %d bytes; it shouldn't exceed 4_200_000 bytes", n)
	}
	if n := da.itemsCount(); n != seriesCount {
		t.Fatalf("unexpected itemsCount; got %d; want %d", n, seriesCount)
	}

	// Collect flushed samples under a mutex, since the flush callback may be
	// invoked concurrently.
	flushedSamplesMap := make(map[string]pushSample)
	var mu sync.Mutex
	flushSamples := func(samples []pushSample) {
		mu.Lock()
		for _, sample := range samples {
			flushedSamplesMap[sample.key] = sample
		}
		mu.Unlock()
	}
	da.flush(flushSamples, true)

	if !reflect.DeepEqual(expectedSamplesMap, flushedSamplesMap) {
		t.Fatalf("unexpected samples;\ngot\n%v\nwant\n%v", flushedSamplesMap, expectedSamplesMap)
	}

	// After the flush the dedup state must be (almost) empty again.
	if n := da.sizeBytes(); n > 17_000 {
		t.Fatalf("too big dedupAggr state after flush; %d bytes; it shouldn't exceed 17_000 bytes", n)
	}
	if n := da.itemsCount(); n != 0 {
		t.Fatalf("unexpected non-zero itemsCount after flush; got %d", n)
	}
}
func TestDedupAggrConcurrent(_ *testing.T) {
	const concurrency = 5
	const seriesCount = 10_000
	da := newDedupAggr()

	// Hammer the deduplicator from several goroutines pushing overlapping
	// keys. The test makes no assertions; it exists to surface data races.
	var wg sync.WaitGroup
	for i := 0; i < concurrency; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for i := 0; i < 10; i++ {
				samples := make([]pushSample, seriesCount)
				for j := range samples {
					sample := &samples[j]
					sample.key = fmt.Sprintf("key_%d", j)
					sample.value = float64(i + j)
				}
				da.pushSamples(samples)
			}
		}()
	}
	wg.Wait()
}
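
Since TestDedupAggrConcurrent contains no assertions, it only pays off when run under
the race detector, e.g. with go test -race -run TestDedupAggrConcurrent ./lib/streamaggr/
from the repository root.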