mirror of
https://github.com/VictoriaMetrics/VictoriaMetrics.git
synced 2024-12-20 23:46:23 +01:00
0d5d46f9db
- Reduce memory usage by up to 5x when de-duplicating samples across big number of time series. - Reduce memory usage by up to 5x when aggregating across big number of output time series. - Add lib/promutils.LabelsCompressor, which is going to be used by other VictoriaMetrics components for reducing memory usage for marshaled []prompbmarshal.Label. - Add `dedup_interval` option at aggregation config, which allows setting individual deduplication intervals per each aggregation. - Add `keep_metric_names` option at aggregation config, which allows keeping the original metric names in the output samples. - Add `unique_samples` output, which counts the number of unique sample values. - Add `increase_prometheus` and `total_prometheus` outputs, which ignore the first sample per each newly encountered time series. - Use 64-bit hashes instead of marshaled labels as map keys when calculating `count_series` output. This makes obsolete https://github.com/VictoriaMetrics/VictoriaMetrics/pull/5579 - Expose various metrics, which may help debugging stream aggregation: - vm_streamaggr_dedup_state_size_bytes - the size of data structures responsible for deduplication - vm_streamaggr_dedup_state_items_count - the number of items in the deduplication data structures - vm_streamaggr_labels_compressor_size_bytes - the size of labels compressor data structures - vm_streamaggr_labels_compressor_items_count - the number of entries in the labels compressor - vm_streamaggr_flush_duration_seconds - a histogram, which shows the duration of stream aggregation flushes - vm_streamaggr_dedup_flush_duration_seconds - a histogram, which shows the duration of deduplication flushes - vm_streamaggr_flush_timeouts_total - counter for timed out stream aggregation flushes, which took longer than the configured interval - vm_streamaggr_dedup_flush_timeouts_total - counter for timed out deduplication flushes, which took longer than the configured dedup_interval - Actualize docs/stream-aggregation.md The 
memory usage reduction increases CPU usage during stream aggregation by up to 30%. This commit is based on https://github.com/VictoriaMetrics/VictoriaMetrics/pull/5850 Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5898
55 lines
1.0 KiB
Go
55 lines
1.0 KiB
Go
package promutils
|
|
|
|
import (
|
|
"sync/atomic"
|
|
"testing"
|
|
|
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
|
|
)
|
|
|
|
func BenchmarkLabelsCompressorCompress(b *testing.B) {
|
|
var lc LabelsCompressor
|
|
series := newTestSeries(100, 10)
|
|
|
|
b.ReportAllocs()
|
|
b.SetBytes(int64(len(series)))
|
|
|
|
b.RunParallel(func(pb *testing.PB) {
|
|
var dst []byte
|
|
for pb.Next() {
|
|
dst = dst[:0]
|
|
for _, labels := range series {
|
|
dst = lc.Compress(dst, labels)
|
|
}
|
|
Sink.Add(uint64(len(dst)))
|
|
}
|
|
})
|
|
}
|
|
|
|
func BenchmarkLabelsCompressorDecompress(b *testing.B) {
|
|
var lc LabelsCompressor
|
|
series := newTestSeries(100, 10)
|
|
datas := make([][]byte, len(series))
|
|
var dst []byte
|
|
for i, labels := range series {
|
|
dstLen := len(dst)
|
|
dst = lc.Compress(dst, labels)
|
|
datas[i] = dst[dstLen:]
|
|
}
|
|
|
|
b.ReportAllocs()
|
|
b.SetBytes(int64(len(series)))
|
|
|
|
b.RunParallel(func(pb *testing.PB) {
|
|
var labels []prompbmarshal.Label
|
|
for pb.Next() {
|
|
for _, data := range datas {
|
|
labels = lc.Decompress(labels[:0], data)
|
|
}
|
|
Sink.Add(uint64(len(labels)))
|
|
}
|
|
})
|
|
}
|
|
|
|
// Sink accumulates benchmark results so the compiler cannot eliminate the
// benchmarked Compress/Decompress calls as dead code. It is atomic because
// RunParallel updates it from multiple goroutines concurrently.
var Sink atomic.Uint64
|