mirror of
https://github.com/VictoriaMetrics/VictoriaMetrics.git
synced 2024-12-26 20:30:10 +01:00
5034aa0773
- Add Try* prefix to functions, which return bool result in order to improve readability and reduce the probability of missing check for the result returned from these functions. - Call the adjustSampleValues() only once on input samples. Previously it was called on every attempt to flush data to persistent queue. - Properly restore the initial state of WriteRequest passed to tryPushWriteRequest() before returning from this function after unsuccessful push to persistent queue. Previously a part of WriteRequest samples may be lost in such case. - Add -remoteWrite.dropSamplesOnOverload command-line flag, which can be used for dropping incoming samples instead of returning 429 Too Many Requests error to the client when -remoteWrite.disableOnDiskQueue is set and the remote storage cannot keep up with the data ingestion rate. - Add vmagent_remotewrite_samples_dropped_total metric, which counts the number of dropped samples. - Add vmagent_remotewrite_push_failures_total metric, which counts the number of unsuccessful attempts to push data to persistent queue when -remoteWrite.disableOnDiskQueue is set. - Remove vmagent_remotewrite_aggregation_metrics_dropped_total and vm_promscrape_push_samples_dropped_total metrics, because they are replaced with vmagent_remotewrite_samples_dropped_total metric. - Update 'Disabling on-disk persistence' docs at docs/vmagent.md - Update stale comments in the code Updates https://github.com/VictoriaMetrics/VictoriaMetrics/pull/5088 Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2110
70 lines
1.9 KiB
Go
70 lines
1.9 KiB
Go
package persistentqueue
|
|
|
|
import (
|
|
"fmt"
|
|
"testing"
|
|
|
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
|
|
)
|
|
|
|
func BenchmarkFastQueueThroughputSerial(b *testing.B) {
|
|
const iterationsCount = 10
|
|
for _, blockSize := range []int{1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6} {
|
|
block := make([]byte, blockSize)
|
|
b.Run(fmt.Sprintf("block-size-%d", blockSize), func(b *testing.B) {
|
|
b.ReportAllocs()
|
|
b.SetBytes(int64(blockSize) * iterationsCount)
|
|
path := fmt.Sprintf("bench-fast-queue-throughput-serial-%d", blockSize)
|
|
mustDeleteDir(path)
|
|
fq := MustOpenFastQueue(path, "foobar", iterationsCount*2, 0, false)
|
|
defer func() {
|
|
fq.MustClose()
|
|
mustDeleteDir(path)
|
|
}()
|
|
for i := 0; i < b.N; i++ {
|
|
writeReadIterationFastQueue(fq, block, iterationsCount)
|
|
}
|
|
})
|
|
}
|
|
}
|
|
|
|
func BenchmarkFastQueueThroughputConcurrent(b *testing.B) {
|
|
const iterationsCount = 10
|
|
for _, blockSize := range []int{1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6} {
|
|
block := make([]byte, blockSize)
|
|
b.Run(fmt.Sprintf("block-size-%d", blockSize), func(b *testing.B) {
|
|
b.ReportAllocs()
|
|
b.SetBytes(int64(blockSize) * iterationsCount)
|
|
path := fmt.Sprintf("bench-fast-queue-throughput-concurrent-%d", blockSize)
|
|
mustDeleteDir(path)
|
|
fq := MustOpenFastQueue(path, "foobar", iterationsCount*cgroup.AvailableCPUs()*2, 0, false)
|
|
defer func() {
|
|
fq.MustClose()
|
|
mustDeleteDir(path)
|
|
}()
|
|
b.RunParallel(func(pb *testing.PB) {
|
|
for pb.Next() {
|
|
writeReadIterationFastQueue(fq, block, iterationsCount)
|
|
}
|
|
})
|
|
})
|
|
}
|
|
}
|
|
|
|
func writeReadIterationFastQueue(fq *FastQueue, block []byte, iterationsCount int) {
|
|
for i := 0; i < iterationsCount; i++ {
|
|
if !fq.TryWriteBlock(block) {
|
|
panic(fmt.Errorf("TryWriteBlock must return true"))
|
|
}
|
|
}
|
|
var ok bool
|
|
bb := bbPool.Get()
|
|
for i := 0; i < iterationsCount; i++ {
|
|
bb.B, ok = fq.MustReadBlock(bb.B[:0])
|
|
if !ok {
|
|
panic(fmt.Errorf("unexpected ok=false"))
|
|
}
|
|
}
|
|
bbPool.Put(bb)
|
|
}
|