VictoriaMetrics/app/vlinsert/loki/loki_json_timing_test.go
Commit 7b33a27874 by Aliaksandr Valialkin, 2023-10-02:
lib/logstorage: follow-up for 8a23d08c21
- Compare the actual free disk space to the value provided via -storage.minFreeDiskSpaceBytes
  directly inside Storage.IsReadOnly(); see the first sketch after this list. The check should
  be fast in most cases, and it simplifies the logic at lib/storage.

- Do not take -storage.minFreeDiskSpaceBytes into account during background merges, since
  doing so results in uncontrolled growth of small parts when the free disk space approaches
  -storage.minFreeDiskSpaceBytes. The background merge logic uses another mechanism for
  determining whether there is enough disk space for the merge: it reserves the needed disk
  space before the merge and releases it afterwards; see the second sketch after this list.
  This prevents out-of-disk-space errors during background merges.

- Properly handle corner cases for flushing in-memory data to disk when the storage
  enters read-only mode. This is better than losing the in-memory data.

- Restore Storage.MustAddRows() in place of Storage.AddRows(),
  since the only case when AddRows() could return an error is when the storage is in read-only mode.
  This case must be handled by the caller by checking Storage.IsReadOnly()
  before adding rows to the storage; see the third sketch after this list.
  This simplifies the code a bit, since callers of Storage.MustAddRows() no longer need to handle
  errors returned by Storage.AddRows().

- Store the successfully parsed log lines to Storage even if other parts of the request contain
  invalid log lines. Previously the parsed logs could be lost in this case.
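
The free-disk-space check from the first item can be pictured as follows. This is a minimal
sketch rather than the actual lib/storage code; freeDiskSpace() and minFreeDiskSpaceBytes are
hypothetical names standing in for the real helpers:

    // Sketch: compare the current free disk space on the storage path to the
    // -storage.minFreeDiskSpaceBytes threshold on every IsReadOnly() call.
    // freeDiskSpace() is a hypothetical statfs-style helper.
    func (s *Storage) IsReadOnly() bool {
        return freeDiskSpace(s.path) < minFreeDiskSpaceBytes
    }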
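
The reserve/release mechanism from the second item might look roughly like this; all names
are illustrative and not taken from the actual merge code:

    // Sketch: reserve the estimated output size before merging and release the
    // reservation afterwards, instead of consulting -storage.minFreeDiskSpaceBytes.
    func mergeParts(parts []*part) error {
        n := neededDiskSpaceBytes(parts)
        if !tryReserveDiskSpace(n) {
            // Not enough free disk space - postpone the merge instead of
            // producing an uncontrolled number of small parts.
            return errNotEnoughDiskSpace
        }
        defer releaseDiskSpace(n)
        // ... perform the merge ...
        return nil
    }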
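
And the caller-side contract for Storage.MustAddRows() from the fourth item, again as a
hedged sketch:

    // Sketch: the caller rejects inserts while the storage is in read-only mode,
    // so MustAddRows() itself never has to return a read-only error.
    if s.IsReadOnly() {
        return fmt.Errorf("cannot add rows: the storage is in read-only mode")
    }
    s.MustAddRows(rows)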

Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4737
Updates https://github.com/VictoriaMetrics/VictoriaMetrics/pull/4945

package loki

import (
    "fmt"
    "strconv"
    "testing"
    "time"

    "github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
)

// BenchmarkParseJSONRequest measures parseJSONRequest() performance over a grid
// of stream, row and label counts. Run it with:
//
//     go test -bench=BenchmarkParseJSONRequest -benchmem ./app/vlinsert/loki/
func BenchmarkParseJSONRequest(b *testing.B) {
    for _, streams := range []int{5, 10} {
        for _, rows := range []int{100, 1000} {
            for _, labels := range []int{10, 50} {
                b.Run(fmt.Sprintf("streams_%d/rows_%d/labels_%d", streams, rows, labels), func(b *testing.B) {
                    benchmarkParseJSONRequest(b, streams, rows, labels)
                })
            }
        }
    }
}

func benchmarkParseJSONRequest(b *testing.B, streams, rows, labels int) {
    b.ReportAllocs()
    // Throughput is reported per log row: the generated body contains
    // streams*rows rows in total.
    b.SetBytes(int64(streams * rows))
    b.RunParallel(func(pb *testing.PB) {
        data := getJSONBody(streams, rows, labels)
        for pb.Next() {
            // The no-op callback discards parsed rows, so only the parsing
            // cost is measured.
            _, err := parseJSONRequest(data, func(timestamp int64, fields []logstorage.Field) {})
            if err != nil {
                panic(fmt.Errorf("unexpected error: %s", err))
            }
        }
    })
}

// getJSONBody returns a Loki JSON push request body containing the given number
// of streams, with the given number of labels and log rows per stream.
func getJSONBody(streams, rows, labels int) []byte {
    body := append([]byte{}, `{"streams":[`...)
    now := time.Now().UnixNano()
    valuePrefix := fmt.Sprintf(`["%d","value_`, now)
    for i := 0; i < streams; i++ {
        body = append(body, `{"stream":{`...)
        // Stream labels: "label_<j>":"value_<j>",...
        for j := 0; j < labels; j++ {
            body = append(body, `"label_`...)
            body = strconv.AppendInt(body, int64(j), 10)
            body = append(body, `":"value_`...)
            body = strconv.AppendInt(body, int64(j), 10)
            body = append(body, '"')
            if j < labels-1 {
                body = append(body, ',')
            }
        }
        body = append(body, `}, "values":[`...)
        // Log rows: ["<timestamp>","value_<j>"],...
        for j := 0; j < rows; j++ {
            body = append(body, valuePrefix...)
            body = strconv.AppendInt(body, int64(j), 10)
            body = append(body, `"]`...)
            if j < rows-1 {
                body = append(body, ',')
            }
        }
        body = append(body, `]}`...)
        if i < streams-1 {
            body = append(body, ',')
        }
    }
    body = append(body, `]}`...)
    return body
}
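
For reference, with streams=1, rows=1 and labels=1 the getJSONBody() above yields a body of
the following shape, where <now> is time.Now().UnixNano() at generation time:

    {"streams":[{"stream":{"label_0":"value_0"}, "values":[["<now>","value_0"]]}]}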