package remotewrite

import (
	"flag"
	"sync"
	"sync/atomic"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding/zstd"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/persistentqueue"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/timeutil"

	"github.com/VictoriaMetrics/metrics"
	"github.com/golang/snappy"
)

var (
	flushInterval = flag.Duration("remoteWrite.flushInterval", time.Second, "Interval for flushing the data to remote storage. "+
		"This option takes effect only when less than 10K data points per second are pushed to -remoteWrite.url")
	maxUnpackedBlockSize = flagutil.NewBytes("remoteWrite.maxBlockSize", 8*1024*1024, "The maximum block size to send to remote storage. Bigger blocks may improve performance at the cost of the increased memory usage. See also -remoteWrite.maxRowsPerBlock")
	maxRowsPerBlock      = flag.Int("remoteWrite.maxRowsPerBlock", 10000, "The maximum number of samples to send in each block to remote storage. Higher number may improve performance at the cost of the increased memory usage. See also -remoteWrite.maxBlockSize")
	vmProtoCompressLevel = flag.Int("remoteWrite.vmProtoCompressLevel", 0, "The compression level for VictoriaMetrics remote write protocol. "+
		"Higher values reduce network traffic at the cost of higher CPU usage. Negative values reduce CPU usage at the cost of increased network traffic. "+
		"See https://docs.victoriametrics.com/vmagent/#victoriametrics-remote-write-protocol")
)
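
// pendingSeries buffers incoming time series and periodically flushes them to the underlying persistent queue.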
type pendingSeries struct {
	mu sync.Mutex
	wr writeRequest

	stopCh            chan struct{}
	periodicFlusherWG sync.WaitGroup
}
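
// newPendingSeries returns a pendingSeries, which buffers data before sending it to fq.
// The returned pendingSeries must be stopped with MustStop when no longer needed.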
func newPendingSeries(fq *persistentqueue.FastQueue, isVMRemoteWrite bool, significantFigures, roundDigits int) *pendingSeries {
	var ps pendingSeries
	ps.wr.fq = fq
	ps.wr.isVMRemoteWrite = isVMRemoteWrite
	ps.wr.significantFigures = significantFigures
	ps.wr.roundDigits = roundDigits
	ps.stopCh = make(chan struct{})
	ps.periodicFlusherWG.Add(1)
	go func() {
		defer ps.periodicFlusherWG.Done()
		ps.periodicFlusher()
	}()
	return &ps
}

// MustStop stops ps and waits until the periodic flusher goroutine exits.
func (ps *pendingSeries) MustStop() {
	close(ps.stopCh)
	ps.periodicFlusherWG.Wait()
}
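
// TryPush adds tss to the buffered data, flushing it to the underlying queue when per-block limits are reached.
// It returns false if the data cannot be written to the queue.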
func (ps *pendingSeries) TryPush(tss []prompbmarshal.TimeSeries) bool {
	ps.mu.Lock()
	ok := ps.wr.tryPush(tss)
	ps.mu.Unlock()
	return ok
}

// periodicFlusher periodically flushes the buffered data to the underlying queue until ps.stopCh is closed.
func (ps *pendingSeries) periodicFlusher() {
	flushSeconds := int64(flushInterval.Seconds())
	if flushSeconds <= 0 {
		flushSeconds = 1
	}
	d := timeutil.AddJitterToDuration(*flushInterval)
	ticker := time.NewTicker(d)
	defer ticker.Stop()
	for {
		select {
		case <-ps.stopCh:
			ps.mu.Lock()
			ps.wr.mustFlushOnStop()
			ps.mu.Unlock()
			return
		case <-ticker.C:
			if fasttime.UnixTimestamp()-ps.wr.lastFlushTime.Load() < uint64(flushSeconds) {
				continue
			}
		}
		ps.mu.Lock()
		_ = ps.wr.tryFlush()
		ps.mu.Unlock()
	}
}

// writeRequest accumulates time series before sending them to fq.
type writeRequest struct {
	// lastFlushTime holds the unix timestamp in seconds of the last flush to fq.
	lastFlushTime atomic.Uint64

	// The queue to send blocks to.
	fq *persistentqueue.FastQueue

	// Whether to encode the write request with VictoriaMetrics remote write protocol.
	isVMRemoteWrite bool

	// How many significant figures must be left before sending the writeRequest to fq.
	significantFigures int

	// How many decimal digits after the point must be left before sending the writeRequest to fq.
	roundDigits int

	wr prompbmarshal.WriteRequest
Revert "Exemplar support (#5982)"
This reverts commit 5a3abfa0414ab495cbc34a58146b540aa8289636.
Reason for revert: exemplars aren't in wide use because they have numerous issues which prevent their adoption (see below).
Adding support for examplars into VictoriaMetrics introduces non-trivial code changes. These code changes need to be supported forever
once the release of VictoriaMetrics with exemplar support is published. That's why I don't think this is a good feature despite
that the source code of the reverted commit has an excellent quality. See https://docs.victoriametrics.com/goals/ .
Issues with Prometheus exemplars:
- Prometheus still has only experimental support for exemplars after more than three years since they were introduced.
It stores exemplars in memory, so they are lost after Prometheus restart. This doesn't look like production-ready feature.
See https://github.com/prometheus/docs/blob/0a2f3b37940e2949eefe752ed7b6c768e0b00128/content/docs/instrumenting/exposition_formats.md?plain=1#L153-L159
and https://prometheus.io/docs/prometheus/latest/feature_flags/#exemplars-storage
- It is very non-trivial to expose exemplars alongside metrics in your application, since the official Prometheus SDKs
for metrics' exposition ( https://prometheus.io/docs/instrumenting/clientlibs/ ) either have very hard-to-use API
for exposing histograms or do not have this API at all. For example, try figuring out how to expose exemplars
via https://pkg.go.dev/github.com/prometheus/client_golang@v1.19.1/prometheus .
- It looks like exemplars are supported for Histogram metric types only -
see https://pkg.go.dev/github.com/prometheus/client_golang@v1.19.1/prometheus#Timer.ObserveDurationWithExemplar .
Exemplars aren't supported for Counter, Gauge and Summary metric types.
- Grafana has very poor support for Prometheus exemplars. It looks like it supports exemplars only when the query
contains histogram_quantile() function. It queries exemplars via special Prometheus API -
https://prometheus.io/docs/prometheus/latest/querying/api/#querying-exemplars - (which is still marked as experimental, btw.)
and then displays all the returned exemplars on the graph as special dots. The issue is that this doesn't work
in production in most cases when the histogram_quantile() is calculated over thousands of histogram buckets
exposed by big number of application instances. Every histogram bucket may expose an exemplar on every timestamp shown on the graph.
This makes the graph unusable, since it is litterally filled with thousands of exemplar dots.
Neither Prometheus API nor Grafana doesn't provide the ability to filter out unneeded exemplars.
- Exemplars are usually connected to traces. While traces are good for some
I doubt exemplars will become production-ready in the near future because of the issues outlined above.
Alternative to exemplars:
Exemplars are marketed as a silver bullet for the correlation between metrics, traces and logs -
just click the exemplar dot on some graph in Grafana and instantly see the corresponding trace or log entry!
This doesn't work as expected in production as shown above. Are there better solutions, which work in production?
Yes - just use time-based and label-based correlation between metrics, traces and logs. Assign the same `job`
and `instance` labels to metrics, logs and traces, so you can quickly find the needed trace or log entry
by these labes on the time range with the anomaly on metrics' graph.
2024-07-03 15:30:11 +02:00

	tss []prompbmarshal.TimeSeries

	labels  []prompbmarshal.Label
	samples []prompbmarshal.Sample

	// buf holds labels data
	buf []byte
}

func (wr *writeRequest) reset() {
	// Do not reset lastFlushTime, fq, isVMRemoteWrite, significantFigures and roundDigits, since they are re-used.

	wr.wr.Timeseries = nil

	clear(wr.tss)
	wr.tss = wr.tss[:0]

	promrelabel.CleanLabels(wr.labels)
	wr.labels = wr.labels[:0]

	wr.samples = wr.samples[:0]
	wr.buf = wr.buf[:0]
}

// mustFlushOnStop force-pushes wr data into wr.fq.
//
// This is needed in order to properly save in-memory data to the persistent queue on graceful shutdown.
func (wr *writeRequest) mustFlushOnStop() {
	wr.wr.Timeseries = wr.tss
	if !tryPushWriteRequest(&wr.wr, wr.mustWriteBlock, wr.isVMRemoteWrite) {
		logger.Panicf("BUG: final flush must always return true")
	}
	wr.reset()
}
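
// mustWriteBlock writes block to wr.fq even if the persistent queue is disabled.
// It is used on graceful shutdown, when the in-memory data must not be lost.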
func (wr *writeRequest) mustWriteBlock(block []byte) bool {
	wr.fq.MustWriteBlockIgnoreDisabledPQ(block)
	return true
}

// tryFlush attempts to push the accumulated data to wr.fq.
// It returns false if the data cannot be written to the queue.
func (wr *writeRequest) tryFlush() bool {
	wr.wr.Timeseries = wr.tss
	wr.lastFlushTime.Store(fasttime.UnixTimestamp())
	if !tryPushWriteRequest(&wr.wr, wr.fq.TryWriteBlock, wr.isVMRemoteWrite) {
		return false
	}
	wr.reset()
	return true
}
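
// adjustSampleValues rounds sample values to the given number of significant figures and decimal digits.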
func adjustSampleValues(samples []prompbmarshal.Sample, significantFigures, roundDigits int) {
	if n := significantFigures; n > 0 {
		for i := range samples {
			s := &samples[i]
			s.Value = decimal.RoundToSignificantFigures(s.Value, n)
		}
	}
	if n := roundDigits; n < 100 {
		for i := range samples {
			s := &samples[i]
			s.Value = decimal.RoundToDecimalDigits(s.Value, n)
		}
	}
}
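
// tryPush copies src into wr, flushing the accumulated data to wr.fq whenever per-block limits
// on samples or labels are reached. It returns false if a flush fails.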
func (wr *writeRequest) tryPush(src []prompbmarshal.TimeSeries) bool {
	tssDst := wr.tss
	maxSamplesPerBlock := *maxRowsPerBlock
	// Allow up to 10x of labels per each block on average.
	maxLabelsPerBlock := 10 * maxSamplesPerBlock
	for i := range src {
		if len(wr.samples) >= maxSamplesPerBlock || len(wr.labels) >= maxLabelsPerBlock {
			wr.tss = tssDst
			if !wr.tryFlush() {
				return false
			}
			tssDst = wr.tss
		}
		tsSrc := &src[i]
		adjustSampleValues(tsSrc.Samples, wr.significantFigures, wr.roundDigits)
		tssDst = append(tssDst, prompbmarshal.TimeSeries{})
		wr.copyTimeSeries(&tssDst[len(tssDst)-1], tsSrc)
	}

	wr.tss = tssDst
	return true
}

// copyTimeSeries copies src into dst, storing the copied labels, samples and label data
// in wr.labels, wr.samples and wr.buf, so the copies do not refer to memory owned by the caller.
func (wr *writeRequest) copyTimeSeries(dst, src *prompbmarshal.TimeSeries) {
	labelsDst := wr.labels
	labelsLen := len(wr.labels)
	samplesDst := wr.samples
	buf := wr.buf
	for i := range src.Labels {
		labelsDst = append(labelsDst, prompbmarshal.Label{})
		dstLabel := &labelsDst[len(labelsDst)-1]
		srcLabel := &src.Labels[i]
		buf = append(buf, srcLabel.Name...)
		dstLabel.Name = bytesutil.ToUnsafeString(buf[len(buf)-len(srcLabel.Name):])
		buf = append(buf, srcLabel.Value...)
		dstLabel.Value = bytesutil.ToUnsafeString(buf[len(buf)-len(srcLabel.Value):])
	}
	dst.Labels = labelsDst[labelsLen:]

	samplesDst = append(samplesDst, src.Samples...)
	dst.Samples = samplesDst[len(samplesDst)-len(src.Samples):]

	wr.samples = samplesDst
	wr.labels = labelsDst
	wr.buf = buf
}

// marshalConcurrencyCh limits the maximum number of concurrent workers that marshal and compress WriteRequest.
var marshalConcurrencyCh = make(chan struct{}, cgroup.AvailableCPUs())
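
// tryPushWriteRequest marshals and compresses wr and passes the resulting block to tryPushBlock.
// Blocks exceeding -remoteWrite.maxBlockSize or the maximum persistent queue block size
// are recursively split into smaller parts.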
func tryPushWriteRequest(wr *prompbmarshal.WriteRequest, tryPushBlock func(block []byte) bool, isVMRemoteWrite bool) bool {
	if len(wr.Timeseries) == 0 {
		// Nothing to push
		return true
	}
Revert "Exemplar support (#5982)"
This reverts commit 5a3abfa0414ab495cbc34a58146b540aa8289636.
Reason for revert: exemplars aren't in wide use because they have numerous issues which prevent their adoption (see below).
Adding support for examplars into VictoriaMetrics introduces non-trivial code changes. These code changes need to be supported forever
once the release of VictoriaMetrics with exemplar support is published. That's why I don't think this is a good feature despite
that the source code of the reverted commit has an excellent quality. See https://docs.victoriametrics.com/goals/ .
Issues with Prometheus exemplars:
- Prometheus still has only experimental support for exemplars after more than three years since they were introduced.
It stores exemplars in memory, so they are lost after Prometheus restart. This doesn't look like production-ready feature.
See https://github.com/prometheus/docs/blob/0a2f3b37940e2949eefe752ed7b6c768e0b00128/content/docs/instrumenting/exposition_formats.md?plain=1#L153-L159
and https://prometheus.io/docs/prometheus/latest/feature_flags/#exemplars-storage
- It is very non-trivial to expose exemplars alongside metrics in your application, since the official Prometheus SDKs
for metrics' exposition ( https://prometheus.io/docs/instrumenting/clientlibs/ ) either have very hard-to-use API
for exposing histograms or do not have this API at all. For example, try figuring out how to expose exemplars
via https://pkg.go.dev/github.com/prometheus/client_golang@v1.19.1/prometheus .
- It looks like exemplars are supported for Histogram metric types only -
see https://pkg.go.dev/github.com/prometheus/client_golang@v1.19.1/prometheus#Timer.ObserveDurationWithExemplar .
Exemplars aren't supported for Counter, Gauge and Summary metric types.
- Grafana has very poor support for Prometheus exemplars. It looks like it supports exemplars only when the query
contains histogram_quantile() function. It queries exemplars via special Prometheus API -
https://prometheus.io/docs/prometheus/latest/querying/api/#querying-exemplars - (which is still marked as experimental, btw.)
and then displays all the returned exemplars on the graph as special dots. The issue is that this doesn't work
in production in most cases when the histogram_quantile() is calculated over thousands of histogram buckets
exposed by big number of application instances. Every histogram bucket may expose an exemplar on every timestamp shown on the graph.
This makes the graph unusable, since it is litterally filled with thousands of exemplar dots.
Neither Prometheus API nor Grafana doesn't provide the ability to filter out unneeded exemplars.
- Exemplars are usually connected to traces. While traces are good for some
I doubt exemplars will become production-ready in the near future because of the issues outlined above.
Alternative to exemplars:
Exemplars are marketed as a silver bullet for the correlation between metrics, traces and logs -
just click the exemplar dot on some graph in Grafana and instantly see the corresponding trace or log entry!
This doesn't work as expected in production as shown above. Are there better solutions, which work in production?
Yes - just use time-based and label-based correlation between metrics, traces and logs. Assign the same `job`
and `instance` labels to metrics, logs and traces, so you can quickly find the needed trace or log entry
by these labes on the time range with the anomaly on metrics' graph.
2024-07-03 15:30:11 +02:00
2024-01-30 11:18:17 +01:00

	marshalConcurrencyCh <- struct{}{}
	bb := writeRequestBufPool.Get()
	bb.B = wr.MarshalProtobuf(bb.B[:0])
	if len(bb.B) <= maxUnpackedBlockSize.IntN() {
		zb := compressBufPool.Get()
		if isVMRemoteWrite {
			zb.B = zstd.CompressLevel(zb.B[:0], bb.B, *vmProtoCompressLevel)
		} else {
			zb.B = snappy.Encode(zb.B[:cap(zb.B)], bb.B)
		}
		writeRequestBufPool.Put(bb)
		<-marshalConcurrencyCh
		if len(zb.B) <= persistentqueue.MaxBlockSize {
			zbLen := len(zb.B)
			ok := tryPushBlock(zb.B)
			compressBufPool.Put(zb)
			if ok {
				blockSizeRows.Update(float64(len(wr.Timeseries)))
				blockSizeBytes.Update(float64(zbLen))
			}
			return ok
		}
		compressBufPool.Put(zb)
	} else {
		writeRequestBufPool.Put(bb)
		<-marshalConcurrencyCh
	}

	// Too big block. Recursively split it into smaller parts if possible.
	if len(wr.Timeseries) == 1 {
		// A single time series left. Recursively split its samples into smaller parts if possible.
		samples := wr.Timeseries[0].Samples
		if len(samples) == 1 {
			logger.Warnf("dropping a sample for metric with too long labels exceeding -remoteWrite.maxBlockSize=%d bytes", maxUnpackedBlockSize.N)
			return true
		}
		n := len(samples) / 2
		wr.Timeseries[0].Samples = samples[:n]
		if !tryPushWriteRequest(wr, tryPushBlock, isVMRemoteWrite) {
			wr.Timeseries[0].Samples = samples
			return false
		}
		wr.Timeseries[0].Samples = samples[n:]
		if !tryPushWriteRequest(wr, tryPushBlock, isVMRemoteWrite) {
			wr.Timeseries[0].Samples = samples
			return false
		}
		wr.Timeseries[0].Samples = samples
		return true
	}
	timeseries := wr.Timeseries
	n := len(timeseries) / 2
	wr.Timeseries = timeseries[:n]
	if !tryPushWriteRequest(wr, tryPushBlock, isVMRemoteWrite) {
		wr.Timeseries = timeseries
		return false
	}
	wr.Timeseries = timeseries[n:]
	if !tryPushWriteRequest(wr, tryPushBlock, isVMRemoteWrite) {
		wr.Timeseries = timeseries
		return false
	}
	wr.Timeseries = timeseries
	return true
}

var (
	blockSizeBytes = metrics.NewHistogram(`vmagent_remotewrite_block_size_bytes`)
	blockSizeRows  = metrics.NewHistogram(`vmagent_remotewrite_block_size_rows`)
)
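
// writeRequestBufPool and compressBufPool reduce memory allocations by re-using buffers
// for marshaled and compressed write requests.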
var (
	writeRequestBufPool bytesutil.ByteBufferPool
	compressBufPool     bytesutil.ByteBufferPool
)