package promremotewrite

import (
	"bufio"
	"fmt"
	"io"
	"net/http"
	"runtime"
	"sync"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
	"github.com/VictoriaMetrics/metrics"
	"github.com/golang/snappy"
)

var maxInsertRequestSize = flagutil.NewBytes("maxInsertRequestSize", 32*1024*1024, "The maximum size in bytes of a single Prometheus remote_write API request")

// ParseStream parses Prometheus remote_write message req and calls callback for the parsed timeseries.
//
// callback shouldn't hold tss after returning.
func ParseStream(req *http.Request, callback func(tss []prompb.TimeSeries) error) error {
	ctx := getPushCtx(req.Body)
	defer putPushCtx(ctx)
	if err := ctx.Read(); err != nil {
		return err
	}

	// Process the request synchronously, so errors are propagated to the ParseStream caller
	// and it can respond with the HTTP 503 status code.
	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/896
	bb := bodyBufferPool.Get()
	defer bodyBufferPool.Put(bb)
	var err error
	bb.B, err = snappy.Decode(bb.B[:cap(bb.B)], ctx.reqBuf.B)
	if err != nil {
		return fmt.Errorf("cannot decompress request with length %d: %w", len(ctx.reqBuf.B), err)
	}
	if len(bb.B) > maxInsertRequestSize.N {
		return fmt.Errorf("too big unpacked request; mustn't exceed `-maxInsertRequestSize=%d` bytes; got %d bytes", maxInsertRequestSize.N, len(bb.B))
	}
	wr := getWriteRequest()
	defer putWriteRequest(wr)
	if err := wr.Unmarshal(bb.B); err != nil {
		unmarshalErrors.Inc()
		return fmt.Errorf("cannot unmarshal prompb.WriteRequest with size %d bytes: %w", len(bb.B), err)
	}
	rows := 0
	tss := wr.Timeseries
	for i := range tss {
		rows += len(tss[i].Samples)
	}
	rowsRead.Add(rows)
	if err := callback(tss); err != nil {
		return fmt.Errorf("error when processing imported data: %w", err)
	}
	return nil
}
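
// remoteWriteHandlerExample is a hypothetical usage sketch, not part of the original
// package: it shows one way ParseStream could be wired into an HTTP handler. The
// handler name and the 503/204 status choices are illustrative assumptions; only
// ParseStream and prompb.TimeSeries come from this package.
func remoteWriteHandlerExample(w http.ResponseWriter, r *http.Request) {
	err := ParseStream(r, func(tss []prompb.TimeSeries) error {
		// Consume tss synchronously; the slice is backed by a pooled
		// prompb.WriteRequest and must not be retained after returning.
		for i := range tss {
			_ = tss[i].Samples
		}
		return nil
	})
	if err != nil {
		// Returning 503 lets well-behaved remote_write clients retry the request
		// (see the issue referenced in ParseStream above).
		http.Error(w, err.Error(), http.StatusServiceUnavailable)
		return
	}
	w.WriteHeader(http.StatusNoContent)
}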

// bodyBufferPool holds buffers for snappy-decompressed request bodies.
var bodyBufferPool bytesutil.ByteBufferPool

// pushCtx holds the buffered request reader and the buffer with the compressed
// request body. Contexts are pooled via getPushCtx/putPushCtx to reduce allocations.
type pushCtx struct {
	br     *bufio.Reader
	reqBuf bytesutil.ByteBuffer
}

// reset clears ctx, so it can be safely returned to the pool.
func (ctx *pushCtx) reset() {
	ctx.br.Reset(nil)
	ctx.reqBuf.Reset()
}

// Read reads the compressed request body into ctx.reqBuf
// and verifies that it doesn't exceed -maxInsertRequestSize.
func (ctx *pushCtx) Read() error {
	readCalls.Inc()
	lr := io.LimitReader(ctx.br, int64(maxInsertRequestSize.N)+1)
	reqLen, err := ctx.reqBuf.ReadFrom(lr)
	if err != nil {
		readErrors.Inc()
		return fmt.Errorf("cannot read compressed request: %w", err)
	}
	if reqLen > int64(maxInsertRequestSize.N) {
		readErrors.Inc()
		return fmt.Errorf("too big packed request; mustn't exceed `-maxInsertRequestSize=%d` bytes", maxInsertRequestSize.N)
	}
	return nil
}

var (
	readCalls       = metrics.NewCounter(`vm_protoparser_read_calls_total{type="promremotewrite"}`)
	readErrors      = metrics.NewCounter(`vm_protoparser_read_errors_total{type="promremotewrite"}`)
	rowsRead        = metrics.NewCounter(`vm_protoparser_rows_read_total{type="promremotewrite"}`)
	unmarshalErrors = metrics.NewCounter(`vm_protoparser_unmarshal_errors_total{type="promremotewrite"}`)
)

// getPushCtx returns a pushCtx reading from r.
// A pooled context is reused when one is available; otherwise a new one is allocated.
func getPushCtx(r io.Reader) *pushCtx {
	select {
	case ctx := <-pushCtxPoolCh:
		ctx.br.Reset(r)
		return ctx
	default:
		if v := pushCtxPool.Get(); v != nil {
			ctx := v.(*pushCtx)
			ctx.br.Reset(r)
			return ctx
		}
		return &pushCtx{
			br: bufio.NewReaderSize(r, 64*1024),
		}
	}
}

// putPushCtx resets ctx and returns it to the pool.
func putPushCtx(ctx *pushCtx) {
	ctx.reset()
	select {
	case pushCtxPoolCh <- ctx:
	default:
		pushCtxPool.Put(ctx)
	}
}

// pushCtxPool and pushCtxPoolCh cache pushCtx objects: up to GOMAXPROCS contexts
// are kept in the channel for fast reuse, while the sync.Pool absorbs any overflow.
var pushCtxPool sync.Pool
var pushCtxPoolCh = make(chan *pushCtx, runtime.GOMAXPROCS(-1))

// getWriteRequest returns a prompb.WriteRequest from writeRequestPool,
// allocating a new one if the pool is empty.
func getWriteRequest() *prompb.WriteRequest {
	v := writeRequestPool.Get()
	if v == nil {
		return &prompb.WriteRequest{}
	}
	return v.(*prompb.WriteRequest)
}

// putWriteRequest resets wr and returns it to writeRequestPool.
func putWriteRequest(wr *prompb.WriteRequest) {
	wr.Reset()
	writeRequestPool.Put(wr)
}

var writeRequestPool sync.Pool