lib/protoparser/promremotewrite: synchronously process Prometheus remote_write requests

There is no reason to process these requests asynchronously given https://github.com/VictoriaMetrics/VictoriaMetrics/issues/896.
Synchronous processing code is easier to read and understand than the previous async code.
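
For context, a minimal sketch of how a caller can now turn errors from ParseStream into proper HTTP responses; the insertHandler and insertRows names are assumptions for illustration, not code from this commit:

    func insertHandler(w http.ResponseWriter, r *http.Request) {
        err := promremotewrite.ParseStream(r, func(tss []prompb.TimeSeries) error {
            // insertRows stands in for the real storage write path.
            return insertRows(tss)
        })
        if err != nil {
            // With synchronous processing, a callback failure surfaces here,
            // so the handler can respond with HTTP 503 as discussed in issue 896.
            http.Error(w, err.Error(), http.StatusServiceUnavailable)
            return
        }
        w.WriteHeader(http.StatusNoContent)
    }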
Aliaksandr Valialkin 2020-11-13 12:16:08 +02:00
parent 739b88c1e4
commit 9dfe00c962


@@ -10,9 +10,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
-	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb"
-	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
 	"github.com/VictoriaMetrics/metrics"
 	"github.com/golang/snappy"
 )
@@ -21,9 +19,6 @@ var maxInsertRequestSize = flagutil.NewBytes("maxInsertRequestSize", 32*1024*102
 // ParseStream parses Prometheus remote_write message req and calls callback for the parsed timeseries.
 //
-// The callback can be called concurrently multiple times for streamed data from req.
-// The callback can be called after ParseStream returns.
-//
 // callback shouldn't hold tss after returning.
 func ParseStream(req *http.Request, callback func(tss []prompb.TimeSeries) error) error {
 	ctx := getPushCtx(req.Body)
@@ -31,34 +26,50 @@ func ParseStream(req *http.Request, callback func(tss []prompb.TimeSeries) error
 	if err := ctx.Read(); err != nil {
 		return err
 	}
-	uw := getUnmarshalWork()
-	ctx.wg.Add(1)
-	uw.callback = func(tss []prompb.TimeSeries) error {
-		// Propagate the error to the caller of ParseStream, so it could properly return HTTP 503 status code on error.
-		// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/896
-		ctx.err = callback(tss)
-		ctx.wg.Done()
-		// Do not return the error from callback in order to prevent from double logging.
-		return nil
-	}
-	uw.reqBuf, ctx.reqBuf.B = ctx.reqBuf.B, uw.reqBuf
-	common.ScheduleUnmarshalWork(uw)
-	ctx.wg.Wait()
-	return ctx.err
+	// Synchronously process the request in order to properly return errors to ParseStream caller,
+	// so it could properly return HTTP 503 status code in response.
+	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/896
+	bb := bodyBufferPool.Get()
+	defer bodyBufferPool.Put(bb)
+	var err error
+	bb.B, err = snappy.Decode(bb.B[:cap(bb.B)], ctx.reqBuf.B)
+	if err != nil {
+		return fmt.Errorf("cannot decompress request with length %d: %w", len(ctx.reqBuf.B), err)
+	}
+	if len(bb.B) > maxInsertRequestSize.N {
+		return fmt.Errorf("too big unpacked request; mustn't exceed `-maxInsertRequestSize=%d` bytes; got %d bytes", maxInsertRequestSize.N, len(bb.B))
+	}
+	wr := getWriteRequest()
+	defer putWriteRequest(wr)
+	if err := wr.Unmarshal(bb.B); err != nil {
+		unmarshalErrors.Inc()
+		return fmt.Errorf("cannot unmarshal prompb.WriteRequest with size %d bytes: %w", len(bb.B), err)
+	}
+	rows := 0
+	tss := wr.Timeseries
+	for i := range tss {
+		rows += len(tss[i].Samples)
+	}
+	rowsRead.Add(rows)
+	if err := callback(tss); err != nil {
+		return fmt.Errorf("error when processing imported data: %w", err)
+	}
+	return nil
 }
 
+var bodyBufferPool bytesutil.ByteBufferPool
+
 type pushCtx struct {
 	br     *bufio.Reader
 	reqBuf bytesutil.ByteBuffer
-	wg     sync.WaitGroup
-	err    error
 }
 
 func (ctx *pushCtx) reset() {
 	ctx.br.Reset(nil)
 	ctx.reqBuf.Reset()
-	ctx.err = nil
 }
 
 func (ctx *pushCtx) Read() error {
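
Viewed outside the diff, the new synchronous path boils down to decompress, size-check, unmarshal, then invoke the callback. A self-contained sketch of the same pipeline, with buffer pooling omitted for brevity (parseWriteRequest is a hypothetical name, not part of this commit):

    func parseWriteRequest(compressed []byte, maxSize int) (*prompb.WriteRequest, error) {
        // snappy.Decode allocates a fresh buffer when dst is nil;
        // the real code reuses buffers via bodyBufferPool instead.
        b, err := snappy.Decode(nil, compressed)
        if err != nil {
            return nil, fmt.Errorf("cannot decompress request with length %d: %w", len(compressed), err)
        }
        if len(b) > maxSize {
            return nil, fmt.Errorf("too big unpacked request; got %d bytes, limit is %d bytes", len(b), maxSize)
        }
        var wr prompb.WriteRequest
        if err := wr.Unmarshal(b); err != nil {
            return nil, fmt.Errorf("cannot unmarshal prompb.WriteRequest with size %d bytes: %w", len(b), err)
        }
        return &wr, nil
    }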
@@ -112,66 +123,17 @@ func putPushCtx(ctx *pushCtx) {
 var pushCtxPool sync.Pool
 var pushCtxPoolCh = make(chan *pushCtx, runtime.GOMAXPROCS(-1))
 
-type unmarshalWork struct {
-	wr       prompb.WriteRequest
-	callback func(tss []prompb.TimeSeries) error
-	reqBuf   []byte
-}
-
-func (uw *unmarshalWork) reset() {
-	uw.wr.Reset()
-	uw.callback = nil
-	uw.reqBuf = uw.reqBuf[:0]
-}
-
-// Unmarshal implements common.UnmarshalWork
-func (uw *unmarshalWork) Unmarshal() {
-	bb := bodyBufferPool.Get()
-	defer bodyBufferPool.Put(bb)
-	var err error
-	bb.B, err = snappy.Decode(bb.B[:cap(bb.B)], uw.reqBuf)
-	if err != nil {
-		logger.Errorf("cannot decompress request with length %d: %s", len(uw.reqBuf), err)
-		return
-	}
-	if len(bb.B) > maxInsertRequestSize.N {
-		logger.Errorf("too big unpacked request; mustn't exceed `-maxInsertRequestSize=%d` bytes; got %d bytes", maxInsertRequestSize.N, len(bb.B))
-		return
-	}
-	if err := uw.wr.Unmarshal(bb.B); err != nil {
-		unmarshalErrors.Inc()
-		logger.Errorf("cannot unmarshal prompb.WriteRequest with size %d bytes: %s", len(bb.B), err)
-		return
-	}
-	rows := 0
-	tss := uw.wr.Timeseries
-	for i := range tss {
-		rows += len(tss[i].Samples)
-	}
-	rowsRead.Add(rows)
-	if err := uw.callback(tss); err != nil {
-		logger.Errorf("error when processing imported data: %s", err)
-		putUnmarshalWork(uw)
-		return
-	}
-	putUnmarshalWork(uw)
-}
-
-var bodyBufferPool bytesutil.ByteBufferPool
-
-func getUnmarshalWork() *unmarshalWork {
-	v := unmarshalWorkPool.Get()
+func getWriteRequest() *prompb.WriteRequest {
+	v := writeRequestPool.Get()
 	if v == nil {
-		return &unmarshalWork{}
+		return &prompb.WriteRequest{}
 	}
-	return v.(*unmarshalWork)
+	return v.(*prompb.WriteRequest)
 }
 
-func putUnmarshalWork(uw *unmarshalWork) {
-	uw.reset()
-	unmarshalWorkPool.Put(uw)
+func putWriteRequest(wr *prompb.WriteRequest) {
+	wr.Reset()
+	writeRequestPool.Put(wr)
 }
 
-var unmarshalWorkPool sync.Pool
+var writeRequestPool sync.Pool
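
One consequence of the pooled WriteRequest worth noting: ParseStream returns wr to writeRequestPool via defer putWriteRequest(wr), so the tss slice passed to the callback is only valid until the callback returns (hence the "callback shouldn't hold tss after returning" doc comment). A minimal sketch of a safe callback, assuming the standard log package; it reads everything it needs before returning:

    callback := func(tss []prompb.TimeSeries) error {
        // Reading tss inside the callback is safe; retaining it is not,
        // because the backing WriteRequest is recycled after ParseStream returns.
        samples := 0
        for i := range tss {
            samples += len(tss[i].Samples)
        }
        log.Printf("received %d timeseries with %d samples", len(tss), samples)
        return nil
    }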