2021-05-08 16:55:44 +02:00
|
|
|
package clusternative
|
|
|
|
|
|
|
|
import (
|
|
|
|
"fmt"
|
|
|
|
"io"
|
|
|
|
"sync"
|
|
|
|
"time"
|
|
|
|
|
|
|
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
|
|
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/consts"
|
|
|
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
|
|
|
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/handshake"
|
|
|
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
|
|
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
|
|
|
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
|
|
|
"github.com/VictoriaMetrics/metrics"
|
|
|
|
)
|
|
|
|
|
|
|
|
// ParseStream parses data sent from vminsert to bc and calls callback for parsed rows.
|
2021-10-08 12:52:56 +02:00
|
|
|
// Optional function isReadOnly must return true if the storage cannot accept new data.
|
|
|
|
// In thic case the data read from bc isn't accepted and the readonly status is sent back bc.
|
2021-05-08 16:55:44 +02:00
|
|
|
//
|
|
|
|
// The callback can be called concurrently multiple times for streamed data from req.
|
|
|
|
//
|
|
|
|
// callback shouldn't hold block after returning.
|
2021-10-08 11:52:56 +02:00
|
|
|
func ParseStream(bc *handshake.BufferedConn, callback func(rows []storage.MetricRow) error, isReadOnly func() bool) error {
|
2021-05-08 16:55:44 +02:00
|
|
|
var wg sync.WaitGroup
|
|
|
|
var (
|
|
|
|
callbackErrLock sync.Mutex
|
|
|
|
callbackErr error
|
|
|
|
)
|
|
|
|
for {
|
2021-06-23 14:45:05 +02:00
|
|
|
// Do not use unmarshalWork pool, since every unmarshalWork structure usually occupies
|
|
|
|
// big amounts of memory (more than consts.MaxInsertPacketSize bytes).
|
|
|
|
// The pool would result in increased memory usage.
|
|
|
|
uw := &unmarshalWork{}
|
2021-05-08 16:55:44 +02:00
|
|
|
uw.callback = func(rows []storage.MetricRow) {
|
|
|
|
if err := callback(rows); err != nil {
|
|
|
|
processErrors.Inc()
|
|
|
|
callbackErrLock.Lock()
|
|
|
|
if callbackErr == nil {
|
|
|
|
callbackErr = fmt.Errorf("error when processing native block: %w", err)
|
|
|
|
}
|
|
|
|
callbackErrLock.Unlock()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
uw.wg = &wg
|
|
|
|
var err error
|
2021-10-08 11:52:56 +02:00
|
|
|
uw.reqBuf, err = readBlock(uw.reqBuf[:0], bc, isReadOnly)
|
2021-05-08 16:55:44 +02:00
|
|
|
if err != nil {
|
|
|
|
wg.Wait()
|
|
|
|
if err == io.EOF {
|
|
|
|
// Remote end gracefully closed the connection.
|
|
|
|
return nil
|
|
|
|
}
|
2021-05-27 11:08:47 +02:00
|
|
|
return err
|
2021-05-08 16:55:44 +02:00
|
|
|
}
|
|
|
|
blocksRead.Inc()
|
|
|
|
wg.Add(1)
|
|
|
|
common.ScheduleUnmarshalWork(uw)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// readBlock reads the next data block from vminsert-initiated bc, appends it to dst and returns the result.
|
2021-10-08 11:52:56 +02:00
|
|
|
func readBlock(dst []byte, bc *handshake.BufferedConn, isReadOnly func() bool) ([]byte, error) {
|
2021-10-08 12:52:56 +02:00
|
|
|
sizeBuf := auxBufPool.Get()
|
|
|
|
defer auxBufPool.Put(sizeBuf)
|
2022-01-25 14:16:24 +01:00
|
|
|
sizeBuf.B = bytesutil.ResizeNoCopy(sizeBuf.B, 8)
|
2021-05-08 16:55:44 +02:00
|
|
|
if _, err := io.ReadFull(bc, sizeBuf.B); err != nil {
|
|
|
|
if err != io.EOF {
|
|
|
|
readErrors.Inc()
|
|
|
|
err = fmt.Errorf("cannot read packet size: %w", err)
|
|
|
|
}
|
|
|
|
return dst, err
|
|
|
|
}
|
|
|
|
packetSize := encoding.UnmarshalUint64(sizeBuf.B)
|
|
|
|
if packetSize > consts.MaxInsertPacketSize {
|
|
|
|
parseErrors.Inc()
|
|
|
|
return dst, fmt.Errorf("too big packet size: %d; shouldn't exceed %d", packetSize, consts.MaxInsertPacketSize)
|
|
|
|
}
|
|
|
|
dstLen := len(dst)
|
2022-01-25 14:16:24 +01:00
|
|
|
dst = bytesutil.ResizeWithCopy(dst, dstLen+int(packetSize))
|
2021-05-08 16:55:44 +02:00
|
|
|
if n, err := io.ReadFull(bc, dst[dstLen:]); err != nil {
|
|
|
|
readErrors.Inc()
|
|
|
|
return dst, fmt.Errorf("cannot read packet with size %d bytes: %w; read only %d bytes", packetSize, err, n)
|
|
|
|
}
|
2021-10-08 12:52:56 +02:00
|
|
|
if isReadOnly != nil && isReadOnly() {
|
|
|
|
// The vmstorage is in readonly mode, so drop the read block of data
|
|
|
|
// and send `read only` status to vminsert.
|
|
|
|
dst = dst[:dstLen]
|
|
|
|
if err := sendAck(bc, 2); err != nil {
|
|
|
|
writeErrors.Inc()
|
|
|
|
return dst, fmt.Errorf("cannot send readonly status to vminsert: %w", err)
|
|
|
|
}
|
|
|
|
return dst, nil
|
|
|
|
}
|
2021-05-08 16:55:44 +02:00
|
|
|
// Send `ack` to vminsert that the packet has been received.
|
2021-10-08 12:52:56 +02:00
|
|
|
if err := sendAck(bc, 1); err != nil {
|
|
|
|
writeErrors.Inc()
|
|
|
|
return dst, fmt.Errorf("cannot send `ack` to vminsert: %w", err)
|
|
|
|
}
|
|
|
|
return dst, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func sendAck(bc *handshake.BufferedConn, status byte) error {
|
2021-05-08 16:55:44 +02:00
|
|
|
deadline := time.Now().Add(5 * time.Second)
|
|
|
|
if err := bc.SetWriteDeadline(deadline); err != nil {
|
2021-10-08 12:52:56 +02:00
|
|
|
return fmt.Errorf("cannot set write deadline: %w", err)
|
2021-05-08 16:55:44 +02:00
|
|
|
}
|
2021-10-08 12:52:56 +02:00
|
|
|
b := auxBufPool.Get()
|
|
|
|
defer auxBufPool.Put(b)
|
2021-10-08 14:37:57 +02:00
|
|
|
b.B = append(b.B[:0], status)
|
|
|
|
if _, err := bc.Write(b.B); err != nil {
|
2021-10-08 12:52:56 +02:00
|
|
|
return err
|
2021-05-08 16:55:44 +02:00
|
|
|
}
|
|
|
|
if err := bc.Flush(); err != nil {
|
2021-10-08 12:52:56 +02:00
|
|
|
return err
|
2021-05-08 16:55:44 +02:00
|
|
|
}
|
2021-10-08 12:52:56 +02:00
|
|
|
return nil
|
2021-05-08 16:55:44 +02:00
|
|
|
}
|
|
|
|
|
2021-10-08 12:52:56 +02:00
|
|
|
// auxBufPool holds scratch buffers reused for reading the 8-byte packet size
// prefix (readBlock) and for writing single-byte ack statuses (sendAck).
var auxBufPool bytesutil.ByteBufferPool
|
2021-05-08 16:55:44 +02:00
|
|
|
|
|
|
|
// Metrics exposed by the clusternative protocol parser.
var (
	readErrors  = metrics.NewCounter(`vm_protoparser_read_errors_total{type="clusternative"}`)
	writeErrors = metrics.NewCounter(`vm_protoparser_write_errors_total{type="clusternative"}`)
	rowsRead    = metrics.NewCounter(`vm_protoparser_rows_read_total{type="clusternative"}`)
	blocksRead  = metrics.NewCounter(`vm_protoparser_blocks_read_total{type="clusternative"}`)

	parseErrors   = metrics.NewCounter(`vm_protoparser_parse_errors_total{type="clusternative"}`)
	processErrors = metrics.NewCounter(`vm_protoparser_process_errors_total{type="clusternative"}`)
)
|
|
|
|
|
|
|
|
// unmarshalWork holds a single raw data block read from vminsert together with
// the state needed to unmarshal it and deliver the parsed rows.
type unmarshalWork struct {
	// wg is decremented via Done when the work is finished.
	wg *sync.WaitGroup

	// callback is invoked for every batch of rows parsed from reqBuf.
	callback func(rows []storage.MetricRow)

	// reqBuf contains the raw data block read from vminsert.
	reqBuf []byte

	// mrs is a reusable buffer for unmarshaled rows.
	mrs []storage.MetricRow
}
|
|
|
|
|
|
|
|
// Unmarshal implements common.UnmarshalWork
|
|
|
|
func (uw *unmarshalWork) Unmarshal() {
|
2021-06-23 14:45:05 +02:00
|
|
|
reqBuf := uw.reqBuf
|
|
|
|
for len(reqBuf) > 0 {
|
2021-05-08 16:55:44 +02:00
|
|
|
// Limit the number of rows passed to callback in order to reduce memory usage
|
|
|
|
// when processing big packets of rows.
|
2021-06-23 14:45:05 +02:00
|
|
|
mrs, tail, err := storage.UnmarshalMetricRows(uw.mrs[:0], reqBuf, maxRowsPerCallback)
|
|
|
|
uw.mrs = mrs
|
|
|
|
if err != nil {
|
|
|
|
parseErrors.Inc()
|
|
|
|
logger.Errorf("cannot unmarshal MetricRow from clusternative block with size %d (remaining %d bytes): %s", len(reqBuf), len(tail), err)
|
|
|
|
break
|
|
|
|
}
|
|
|
|
rowsRead.Add(len(mrs))
|
|
|
|
uw.callback(mrs)
|
|
|
|
reqBuf = tail
|
2021-05-08 16:55:44 +02:00
|
|
|
}
|
2021-06-23 14:45:05 +02:00
|
|
|
wg := uw.wg
|
|
|
|
wg.Done()
|
2021-05-08 16:55:44 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// maxRowsPerCallback limits how many rows are passed to the callback at once
// in order to reduce memory usage when processing big packets of rows.
const maxRowsPerCallback = 10000
|