2019-05-22 23:16:55 +02:00
package influx
import (
2019-06-14 08:57:13 +02:00
"flag"
2019-05-22 23:16:55 +02:00
"fmt"
"io"
"net/http"
"runtime"
"sync"
"time"
2019-05-28 16:31:35 +02:00
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/common"
2019-05-22 23:16:55 +02:00
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/concurrencylimiter"
2019-05-22 23:23:23 +02:00
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/netstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
2019-05-22 23:16:55 +02:00
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
2019-06-07 20:16:05 +02:00
"github.com/VictoriaMetrics/VictoriaMetrics/lib/tenantmetrics"
2019-05-22 23:16:55 +02:00
"github.com/VictoriaMetrics/metrics"
)
2019-06-14 08:57:13 +02:00
var (
	// measurementFieldSeparator joins the Influx measurement name and the
	// field name into a single metric name.
	measurementFieldSeparator = flag.String("influxMeasurementFieldSeparator", "_", "Separator for `{measurement}{separator}{field_name}` metric name when inserted via Influx line protocol")

	// skipSingleField drops the field name from the metric name when an
	// Influx line carries exactly one field.
	// NOTE: fixed typo "metic" -> "metric" in the help string below.
	skipSingleField = flag.Bool("influxSkipSingleField", false, "Uses `{measurement}` instead of `{measurement}{separator}{field_name}` for metric name if Influx line contains only a single field")
)
2019-07-27 12:20:47 +02:00
// rowsInserted counts rows inserted via the Influx handler, per tenant.
var rowsInserted = tenantmetrics.NewCounterMap(`vm_rows_inserted_total{type="influx"}`)

// rowsPerInsert tracks the distribution of row counts per insert request.
var rowsPerInsert = metrics.NewSummary(`vm_rows_per_insert{type="influx"}`)
2019-05-22 23:16:55 +02:00
// InsertHandler processes remote write for influx line protocol.
//
// See https://github.com/influxdata/influxdb/blob/4cbdc197b8117fee648d62e2e5be75c6575352f0/tsdb/README.md
2019-05-22 23:23:23 +02:00
func InsertHandler ( at * auth . Token , req * http . Request ) error {
2019-05-22 23:16:55 +02:00
return concurrencylimiter . Do ( func ( ) error {
2019-05-22 23:23:23 +02:00
return insertHandlerInternal ( at , req )
2019-05-22 23:16:55 +02:00
} )
}
2019-05-22 23:23:23 +02:00
// insertHandlerInternal parses the influx line protocol payload from req
// on behalf of the given auth token and inserts the resulting rows.
func insertHandlerInternal(at *auth.Token, req *http.Request) error {
	influxReadCalls.Inc()
	r := req.Body
	// Transparently decompress gzipped payloads; the reader is pooled.
	if req.Header.Get("Content-Encoding") == "gzip" {
		zr, err := common.GetGzipReader(r)
		if err != nil {
			return fmt.Errorf("cannot read gzipped influx line protocol data: %s", err)
		}
		defer common.PutGzipReader(zr)
		r = zr
	}
	q := req.URL.Query()
	// tsMultiplier encodes how to convert request timestamps to milliseconds:
	// a positive value means "divide the raw timestamp by it", a negative
	// value means "multiply by its absolute value". See pushCtx.Read.
	// Default precision is nanoseconds ("ns").
	tsMultiplier := int64(1e6)
	switch q.Get("precision") {
	case "ns":
		tsMultiplier = 1e6
	case "u":
		tsMultiplier = 1e3
	case "ms":
		tsMultiplier = 1
	case "s":
		tsMultiplier = -1e3
	case "m":
		tsMultiplier = -1e3 * 60
	case "h":
		tsMultiplier = -1e3 * 3600
	}
	// Read db tag from https://docs.influxdata.com/influxdb/v1.7/tools/api/#write-http-endpoint
	db := q.Get("db")
	ctx := getPushCtx()
	defer putPushCtx(ctx)
	// Stream the body block-by-block; each block is parsed and inserted
	// before the next one is read, keeping memory usage bounded.
	for ctx.Read(r, tsMultiplier) {
		if err := ctx.InsertRows(at, db); err != nil {
			return err
		}
	}
	return ctx.Error()
}
2019-05-22 23:23:23 +02:00
// InsertRows pushes the currently parsed ctx.Rows to the storage nodes.
//
// Each Influx field becomes a separate metric named
// `{measurement}{separator}{field_key}`, unless -influxSkipSingleField
// collapses single-field lines to just `{measurement}`. The `db` query arg
// is added as a `db` label unless the row already carries a `db` tag.
func (ctx *pushCtx) InsertRows(at *auth.Token, db string) error {
	rows := ctx.Rows.Rows
	ic := &ctx.Common
	ic.Reset()
	rowsTotal := 0
	for i := range rows {
		r := &rows[i]
		ic.Labels = ic.Labels[:0]
		hasDBLabel := false
		for j := range r.Tags {
			tag := &r.Tags[j]
			if tag.Key == "db" {
				hasDBLabel = true
			}
			ic.AddLabel(tag.Key, tag.Value)
		}
		if len(db) > 0 && !hasDBLabel {
			ic.AddLabel("db", db)
		}
		// Marshal the labels shared by all fields of this row once; the
		// per-field metric group label is re-marshaled after
		// metricNameBufLen for every field below.
		ic.MetricNameBuf = storage.MarshalMetricNameRaw(ic.MetricNameBuf[:0], at.AccountID, at.ProjectID, ic.Labels)
		metricNameBufLen := len(ic.MetricNameBuf)
		ctx.metricGroupBuf = append(ctx.metricGroupBuf[:0], r.Measurement...)
		skipFieldKey := len(r.Fields) == 1 && *skipSingleField
		if !skipFieldKey {
			ctx.metricGroupBuf = append(ctx.metricGroupBuf, *measurementFieldSeparator...)
		}
		metricGroupPrefixLen := len(ctx.metricGroupBuf)
		// Reserve a placeholder label whose value is overwritten with the
		// per-field metric group name on each iteration of the loop below.
		ic.AddLabel("", "placeholder")
		placeholderLabel := &ic.Labels[len(ic.Labels)-1]
		for j := range r.Fields {
			f := &r.Fields[j]
			if !skipFieldKey {
				// Rebuild `{measurement}{separator}{field_key}` in place.
				ctx.metricGroupBuf = append(ctx.metricGroupBuf[:metricGroupPrefixLen], f.Key...)
			}
			metricGroup := bytesutil.ToUnsafeString(ctx.metricGroupBuf)
			// Replace the last label with the current metric group name.
			ic.Labels = ic.Labels[:len(ic.Labels)-1]
			ic.AddLabel("", metricGroup)
			ic.MetricNameBuf = storage.MarshalMetricLabelRaw(ic.MetricNameBuf[:metricNameBufLen], placeholderLabel)
			storageNodeIdx := ic.GetStorageNodeIdx(at, ic.Labels)
			if err := ic.WriteDataPointExt(at, storageNodeIdx, ic.MetricNameBuf, r.Timestamp, f.Value); err != nil {
				return err
			}
		}
		rowsTotal += len(r.Fields)
	}
	rowsInserted.Get(at).Add(rowsTotal)
	rowsPerInsert.Update(float64(rowsTotal))
	return ic.FlushBufs()
}
func ( ctx * pushCtx ) Read ( r io . Reader , tsMultiplier int64 ) bool {
if ctx . err != nil {
return false
}
2019-05-28 16:31:35 +02:00
ctx . reqBuf , ctx . tailBuf , ctx . err = common . ReadLinesBlock ( r , ctx . reqBuf , ctx . tailBuf )
if ctx . err != nil {
if ctx . err != io . EOF {
influxReadErrors . Inc ( )
ctx . err = fmt . Errorf ( "cannot read influx line protocol data: %s" , ctx . err )
}
2019-05-22 23:16:55 +02:00
return false
}
2019-08-24 10:40:21 +02:00
ctx . Rows . Unmarshal ( bytesutil . ToUnsafeString ( ctx . reqBuf ) )
2019-05-22 23:16:55 +02:00
// Adjust timestamps according to tsMultiplier
currentTs := time . Now ( ) . UnixNano ( ) / 1e6
if tsMultiplier >= 1 {
for i := range ctx . Rows . Rows {
row := & ctx . Rows . Rows [ i ]
if row . Timestamp == 0 {
row . Timestamp = currentTs
} else {
row . Timestamp /= tsMultiplier
}
}
} else if tsMultiplier < 0 {
tsMultiplier = - tsMultiplier
2019-08-01 23:24:06 +02:00
currentTs -= currentTs % tsMultiplier
2019-05-22 23:16:55 +02:00
for i := range ctx . Rows . Rows {
row := & ctx . Rows . Rows [ i ]
if row . Timestamp == 0 {
row . Timestamp = currentTs
} else {
row . Timestamp *= tsMultiplier
}
}
}
return true
}
var (
2019-08-24 10:40:21 +02:00
influxReadCalls = metrics . NewCounter ( ` vm_read_calls_total { name="influx"} ` )
influxReadErrors = metrics . NewCounter ( ` vm_read_errors_total { name="influx"} ` )
2019-05-22 23:16:55 +02:00
)
// pushCtx holds the per-request state for parsing and inserting influx
// line protocol data. Instances are pooled via getPushCtx/putPushCtx.
type pushCtx struct {
	Rows   Rows
	Common netstorage.InsertCtx

	reqBuf         []byte // complete lines awaiting parsing
	tailBuf        []byte // incomplete trailing line carried over to the next Read
	metricGroupBuf []byte // scratch buffer for building metric group names
	err            error  // sticky error from Read; io.EOF means clean end of stream
}
// Error returns the last error encountered by Read, if any.
// io.EOF signals a normal end of stream and is not reported as an error.
func (ctx *pushCtx) Error() error {
	if err := ctx.err; err != io.EOF {
		return err
	}
	return nil
}
// reset clears ctx state so it can be reused; the byte buffers keep their
// capacity to reduce allocations on subsequent requests.
func (ctx *pushCtx) reset() {
	ctx.Rows.Reset()
	ctx.Common.Reset()
	ctx.reqBuf = ctx.reqBuf[:0]
	ctx.tailBuf = ctx.tailBuf[:0]
	ctx.metricGroupBuf = ctx.metricGroupBuf[:0]
	ctx.err = nil
}
// getPushCtx obtains a pushCtx for reuse, preferring the fast channel-based
// cache, then sync.Pool, and finally allocating a fresh one.
func getPushCtx() *pushCtx {
	select {
	case ctx := <-pushCtxPoolCh:
		return ctx
	default:
	}
	if v := pushCtxPool.Get(); v != nil {
		return v.(*pushCtx)
	}
	return &pushCtx{}
}
// putPushCtx resets ctx and returns it to the pool for reuse.
// The bounded channel is tried first; if it is full, the ctx falls back
// to sync.Pool, where the GC may eventually reclaim it.
func putPushCtx(ctx *pushCtx) {
	ctx.reset()
	select {
	case pushCtxPoolCh <- ctx:
	default:
		pushCtxPool.Put(ctx)
	}
}
// pushCtxPool is the GC-friendly fallback pool for pushCtx objects.
var pushCtxPool sync.Pool

// pushCtxPoolCh is a fast bounded cache of pushCtx objects; its capacity is
// tied to GOMAXPROCS so at most one cached ctx per P is retained here.
var pushCtxPoolCh = make(chan *pushCtx, runtime.GOMAXPROCS(-1))