package influx

import (
	"flag"
	"net/http"
	"runtime"
	"sync"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/netstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
	parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/influx"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/tenantmetrics"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter"
	"github.com/VictoriaMetrics/metrics"
)

var (
	measurementFieldSeparator = flag.String("influxMeasurementFieldSeparator", "_", "Separator for '{measurement}{separator}{field_name}' metric name when inserted via Influx line protocol")
	skipSingleField           = flag.Bool("influxSkipSingleField", false, "Uses '{measurement}' instead of '{measurement}{separator}{field_name}' for metric name if Influx line contains only a single field")
)

var (
	rowsInserted  = tenantmetrics.NewCounterMap(`vm_rows_inserted_total{type="influx"}`)
	rowsPerInsert = metrics.NewHistogram(`vm_rows_per_insert{type="influx"}`)
)

// InsertHandler processes remote write for influx line protocol.
//
// See https://github.com/influxdata/influxdb/blob/4cbdc197b8117fee648d62e2e5be75c6575352f0/tsdb/README.md
func InsertHandler(at *auth.Token, req *http.Request) error {
	return writeconcurrencylimiter.Do(func() error {
		return parser.ParseStream(req, func(db string, rows []parser.Row) error {
			return insertRows(at, db, rows)
		})
	})
}

func insertRows(at *auth.Token, db string, rows []parser.Row) error {
	ctx := getPushCtx()
	defer putPushCtx(ctx)

	ic := &ctx.Common
	rowsTotal := 0
	for i := range rows {
		r := &rows[i]

		// Convert influx tags to labels and add the `db` arg as a `db` label
		// if the line doesn't contain a `db` tag.
		ic.Labels = ic.Labels[:0]
		hasDBLabel := false
		for j := range r.Tags {
			tag := &r.Tags[j]
			if tag.Key == "db" {
				hasDBLabel = true
			}
			ic.AddLabel(tag.Key, tag.Value)
		}
		if len(db) > 0 && !hasDBLabel {
			ic.AddLabel("db", db)
		}

		// Marshal the part of the metric name shared by all the fields of the row:
		// (AccountID, ProjectID) plus the labels above. The per-field metric group
		// is appended below.
		ic.MetricNameBuf = storage.MarshalMetricNameRaw(ic.MetricNameBuf[:0], at.AccountID, at.ProjectID, ic.Labels)
		metricNameBufLen := len(ic.MetricNameBuf)

		// Build the `{measurement}{separator}` prefix for per-field metric names.
		ctx.metricGroupBuf = append(ctx.metricGroupBuf[:0], r.Measurement...)
		skipFieldKey := len(r.Fields) == 1 && *skipSingleField
		if len(ctx.metricGroupBuf) > 0 && !skipFieldKey {
			ctx.metricGroupBuf = append(ctx.metricGroupBuf, *measurementFieldSeparator...)
		}
		metricGroupPrefixLen := len(ctx.metricGroupBuf)

		// Reserve a label with an empty key for the metric group.
		// Its value is overwritten for each field below.
		ic.AddLabel("", "placeholder")
		placeholderLabel := &ic.Labels[len(ic.Labels)-1]

		// Write a data point per field.
		for j := range r.Fields {
			f := &r.Fields[j]
			if !skipFieldKey {
				ctx.metricGroupBuf = append(ctx.metricGroupBuf[:metricGroupPrefixLen], f.Key...)
			}
			metricGroup := bytesutil.ToUnsafeString(ctx.metricGroupBuf)
			ic.Labels = ic.Labels[:len(ic.Labels)-1]
			ic.AddLabel("", metricGroup)
			ic.MetricNameBuf = storage.MarshalMetricLabelRaw(ic.MetricNameBuf[:metricNameBufLen], placeholderLabel)
			storageNodeIdx := ic.GetStorageNodeIdx(at, ic.Labels)
			if err := ic.WriteDataPointExt(at, storageNodeIdx, ic.MetricNameBuf, r.Timestamp, f.Value); err != nil {
				return err
			}
		}
		rowsTotal += len(r.Fields)
	}
	rowsInserted.Get(at).Add(rowsTotal)
	rowsPerInsert.Update(float64(rowsTotal))
	return ic.FlushBufs()
}

// pushCtx holds the state used for converting a single influx insert request.
type pushCtx struct {
	Common netstorage.InsertCtx

	// metricGroupBuf holds the `{measurement}{separator}{field_key}` metric name
	// for the field being processed.
	metricGroupBuf []byte
}

func (ctx *pushCtx) reset() {
	ctx.Common.Reset()
	ctx.metricGroupBuf = ctx.metricGroupBuf[:0]
}

// getPushCtx obtains pushCtx from the pool, allocating a new one if the pool is empty.
func getPushCtx() *pushCtx {
	select {
	case ctx := <-pushCtxPoolCh:
		return ctx
	default:
		if v := pushCtxPool.Get(); v != nil {
			return v.(*pushCtx)
		}
		return &pushCtx{}
	}
}

// putPushCtx resets ctx and returns it to the pool.
func putPushCtx(ctx *pushCtx) {
	ctx.reset()
	select {
	case pushCtxPoolCh <- ctx:
	default:
		pushCtxPool.Put(ctx)
	}
}

// pushCtxPoolCh is a fast path holding up to GOMAXPROCS pooled pushCtx objects;
// pushCtxPool handles the overflow.
var pushCtxPool sync.Pool
var pushCtxPoolCh = make(chan *pushCtx, runtime.GOMAXPROCS(-1))
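
// Illustrative note (not exercised by the code above; assumes the default
// -influxMeasurementFieldSeparator="_" and -influxSkipSingleField=false):
// an Influx line such as
//
//	cpu,host=h1 user=1.5,system=0.5 1560000000000000000
//
// is converted by insertRows into two data points for the time series
//
//	cpu_user{host="h1"}   1.5
//	cpu_system{host="h1"} 0.5
//
// with the db name passed to insertRows added as a `db` label, since the line
// contains no `db` tag.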