1f2cb594d9
* lib/promscrape: make concurrency control optional

  Before, `-maxConcurrentInserts` limited all calls to the `promscrape.Parse` function, both during ingestion and during scraping. This behavior is incorrect: the cmd-line flag `-maxConcurrentInserts` should only have effect on ingestion. Since both pipelines use the same `promscrape.Parse` function, we extend it to make the concurrency limiter optional, so the caller can decide whether concurrency should be limited or not. This commit makes c53b5788b4 obsolete.

  Signed-off-by: hagen1778 <roman@victoriametrics.com>

* Revert "dashboards: move `Concurrent inserts` panel to Troubleshooting section"

  This reverts commit c53b5788b4.

---------

Signed-off-by: hagen1778 <roman@victoriametrics.com>
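In short, the fix threads an opt-in flag through the shared parse function, so only the ingestion path acquires the `-maxConcurrentInserts` limiter while the scrape path bypasses it. Below is a minimal sketch of that pattern; the package name, the Parse signature and the concurrencyCh semaphore are illustrative assumptions, not the actual lib/promscrape code.

// Illustrative sketch only: the names below (streamparse, Parse, concurrencyCh)
// are assumptions, not the real lib/promscrape API.
package streamparse

import "io"

// concurrencyCh acts as a semaphore sized to the allowed number of
// concurrent ingestion-side parse calls (what -maxConcurrentInserts controls).
var concurrencyCh = make(chan struct{}, 8)

// Parse reads the whole payload from r and hands it to callback.
// When limitConcurrency is true, the call first acquires a semaphore slot,
// so each caller decides whether it should be throttled: ingestion would
// pass true, scraping would pass false.
func Parse(r io.Reader, limitConcurrency bool, callback func(data []byte) error) error {
	if limitConcurrency {
		concurrencyCh <- struct{}{}
		defer func() { <-concurrencyCh }()
	}
	data, err := io.ReadAll(r)
	if err != nil {
		return err
	}
	return callback(data)
}

With this shape, ingestion callers such as the handler below keep the existing limit, while scrape targets are parsed without queueing behind it.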
80 lines
2.6 KiB
Go
package prometheusimport

import (
	"net/http"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/netstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/relabel"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
	parserCommon "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
	parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/prometheus"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/prometheus/stream"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/tenantmetrics"
	"github.com/VictoriaMetrics/metrics"
)
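
// Ingestion metrics for the Prometheus text-format import path.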
var (
	rowsInserted       = metrics.NewCounter(`vm_rows_inserted_total{type="prometheus"}`)
	rowsTenantInserted = tenantmetrics.NewCounterMap(`vm_tenant_inserted_rows_total{type="prometheus"}`)
	rowsPerInsert      = metrics.NewHistogram(`vm_rows_per_insert{type="prometheus"}`)
)

// InsertHandler processes `/api/v1/import/prometheus` request.
func InsertHandler(at *auth.Token, req *http.Request) error {
	extraLabels, err := parserCommon.GetExtraLabels(req)
	if err != nil {
		return err
	}
	defaultTimestamp, err := parserCommon.GetTimestamp(req)
	if err != nil {
		return err
	}
	isGzipped := req.Header.Get("Content-Encoding") == "gzip"
	// The fourth argument (true) presumably opts this ingestion path into the
	// concurrency limiter that the commit above makes optional in the shared parser.
	return stream.Parse(req.Body, defaultTimestamp, isGzipped, true, func(rows []parser.Row) error {
		return insertRows(at, rows, extraLabels)
	}, func(s string) {
		httpserver.LogError(req, s)
	})
}
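
// insertRows converts the parsed rows into labeled data points and writes them
// to the storage layer via the insert context, applying optional relabeling and
// per-tenant row accounting along the way.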
func insertRows(at *auth.Token, rows []parser.Row, extraLabels []prompbmarshal.Label) error {
	ctx := netstorage.GetInsertCtx()
	defer netstorage.PutInsertCtx(ctx)

	ctx.Reset() // This line is required for initializing ctx internals.
	perTenantRows := make(map[auth.Token]int)
	hasRelabeling := relabel.HasRelabeling()
	for i := range rows {
		r := &rows[i]
		ctx.Labels = ctx.Labels[:0]
		ctx.AddLabel("", r.Metric)
		for j := range r.Tags {
			tag := &r.Tags[j]
			ctx.AddLabel(tag.Key, tag.Value)
		}
		for j := range extraLabels {
			label := &extraLabels[j]
			ctx.AddLabel(label.Name, label.Value)
		}
		if hasRelabeling {
			ctx.ApplyRelabeling()
		}
		if len(ctx.Labels) == 0 {
			// Skip metric without labels.
			continue
		}
		ctx.SortLabelsIfNeeded()
		atLocal := ctx.GetLocalAuthToken(at)
		if err := ctx.WriteDataPoint(atLocal, ctx.Labels, r.Timestamp, r.Value); err != nil {
			return err
		}
		perTenantRows[*atLocal]++
	}
	rowsInserted.Add(len(rows))
	rowsTenantInserted.MultiAdd(perTenantRows)
	rowsPerInsert.Update(float64(len(rows)))
	return ctx.FlushBufs()
}