Mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git
Commit 46d7792b72:
- fix broken tests
- cosmetic code cleanup
- document the change at https://docs.victoriametrics.com/vmagent.html#multitenancy
- document the change at https://docs.victoriametrics.com/CHANGELOG.html
99 lines | 3.2 KiB | Go
package vmimport

import (
	"io"
	"net/http"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/common"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/remotewrite"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
	parserCommon "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
	parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/vmimport"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/tenantmetrics"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter"
	"github.com/VictoriaMetrics/metrics"
)

var (
	rowsInserted       = metrics.NewCounter(`vmagent_rows_inserted_total{type="vmimport"}`)
	rowsTenantInserted = tenantmetrics.NewCounterMap(`vmagent_tenant_inserted_rows_total{type="vmimport"}`)
	rowsPerInsert      = metrics.NewHistogram(`vmagent_rows_per_insert{type="vmimport"}`)
)

// InsertHandler processes `/api/v1/import` request.
//
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6
func InsertHandler(at *auth.Token, req *http.Request) error {
	extraLabels, err := parserCommon.GetExtraLabels(req)
	if err != nil {
		return err
	}
	return writeconcurrencylimiter.Do(func() error {
		isGzipped := req.Header.Get("Content-Encoding") == "gzip"
		return parser.ParseStream(req.Body, isGzipped, func(rows []parser.Row) error {
			return insertRows(at, rows, extraLabels)
		})
	})
}
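
// Illustrative only (not part of the upstream source): a client can feed this
// handler by POSTing JSON lines in the /api/v1/export format to vmagent's
// /api/v1/import endpoint. The host, port, sample payload and extra_label
// value below are assumptions for the sketch:
//
//	payload := `{"metric":{"__name__":"http_requests_total","job":"demo"},"values":[12,14],"timestamps":[1652169600000,1652169660000]}`
//	resp, err := http.Post("http://localhost:8429/api/v1/import?extra_label=env=dev", "application/json", strings.NewReader(payload))
//	if err != nil {
//		log.Fatalf("cannot push rows: %s", err)
//	}
//	defer resp.Body.Close()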

// InsertHandlerForReader processes metrics from the given reader.
func InsertHandlerForReader(r io.Reader, isGzipped bool) error {
	return writeconcurrencylimiter.Do(func() error {
		return parser.ParseStream(r, isGzipped, func(rows []parser.Row) error {
			return insertRows(nil, rows, nil)
		})
	})
}
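
// A minimal usage sketch (not part of the upstream source) for feeding an
// in-memory payload through InsertHandlerForReader, e.g. when replaying a
// previously exported file; the literal sample line below is an assumption:
//
//	r := strings.NewReader(`{"metric":{"__name__":"foo","job":"demo"},"values":[1],"timestamps":[1652169600000]}`)
//	if err := vmimport.InsertHandlerForReader(r, false); err != nil {
//		log.Fatalf("cannot import rows: %s", err)
//	}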

// insertRows converts the parsed vmimport rows into a remote write request
// and pushes it to the configured remote storage. The auth token may be nil
// when multitenancy isn't used.
func insertRows(at *auth.Token, rows []parser.Row, extraLabels []prompbmarshal.Label) error {
	ctx := common.GetPushCtx()
	defer common.PutPushCtx(ctx)

	rowsTotal := 0
	// Reuse the buffers from the pooled push context to avoid allocations.
	tssDst := ctx.WriteRequest.Timeseries[:0]
	labels := ctx.Labels[:0]
	samples := ctx.Samples[:0]
	for i := range rows {
		r := &rows[i]
		rowsTotal += len(r.Values)
		labelsLen := len(labels)
		for j := range r.Tags {
			tag := &r.Tags[j]
			labels = append(labels, prompbmarshal.Label{
				Name:  bytesutil.ToUnsafeString(tag.Key),
				Value: bytesutil.ToUnsafeString(tag.Value),
			})
		}
		labels = append(labels, extraLabels...)
		values := r.Values
		timestamps := r.Timestamps
		if len(timestamps) != len(values) {
			logger.Panicf("BUG: len(timestamps)=%d must match len(values)=%d", len(timestamps), len(values))
		}
		samplesLen := len(samples)
		for j, value := range values {
			samples = append(samples, prompbmarshal.Sample{
				Value:     value,
				Timestamp: timestamps[j],
			})
		}
		// Each time series references sub-slices of the shared labels and
		// samples buffers instead of allocating per-row slices.
		tssDst = append(tssDst, prompbmarshal.TimeSeries{
			Labels:  labels[labelsLen:],
			Samples: samples[samplesLen:],
		})
	}
	ctx.WriteRequest.Timeseries = tssDst
	ctx.Labels = labels
	ctx.Samples = samples
	remotewrite.Push(at, &ctx.WriteRequest)
	rowsInserted.Add(rowsTotal)
	if at != nil {
		// Account per-tenant ingestion when an auth token is provided.
		rowsTenantInserted.Get(at).Add(rowsTotal)
	}
	rowsPerInsert.Update(float64(rowsTotal))
	return nil
}
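
// A side note on the buffer reuse in insertRows (an illustrative sketch, not
// part of the upstream source): sub-slicing a shared buffer yields a per-row
// view without allocating a new slice per row, and the view stays valid even
// if later appends reallocate, since a slice keeps its own reference to the
// backing array it was taken from:
//
//	labels := make([]prompbmarshal.Label, 0, 8)
//	start := len(labels) // remember where this row's labels begin
//	labels = append(labels, prompbmarshal.Label{Name: "job", Value: "demo"})
//	rowLabels := labels[start:] // view over the shared buffer for this row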