2023-10-05 14:39:51 +02:00
|
|
|
package newrelic
|
|
|
|
|
|
|
|
import (
|
|
|
|
"net/http"
|
|
|
|
|
|
|
|
"github.com/VictoriaMetrics/metrics"
|
|
|
|
|
|
|
|
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/common"
|
|
|
|
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/remotewrite"
|
|
|
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
|
2023-10-16 00:25:23 +02:00
|
|
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
2023-10-05 14:39:51 +02:00
|
|
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
|
|
|
|
parserCommon "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
|
|
|
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/newrelic"
|
|
|
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/newrelic/stream"
|
|
|
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/tenantmetrics"
|
|
|
|
)
|
|
|
|
|
|
|
|
var (
	// rowsInserted counts data items accepted from NewRelic agents across all tenants.
	rowsInserted = metrics.NewCounter(`vmagent_rows_inserted_total{type="newrelic"}`)

	// rowsTenantInserted counts inserted samples broken down per tenant (auth token).
	rowsTenantInserted = tenantmetrics.NewCounterMap(`vmagent_tenant_inserted_rows_total{type="newrelic"}`)

	// rowsPerInsert tracks the distribution of sample counts per single insert request.
	rowsPerInsert = metrics.NewHistogram(`vmagent_rows_per_insert{type="newrelic"}`)
)
|
|
|
|
|
|
|
|
// InsertHandlerForHTTP processes remote write for NewRelic POST /infra/v2/metrics/events/bulk request.
|
|
|
|
func InsertHandlerForHTTP(at *auth.Token, req *http.Request) error {
|
|
|
|
extraLabels, err := parserCommon.GetExtraLabels(req)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
ce := req.Header.Get("Content-Encoding")
|
|
|
|
isGzip := ce == "gzip"
|
2023-10-16 00:25:23 +02:00
|
|
|
return stream.Parse(req.Body, isGzip, func(rows []newrelic.Row) error {
|
|
|
|
return insertRows(at, rows, extraLabels)
|
2023-10-05 14:39:51 +02:00
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2023-10-16 00:25:23 +02:00
|
|
|
func insertRows(at *auth.Token, rows []newrelic.Row, extraLabels []prompbmarshal.Label) error {
|
2023-10-05 14:39:51 +02:00
|
|
|
ctx := common.GetPushCtx()
|
|
|
|
defer common.PutPushCtx(ctx)
|
|
|
|
|
2023-10-16 00:25:23 +02:00
|
|
|
samplesCount := 0
|
2023-10-05 14:39:51 +02:00
|
|
|
tssDst := ctx.WriteRequest.Timeseries[:0]
|
|
|
|
labels := ctx.Labels[:0]
|
|
|
|
samples := ctx.Samples[:0]
|
|
|
|
for i := range rows {
|
|
|
|
r := &rows[i]
|
2023-10-16 00:25:23 +02:00
|
|
|
tags := r.Tags
|
|
|
|
srcSamples := r.Samples
|
|
|
|
for j := range srcSamples {
|
|
|
|
s := &srcSamples[j]
|
|
|
|
labelsLen := len(labels)
|
2023-10-05 14:39:51 +02:00
|
|
|
labels = append(labels, prompbmarshal.Label{
|
2023-10-16 00:25:23 +02:00
|
|
|
Name: "__name__",
|
|
|
|
Value: bytesutil.ToUnsafeString(s.Name),
|
2023-10-05 14:39:51 +02:00
|
|
|
})
|
2023-10-16 00:25:23 +02:00
|
|
|
for k := range tags {
|
|
|
|
t := &tags[k]
|
|
|
|
labels = append(labels, prompbmarshal.Label{
|
|
|
|
Name: bytesutil.ToUnsafeString(t.Key),
|
|
|
|
Value: bytesutil.ToUnsafeString(t.Value),
|
|
|
|
})
|
|
|
|
}
|
|
|
|
samples = append(samples, prompbmarshal.Sample{
|
|
|
|
Value: s.Value,
|
|
|
|
Timestamp: r.Timestamp,
|
|
|
|
})
|
|
|
|
tssDst = append(tssDst, prompbmarshal.TimeSeries{
|
|
|
|
Labels: labels[labelsLen:],
|
|
|
|
Samples: samples[len(samples)-1:],
|
|
|
|
})
|
|
|
|
labels = append(labels, extraLabels...)
|
2023-10-05 14:39:51 +02:00
|
|
|
}
|
2023-10-16 00:25:23 +02:00
|
|
|
samplesCount += len(srcSamples)
|
2023-10-05 14:39:51 +02:00
|
|
|
}
|
|
|
|
ctx.WriteRequest.Timeseries = tssDst
|
|
|
|
ctx.Labels = labels
|
|
|
|
ctx.Samples = samples
|
2023-11-25 10:31:30 +01:00
|
|
|
if !remotewrite.TryPush(at, &ctx.WriteRequest) {
|
2023-11-24 13:42:11 +01:00
|
|
|
return remotewrite.ErrQueueFullHTTPRetry
|
|
|
|
}
|
2023-10-05 14:39:51 +02:00
|
|
|
rowsInserted.Add(len(rows))
|
|
|
|
if at != nil {
|
2023-10-16 00:25:23 +02:00
|
|
|
rowsTenantInserted.Get(at).Add(samplesCount)
|
2023-10-05 14:39:51 +02:00
|
|
|
}
|
2023-10-16 00:25:23 +02:00
|
|
|
rowsPerInsert.Update(float64(samplesCount))
|
2023-10-05 14:39:51 +02:00
|
|
|
return nil
|
|
|
|
}
|