Mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git
Commit 2c334ed953
This is a follow-up for f60c08a7bd
Changes:
- Make sure all the URLs related to the NewRelic protocol start with /newrelic . Previously some URLs started with /api/v1/newrelic
- Remove the /api/v1 part from NewRelic URLs, since it makes no sense there
- Remove the automatic transformation from CamelCase to snake_case for NewRelic labels and metric names,
  since it may complicate the transition from NewRelic to VictoriaMetrics. Preserve all the metric names and label names,
  so users can query metrics and labels by the same names they use in NewRelic.
  The automatic transformation from CamelCase to snake_case can be added later as a special action for relabeling rules if needed (see the sketch below).
- Properly update per-tenant data ingestion stats at app/vmagent/newrelic/request_handler.go . Previously they were always zero.
- Fix NewRelic URLs in vmagent when multitenant data ingestion is enabled. Previously they mistakenly started with `/`.
- Document the NewRelic data ingestion URL at docs/Cluster-VictoriaMetrics.md
- Remove superfluous memory allocations at lib/protoparser/newrelic
- Improve tests at lib/protoparser/newrelic/*
Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3520
Updates https://github.com/VictoriaMetrics/VictoriaMetrics/pull/4712
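
For illustration, below is a minimal sketch of the kind of CamelCase-to-snake_case conversion that was removed from the parser and could later be reintroduced as a relabeling action. The helper name camelToSnake and its placement are hypothetical and are not part of this commit; the sample names come from the test data in this change.

package main

import (
	"fmt"
	"strings"
	"unicode"
)

// camelToSnake converts a camelCase name such as "diskWritesPerSecond"
// into "disk_writes_per_second". Illustrative sketch only; not code from
// the VictoriaMetrics repository.
func camelToSnake(s string) string {
	var b strings.Builder
	for i, r := range s {
		if unicode.IsUpper(r) {
			if i > 0 {
				b.WriteByte('_')
			}
			b.WriteRune(unicode.ToLower(r))
			continue
		}
		b.WriteRune(r)
	}
	return b.String()
}

func main() {
	fmt.Println(camelToSnake("diskWritesPerSecond")) // disk_writes_per_second
	fmt.Println(camelToSnake("eventType"))           // event_type
}

A naive rune-by-rune conversion like this turns diskWritesPerSecond into disk_writes_per_second; acronym-heavy names such as EntityID would need extra handling.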
107 lines
2.3 KiB
Go
package stream

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"reflect"
	"testing"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/newrelic"
)

func TestParseFailure(t *testing.T) {
	f := func(req string) {
		t.Helper()

		callback := func(rows []newrelic.Row) error {
			panic(fmt.Errorf("unexpected call into callback"))
		}
		r := bytes.NewReader([]byte(req))
		if err := Parse(r, false, callback); err == nil {
			t.Fatalf("expecting non-empty error")
		}
	}
	f("")
	f("foo")
	f("{}")
	f("[1,2,3]")
}

func TestParseSuccess(t *testing.T) {
	f := func(req string, expectedRows []newrelic.Row) {
		t.Helper()

		callback := func(rows []newrelic.Row) error {
			if !reflect.DeepEqual(rows, expectedRows) {
				return fmt.Errorf("unexpected rows\ngot\n%v\nwant\n%v", rows, expectedRows)
			}
			return nil
		}

		// Parse from uncompressed reader
		r := bytes.NewReader([]byte(req))
		if err := Parse(r, false, callback); err != nil {
			t.Fatalf("unexpected error when parsing uncompressed request: %s", err)
		}

		var bb bytes.Buffer
		zw := gzip.NewWriter(&bb)
		if _, err := zw.Write([]byte(req)); err != nil {
			t.Fatalf("cannot compress request: %s", err)
		}
		if err := zw.Close(); err != nil {
			t.Fatalf("cannot close compressed writer: %s", err)
		}
		if err := Parse(&bb, true, callback); err != nil {
			t.Fatalf("unexpected error when parsing compressed request: %s", err)
		}
	}

	f("[]", nil)
	f(`[{"Events":[]}]`, nil)
	f(`[{
		"EntityID":28257883748326179,
		"IsAgent":true,
		"Events":[
			{
				"eventType":"SystemSample",
				"timestamp":1690286061,
				"entityKey":"macbook-pro.local",
				"dc": "1",
				"diskWritesPerSecond":-34.21,
				"uptime":762376
			}
		],
		"ReportingAgentID":28257883748326179
	}]`, []newrelic.Row{
		{
			Tags: []newrelic.Tag{
				{
					Key:   []byte("eventType"),
					Value: []byte("SystemSample"),
				},
				{
					Key:   []byte("entityKey"),
					Value: []byte("macbook-pro.local"),
				},
				{
					Key:   []byte("dc"),
					Value: []byte("1"),
				},
			},
			Samples: []newrelic.Sample{
				{
					Name:  []byte("diskWritesPerSecond"),
					Value: -34.21,
				},
				{
					Name:  []byte("uptime"),
					Value: 762376,
				},
			},
			Timestamp: 1690286061000,
		},
	})
}
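
For context, here is a minimal sketch of how a caller could feed an HTTP request body into Parse, based on the Parse(reader, isGzip, callback) shape exercised by the tests above. The handler itself is an assumption for illustration and is not part of this file or this commit.

package stream

import (
	"net/http"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/newrelic"
)

// handleNewRelicEvents is an illustrative handler sketch: it forwards the
// request body to Parse, telling it whether the payload is gzip-compressed,
// and receives the parsed rows via the callback.
func handleNewRelicEvents(w http.ResponseWriter, r *http.Request) {
	isGzip := r.Header.Get("Content-Encoding") == "gzip"
	err := Parse(r.Body, isGzip, func(rows []newrelic.Row) error {
		// A real handler would convert the rows to time series and pass
		// them to the ingestion pipeline; here they are only counted.
		_ = len(rows)
		return nil
	})
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	w.WriteHeader(http.StatusNoContent)
}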