mirror of
https://github.com/VictoriaMetrics/VictoriaMetrics.git
synced 2024-12-22 16:36:27 +01:00
f548adce0b
- Parse protobuf if Content-Type isn't set to `application/json` - this behavior is documented at https://grafana.com/docs/loki/latest/api/#push-log-entries-to-loki - Properly handle gzip'ped JSON requests. The `gzip` header must be read from `Content-Encoding` instead of `Content-Type` header - Properly flush all the parsed logs with the explicit call to vlstorage.MustAddRows() at the end of query handler - Check JSON field types more strictly. - Allow parsing Loki timestamp as floating-point number. Such a timestamp can be generated by some clients, which store timestamps in float64 instead of int64. - Optimize parsing of Loki labels in Prometheus text exposition format. - Simplify tests. - Remove lib/slicesutil, since there are no more users for it. - Update docs with missing info and fix various typos. For example, it should be enough to have `instance` and `job` labels as stream fields in most Loki setups. - Allow empty or missing timestamps in the ingested logs. The current timestamp at VictoriaLogs side is then used for the ingested logs. This simplifies debugging and testing of the provided HTTP-based data ingestion APIs. The remaining MAJOR issue, which needs to be addressed: victoria-logs binary size increased from 13MB to 22MB after adding support for Loki data ingestion protocol at https://github.com/VictoriaMetrics/VictoriaMetrics/pull/4482 . This is because of shitty protobuf dependencies. They must be replaced with another protobuf implementation similar to the one used at lib/prompb or lib/prompbmarshal .
172 lines
4.5 KiB
Go
172 lines
4.5 KiB
Go
package loki
|
|
|
|
import (
|
|
"fmt"
|
|
"io"
|
|
"net/http"
|
|
"strconv"
|
|
"strings"
|
|
"sync"
|
|
"time"
|
|
|
|
"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
|
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
|
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
|
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
|
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter"
|
|
"github.com/VictoriaMetrics/metrics"
|
|
"github.com/golang/snappy"
|
|
)
|
|
|
|
var (
	// rowsIngestedProtobufTotal counts log rows ingested via the Loki protobuf protocol.
	rowsIngestedProtobufTotal = metrics.NewCounter(`vl_rows_ingested_total{type="loki",format="protobuf"}`)

	// bytesBufPool provides reusable byte buffers for snappy decompression
	// in parseProtobufRequest.
	bytesBufPool bytesutil.ByteBufferPool

	// pushReqsPool provides reusable *PushRequest objects;
	// see getPushRequest and putPushRequest.
	pushReqsPool sync.Pool
)
|
|
|
|
func handleProtobuf(r *http.Request, w http.ResponseWriter) bool {
|
|
wcr := writeconcurrencylimiter.GetReader(r.Body)
|
|
data, err := io.ReadAll(wcr)
|
|
writeconcurrencylimiter.PutReader(wcr)
|
|
if err != nil {
|
|
httpserver.Errorf(w, r, "cannot read request body: %s", err)
|
|
return true
|
|
}
|
|
|
|
cp, err := getCommonParams(r)
|
|
if err != nil {
|
|
httpserver.Errorf(w, r, "cannot parse common params from request: %s", err)
|
|
return true
|
|
}
|
|
lr := logstorage.GetLogRows(cp.StreamFields, cp.IgnoreFields)
|
|
processLogMessage := cp.GetProcessLogMessageFunc(lr)
|
|
n, err := parseProtobufRequest(data, processLogMessage)
|
|
vlstorage.MustAddRows(lr)
|
|
logstorage.PutLogRows(lr)
|
|
if err != nil {
|
|
httpserver.Errorf(w, r, "cannot parse loki request: %s", err)
|
|
return true
|
|
}
|
|
rowsIngestedProtobufTotal.Add(n)
|
|
return true
|
|
}
|
|
|
|
func parseProtobufRequest(data []byte, processLogMessage func(timestamp int64, fields []logstorage.Field)) (int, error) {
|
|
bb := bytesBufPool.Get()
|
|
defer bytesBufPool.Put(bb)
|
|
|
|
buf, err := snappy.Decode(bb.B[:cap(bb.B)], data)
|
|
if err != nil {
|
|
return 0, fmt.Errorf("cannot decode snappy-encoded request body: %w", err)
|
|
}
|
|
bb.B = buf
|
|
|
|
req := getPushRequest()
|
|
defer putPushRequest(req)
|
|
|
|
err = req.Unmarshal(bb.B)
|
|
if err != nil {
|
|
return 0, fmt.Errorf("cannot parse request body: %s", err)
|
|
}
|
|
|
|
var commonFields []logstorage.Field
|
|
rowsIngested := 0
|
|
streams := req.Streams
|
|
currentTimestamp := time.Now().UnixNano()
|
|
for i := range streams {
|
|
stream := &streams[i]
|
|
// st.Labels contains labels for the stream.
|
|
// Labels are same for all entries in the stream.
|
|
commonFields, err = parsePromLabels(commonFields[:0], stream.Labels)
|
|
if err != nil {
|
|
return rowsIngested, fmt.Errorf("cannot parse stream labels %q: %s", stream.Labels, err)
|
|
}
|
|
fields := commonFields
|
|
|
|
entries := stream.Entries
|
|
for j := range entries {
|
|
entry := &entries[j]
|
|
fields = append(fields[:len(commonFields)], logstorage.Field{
|
|
Name: "_msg",
|
|
Value: entry.Line,
|
|
})
|
|
ts := entry.Timestamp.UnixNano()
|
|
if ts == 0 {
|
|
ts = currentTimestamp
|
|
}
|
|
processLogMessage(ts, fields)
|
|
}
|
|
rowsIngested += len(stream.Entries)
|
|
}
|
|
return rowsIngested, nil
|
|
}
|
|
|
|
// parsePromLabels parses log fields in Prometheus text exposition format from s, appends them to dst and returns the result.
|
|
//
|
|
// See test data of promtail for examples: https://github.com/grafana/loki/blob/a24ef7b206e0ca63ee74ca6ecb0a09b745cd2258/pkg/push/types_test.go
|
|
func parsePromLabels(dst []logstorage.Field, s string) ([]logstorage.Field, error) {
|
|
// Make sure s is wrapped into `{...}`
|
|
s = strings.TrimSpace(s)
|
|
if len(s) < 2 {
|
|
return nil, fmt.Errorf("too short string to parse: %q", s)
|
|
}
|
|
if s[0] != '{' {
|
|
return nil, fmt.Errorf("missing `{` at the beginning of %q", s)
|
|
}
|
|
if s[len(s)-1] != '}' {
|
|
return nil, fmt.Errorf("missing `}` at the end of %q", s)
|
|
}
|
|
s = s[1 : len(s)-1]
|
|
|
|
for len(s) > 0 {
|
|
// Parse label name
|
|
n := strings.IndexByte(s, '=')
|
|
if n < 0 {
|
|
return nil, fmt.Errorf("cannot find `=` char for label value at %s", s)
|
|
}
|
|
name := s[:n]
|
|
s = s[n+1:]
|
|
|
|
// Parse label value
|
|
qs, err := strconv.QuotedPrefix(s)
|
|
if err != nil {
|
|
return nil, fmt.Errorf("cannot parse value for label %q at %s: %w", name, s, err)
|
|
}
|
|
s = s[len(qs):]
|
|
value, err := strconv.Unquote(qs)
|
|
if err != nil {
|
|
return nil, fmt.Errorf("cannot unquote value %q for label %q: %w", qs, name, err)
|
|
}
|
|
|
|
// Append the found field to dst.
|
|
dst = append(dst, logstorage.Field{
|
|
Name: name,
|
|
Value: value,
|
|
})
|
|
|
|
// Check whether there are other labels remaining
|
|
if len(s) == 0 {
|
|
break
|
|
}
|
|
if !strings.HasPrefix(s, ",") {
|
|
return nil, fmt.Errorf("missing `,` char at %s", s)
|
|
}
|
|
s = s[1:]
|
|
s = strings.TrimPrefix(s, " ")
|
|
}
|
|
return dst, nil
|
|
}
|
|
|
|
func getPushRequest() *PushRequest {
|
|
v := pushReqsPool.Get()
|
|
if v == nil {
|
|
return &PushRequest{}
|
|
}
|
|
return v.(*PushRequest)
|
|
}
|
|
|
|
// putPushRequest resets req and returns it to the pool for reuse
// by getPushRequest. req must not be used after this call.
func putPushRequest(req *PushRequest) {
	req.Reset()
	pushReqsPool.Put(req)
}
|