package logsql

import (
	"context"
	"fmt"
	"math"
	"net/http"
	"sort"
	"strings"
	"sync"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vlstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)

// ProcessHitsRequest handles /select/logsql/hits request.
//
// See https://docs.victoriametrics.com/victorialogs/querying/#querying-hits-stats
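//
// A hypothetical example request is shown below; it assumes a VictoriaLogs
// instance listening on the default localhost:9428 address (adjust the
// address, query and time range to your setup):
//
//	curl http://localhost:9428/select/logsql/hits \
//	    -d 'query=error' -d 'start=2024-05-20T00:00:00Z' -d 'end=2024-05-21T00:00:00Z' -d 'step=1h'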
func ProcessHitsRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) {
	q, tenantIDs, err := parseCommonArgs(r)
	if err != nil {
		httpserver.Errorf(w, r, "%s", err)
		return
	}

	// Obtain step
	stepStr := r.FormValue("step")
	if stepStr == "" {
		stepStr = "1d"
	}
	step, err := promutils.ParseDuration(stepStr)
	if err != nil {
		httpserver.Errorf(w, r, "cannot parse 'step' arg: %s", err)
		return
	}
	if step <= 0 {
		httpserver.Errorf(w, r, "'step' must be bigger than zero")
		return
	}

	// Obtain offset
	offsetStr := r.FormValue("offset")
	if offsetStr == "" {
		offsetStr = "0s"
	}
	offset, err := promutils.ParseDuration(offsetStr)
	if err != nil {
		httpserver.Errorf(w, r, "cannot parse 'offset' arg: %s", err)
		return
	}

	// Obtain field entries
	fields := r.Form["field"]

	// Prepare the query
	q.AddCountByTimePipe(int64(step), int64(offset), fields)
	q.Optimize()
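
	// The writeBlock callback below may be invoked concurrently by the query
	// runner (the ignored uint argument identifies the worker), so access to
	// the shared map m is serialized with mLock.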
	var mLock sync.Mutex
	m := make(map[string]*hitsSeries)
	writeBlock := func(_ uint, timestamps []int64, columns []logstorage.BlockColumn) {
		if len(columns) == 0 || len(columns[0].Values) == 0 {
			return
		}
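
		// The count-by-time pipe is assumed to produce its columns in the
		// following order: the time bucket first, the per-bucket hit count
		// last, and the requested group-by fields in between.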
		timestampValues := columns[0].Values
		hitsValues := columns[len(columns)-1].Values
		columns = columns[1 : len(columns)-1]

		bb := blockResultPool.Get()
		for i := range timestamps {
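			// Clone the timestamp and hits strings, since the underlying
			// block memory may be reused after writeBlock returns.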
			timestampStr := strings.Clone(timestampValues[i])
			hitsStr := strings.Clone(hitsValues[i])

			bb.Reset()
			WriteLabelsForHits(bb, columns, i)

			mLock.Lock()
			hs, ok := m[string(bb.B)]
			if !ok {
				k := string(bb.B)
				hs = &hitsSeries{}
				m[k] = hs
			}
			hs.timestamps = append(hs.timestamps, timestampStr)
			hs.values = append(hs.values, hitsStr)
			mLock.Unlock()
		}
		blockResultPool.Put(bb)
	}

	// Execute the query
	if err := vlstorage.RunQuery(ctx, tenantIDs, q, writeBlock); err != nil {
		httpserver.Errorf(w, r, "cannot execute query [%s]: %s", q, err)
		return
	}

	// Write response
	w.Header().Set("Content-Type", "application/json")
	WriteHitsSeries(w, m)
}
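
// hitsSeries holds parallel slices of timestamp and hit-count strings for a
// single set of label values; sort orders both slices by the timestamp string.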
type hitsSeries struct {
	timestamps []string
	values     []string
}

func (hs *hitsSeries) sort() {
	sort.Sort(hs)
}

func (hs *hitsSeries) Len() int {
	return len(hs.timestamps)
}

func (hs *hitsSeries) Swap(i, j int) {
	hs.timestamps[i], hs.timestamps[j] = hs.timestamps[j], hs.timestamps[i]
	hs.values[i], hs.values[j] = hs.values[j], hs.values[i]
}

func (hs *hitsSeries) Less(i, j int) bool {
	return hs.timestamps[i] < hs.timestamps[j]
}

// ProcessFieldNamesRequest handles /select/logsql/field_names request.
//
// See https://docs.victoriametrics.com/victorialogs/querying/#querying-field-names
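//
// A hypothetical example request (assumes the default local listen address):
//
//	curl http://localhost:9428/select/logsql/field_names -d 'query=error'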
func ProcessFieldNamesRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) {
	q, tenantIDs, err := parseCommonArgs(r)
	if err != nil {
		httpserver.Errorf(w, r, "%s", err)
		return
	}

	// Obtain field names for the given query
	q.Optimize()
	fieldNames, err := vlstorage.GetFieldNames(ctx, tenantIDs, q)
	if err != nil {
		httpserver.Errorf(w, r, "cannot obtain field names: %s", err)
		return
	}

	// Write results
	w.Header().Set("Content-Type", "application/json")
	WriteValuesWithHitsJSON(w, fieldNames)
}

// ProcessFieldValuesRequest handles /select/logsql/field_values request.
//
// See https://docs.victoriametrics.com/victorialogs/querying/#querying-field-values
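//
// A hypothetical example request (assumes the default local listen address;
// 'host' is a placeholder field name):
//
//	curl http://localhost:9428/select/logsql/field_values -d 'query=error' -d 'field=host' -d 'limit=10'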
func ProcessFieldValuesRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) {
	q, tenantIDs, err := parseCommonArgs(r)
	if err != nil {
		httpserver.Errorf(w, r, "%s", err)
		return
	}

	// Parse fieldName query arg
	fieldName := r.FormValue("field")
	if fieldName == "" {
		httpserver.Errorf(w, r, "missing 'field' query arg")
		return
	}

	// Parse limit query arg
	limit, err := httputils.GetInt(r, "limit")
	if err != nil {
		httpserver.Errorf(w, r, "%s", err)
		return
	}
	if limit < 0 {
		limit = 0
	}

	// Obtain unique values for the given field
	q.Optimize()
	values, err := vlstorage.GetFieldValues(ctx, tenantIDs, q, fieldName, uint64(limit))
	if err != nil {
		httpserver.Errorf(w, r, "cannot obtain values for field %q: %s", fieldName, err)
		return
	}

	// Write results
	w.Header().Set("Content-Type", "application/json")
	WriteValuesWithHitsJSON(w, values)
}

// ProcessStreamLabelNamesRequest processes /select/logsql/stream_label_names request.
//
// See https://docs.victoriametrics.com/victorialogs/querying/#querying-stream-label-names
func ProcessStreamLabelNamesRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) {
	q, tenantIDs, err := parseCommonArgs(r)
	if err != nil {
		httpserver.Errorf(w, r, "%s", err)
		return
	}

	// Obtain stream label names for the given query
	q.Optimize()
	names, err := vlstorage.GetStreamLabelNames(ctx, tenantIDs, q)
	if err != nil {
		httpserver.Errorf(w, r, "cannot obtain stream label names: %s", err)
		return
	}

	// Write results
	w.Header().Set("Content-Type", "application/json")
	WriteValuesWithHitsJSON(w, names)
}

// ProcessStreamLabelValuesRequest processes /select/logsql/stream_label_values request.
//
// See https://docs.victoriametrics.com/victorialogs/querying/#querying-stream-label-values
func ProcessStreamLabelValuesRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) {
	q, tenantIDs, err := parseCommonArgs(r)
	if err != nil {
		httpserver.Errorf(w, r, "%s", err)
		return
	}

	// Parse labelName query arg
	labelName := r.FormValue("label")
	if labelName == "" {
		httpserver.Errorf(w, r, "missing 'label' query arg")
		return
	}

	// Parse limit query arg
	limit, err := httputils.GetInt(r, "limit")
	if err != nil {
		httpserver.Errorf(w, r, "%s", err)
		return
	}
	if limit < 0 {
		limit = 0
	}

	// Obtain stream label values for the given label
	q.Optimize()
	values, err := vlstorage.GetStreamLabelValues(ctx, tenantIDs, q, labelName, uint64(limit))
	if err != nil {
		httpserver.Errorf(w, r, "cannot obtain stream label values: %s", err)
		return
	}

	// Write results
	w.Header().Set("Content-Type", "application/json")
	WriteValuesWithHitsJSON(w, values)
}

// ProcessStreamsRequest processes /select/logsql/streams request.
//
// See https://docs.victoriametrics.com/victorialogs/querying/#querying-streams
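//
// A hypothetical example request (assumes the default local listen address):
//
//	curl http://localhost:9428/select/logsql/streams -d 'query=error' -d 'limit=10'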
func ProcessStreamsRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) {
	q, tenantIDs, err := parseCommonArgs(r)
	if err != nil {
		httpserver.Errorf(w, r, "%s", err)
		return
	}

	// Parse limit query arg
	limit, err := httputils.GetInt(r, "limit")
	if err != nil {
		httpserver.Errorf(w, r, "%s", err)
		return
	}
	if limit < 0 {
		limit = 0
	}

	// Obtain streams for the given query
	q.Optimize()
	streams, err := vlstorage.GetStreams(ctx, tenantIDs, q, uint64(limit))
	if err != nil {
		httpserver.Errorf(w, r, "cannot obtain streams: %s", err)
		return
	}

	// Write results
	w.Header().Set("Content-Type", "application/json")
	WriteValuesWithHitsJSON(w, streams)
}

// ProcessQueryRequest handles /select/logsql/query request.
//
// See https://docs.victoriametrics.com/victorialogs/querying/#http-api
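//
// A hypothetical example request (assumes the default local listen address);
// matching log entries are streamed back as JSON rows:
//
//	curl http://localhost:9428/select/logsql/query -d 'query=error' -d 'limit=10'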
func ProcessQueryRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) {
	q, tenantIDs, err := parseCommonArgs(r)
	if err != nil {
		httpserver.Errorf(w, r, "%s", err)
		return
	}

	// Parse limit query arg
	limit, err := httputils.GetInt(r, "limit")
	if err != nil {
		httpserver.Errorf(w, r, "%s", err)
		return
	}
	if limit > 0 {
		q.AddPipeLimit(uint64(limit))
	}
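
	// Results are streamed to the client as they arrive: each block of matching
	// rows is rendered as JSON (one object per row) and written through a
	// buffered writer, so the full result set does not need to be accumulated
	// in memory.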
	bw := getBufferedWriter(w)

	writeBlock := func(_ uint, timestamps []int64, columns []logstorage.BlockColumn) {
		if len(columns) == 0 || len(columns[0].Values) == 0 {
			return
		}

		bb := blockResultPool.Get()
		for i := range timestamps {
			WriteJSONRow(bb, columns, i)
		}
		bw.WriteIgnoreErrors(bb.B)
		blockResultPool.Put(bb)
	}

	w.Header().Set("Content-Type", "application/stream+json")
	q.Optimize()
	err = vlstorage.RunQuery(ctx, tenantIDs, q, writeBlock)

	bw.FlushIgnoreErrors()
	putBufferedWriter(bw)

	if err != nil {
		httpserver.Errorf(w, r, "cannot execute query [%s]: %s", q, err)
	}
}

var blockResultPool bytesutil.ByteBufferPool
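
// parseCommonArgs extracts the arguments shared by all the handlers above:
// the tenant ID from the request, the LogsQL query from the 'query' arg and
// the optional 'start' and 'end' time range bounds.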
func parseCommonArgs(r *http.Request) (*logstorage.Query, []logstorage.TenantID, error) {
	// Extract tenantID
	tenantID, err := logstorage.GetTenantIDFromRequest(r)
	if err != nil {
		return nil, nil, fmt.Errorf("cannot obtain tenantID: %w", err)
	}
	tenantIDs := []logstorage.TenantID{tenantID}

	// Parse query
	qStr := r.FormValue("query")
	q, err := logstorage.ParseQuery(qStr)
	if err != nil {
		return nil, nil, fmt.Errorf("cannot parse query [%s]: %s", qStr, err)
	}

	// Parse optional start and end args
	start, okStart, err := getTimeNsec(r, "start")
	if err != nil {
		return nil, nil, err
	}
	end, okEnd, err := getTimeNsec(r, "end")
	if err != nil {
		return nil, nil, err
	}
	if okStart || okEnd {
		if !okStart {
			start = math.MinInt64
		}
		if !okEnd {
			end = math.MaxInt64
		}
		q.AddTimeFilter(start, end)
	}

	return q, tenantIDs, nil
}
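
// getTimeNsec returns the value of the given time arg in nanoseconds.
// The second return value is false if the arg is missing from the request.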
func getTimeNsec(r *http.Request, argName string) (int64, bool, error) {
	s := r.FormValue(argName)
	if s == "" {
		return 0, false, nil
	}
	currentTimestamp := float64(time.Now().UnixNano()) / 1e9
	secs, err := promutils.ParseTimeAt(s, currentTimestamp)
	if err != nil {
		return 0, false, fmt.Errorf("cannot parse %s=%s: %w", argName, s, err)
	}
	return int64(secs * 1e9), true, nil
}