2020-09-10 23:29:26 +02:00
|
|
|
package searchutils
|
|
|
|
|
|
|
|
import (
|
|
|
|
"flag"
|
|
|
|
"fmt"
|
|
|
|
"net/http"
|
|
|
|
"strings"
|
|
|
|
"time"
|
|
|
|
|
2020-09-11 12:18:57 +02:00
|
|
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
|
2023-06-20 07:31:57 +02:00
|
|
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/httputils"
|
2021-12-06 16:07:06 +01:00
|
|
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
|
2020-09-10 23:29:26 +02:00
|
|
|
"github.com/VictoriaMetrics/metricsql"
|
|
|
|
)
|
|
|
|
|
|
|
|
// Command-line flags bounding the duration of the various search request kinds.
// The flag description strings double as user-facing documentation via -help.
var (
	maxExportDuration = flag.Duration("search.maxExportDuration", time.Hour*24*30, "The maximum duration for /api/v1/export call")
	maxQueryDuration = flag.Duration("search.maxQueryDuration", time.Second*30, "The maximum duration for query execution")
	maxStatusRequestDuration = flag.Duration("search.maxStatusRequestDuration", time.Minute*5, "The maximum duration for /api/v1/status/* requests")
)
|
|
|
|
|
2020-09-22 00:21:20 +02:00
|
|
|
// GetMaxQueryDuration returns the maximum duration for query from r.
|
|
|
|
func GetMaxQueryDuration(r *http.Request) time.Duration {
|
2023-06-20 07:31:57 +02:00
|
|
|
dms, err := httputils.GetDuration(r, "timeout", 0)
|
2020-09-22 00:21:20 +02:00
|
|
|
if err != nil {
|
|
|
|
dms = 0
|
|
|
|
}
|
|
|
|
d := time.Duration(dms) * time.Millisecond
|
|
|
|
if d <= 0 || d > *maxQueryDuration {
|
|
|
|
d = *maxQueryDuration
|
|
|
|
}
|
|
|
|
return d
|
|
|
|
}
|
|
|
|
|
2020-09-10 23:29:26 +02:00
|
|
|
// GetDeadlineForQuery returns deadline for the given query r.
|
2020-09-11 12:18:57 +02:00
|
|
|
func GetDeadlineForQuery(r *http.Request, startTime time.Time) Deadline {
|
2020-09-10 23:29:26 +02:00
|
|
|
dMax := maxQueryDuration.Milliseconds()
|
|
|
|
return getDeadlineWithMaxDuration(r, startTime, dMax, "-search.maxQueryDuration")
|
|
|
|
}
|
|
|
|
|
2021-03-30 20:38:59 +02:00
|
|
|
// GetDeadlineForStatusRequest returns deadline for the given request to /api/v1/status/*.
|
|
|
|
func GetDeadlineForStatusRequest(r *http.Request, startTime time.Time) Deadline {
|
|
|
|
dMax := maxStatusRequestDuration.Milliseconds()
|
|
|
|
return getDeadlineWithMaxDuration(r, startTime, dMax, "-search.maxStatusRequestDuration")
|
|
|
|
}
|
|
|
|
|
2020-09-10 23:29:26 +02:00
|
|
|
// GetDeadlineForExport returns deadline for the given request to /api/v1/export.
|
2020-09-11 12:18:57 +02:00
|
|
|
func GetDeadlineForExport(r *http.Request, startTime time.Time) Deadline {
|
2020-09-10 23:29:26 +02:00
|
|
|
dMax := maxExportDuration.Milliseconds()
|
|
|
|
return getDeadlineWithMaxDuration(r, startTime, dMax, "-search.maxExportDuration")
|
|
|
|
}
|
|
|
|
|
2020-09-11 12:18:57 +02:00
|
|
|
func getDeadlineWithMaxDuration(r *http.Request, startTime time.Time, dMax int64, flagHint string) Deadline {
|
2023-06-20 07:31:57 +02:00
|
|
|
d, err := httputils.GetDuration(r, "timeout", 0)
|
2020-09-10 23:29:26 +02:00
|
|
|
if err != nil {
|
|
|
|
d = 0
|
|
|
|
}
|
|
|
|
if d <= 0 || d > dMax {
|
|
|
|
d = dMax
|
|
|
|
}
|
|
|
|
timeout := time.Duration(d) * time.Millisecond
|
2020-09-11 12:18:57 +02:00
|
|
|
return NewDeadline(startTime, timeout, flagHint)
|
2020-09-10 23:29:26 +02:00
|
|
|
}
|
|
|
|
|
2020-09-11 12:18:57 +02:00
|
|
|
// Deadline contains deadline with the corresponding timeout for pretty error messages.
type Deadline struct {
	// deadline is the unix timestamp in seconds when the deadline is reached.
	deadline uint64

	// timeout is the duration the deadline was constructed from;
	// it is kept for human-readable messages (see String).
	timeout time.Duration

	// flagHint names the command-line flag that can be used
	// for adjusting the timeout; may be empty.
	flagHint string
}
|
|
|
|
|
|
|
|
// NewDeadline returns deadline for the given timeout.
|
|
|
|
//
|
|
|
|
// flagHint must contain a hit for command-line flag, which could be used
|
|
|
|
// in order to increase timeout.
|
|
|
|
func NewDeadline(startTime time.Time, timeout time.Duration, flagHint string) Deadline {
|
|
|
|
return Deadline{
|
|
|
|
deadline: uint64(startTime.Add(timeout).Unix()),
|
|
|
|
timeout: timeout,
|
|
|
|
flagHint: flagHint,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-07-06 12:19:45 +02:00
|
|
|
// DeadlineFromTimestamp returns deadline from the given timestamp in seconds.
|
|
|
|
func DeadlineFromTimestamp(timestamp uint64) Deadline {
|
|
|
|
startTime := time.Now()
|
|
|
|
timeout := time.Unix(int64(timestamp), 0).Sub(startTime)
|
|
|
|
return NewDeadline(startTime, timeout, "")
|
|
|
|
}
|
|
|
|
|
2020-09-11 12:18:57 +02:00
|
|
|
// Exceeded returns true if deadline is exceeded.
|
|
|
|
func (d *Deadline) Exceeded() bool {
|
|
|
|
return fasttime.UnixTimestamp() > d.deadline
|
|
|
|
}
|
|
|
|
|
|
|
|
// Deadline returns deadline in unix timestamp seconds.
func (d *Deadline) Deadline() uint64 {
	return d.deadline
}
|
|
|
|
|
|
|
|
// String returns human-readable string representation for d.
|
|
|
|
func (d *Deadline) String() string {
|
2020-11-30 23:14:06 +01:00
|
|
|
startTime := time.Unix(int64(d.deadline), 0).Add(-d.timeout)
|
|
|
|
elapsed := time.Since(startTime)
|
2022-07-06 12:19:45 +02:00
|
|
|
msg := fmt.Sprintf("%.3f seconds (elapsed %.3f seconds)", d.timeout.Seconds(), elapsed.Seconds())
|
2022-12-08 22:07:30 +01:00
|
|
|
if float64(elapsed)/float64(d.timeout) > 0.9 && d.flagHint != "" {
|
2022-07-06 12:19:45 +02:00
|
|
|
msg += fmt.Sprintf("; the timeout can be adjusted with `%s` command-line flag", d.flagHint)
|
|
|
|
}
|
|
|
|
return msg
|
2020-09-11 12:18:57 +02:00
|
|
|
}
|
2021-03-23 13:16:29 +01:00
|
|
|
|
2021-12-06 16:07:06 +01:00
|
|
|
// GetExtraTagFilters returns additional label filters from request.
|
|
|
|
//
|
|
|
|
// Label filters can be present in extra_label and extra_filters[] query args.
|
|
|
|
// They are combined. For example, the following query args:
|
2022-07-11 18:21:59 +02:00
|
|
|
//
|
|
|
|
// extra_label=t1=v1&extra_label=t2=v2&extra_filters[]={env="prod",team="devops"}&extra_filters={env=~"dev|staging",team!="devops"}
|
|
|
|
//
|
2021-12-06 16:07:06 +01:00
|
|
|
// should be translated to the following filters joined with "or":
|
2022-07-11 18:21:59 +02:00
|
|
|
//
|
|
|
|
// {env="prod",team="devops",t1="v1",t2="v2"}
|
|
|
|
// {env=~"dev|staging",team!="devops",t1="v1",t2="v2"}
|
2021-12-06 16:07:06 +01:00
|
|
|
func GetExtraTagFilters(r *http.Request) ([][]storage.TagFilter, error) {
|
|
|
|
var tagFilters []storage.TagFilter
|
|
|
|
for _, match := range r.Form["extra_label"] {
|
2021-03-23 13:16:29 +01:00
|
|
|
tmp := strings.SplitN(match, "=", 2)
|
|
|
|
if len(tmp) != 2 {
|
|
|
|
return nil, fmt.Errorf("`extra_label` query arg must have the format `name=value`; got %q", match)
|
|
|
|
}
|
2022-06-09 18:46:26 +02:00
|
|
|
if tmp[0] == "__name__" {
|
|
|
|
// This is required for storage.Search.
|
|
|
|
tmp[0] = ""
|
|
|
|
}
|
2021-03-23 13:16:29 +01:00
|
|
|
tagFilters = append(tagFilters, storage.TagFilter{
|
|
|
|
Key: []byte(tmp[0]),
|
|
|
|
Value: []byte(tmp[1]),
|
|
|
|
})
|
|
|
|
}
|
2022-06-09 18:46:26 +02:00
|
|
|
extraFilters := append([]string{}, r.Form["extra_filters"]...)
|
2021-12-06 16:07:06 +01:00
|
|
|
extraFilters = append(extraFilters, r.Form["extra_filters[]"]...)
|
|
|
|
if len(extraFilters) == 0 {
|
|
|
|
if len(tagFilters) == 0 {
|
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
return [][]storage.TagFilter{tagFilters}, nil
|
|
|
|
}
|
|
|
|
var etfs [][]storage.TagFilter
|
|
|
|
for _, extraFilter := range extraFilters {
|
2023-07-16 08:48:21 +02:00
|
|
|
tfss, err := ParseMetricSelector(extraFilter)
|
2021-12-06 16:07:06 +01:00
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("cannot parse extra_filters=%s: %w", extraFilter, err)
|
|
|
|
}
|
2023-07-16 08:48:21 +02:00
|
|
|
for i := range tfss {
|
|
|
|
tfss[i] = append(tfss[i], tagFilters...)
|
|
|
|
}
|
|
|
|
etfs = append(etfs, tfss...)
|
2021-12-06 16:07:06 +01:00
|
|
|
}
|
|
|
|
return etfs, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// JoinTagFilterss adds etfs to every src filter and returns the result.
|
|
|
|
func JoinTagFilterss(src, etfs [][]storage.TagFilter) [][]storage.TagFilter {
|
|
|
|
if len(src) == 0 {
|
|
|
|
return etfs
|
|
|
|
}
|
|
|
|
if len(etfs) == 0 {
|
|
|
|
return src
|
|
|
|
}
|
|
|
|
var dst [][]storage.TagFilter
|
|
|
|
for _, tf := range src {
|
|
|
|
for _, etf := range etfs {
|
|
|
|
tfs := append([]storage.TagFilter{}, tf...)
|
|
|
|
tfs = append(tfs, etf...)
|
|
|
|
dst = append(dst, tfs)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return dst
|
|
|
|
}
|
|
|
|
|
|
|
|
// ParseMetricSelector parses s containing PromQL metric selector and returns the corresponding LabelFilters.
|
2023-07-16 08:48:21 +02:00
|
|
|
func ParseMetricSelector(s string) ([][]storage.TagFilter, error) {
|
2021-12-06 16:07:06 +01:00
|
|
|
expr, err := metricsql.Parse(s)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
me, ok := expr.(*metricsql.MetricExpr)
|
|
|
|
if !ok {
|
|
|
|
return nil, fmt.Errorf("expecting metricSelector; got %q", expr.AppendString(nil))
|
|
|
|
}
|
2023-07-16 08:48:21 +02:00
|
|
|
if len(me.LabelFilterss) == 0 {
|
|
|
|
return nil, fmt.Errorf("labelFilterss cannot be empty")
|
2021-12-06 16:07:06 +01:00
|
|
|
}
|
2023-07-16 08:48:21 +02:00
|
|
|
tfss := ToTagFilterss(me.LabelFilterss)
|
|
|
|
return tfss, nil
|
2021-12-06 16:07:06 +01:00
|
|
|
}
|
|
|
|
|
2023-07-16 08:48:21 +02:00
|
|
|
// ToTagFilterss converts lfss to or-delimited slices of storage.TagFilter
|
|
|
|
func ToTagFilterss(lfss [][]metricsql.LabelFilter) [][]storage.TagFilter {
|
|
|
|
tfss := make([][]storage.TagFilter, len(lfss))
|
|
|
|
for i, lfs := range lfss {
|
|
|
|
tfs := make([]storage.TagFilter, len(lfs))
|
|
|
|
for j := range lfs {
|
|
|
|
toTagFilter(&tfs[j], &lfs[j])
|
|
|
|
}
|
|
|
|
tfss[i] = tfs
|
2021-12-06 16:07:06 +01:00
|
|
|
}
|
2023-07-16 08:48:21 +02:00
|
|
|
return tfss
|
2021-12-06 16:07:06 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
func toTagFilter(dst *storage.TagFilter, src *metricsql.LabelFilter) {
|
|
|
|
if src.Label != "__name__" {
|
|
|
|
dst.Key = []byte(src.Label)
|
|
|
|
} else {
|
|
|
|
// This is required for storage.Search.
|
|
|
|
dst.Key = nil
|
|
|
|
}
|
|
|
|
dst.Value = []byte(src.Value)
|
|
|
|
dst.IsRegexp = src.IsRegexp
|
|
|
|
dst.IsNegative = src.IsNegative
|
2021-03-23 13:16:29 +01:00
|
|
|
}
|