app/vmselect: accept optional extra_filters[] query args for all the supported Prometheus querying APIs
See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1863
parent 45d082bbe2
commit ff15a752c1
@@ -517,9 +517,10 @@ All the Prometheus querying API handlers can be prepended with `/prometheus` pre

### Prometheus querying API enhancements

VictoriaMetrics accepts optional `extra_label=<label_name>=<label_value>` query arg, which can be used for enforcing additional label filters for queries. For example,
`/api/v1/query_range?extra_label=user_id=123&query=<query>` would automatically add `{user_id="123"}` label filter to the given `<query>`. This functionality can be used
for limiting the scope of time series visible to the given tenant. It is expected that the `extra_label` query arg is automatically set by auth proxy sitting
in front of VictoriaMetrics. See [vmauth](https://docs.victoriametrics.com/vmauth.html) and [vmgateway](https://docs.victoriametrics.com/vmgateway.html) as examples of such proxies.
`/api/v1/query_range?extra_label=user_id=123&extra_label=group_id=456&query=<query>` would automatically add `{user_id="123",group_id="456"}` label filters to the given `<query>`. This functionality can be used for limiting the scope of time series visible to the given tenant. It is expected that the `extra_label` query args are automatically set by auth proxy sitting in front of VictoriaMetrics. See [vmauth](https://docs.victoriametrics.com/vmauth.html) and [vmgateway](https://docs.victoriametrics.com/vmgateway.html) as examples of such proxies.

VictoriaMetrics accepts optional `extra_filters[]=series_selector` query arg, which can be used for enforcing arbitrary label filters for queries. For example,
`/api/v1/query_range?extra_filters[]={env=~"prod|staging",user="xyz"}&query=<query>` would automatically add `{env=~"prod|staging",user="xyz"}` label filters to the given `<query>`. This functionality can be used for limiting the scope of time series visible to the given tenant. It is expected that the `extra_filters[]` query args are automatically set by auth proxy sitting in front of VictoriaMetrics. See [vmauth](https://docs.victoriametrics.com/vmauth.html) and [vmgateway](https://docs.victoriametrics.com/vmgateway.html) as examples of such proxies.
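The proxy side of this setup is not part of this commit. Below is a minimal sketch of how an auth proxy could enforce `extra_filters[]` for every forwarded request; the listen address, the upstream URL and the hard-coded tenant filter are illustrative assumptions only.

```go
package main

import (
	"log"
	"net/http"
	"net/http/httputil"
	"net/url"
)

func main() {
	// Assumed upstream VictoriaMetrics address; adjust for your setup.
	upstream, err := url.Parse("http://victoriametrics:8428")
	if err != nil {
		log.Fatal(err)
	}
	proxy := httputil.NewSingleHostReverseProxy(upstream)
	director := proxy.Director
	proxy.Director = func(r *http.Request) {
		director(r)
		q := r.URL.Query()
		// A real proxy would derive the enforced filter from the authenticated tenant.
		q.Add("extra_filters[]", `{env=~"prod|staging",user="xyz"}`)
		r.URL.RawQuery = q.Encode()
	}
	log.Fatal(http.ListenAndServe(":8427", proxy))
}
```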

VictoriaMetrics accepts relative times in `time`, `start` and `end` query args additionally to unix timestamps and [RFC3339](https://www.ietf.org/rfc/rfc3339.txt).
For example, the following query would return data for the last 30 minutes: `/api/v1/query_range?start=-30m&query=...`.

@@ -556,8 +557,8 @@ VictoriaMetrics supports the following Graphite APIs, which are needed for [Grap

All the Graphite handlers can be pre-pended with `/graphite` prefix. For example, both `/graphite/metrics/find` and `/metrics/find` should work.

VictoriaMetrics accepts optional `extra_label=<label_name>=<label_value>` query arg for all the Graphite APIs. This arg can be used for limiting the scope of time series
visible to the given tenant. It is expected that the `extra_label` query arg is automatically set by auth proxy sitting in front of VictoriaMetrics.
VictoriaMetrics accepts optional query args: `extra_label=<label_name>=<label_value>` and `extra_filters[]=series_selector` query args for all the Graphite APIs. These args can be used for limiting the scope of time series visible to the given tenant. It is expected that the `extra_label` query arg is automatically set by auth proxy sitting in front of VictoriaMetrics. See [vmauth](https://docs.victoriametrics.com/vmauth.html) and [vmgateway](https://docs.victoriametrics.com/vmgateway.html) as examples of such proxies.
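For illustration only (not part of the commit), a small client calling a Graphite API endpoint with both query args could look like the sketch below; the host name, label name and filter values are assumptions.

```go
package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
	"net/url"
)

func main() {
	q := url.Values{}
	q.Set("query", "servers.*.cpu")          // Graphite /metrics/find query
	q.Add("extra_label", "tenant=team-a")    // enforced label filter
	q.Add("extra_filters[]", `{env="prod"}`) // enforced series selector
	resp, err := http.Get("http://victoriametrics:8428/metrics/find?" + q.Encode())
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(body))
}
```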

[Contact us](mailto:sales@victoriametrics.com) if you need assistance with such a proxy.

VictoriaMetrics supports `__graphite__` pseudo-label for filtering time series with Graphite-compatible filters in [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html).
@@ -32,7 +32,7 @@ func TagsDelSeriesHandler(startTime time.Time, w http.ResponseWriter, r *http.Re
var row graphiteparser.Row
var tagsPool []graphiteparser.Tag
ct := startTime.UnixNano() / 1e6
etfs, err := searchutils.GetEnforcedTagFiltersFromRequest(r)
etfs, err := searchutils.GetExtraTagFilters(r)
if err != nil {
return fmt.Errorf("cannot setup tag filters: %w", err)
}
@@ -53,8 +53,8 @@ func TagsDelSeriesHandler(startTime time.Time, w http.ResponseWriter, r *http.Re
Value: []byte(tag.Value),
})
}
tfs = append(tfs, etfs...)
sq := storage.NewSearchQuery(0, ct, [][]storage.TagFilter{tfs})
tfss := joinTagFilterss(tfs, etfs)
sq := storage.NewSearchQuery(0, ct, tfss)
n, err := netstorage.DeleteSeries(sq, deadline)
if err != nil {
return fmt.Errorf("cannot delete series for %q: %w", sq, err)
@@ -181,7 +181,7 @@ func TagsAutoCompleteValuesHandler(startTime time.Time, w http.ResponseWriter, r
valuePrefix := r.FormValue("valuePrefix")
exprs := r.Form["expr"]
var tagValues []string
etfs, err := searchutils.GetEnforcedTagFiltersFromRequest(r)
etfs, err := searchutils.GetExtraTagFilters(r)
if err != nil {
return fmt.Errorf("cannot setup tag filters: %w", err)
}
@@ -266,7 +266,7 @@ func TagsAutoCompleteTagsHandler(startTime time.Time, w http.ResponseWriter, r *
tagPrefix := r.FormValue("tagPrefix")
exprs := r.Form["expr"]
var labels []string
etfs, err := searchutils.GetEnforcedTagFiltersFromRequest(r)
etfs, err := searchutils.GetExtraTagFilters(r)
if err != nil {
return fmt.Errorf("cannot setup tag filters: %w", err)
}
@@ -345,7 +345,7 @@ func TagsFindSeriesHandler(startTime time.Time, w http.ResponseWriter, r *http.R
if len(exprs) == 0 {
return fmt.Errorf("expecting at least one `expr` query arg")
}
etfs, err := searchutils.GetEnforcedTagFiltersFromRequest(r)
etfs, err := searchutils.GetExtraTagFilters(r)
if err != nil {
return fmt.Errorf("cannot setup tag filters: %w", err)
}
@@ -474,14 +474,14 @@ func getInt(r *http.Request, argName string) (int, error) {
return n, nil
}

func getSearchQueryForExprs(startTime time.Time, etfs []storage.TagFilter, exprs []string) (*storage.SearchQuery, error) {
func getSearchQueryForExprs(startTime time.Time, etfs [][]storage.TagFilter, exprs []string) (*storage.SearchQuery, error) {
tfs, err := exprsToTagFilters(exprs)
if err != nil {
return nil, err
}
ct := startTime.UnixNano() / 1e6
tfs = append(tfs, etfs...)
sq := storage.NewSearchQuery(0, ct, [][]storage.TagFilter{tfs})
tfss := joinTagFilterss(tfs, etfs)
sq := storage.NewSearchQuery(0, ct, tfss)
return sq, nil
}

@@ -524,3 +524,7 @@ func parseFilterExpr(s string) (*storage.TagFilter, error) {
IsRegexp: isRegexp,
}, nil
}

func joinTagFilterss(tfs []storage.TagFilter, extraFilters [][]storage.TagFilter) [][]storage.TagFilter {
return searchutils.JoinTagFilterss([][]storage.TagFilter{tfs}, extraFilters)
}
@@ -283,11 +283,11 @@ func ExportHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
if start >= end {
end = start + defaultStep
}
etf, err := searchutils.GetEnforcedTagFiltersFromRequest(r)
etfs, err := searchutils.GetExtraTagFilters(r)
if err != nil {
return err
}
if err := exportHandler(w, matches, etf, start, end, format, maxRowsPerLine, reduceMemUsage, deadline); err != nil {
if err := exportHandler(w, matches, etfs, start, end, format, maxRowsPerLine, reduceMemUsage, deadline); err != nil {
return fmt.Errorf("error when exporting data for queries=%q on the time range (start=%d, end=%d): %w", matches, start, end, err)
}
return nil
@@ -295,7 +295,7 @@ func ExportHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)

var exportDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/api/v1/export"}`)

func exportHandler(w http.ResponseWriter, matches []string, etf []storage.TagFilter, start, end int64, format string, maxRowsPerLine int, reduceMemUsage bool, deadline searchutils.Deadline) error {
func exportHandler(w http.ResponseWriter, matches []string, etfs [][]storage.TagFilter, start, end int64, format string, maxRowsPerLine int, reduceMemUsage bool, deadline searchutils.Deadline) error {
writeResponseFunc := WriteExportStdResponse
writeLineFunc := func(xb *exportBlock, resultsCh chan<- *quicktemplate.ByteBuffer) {
bb := quicktemplate.AcquireByteBuffer()
@@ -352,7 +352,7 @@ func exportHandler(w http.ResponseWriter, matches []string, etf []storage.TagFil
if err != nil {
return err
}
tagFilterss = addEnforcedFiltersToTagFilterss(tagFilterss, etf)
tagFilterss = searchutils.JoinTagFilterss(tagFilterss, etfs)

sq := storage.NewSearchQuery(start, end, tagFilterss)
w.Header().Set("Content-Type", contentType)
@@ -478,13 +478,13 @@ func LabelValuesHandler(startTime time.Time, labelName string, w http.ResponseWr
if err := r.ParseForm(); err != nil {
return fmt.Errorf("cannot parse form values: %w", err)
}
etf, err := searchutils.GetEnforcedTagFiltersFromRequest(r)
etfs, err := searchutils.GetExtraTagFilters(r)
if err != nil {
return err
}
matches := getMatchesFromRequest(r)
var labelValues []string
if len(matches) == 0 && len(etf) == 0 {
if len(matches) == 0 && len(etfs) == 0 {
if len(r.Form["start"]) == 0 && len(r.Form["end"]) == 0 {
var err error
labelValues, err = netstorage.GetLabelValues(labelName, deadline)
@@ -527,7 +527,7 @@ func LabelValuesHandler(startTime time.Time, labelName string, w http.ResponseWr
if err != nil {
return err
}
labelValues, err = labelValuesWithMatches(labelName, matches, etf, start, end, deadline)
labelValues, err = labelValuesWithMatches(labelName, matches, etfs, start, end, deadline)
if err != nil {
return fmt.Errorf("cannot obtain label values for %q, match[]=%q, start=%d, end=%d: %w", labelName, matches, start, end, err)
}
@@ -543,7 +543,7 @@ func LabelValuesHandler(startTime time.Time, labelName string, w http.ResponseWr
return nil
}

func labelValuesWithMatches(labelName string, matches []string, etf []storage.TagFilter, start, end int64, deadline searchutils.Deadline) ([]string, error) {
func labelValuesWithMatches(labelName string, matches []string, etfs [][]storage.TagFilter, start, end int64, deadline searchutils.Deadline) ([]string, error) {
tagFilterss, err := getTagFilterssFromMatches(matches)
if err != nil {
return nil, err
@@ -564,7 +564,7 @@ func labelValuesWithMatches(labelName string, matches []string, etf []storage.Ta
if start >= end {
end = start + defaultStep
}
tagFilterss = addEnforcedFiltersToTagFilterss(tagFilterss, etf)
tagFilterss = searchutils.JoinTagFilterss(tagFilterss, etfs)
if len(tagFilterss) == 0 {
logger.Panicf("BUG: tagFilterss must be non-empty")
}
@@ -648,7 +648,7 @@ func TSDBStatusHandler(startTime time.Time, w http.ResponseWriter, r *http.Reque
if err := r.ParseForm(); err != nil {
return fmt.Errorf("cannot parse form values: %w", err)
}
etf, err := searchutils.GetEnforcedTagFiltersFromRequest(r)
etfs, err := searchutils.GetExtraTagFilters(r)
if err != nil {
return err
}
@@ -679,13 +679,13 @@ func TSDBStatusHandler(startTime time.Time, w http.ResponseWriter, r *http.Reque
topN = n
}
var status *storage.TSDBStatus
if len(matches) == 0 && len(etf) == 0 {
if len(matches) == 0 && len(etfs) == 0 {
status, err = netstorage.GetTSDBStatusForDate(deadline, date, topN)
if err != nil {
return fmt.Errorf(`cannot obtain tsdb status for date=%d, topN=%d: %w`, date, topN, err)
}
} else {
status, err = tsdbStatusWithMatches(matches, etf, date, topN, deadline)
status, err = tsdbStatusWithMatches(matches, etfs, date, topN, deadline)
if err != nil {
return fmt.Errorf("cannot obtain tsdb status with matches for date=%d, topN=%d: %w", date, topN, err)
}
@@ -700,12 +700,12 @@ func TSDBStatusHandler(startTime time.Time, w http.ResponseWriter, r *http.Reque
return nil
}

func tsdbStatusWithMatches(matches []string, etf []storage.TagFilter, date uint64, topN int, deadline searchutils.Deadline) (*storage.TSDBStatus, error) {
func tsdbStatusWithMatches(matches []string, etfs [][]storage.TagFilter, date uint64, topN int, deadline searchutils.Deadline) (*storage.TSDBStatus, error) {
tagFilterss, err := getTagFilterssFromMatches(matches)
if err != nil {
return nil, err
}
tagFilterss = addEnforcedFiltersToTagFilterss(tagFilterss, etf)
tagFilterss = searchutils.JoinTagFilterss(tagFilterss, etfs)
if len(tagFilterss) == 0 {
logger.Panicf("BUG: tagFilterss must be non-empty")
}
@@ -731,13 +731,13 @@ func LabelsHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
if err := r.ParseForm(); err != nil {
return fmt.Errorf("cannot parse form values: %w", err)
}
etf, err := searchutils.GetEnforcedTagFiltersFromRequest(r)
etfs, err := searchutils.GetExtraTagFilters(r)
if err != nil {
return err
}
matches := getMatchesFromRequest(r)
var labels []string
if len(matches) == 0 && len(etf) == 0 {
if len(matches) == 0 && len(etfs) == 0 {
if len(r.Form["start"]) == 0 && len(r.Form["end"]) == 0 {
var err error
labels, err = netstorage.GetLabels(deadline)
@@ -778,7 +778,7 @@ func LabelsHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
if err != nil {
return err
}
labels, err = labelsWithMatches(matches, etf, start, end, deadline)
labels, err = labelsWithMatches(matches, etfs, start, end, deadline)
if err != nil {
return fmt.Errorf("cannot obtain labels for match[]=%q, start=%d, end=%d: %w", matches, start, end, err)
}
@@ -794,7 +794,7 @@ func LabelsHandler(startTime time.Time, w http.ResponseWriter, r *http.Request)
return nil
}

func labelsWithMatches(matches []string, etf []storage.TagFilter, start, end int64, deadline searchutils.Deadline) ([]string, error) {
func labelsWithMatches(matches []string, etfs [][]storage.TagFilter, start, end int64, deadline searchutils.Deadline) ([]string, error) {
tagFilterss, err := getTagFilterssFromMatches(matches)
if err != nil {
return nil, err
@@ -802,7 +802,7 @@ func labelsWithMatches(matches []string, etf []storage.TagFilter, start, end int
if start >= end {
end = start + defaultStep
}
tagFilterss = addEnforcedFiltersToTagFilterss(tagFilterss, etf)
tagFilterss = searchutils.JoinTagFilterss(tagFilterss, etfs)
if len(tagFilterss) == 0 {
logger.Panicf("BUG: tagFilterss must be non-empty")
}
@@ -999,7 +999,7 @@ func QueryHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) e
if len(query) > maxQueryLen.N {
return fmt.Errorf("too long query; got %d bytes; mustn't exceed `-search.maxQueryLen=%d` bytes", len(query), maxQueryLen.N)
}
etf, err := searchutils.GetEnforcedTagFiltersFromRequest(r)
etfs, err := searchutils.GetExtraTagFilters(r)
if err != nil {
return err
}
@@ -1014,7 +1014,7 @@ func QueryHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) e
if end < start {
end = start
}
if err := exportHandler(w, []string{childQuery}, etf, start, end, "promapi", 0, false, deadline); err != nil {
if err := exportHandler(w, []string{childQuery}, etfs, start, end, "promapi", 0, false, deadline); err != nil {
return fmt.Errorf("error when exporting data for query=%q on the time range (start=%d, end=%d): %w", childQuery, start, end, err)
}
queryDuration.UpdateDuration(startTime)
@@ -1030,7 +1030,7 @@ func QueryHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) e
start -= offset
end := start
start = end - window
if err := queryRangeHandler(startTime, w, childQuery, start, end, step, r, ct, etf); err != nil {
if err := queryRangeHandler(startTime, w, childQuery, start, end, step, r, ct, etfs); err != nil {
return fmt.Errorf("error when executing query=%q on the time range (start=%d, end=%d, step=%d): %w", childQuery, start, end, step, err)
}
queryDuration.UpdateDuration(startTime)
@@ -1055,7 +1055,7 @@ func QueryHandler(startTime time.Time, w http.ResponseWriter, r *http.Request) e
Deadline: deadline,
LookbackDelta: lookbackDelta,
RoundDigits: getRoundDigits(r),
EnforcedTagFilters: etf,
EnforcedTagFilterss: etfs,
}
result, err := promql.Exec(&ec, query, true)
if err != nil {
@@ -1105,17 +1105,17 @@ func QueryRangeHandler(startTime time.Time, w http.ResponseWriter, r *http.Reque
if err != nil {
return err
}
etf, err := searchutils.GetEnforcedTagFiltersFromRequest(r)
etfs, err := searchutils.GetExtraTagFilters(r)
if err != nil {
return err
}
if err := queryRangeHandler(startTime, w, query, start, end, step, r, ct, etf); err != nil {
if err := queryRangeHandler(startTime, w, query, start, end, step, r, ct, etfs); err != nil {
return fmt.Errorf("error when executing query=%q on the time range (start=%d, end=%d, step=%d): %w", query, start, end, step, err)
}
return nil
}

func queryRangeHandler(startTime time.Time, w http.ResponseWriter, query string, start, end, step int64, r *http.Request, ct int64, etf []storage.TagFilter) error {
func queryRangeHandler(startTime time.Time, w http.ResponseWriter, query string, start, end, step int64, r *http.Request, ct int64, etfs [][]storage.TagFilter) error {
deadline := searchutils.GetDeadlineForQuery(r, startTime)
mayCache := !searchutils.GetBool(r, "nocache")
lookbackDelta, err := getMaxLookback(r)
@@ -1146,7 +1146,7 @@ func queryRangeHandler(startTime time.Time, w http.ResponseWriter, query string,
MayCache: mayCache,
LookbackDelta: lookbackDelta,
RoundDigits: getRoundDigits(r),
EnforcedTagFilters: etf,
EnforcedTagFilterss: etfs,
}
result, err := promql.Exec(&ec, query, false)
if err != nil {
@@ -1254,24 +1254,12 @@ func getMaxLookback(r *http.Request) (int64, error) {
return searchutils.GetDuration(r, "max_lookback", d)
}

func addEnforcedFiltersToTagFilterss(dstTfss [][]storage.TagFilter, enforcedFilters []storage.TagFilter) [][]storage.TagFilter {
if len(dstTfss) == 0 {
return [][]storage.TagFilter{
enforcedFilters,
}
}
for i := range dstTfss {
dstTfss[i] = append(dstTfss[i], enforcedFilters...)
}
return dstTfss
}

func getTagFilterssFromMatches(matches []string) ([][]storage.TagFilter, error) {
tagFilterss := make([][]storage.TagFilter, 0, len(matches))
for _, match := range matches {
tagFilters, err := promql.ParseMetricSelector(match)
tagFilters, err := searchutils.ParseMetricSelector(match)
if err != nil {
return nil, fmt.Errorf("cannot parse %q: %w", match, err)
return nil, fmt.Errorf("cannot parse matches[]=%s: %w", match, err)
}
tagFilterss = append(tagFilterss, tagFilters)
}
@@ -1287,11 +1275,11 @@ func getTagFilterssFromRequest(r *http.Request) ([][]storage.TagFilter, error) {
if err != nil {
return nil, err
}
etf, err := searchutils.GetEnforcedTagFiltersFromRequest(r)
etfs, err := searchutils.GetExtraTagFilters(r)
if err != nil {
return nil, err
}
tagFilterss = addEnforcedFiltersToTagFilterss(tagFilterss, etf)
tagFilterss = searchutils.JoinTagFilterss(tagFilterss, etfs)
return tagFilterss, nil
}
@@ -6,7 +6,6 @@ import (
"testing"

"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
)

func TestRemoveEmptyValuesAndTimeseries(t *testing.T) {
@@ -196,38 +195,3 @@ func TestAdjustLastPoints(t *testing.T) {
},
})
}

// helper for tests
func tfFromKV(k, v string) storage.TagFilter {
return storage.TagFilter{
Key: []byte(k),
Value: []byte(v),
}
}

func Test_addEnforcedFiltersToTagFilterss(t *testing.T) {
f := func(t *testing.T, dstTfss [][]storage.TagFilter, enforcedFilters []storage.TagFilter, want [][]storage.TagFilter) {
t.Helper()
got := addEnforcedFiltersToTagFilterss(dstTfss, enforcedFilters)
if !reflect.DeepEqual(got, want) {
t.Fatalf("unxpected result for addEnforcedFiltersToTagFilterss, \ngot: %v,\n want: %v", want, got)
}
}
f(t, [][]storage.TagFilter{{tfFromKV("label", "value")}},
nil,
[][]storage.TagFilter{{tfFromKV("label", "value")}})

f(t, nil,
[]storage.TagFilter{tfFromKV("ext-label", "ext-value")},
[][]storage.TagFilter{{tfFromKV("ext-label", "ext-value")}})

f(t, [][]storage.TagFilter{
{tfFromKV("l1", "v1")},
{tfFromKV("l2", "v2")},
},
[]storage.TagFilter{tfFromKV("ext-l1", "v2")},
[][]storage.TagFilter{
{tfFromKV("l1", "v1"), tfFromKV("ext-l1", "v2")},
{tfFromKV("l2", "v2"), tfFromKV("ext-l1", "v2")},
})
}
@@ -104,8 +104,8 @@ type EvalConfig struct {
// How many decimal digits after the point to leave in response.
RoundDigits int

// EnforcedTagFilters used for apply additional label filters to query.
EnforcedTagFilters []storage.TagFilter
// EnforcedTagFilterss may contain additional label filters to use in the query.
EnforcedTagFilterss [][]storage.TagFilter

timestamps []int64
timestampsOnce sync.Once
@@ -121,7 +121,7 @@ func newEvalConfig(src *EvalConfig) *EvalConfig {
ec.MayCache = src.MayCache
ec.LookbackDelta = src.LookbackDelta
ec.RoundDigits = src.RoundDigits
ec.EnforcedTagFilters = src.EnforcedTagFilters
ec.EnforcedTagFilterss = src.EnforcedTagFilterss

// do not copy src.timestamps - they must be generated again.
return &ec
@@ -672,16 +672,15 @@ func evalRollupFuncWithMetricExpr(ec *EvalConfig, funcName string, rf rollupFunc
}

// Fetch the remaining part of the result.
tfs := toTagFilters(me.LabelFilters)
// append external filters.
tfs = append(tfs, ec.EnforcedTagFilters...)
tfs := searchutils.ToTagFilters(me.LabelFilters)
tfss := searchutils.JoinTagFilterss([][]storage.TagFilter{tfs}, ec.EnforcedTagFilterss)
minTimestamp := start - maxSilenceInterval
if window > ec.Step {
minTimestamp -= window
} else {
minTimestamp -= ec.Step
}
sq := storage.NewSearchQuery(minTimestamp, ec.End, [][]storage.TagFilter{tfs})
sq := storage.NewSearchQuery(minTimestamp, ec.End, tfss)
rss, err := netstorage.ProcessSearchQuery(sq, true, ec.Deadline)
if err != nil {
return nil, err
@@ -877,26 +876,6 @@ func mulNoOverflow(a, b int64) int64 {
return a * b
}

func toTagFilters(lfs []metricsql.LabelFilter) []storage.TagFilter {
tfs := make([]storage.TagFilter, len(lfs))
for i := range lfs {
toTagFilter(&tfs[i], &lfs[i])
}
return tfs
}

func toTagFilter(dst *storage.TagFilter, src *metricsql.LabelFilter) {
if src.Label != "__name__" {
dst.Key = []byte(src.Label)
} else {
// This is required for storage.Search.
dst.Key = nil
}
dst.Value = []byte(src.Value)
dst.IsRegexp = src.IsRegexp
dst.IsNegative = src.IsNegative
}

func dropStaleNaNs(funcName string, values []float64, timestamps []int64) ([]float64, []int64) {
if *noStaleMarkers || funcName == "default_rollup" {
// Do not drop Prometheus staleness marks (aka stale NaNs) for default_rollup() function,
@@ -1,9 +1,6 @@
package promql

import (
"fmt"

"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
"github.com/VictoriaMetrics/metricsql"
)

@@ -43,21 +40,3 @@ func IsMetricSelectorWithRollup(s string) (childQuery string, window, offset *me
wrappedQuery := me.AppendString(nil)
return string(wrappedQuery), re.Window, re.Offset
}

// ParseMetricSelector parses s containing PromQL metric selector
// and returns the corresponding LabelFilters.
func ParseMetricSelector(s string) ([]storage.TagFilter, error) {
expr, err := parsePromQLWithCache(s)
if err != nil {
return nil, err
}
me, ok := expr.(*metricsql.MetricExpr)
if !ok {
return nil, fmt.Errorf("expecting metricSelector; got %q", expr.AppendString(nil))
}
if len(me.LabelFilters) == 0 {
return nil, fmt.Errorf("labelFilters cannot be empty")
}
tfs := toTagFilters(me.LabelFilters)
return tfs, nil
}
@@ -1,50 +0,0 @@
package promql

import (
"testing"
)

func TestParseMetricSelectorSuccess(t *testing.T) {
f := func(s string) {
t.Helper()
tfs, err := ParseMetricSelector(s)
if err != nil {
t.Fatalf("unexpected error when parsing %q: %s", s, err)
}
if tfs == nil {
t.Fatalf("expecting non-nil tfs when parsing %q", s)
}
}
f("foo")
f(":foo")
f(" :fo:bar.baz")
f(`a{}`)
f(`{foo="bar"}`)
f(`{:f:oo=~"bar.+"}`)
f(`foo {bar != "baz"}`)
f(` foo { bar !~ "^ddd(x+)$", a="ss", __name__="sffd"} `)
f(`(foo)`)
f(`\п\р\и\в\е\т{\ы="111"}`)
}

func TestParseMetricSelectorError(t *testing.T) {
f := func(s string) {
t.Helper()
tfs, err := ParseMetricSelector(s)
if err == nil {
t.Fatalf("expecting non-nil error when parsing %q", s)
}
if tfs != nil {
t.Fatalf("expecting nil tfs when parsing %q", s)
}
}
f("")
f(`{}`)
f(`foo bar`)
f(`foo+bar`)
f(`sum(bar)`)
f(`x{y}`)
f(`x{y+z}`)
f(`foo[5m]`)
f(`foo offset 5m`)
}
@@ -194,7 +194,7 @@ func (rrc *rollupResultCache) Get(ec *EvalConfig, expr metricsql.Expr, window in
bb := bbPool.Get()
defer bbPool.Put(bb)

bb.B = marshalRollupResultCacheKey(bb.B[:0], expr, window, ec.Step, ec.EnforcedTagFilters)
bb.B = marshalRollupResultCacheKey(bb.B[:0], expr, window, ec.Step, ec.EnforcedTagFilterss)
metainfoBuf := rrc.c.Get(nil, bb.B)
if len(metainfoBuf) == 0 {
return nil, ec.Start
@@ -214,7 +214,7 @@ func (rrc *rollupResultCache) Get(ec *EvalConfig, expr metricsql.Expr, window in
if len(compressedResultBuf.B) == 0 {
mi.RemoveKey(key)
metainfoBuf = mi.Marshal(metainfoBuf[:0])
bb.B = marshalRollupResultCacheKey(bb.B[:0], expr, window, ec.Step, ec.EnforcedTagFilters)
bb.B = marshalRollupResultCacheKey(bb.B[:0], expr, window, ec.Step, ec.EnforcedTagFilterss)
rrc.c.Set(bb.B, metainfoBuf)
return nil, ec.Start
}
@@ -317,7 +317,7 @@ func (rrc *rollupResultCache) Put(ec *EvalConfig, expr metricsql.Expr, window in
bb.B = key.Marshal(bb.B[:0])
rrc.c.SetBig(bb.B, compressedResultBuf.B)

bb.B = marshalRollupResultCacheKey(bb.B[:0], expr, window, ec.Step, ec.EnforcedTagFilters)
bb.B = marshalRollupResultCacheKey(bb.B[:0], expr, window, ec.Step, ec.EnforcedTagFilterss)
metainfoBuf := rrc.c.Get(nil, bb.B)
var mi rollupResultCacheMetainfo
if len(metainfoBuf) > 0 {
@@ -347,15 +347,20 @@ var tooBigRollupResults = metrics.NewCounter("vm_too_big_rollup_results_total")
// Increment this value every time the format of the cache changes.
const rollupResultCacheVersion = 8

func marshalRollupResultCacheKey(dst []byte, expr metricsql.Expr, window, step int64, filters []storage.TagFilter) []byte {
func marshalRollupResultCacheKey(dst []byte, expr metricsql.Expr, window, step int64, etfs [][]storage.TagFilter) []byte {
dst = append(dst, rollupResultCacheVersion)
dst = encoding.MarshalUint64(dst, rollupResultCacheKeyPrefix)
dst = encoding.MarshalInt64(dst, window)
dst = encoding.MarshalInt64(dst, step)
dst = expr.AppendString(dst)
for _, f := range filters {
for i, etf := range etfs {
for _, f := range etf {
dst = f.Marshal(dst)
}
if i+1 < len(etfs) {
dst = append(dst, '|')
}
}
return dst
}
@@ -11,6 +11,7 @@ import (
"strings"
"time"

"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/searchutils"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
@@ -218,7 +219,7 @@ func getAbsentTimeseries(ec *EvalConfig, arg metricsql.Expr) []*timeseries {
if !ok {
return rvs
}
tfs := toTagFilters(me.LabelFilters)
tfs := searchutils.ToTagFilters(me.LabelFilters)
for i := range tfs {
tf := &tfs[i]
if len(tf.Key) == 0 {
@@ -9,9 +9,8 @@ import (
"strings"
"time"

"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"

"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
"github.com/VictoriaMetrics/metricsql"
)

@@ -198,15 +197,17 @@ func (d *Deadline) String() string {
return fmt.Sprintf("%.3f seconds (elapsed %.3f seconds); the timeout can be adjusted with `%s` command-line flag", d.timeout.Seconds(), elapsed.Seconds(), d.flagHint)
}

// GetEnforcedTagFiltersFromRequest returns additional filters from request.
func GetEnforcedTagFiltersFromRequest(r *http.Request) ([]storage.TagFilter, error) {
// fast path.
extraLabels := r.Form["extra_label"]
if len(extraLabels) == 0 {
return nil, nil
}
tagFilters := make([]storage.TagFilter, 0, len(extraLabels))
for _, match := range extraLabels {
// GetExtraTagFilters returns additional label filters from request.
//
// Label filters can be present in extra_label and extra_filters[] query args.
// They are combined. For example, the following query args:
// extra_label=t1=v1&extra_label=t2=v2&extra_filters[]={env="prod",team="devops"}&extra_filters={env=~"dev|staging",team!="devops"}
// should be translated to the following filters joined with "or":
// {env="prod",team="devops",t1="v1",t2="v2"}
// {env=~"dev|staging",team!="devops",t1="v1",t2="v2"}
func GetExtraTagFilters(r *http.Request) ([][]storage.TagFilter, error) {
var tagFilters []storage.TagFilter
for _, match := range r.Form["extra_label"] {
tmp := strings.SplitN(match, "=", 2)
if len(tmp) != 2 {
return nil, fmt.Errorf("`extra_label` query arg must have the format `name=value`; got %q", match)
@@ -216,5 +217,79 @@ func GetEnforcedTagFiltersFromRequest(r *http.Request) ([]storage.TagFilter, err
Value: []byte(tmp[1]),
})
}
return tagFilters, nil
extraFilters := r.Form["extra_filters"]
extraFilters = append(extraFilters, r.Form["extra_filters[]"]...)
if len(extraFilters) == 0 {
if len(tagFilters) == 0 {
return nil, nil
}
return [][]storage.TagFilter{tagFilters}, nil
}
var etfs [][]storage.TagFilter
for _, extraFilter := range extraFilters {
tfs, err := ParseMetricSelector(extraFilter)
if err != nil {
return nil, fmt.Errorf("cannot parse extra_filters=%s: %w", extraFilter, err)
}
tfs = append(tfs, tagFilters...)
etfs = append(etfs, tfs)
}
return etfs, nil
}

// JoinTagFilterss adds etfs to every src filter and returns the result.
func JoinTagFilterss(src, etfs [][]storage.TagFilter) [][]storage.TagFilter {
if len(src) == 0 {
return etfs
}
if len(etfs) == 0 {
return src
}
var dst [][]storage.TagFilter
for _, tf := range src {
for _, etf := range etfs {
tfs := append([]storage.TagFilter{}, tf...)
tfs = append(tfs, etf...)
dst = append(dst, tfs)
}
}
return dst
}

// ParseMetricSelector parses s containing PromQL metric selector and returns the corresponding LabelFilters.
func ParseMetricSelector(s string) ([]storage.TagFilter, error) {
expr, err := metricsql.Parse(s)
if err != nil {
return nil, err
}
me, ok := expr.(*metricsql.MetricExpr)
if !ok {
return nil, fmt.Errorf("expecting metricSelector; got %q", expr.AppendString(nil))
}
if len(me.LabelFilters) == 0 {
return nil, fmt.Errorf("labelFilters cannot be empty")
}
tfs := ToTagFilters(me.LabelFilters)
return tfs, nil
}

// ToTagFilters converts lfs to a slice of storage.TagFilter
func ToTagFilters(lfs []metricsql.LabelFilter) []storage.TagFilter {
tfs := make([]storage.TagFilter, len(lfs))
for i := range lfs {
toTagFilter(&tfs[i], &lfs[i])
}
return tfs
}

func toTagFilter(dst *storage.TagFilter, src *metricsql.LabelFilter) {
if src.Label != "__name__" {
dst.Key = []byte(src.Label)
} else {
// This is required for storage.Search.
dst.Key = nil
}
dst.Value = []byte(src.Value)
dst.IsRegexp = src.IsRegexp
dst.IsNegative = src.IsNegative
}
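The doc comment on `GetExtraTagFilters` above describes how `extra_label` pairs are appended to every `extra_filters[]` selector and how the resulting selectors are then OR-ed together. The following standalone sketch (not part of the commit) models that rule with plain strings instead of `storage.TagFilter`, so it can be run on its own; `joinFilters` is a hypothetical helper written only for illustration.

```go
package main

import (
	"fmt"
	"strings"
)

// joinFilters appends extraLabels to every selector in extraFilters,
// mirroring the combination rule documented for GetExtraTagFilters.
func joinFilters(extraFilters, extraLabels []string) []string {
	if len(extraFilters) == 0 {
		if len(extraLabels) == 0 {
			return nil
		}
		return []string{"{" + strings.Join(extraLabels, ",") + "}"}
	}
	result := make([]string, 0, len(extraFilters))
	for _, f := range extraFilters {
		f = strings.TrimSuffix(f, "}")
		result = append(result, f+","+strings.Join(extraLabels, ",")+"}")
	}
	return result
}

func main() {
	extraLabels := []string{`t1="v1"`, `t2="v2"`}
	extraFilters := []string{`{env="prod",team="devops"}`, `{env=~"dev|staging",team!="devops"}`}
	for _, s := range joinFilters(extraFilters, extraLabels) {
		fmt.Println(s)
	}
	// Prints (the resulting selectors are effectively joined with "or" by the query engine):
	// {env="prod",team="devops",t1="v1",t2="v2"}
	// {env=~"dev|staging",team!="devops",t1="v1",t2="v2"}
}
```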

@@ -5,6 +5,7 @@ import (
"net/http"
"net/url"
"reflect"
"strconv"
"testing"

"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
@@ -80,47 +81,238 @@ func TestGetTimeError(t *testing.T) {
f("292277025-08-18T07:12:54.999999998Z")
}

// helper for tests
func tfFromKV(k, v string) storage.TagFilter {
return storage.TagFilter{
Key: []byte(k),
Value: []byte(v),
func TestGetExtraTagFilters(t *testing.T) {
httpReqWithForm := func(qs string) *http.Request {
q, err := url.ParseQuery(qs)
if err != nil {
t.Fatalf("unexpected error: %s", err)
}
}

func TestGetEnforcedTagFiltersFromRequest(t *testing.T) {
httpReqWithForm := func(tfs []string) *http.Request {
return &http.Request{
Form: map[string][]string{
"extra_label": tfs,
},
Form: q,
}
}
f := func(t *testing.T, r *http.Request, want []storage.TagFilter, wantErr bool) {
f := func(t *testing.T, r *http.Request, want []string, wantErr bool) {
t.Helper()
got, err := GetEnforcedTagFiltersFromRequest(r)
result, err := GetExtraTagFilters(r)
if (err != nil) != wantErr {
t.Fatalf("unexpected error: %v", err)
}
got := tagFilterssToStrings(result)
if !reflect.DeepEqual(got, want) {
t.Fatalf("unxpected result for getEnforcedTagFiltersFromRequest, \ngot: %v,\n want: %v", want, got)
t.Fatalf("unxpected result for GetExtraTagFilters\ngot: %s\nwant: %s", got, want)
}
}

f(t, httpReqWithForm([]string{"label=value"}),
[]storage.TagFilter{
tfFromKV("label", "value"),
},
false)

f(t, httpReqWithForm([]string{"job=vmagent", "dc=gce"}),
[]storage.TagFilter{tfFromKV("job", "vmagent"), tfFromKV("dc", "gce")},
f(t, httpReqWithForm("extra_label=label=value"),
[]string{`{label="value"}`},
false,
)
f(t, httpReqWithForm([]string{"bad_filter"}),
f(t, httpReqWithForm("extra_label=job=vmagent&extra_label=dc=gce"),
[]string{`{job="vmagent",dc="gce"}`},
false,
)
f(t, httpReqWithForm(`extra_filters={foo="bar"}`),
[]string{`{foo="bar"}`},
false,
)
f(t, httpReqWithForm(`extra_filters={foo="bar"}&extra_filters[]={baz!~"aa",x=~"y"}`),
[]string{
`{foo="bar"}`,
`{baz!~"aa",x=~"y"}`,
},
false,
)
f(t, httpReqWithForm(`extra_label=job=vmagent&extra_label=dc=gce&extra_filters={foo="bar"}`),
[]string{`{foo="bar",job="vmagent",dc="gce"}`},
false,
)
f(t, httpReqWithForm(`extra_label=job=vmagent&extra_label=dc=gce&extra_filters[]={foo="bar"}&extra_filters[]={x=~"y|z",a="b"}`),
[]string{
`{foo="bar",job="vmagent",dc="gce"}`,
`{x=~"y|z",a="b",job="vmagent",dc="gce"}`,
},
false,
)
f(t, httpReqWithForm("extra_label=bad_filter"),
nil,
true,
)
f(t, &http.Request{},
nil, false)
f(t, httpReqWithForm(`extra_filters={bad_filter}`),
nil,
true,
)
f(t, httpReqWithForm(`extra_filters[]={bad_filter}`),
nil,
true,
)
f(t, httpReqWithForm(""),
nil,
false,
)
}

func TestParseMetricSelectorSuccess(t *testing.T) {
f := func(s string) {
t.Helper()
tfs, err := ParseMetricSelector(s)
if err != nil {
t.Fatalf("unexpected error when parsing %q: %s", s, err)
}
if tfs == nil {
t.Fatalf("expecting non-nil tfs when parsing %q", s)
}
}
f("foo")
f(":foo")
f(" :fo:bar.baz")
f(`a{}`)
f(`{foo="bar"}`)
f(`{:f:oo=~"bar.+"}`)
f(`foo {bar != "baz"}`)
f(` foo { bar !~ "^ddd(x+)$", a="ss", __name__="sffd"} `)
f(`(foo)`)
f(`\п\р\и\в\е\т{\ы="111"}`)
}

func TestParseMetricSelectorError(t *testing.T) {
f := func(s string) {
t.Helper()
tfs, err := ParseMetricSelector(s)
if err == nil {
t.Fatalf("expecting non-nil error when parsing %q", s)
}
if tfs != nil {
t.Fatalf("expecting nil tfs when parsing %q", s)
}
}
f("")
f(`{}`)
f(`foo bar`)
f(`foo+bar`)
f(`sum(bar)`)
f(`x{y}`)
f(`x{y+z}`)
f(`foo[5m]`)
f(`foo offset 5m`)
}

func TestJoinTagFilterss(t *testing.T) {
f := func(t *testing.T, src, etfs [][]storage.TagFilter, want []string) {
t.Helper()
result := JoinTagFilterss(src, etfs)
got := tagFilterssToStrings(result)
if !reflect.DeepEqual(got, want) {
t.Fatalf("unxpected result for JoinTagFilterss\ngot: %s\nwant: %v", got, want)
}
}
// Single tag filter
f(t, [][]storage.TagFilter{
mustParseMetricSelector(`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4"}`),
}, nil, []string{
`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4"}`,
})
// Miltiple tag filters
f(t, [][]storage.TagFilter{
mustParseMetricSelector(`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4"}`),
mustParseMetricSelector(`{k5=~"v5"}`),
}, nil, []string{
`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4"}`,
`{k5=~"v5"}`,
})
// Single extra filter
f(t, nil, [][]storage.TagFilter{
mustParseMetricSelector(`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4"}`),
}, []string{
`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4"}`,
})
// Multiple extra filters
f(t, nil, [][]storage.TagFilter{
mustParseMetricSelector(`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4"}`),
mustParseMetricSelector(`{k5=~"v5"}`),
}, []string{
`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4"}`,
`{k5=~"v5"}`,
})
// Single tag filter and a single extra filter
f(t, [][]storage.TagFilter{
mustParseMetricSelector(`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4"}`),
}, [][]storage.TagFilter{
mustParseMetricSelector(`{k5=~"v5"}`),
}, []string{
`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4",k5=~"v5"}`,
})
// Multiple tag filters and a single extra filter
f(t, [][]storage.TagFilter{
mustParseMetricSelector(`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4"}`),
mustParseMetricSelector(`{k5=~"v5"}`),
}, [][]storage.TagFilter{
mustParseMetricSelector(`{k6=~"v6"}`),
}, []string{
`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4",k6=~"v6"}`,
`{k5=~"v5",k6=~"v6"}`,
})
// Single tag filter and multiple extra filters
f(t, [][]storage.TagFilter{
mustParseMetricSelector(`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4"}`),
}, [][]storage.TagFilter{
mustParseMetricSelector(`{k5=~"v5"}`),
mustParseMetricSelector(`{k6=~"v6"}`),
}, []string{
`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4",k5=~"v5"}`,
`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4",k6=~"v6"}`,
})
// Multiple tag filters and multiple extra filters
f(t, [][]storage.TagFilter{
mustParseMetricSelector(`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4"}`),
mustParseMetricSelector(`{k5=~"v5"}`),
}, [][]storage.TagFilter{
mustParseMetricSelector(`{k6=~"v6"}`),
mustParseMetricSelector(`{k7=~"v7"}`),
}, []string{
`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4",k6=~"v6"}`,
`{k1="v1",k2=~"v2",k3!="v3",k4!~"v4",k7=~"v7"}`,
`{k5=~"v5",k6=~"v6"}`,
`{k5=~"v5",k7=~"v7"}`,
})
}

func mustParseMetricSelector(s string) []storage.TagFilter {
tf, err := ParseMetricSelector(s)
if err != nil {
panic(fmt.Errorf("cannot parse %q: %w", s, err))
}
return tf
}

func tagFilterssToStrings(tfss [][]storage.TagFilter) []string {
var a []string
for _, tfs := range tfss {
a = append(a, tagFiltersToString(tfs))
}
return a
}

func tagFiltersToString(tfs []storage.TagFilter) string {
b := []byte("{")
for i, tf := range tfs {
b = append(b, tf.Key...)
if tf.IsNegative {
if tf.IsRegexp {
b = append(b, "!~"...)
} else {
b = append(b, "!="...)
}
} else {
if tf.IsRegexp {
b = append(b, "=~"...)
} else {
b = append(b, "="...)
}
}
b = strconv.AppendQuote(b, string(tf.Value))
if i+1 < len(tfs) {
b = append(b, ',')
}
}
b = append(b, '}')
return string(b)
}
@@ -6,6 +6,7 @@ sort: 15

## tip

* FEATURE: accept optional `extra_filters[]=series_selector` query args at Prometheus query APIs additionally to `extra_label` query args. This allows enforcing additional filters for all the Prometheus query APIs by using [vmgateway](https://docs.victoriametrics.com/vmgateway.html) or [vmauth](https://docs.victoriametrics.com/vmauth.html). See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1863).
* FEATURE: [vmauth](https://docs.victoriametrics.com/vmauth.html): allow specifying `http` and `https` urls in `-auth.config` command-line flag. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1898). Thanks for @TFM93 .
* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): allow specifying `http` and `https` urls in the following command-line flags: `-promscrape.config`, `-remoteWrite.relabelConfig` and `-remoteWrite.urlRelabelConfig`.
* FEATURE: vminsert: allow specifying `http` and `https` urls in `-relabelConfig` command-line flag.
@@ -521,9 +521,10 @@ All the Prometheus querying API handlers can be prepended with `/prometheus` pre

### Prometheus querying API enhancements

VictoriaMetrics accepts optional `extra_label=<label_name>=<label_value>` query arg, which can be used for enforcing additional label filters for queries. For example,
`/api/v1/query_range?extra_label=user_id=123&query=<query>` would automatically add `{user_id="123"}` label filter to the given `<query>`. This functionality can be used
for limiting the scope of time series visible to the given tenant. It is expected that the `extra_label` query arg is automatically set by auth proxy sitting
in front of VictoriaMetrics. See [vmauth](https://docs.victoriametrics.com/vmauth.html) and [vmgateway](https://docs.victoriametrics.com/vmgateway.html) as examples of such proxies.
`/api/v1/query_range?extra_label=user_id=123&extra_label=group_id=456&query=<query>` would automatically add `{user_id="123",group_id="456"}` label filters to the given `<query>`. This functionality can be used for limiting the scope of time series visible to the given tenant. It is expected that the `extra_label` query args are automatically set by auth proxy sitting in front of VictoriaMetrics. See [vmauth](https://docs.victoriametrics.com/vmauth.html) and [vmgateway](https://docs.victoriametrics.com/vmgateway.html) as examples of such proxies.

VictoriaMetrics accepts optional `extra_filters[]=series_selector` query arg, which can be used for enforcing arbitrary label filters for queries. For example,
`/api/v1/query_range?extra_filters[]={env=~"prod|staging",user="xyz"}&query=<query>` would automatically add `{env=~"prod|staging",user="xyz"}` label filters to the given `<query>`. This functionality can be used for limiting the scope of time series visible to the given tenant. It is expected that the `extra_filters[]` query args are automatically set by auth proxy sitting in front of VictoriaMetrics. See [vmauth](https://docs.victoriametrics.com/vmauth.html) and [vmgateway](https://docs.victoriametrics.com/vmgateway.html) as examples of such proxies.

VictoriaMetrics accepts relative times in `time`, `start` and `end` query args additionally to unix timestamps and [RFC3339](https://www.ietf.org/rfc/rfc3339.txt).
For example, the following query would return data for the last 30 minutes: `/api/v1/query_range?start=-30m&query=...`.
@@ -560,8 +561,8 @@ VictoriaMetrics supports the following Graphite APIs, which are needed for [Grap

All the Graphite handlers can be pre-pended with `/graphite` prefix. For example, both `/graphite/metrics/find` and `/metrics/find` should work.

VictoriaMetrics accepts optional `extra_label=<label_name>=<label_value>` query arg for all the Graphite APIs. This arg can be used for limiting the scope of time series
visible to the given tenant. It is expected that the `extra_label` query arg is automatically set by auth proxy sitting in front of VictoriaMetrics.
VictoriaMetrics accepts optional query args: `extra_label=<label_name>=<label_value>` and `extra_filters[]=series_selector` query args for all the Graphite APIs. These args can be used for limiting the scope of time series visible to the given tenant. It is expected that the `extra_label` query arg is automatically set by auth proxy sitting in front of VictoriaMetrics. See [vmauth](https://docs.victoriametrics.com/vmauth.html) and [vmgateway](https://docs.victoriametrics.com/vmgateway.html) as examples of such proxies.

[Contact us](mailto:sales@victoriametrics.com) if you need assistance with such a proxy.

VictoriaMetrics supports `__graphite__` pseudo-label for filtering time series with Graphite-compatible filters in [MetricsQL](https://docs.victoriametrics.com/MetricsQL.html).