mirror of
https://github.com/VictoriaMetrics/VictoriaMetrics.git
synced 2024-12-12 12:46:23 +01:00
vmalert: support extra_filter_labels
setting per-group (#1319)
The new setting `extra_filter_labels` may be assigned to a group. If it is, then all rules within the group will automatically filter for the configured labels. The feature is well described at https://docs.victoriametrics.com/#prometheus-querying-api-enhancements. The new setting is compatible only with the VM datasource.
This commit is contained in:
parent
71ff7ee18d
commit
beee24ecee
@ -85,6 +85,12 @@ name: <string>
|
|||||||
# By default "prometheus" rule type is used.
|
# By default "prometheus" rule type is used.
|
||||||
[ type: <string> ]
|
[ type: <string> ]
|
||||||
|
|
||||||
|
# Optional list of label filters applied to every rule's
|
||||||
|
# request within a group. Is compatible only with VM datasource.
|
||||||
|
# See more details at https://docs.victoriametrics.com/#prometheus-querying-api-enhancements
|
||||||
|
extra_filter_labels:
|
||||||
|
[ <labelname>: <labelvalue> ... ]
|
||||||
|
|
||||||
rules:
|
rules:
|
||||||
[ - <rule> ... ]
|
[ - <rule> ... ]
|
||||||
```
|
```
|
||||||
|
@ -65,6 +65,7 @@ func newAlertingRule(qb datasource.QuerierBuilder, group *Group, cfg config.Rule
|
|||||||
q: qb.BuildWithParams(datasource.QuerierParams{
|
q: qb.BuildWithParams(datasource.QuerierParams{
|
||||||
DataSourceType: &cfg.Type,
|
DataSourceType: &cfg.Type,
|
||||||
EvaluationInterval: group.Interval,
|
EvaluationInterval: group.Interval,
|
||||||
|
ExtraLabels: group.ExtraFilterLabels,
|
||||||
}),
|
}),
|
||||||
alerts: make(map[uint64]*notifier.Alert),
|
alerts: make(map[uint64]*notifier.Alert),
|
||||||
metrics: &alertingRuleMetrics{},
|
metrics: &alertingRuleMetrics{},
|
||||||
@ -250,6 +251,7 @@ func (ar *AlertingRule) UpdateWith(r Rule) error {
|
|||||||
ar.For = nr.For
|
ar.For = nr.For
|
||||||
ar.Labels = nr.Labels
|
ar.Labels = nr.Labels
|
||||||
ar.Annotations = nr.Annotations
|
ar.Annotations = nr.Annotations
|
||||||
|
ar.q = nr.q
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -29,6 +29,10 @@ type Group struct {
|
|||||||
Interval time.Duration `yaml:"interval,omitempty"`
|
Interval time.Duration `yaml:"interval,omitempty"`
|
||||||
Rules []Rule `yaml:"rules"`
|
Rules []Rule `yaml:"rules"`
|
||||||
Concurrency int `yaml:"concurrency"`
|
Concurrency int `yaml:"concurrency"`
|
||||||
|
// ExtraFilterLabels is a list of label filters applied to every rule
|
||||||
|
// request within a group. Is compatible only with VM datasources.
|
||||||
|
// See https://docs.victoriametrics.com/#prometheus-querying-api-enhancements
|
||||||
|
ExtraFilterLabels map[string]string `yaml:"extra_filter_labels"`
|
||||||
// Checksum stores the hash of yaml definition for this group.
|
// Checksum stores the hash of yaml definition for this group.
|
||||||
// May be used to detect any changes like rules re-ordering etc.
|
// May be used to detect any changes like rules re-ordering etc.
|
||||||
Checksum string
|
Checksum string
|
||||||
|
@ -2,6 +2,8 @@ groups:
|
|||||||
- name: TestGroup
|
- name: TestGroup
|
||||||
interval: 2s
|
interval: 2s
|
||||||
concurrency: 2
|
concurrency: 2
|
||||||
|
extra_filter_labels:
|
||||||
|
job: victoriametrics
|
||||||
rules:
|
rules:
|
||||||
- alert: Conns
|
- alert: Conns
|
||||||
expr: sum(vm_tcplistener_conns) by(instance) > 1
|
expr: sum(vm_tcplistener_conns) by(instance) > 1
|
||||||
|
@ -85,6 +85,7 @@ type VMStorage struct {
|
|||||||
|
|
||||||
dataSourceType Type
|
dataSourceType Type
|
||||||
evaluationInterval time.Duration
|
evaluationInterval time.Duration
|
||||||
|
extraLabels []string
|
||||||
}
|
}
|
||||||
|
|
||||||
const queryPath = "/api/v1/query"
|
const queryPath = "/api/v1/query"
|
||||||
@ -97,6 +98,8 @@ const graphitePrefix = "/graphite"
|
|||||||
type QuerierParams struct {
|
type QuerierParams struct {
|
||||||
DataSourceType *Type
|
DataSourceType *Type
|
||||||
EvaluationInterval time.Duration
|
EvaluationInterval time.Duration
|
||||||
|
// see https://docs.victoriametrics.com/#prometheus-querying-api-enhancements
|
||||||
|
ExtraLabels map[string]string
|
||||||
}
|
}
|
||||||
|
|
||||||
// Clone makes clone of VMStorage, shares http client.
|
// Clone makes clone of VMStorage, shares http client.
|
||||||
@ -119,6 +122,9 @@ func (s *VMStorage) ApplyParams(params QuerierParams) *VMStorage {
|
|||||||
s.dataSourceType = *params.DataSourceType
|
s.dataSourceType = *params.DataSourceType
|
||||||
}
|
}
|
||||||
s.evaluationInterval = params.EvaluationInterval
|
s.evaluationInterval = params.EvaluationInterval
|
||||||
|
for k, v := range params.ExtraLabels {
|
||||||
|
s.extraLabels = append(s.extraLabels, fmt.Sprintf("%s=%s", k, v))
|
||||||
|
}
|
||||||
return s
|
return s
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -222,6 +228,9 @@ func (s *VMStorage) setPrometheusReqParams(r *http.Request, query string, timest
|
|||||||
if s.roundDigits != "" {
|
if s.roundDigits != "" {
|
||||||
q.Set("round_digits", s.roundDigits)
|
q.Set("round_digits", s.roundDigits)
|
||||||
}
|
}
|
||||||
|
for _, l := range s.extraLabels {
|
||||||
|
q.Add("extra_label", l)
|
||||||
|
}
|
||||||
r.URL.RawQuery = q.Encode()
|
r.URL.RawQuery = q.Encode()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -253,6 +253,19 @@ func TestPrepareReq(t *testing.T) {
|
|||||||
checkEqualString(t, exp, r.URL.RawQuery)
|
checkEqualString(t, exp, r.URL.RawQuery)
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
"extra labels",
|
||||||
|
&VMStorage{
|
||||||
|
extraLabels: []string{
|
||||||
|
"env=prod",
|
||||||
|
"query=es=cape",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
func(t *testing.T, r *http.Request) {
|
||||||
|
exp := fmt.Sprintf("extra_label=env%%3Dprod&extra_label=query%%3Des%%3Dcape&query=%s&time=%d", query, timestamp.Unix())
|
||||||
|
checkEqualString(t, exp, r.URL.RawQuery)
|
||||||
|
},
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, tc := range testCases {
|
for _, tc := range testCases {
|
||||||
|
@ -18,14 +18,15 @@ import (
|
|||||||
|
|
||||||
// Group is an entity for grouping rules
|
// Group is an entity for grouping rules
|
||||||
type Group struct {
|
type Group struct {
|
||||||
mu sync.RWMutex
|
mu sync.RWMutex
|
||||||
Name string
|
Name string
|
||||||
File string
|
File string
|
||||||
Rules []Rule
|
Rules []Rule
|
||||||
Type datasource.Type
|
Type datasource.Type
|
||||||
Interval time.Duration
|
Interval time.Duration
|
||||||
Concurrency int
|
Concurrency int
|
||||||
Checksum string
|
Checksum string
|
||||||
|
ExtraFilterLabels map[string]string
|
||||||
|
|
||||||
doneCh chan struct{}
|
doneCh chan struct{}
|
||||||
finishedCh chan struct{}
|
finishedCh chan struct{}
|
||||||
@ -51,15 +52,17 @@ func newGroupMetrics(name, file string) *groupMetrics {
|
|||||||
|
|
||||||
func newGroup(cfg config.Group, qb datasource.QuerierBuilder, defaultInterval time.Duration, labels map[string]string) *Group {
|
func newGroup(cfg config.Group, qb datasource.QuerierBuilder, defaultInterval time.Duration, labels map[string]string) *Group {
|
||||||
g := &Group{
|
g := &Group{
|
||||||
Type: cfg.Type,
|
Type: cfg.Type,
|
||||||
Name: cfg.Name,
|
Name: cfg.Name,
|
||||||
File: cfg.File,
|
File: cfg.File,
|
||||||
Interval: cfg.Interval,
|
Interval: cfg.Interval,
|
||||||
Concurrency: cfg.Concurrency,
|
Concurrency: cfg.Concurrency,
|
||||||
Checksum: cfg.Checksum,
|
Checksum: cfg.Checksum,
|
||||||
doneCh: make(chan struct{}),
|
ExtraFilterLabels: cfg.ExtraFilterLabels,
|
||||||
finishedCh: make(chan struct{}),
|
|
||||||
updateCh: make(chan *Group),
|
doneCh: make(chan struct{}),
|
||||||
|
finishedCh: make(chan struct{}),
|
||||||
|
updateCh: make(chan *Group),
|
||||||
}
|
}
|
||||||
g.metrics = newGroupMetrics(g.Name, g.File)
|
g.metrics = newGroupMetrics(g.Name, g.File)
|
||||||
if g.Interval == 0 {
|
if g.Interval == 0 {
|
||||||
@ -115,6 +118,8 @@ func (g *Group) Restore(ctx context.Context, qb datasource.QuerierBuilder, lookb
|
|||||||
if rr.For < 1 {
|
if rr.For < 1 {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
// ignore g.ExtraFilterLabels on purpose, so it
|
||||||
|
// won't affect the restore procedure.
|
||||||
q := qb.BuildWithParams(datasource.QuerierParams{})
|
q := qb.BuildWithParams(datasource.QuerierParams{})
|
||||||
if err := rr.Restore(ctx, q, lookback, labels); err != nil {
|
if err := rr.Restore(ctx, q, lookback, labels); err != nil {
|
||||||
return fmt.Errorf("error while restoring rule %q: %w", rule, err)
|
return fmt.Errorf("error while restoring rule %q: %w", rule, err)
|
||||||
@ -163,6 +168,7 @@ func (g *Group) updateWith(newGroup *Group) error {
|
|||||||
}
|
}
|
||||||
g.Type = newGroup.Type
|
g.Type = newGroup.Type
|
||||||
g.Concurrency = newGroup.Concurrency
|
g.Concurrency = newGroup.Concurrency
|
||||||
|
g.ExtraFilterLabels = newGroup.ExtraFilterLabels
|
||||||
g.Checksum = newGroup.Checksum
|
g.Checksum = newGroup.Checksum
|
||||||
g.Rules = newRules
|
g.Rules = newRules
|
||||||
return nil
|
return nil
|
||||||
|
@ -147,12 +147,14 @@ func (g *Group) toAPI() APIGroup {
|
|||||||
|
|
||||||
ag := APIGroup{
|
ag := APIGroup{
|
||||||
// encode as string to avoid rounding
|
// encode as string to avoid rounding
|
||||||
ID: fmt.Sprintf("%d", g.ID()),
|
ID: fmt.Sprintf("%d", g.ID()),
|
||||||
Name: g.Name,
|
|
||||||
Type: g.Type.String(),
|
Name: g.Name,
|
||||||
File: g.File,
|
Type: g.Type.String(),
|
||||||
Interval: g.Interval.String(),
|
File: g.File,
|
||||||
Concurrency: g.Concurrency,
|
Interval: g.Interval.String(),
|
||||||
|
Concurrency: g.Concurrency,
|
||||||
|
ExtraFilterLabels: g.ExtraFilterLabels,
|
||||||
}
|
}
|
||||||
for _, r := range g.Rules {
|
for _, r := range g.Rules {
|
||||||
switch v := r.(type) {
|
switch v := r.(type) {
|
||||||
|
@ -66,6 +66,7 @@ func newRecordingRule(qb datasource.QuerierBuilder, group *Group, cfg config.Rul
|
|||||||
q: qb.BuildWithParams(datasource.QuerierParams{
|
q: qb.BuildWithParams(datasource.QuerierParams{
|
||||||
DataSourceType: &cfg.Type,
|
DataSourceType: &cfg.Type,
|
||||||
EvaluationInterval: group.Interval,
|
EvaluationInterval: group.Interval,
|
||||||
|
ExtraLabels: group.ExtraFilterLabels,
|
||||||
}),
|
}),
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -151,8 +152,6 @@ func (rr *RecordingRule) toTimeSeries(m datasource.Metric, timestamp time.Time)
|
|||||||
}
|
}
|
||||||
|
|
||||||
// UpdateWith copies all significant fields.
|
// UpdateWith copies all significant fields.
|
||||||
// alerts state isn't copied since
|
|
||||||
// it should be updated in next 2 Execs
|
|
||||||
func (rr *RecordingRule) UpdateWith(r Rule) error {
|
func (rr *RecordingRule) UpdateWith(r Rule) error {
|
||||||
nr, ok := r.(*RecordingRule)
|
nr, ok := r.(*RecordingRule)
|
||||||
if !ok {
|
if !ok {
|
||||||
@ -160,6 +159,7 @@ func (rr *RecordingRule) UpdateWith(r Rule) error {
|
|||||||
}
|
}
|
||||||
rr.Expr = nr.Expr
|
rr.Expr = nr.Expr
|
||||||
rr.Labels = nr.Labels
|
rr.Labels = nr.Labels
|
||||||
|
rr.q = nr.q
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -20,14 +20,15 @@ type APIAlert struct {
|
|||||||
|
|
||||||
// APIGroup represents Group for WEB view
|
// APIGroup represents Group for WEB view
|
||||||
type APIGroup struct {
|
type APIGroup struct {
|
||||||
Name string `json:"name"`
|
Name string `json:"name"`
|
||||||
Type string `json:"type"`
|
Type string `json:"type"`
|
||||||
ID string `json:"id"`
|
ID string `json:"id"`
|
||||||
File string `json:"file"`
|
File string `json:"file"`
|
||||||
Interval string `json:"interval"`
|
Interval string `json:"interval"`
|
||||||
Concurrency int `json:"concurrency"`
|
Concurrency int `json:"concurrency"`
|
||||||
AlertingRules []APIAlertingRule `json:"alerting_rules"`
|
ExtraFilterLabels map[string]string `json:"extra_filter_labels"`
|
||||||
RecordingRules []APIRecordingRule `json:"recording_rules"`
|
AlertingRules []APIAlertingRule `json:"alerting_rules"`
|
||||||
|
RecordingRules []APIRecordingRule `json:"recording_rules"`
|
||||||
}
|
}
|
||||||
|
|
||||||
// APIAlertingRule represents AlertingRule for WEB view
|
// APIAlertingRule represents AlertingRule for WEB view
|
||||||
|
Loading…
Reference in New Issue
Block a user