package main

import (
	"context"
	"fmt"
	"reflect"
	"sort"
	"testing"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/config"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmalert/notifier"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
)

func init() {
	// Disable rand sleep on group start during tests in order to speed up test execution.
	// Rand sleep is needed only in prod code.
	skipRandSleepOnGroupStart = true
}
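
// TestUpdateWith covers adding, updating, replacing and removing rules
// when a group is reconciled via g.updateWith.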
func TestUpdateWith(t *testing.T) {
	testCases := []struct {
		name         string
		currentRules []config.Rule
		newRules     []config.Rule
	}{
		{
			"new rule",
			nil,
			[]config.Rule{{Alert: "bar"}},
		},
		{
			"update alerting rule",
			[]config.Rule{{
				Alert: "foo",
				Expr:  "up > 0",
				For:   promutils.NewDuration(time.Second),
				Labels: map[string]string{
					"bar": "baz",
				},
				Annotations: map[string]string{
					"summary":     "{{ $value|humanize }}",
					"description": "{{$labels}}",
				},
			}},
			[]config.Rule{{
				Alert: "foo",
				Expr:  "up > 10",
				For:   promutils.NewDuration(time.Second),
				Labels: map[string]string{
					"baz": "bar",
				},
				Annotations: map[string]string{
					"summary": "none",
				},
			}},
		},
		{
			"update recording rule",
			[]config.Rule{{
				Record: "foo",
				Expr:   "max(up)",
				Labels: map[string]string{
					"bar": "baz",
				},
			}},
			[]config.Rule{{
				Record: "foo",
				Expr:   "min(up)",
				Labels: map[string]string{
					"baz": "bar",
				},
			}},
		},
		{
			"empty rule",
			[]config.Rule{{Alert: "foo"}, {Record: "bar"}},
			nil,
		},
		{
			"multiple rules",
			[]config.Rule{
				{Alert: "bar"},
				{Alert: "baz"},
				{Alert: "foo"},
			},
			[]config.Rule{
				{Alert: "baz"},
				{Record: "foo"},
			},
		},
		{
			"replace rule",
			[]config.Rule{{Alert: "foo1"}},
			[]config.Rule{{Alert: "foo2"}},
		},
		{
			"replace multiple rules",
			[]config.Rule{
				{Alert: "foo1"},
				{Record: "foo2"},
				{Alert: "foo3"},
			},
			[]config.Rule{
				{Alert: "foo3"},
				{Alert: "foo4"},
				{Record: "foo5"},
			},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			g := &Group{Name: "test"}
			qb := &fakeQuerier{}
			for _, r := range tc.currentRules {
				r.ID = config.HashRule(r)
				g.Rules = append(g.Rules, g.newRule(qb, r))
			}

			ng := &Group{Name: "test"}
			for _, r := range tc.newRules {
				r.ID = config.HashRule(r)
				ng.Rules = append(ng.Rules, ng.newRule(qb, r))
			}

			err := g.updateWith(ng)
			if err != nil {
				t.Fatal(err)
			}

			if len(g.Rules) != len(tc.newRules) {
				t.Fatalf("expected to have %d rules; got: %d",
					len(tc.newRules), len(g.Rules))
			}
			sort.Slice(g.Rules, func(i, j int) bool {
				return g.Rules[i].ID() < g.Rules[j].ID()
			})
			sort.Slice(ng.Rules, func(i, j int) bool {
				return ng.Rules[i].ID() < ng.Rules[j].ID()
			})
			for i, r := range g.Rules {
				got, want := r, ng.Rules[i]
				if got.ID() != want.ID() {
					t.Fatalf("expected to have rule %q; got %q", want, got)
				}
				if err := compareRules(t, got, want); err != nil {
					t.Fatalf("comparison error: %s", err)
				}
			}
		})
	}
}
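
// TestGroupStart runs a group against fake querier and notifier implementations
// and verifies the alerts produced over several evaluation cycles, including the
// transition of an alert to the inactive state once its series disappears.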
func TestGroupStart(t *testing.T) {
	// TODO: make parsing from string instead of file
	groups, err := config.Parse([]string{"config/testdata/rules1-good.rules"}, true, true)
	if err != nil {
		t.Fatalf("failed to parse rules: %s", err)
	}

	fs := &fakeQuerier{}
	fn := &fakeNotifier{}

	const evalInterval = time.Millisecond
	g := newGroup(groups[0], fs, evalInterval, map[string]string{"cluster": "east-1"})
	g.Concurrency = 2

	const inst1, inst2, job = "foo", "bar", "baz"
	m1 := metricWithLabels(t, "instance", inst1, "job", job)
	m2 := metricWithLabels(t, "instance", inst2, "job", job)

	r := g.Rules[0].(*AlertingRule)
	alert1, err := r.newAlert(m1, nil, time.Now(), nil)
	if err != nil {
		t.Fatalf("failed to create alert: %s", err)
	}
	alert1.State = notifier.StateFiring
	// add external label
	alert1.Labels["cluster"] = "east-1"
	// add rule labels - see config/testdata/rules1-good.rules
	alert1.Labels["label"] = "bar"
	alert1.Labels["host"] = inst1
	// add service labels
	alert1.Labels[alertNameLabel] = alert1.Name
	alert1.Labels[alertGroupNameLabel] = g.Name
	alert1.ID = hash(alert1.Labels)

	alert2, err := r.newAlert(m2, nil, time.Now(), nil)
	if err != nil {
		t.Fatalf("failed to create alert: %s", err)
	}
	alert2.State = notifier.StateFiring
	// add external label
	alert2.Labels["cluster"] = "east-1"
	// add rule labels - see config/testdata/rules1-good.rules
	alert2.Labels["label"] = "bar"
	alert2.Labels["host"] = inst2
	// add service labels
	alert2.Labels[alertNameLabel] = alert2.Name
	alert2.Labels[alertGroupNameLabel] = g.Name
	alert2.ID = hash(alert2.Labels)

	finished := make(chan struct{})
	fs.add(m1)
	fs.add(m2)
	go func() {
		g.start(context.Background(), func() []notifier.Notifier { return []notifier.Notifier{fn} }, nil)
		close(finished)
	}()

	// wait for multiple evals
	time.Sleep(20 * evalInterval)

	gotAlerts := fn.getAlerts()
	expectedAlerts := []notifier.Alert{*alert1, *alert2}
	compareAlerts(t, expectedAlerts, gotAlerts)

	gotAlertsNum := fn.getCounter()
	if gotAlertsNum < len(expectedAlerts)*2 {
		t.Fatalf("expected to receive at least %d alerts; got %d instead",
			len(expectedAlerts)*2, gotAlertsNum)
	}

	// reset previous data
	fs.reset()
	// and set only one datapoint for response
	fs.add(m1)

	// wait for multiple evals
	time.Sleep(20 * evalInterval)

	gotAlerts = fn.getAlerts()
	alert2.State = notifier.StateInactive
	expectedAlerts = []notifier.Alert{*alert1, *alert2}
	compareAlerts(t, expectedAlerts, gotAlerts)

	g.close()
	<-finished
}
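
// TestResolveDuration checks getResolveDuration. Judging by the cases below,
// the resolve duration appears to be 4x the larger of the group's evaluation
// interval and the resend delay, capped by maxDuration when it is non-zero.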
func TestResolveDuration(t *testing.T) {
	testCases := []struct {
		groupInterval time.Duration
		maxDuration   time.Duration
		resendDelay   time.Duration
		expected      time.Duration
	}{
		{time.Minute, 0, 0, 4 * time.Minute},
		{time.Minute, 0, 2 * time.Minute, 8 * time.Minute},
		{time.Minute, 4 * time.Minute, 4 * time.Minute, 4 * time.Minute},
		{2 * time.Minute, time.Minute, 2 * time.Minute, time.Minute},
		{time.Minute, 2 * time.Minute, 1 * time.Minute, 2 * time.Minute},
		{2 * time.Minute, 0, 1 * time.Minute, 8 * time.Minute},
		{0, 0, 0, 0},
	}

	for _, tc := range testCases {
		t.Run(fmt.Sprintf("%v-%v-%v", tc.groupInterval, tc.expected, tc.maxDuration), func(t *testing.T) {
			got := getResolveDuration(tc.groupInterval, tc.resendDelay, tc.maxDuration)
			if got != tc.expected {
				t.Errorf("expected to have %v; got %v", tc.expected, got)
			}
		})
	}
}
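
// TestGetStaleSeries feeds sequences of time series into executor.getStaleSeries
// and checks that series which disappear between calls are returned as staleness
// markers: a single StaleNaN sample per vanished series.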
func TestGetStaleSeries(t *testing.T) {
	ts := time.Now()
	e := &executor{
		previouslySentSeriesToRW: make(map[uint64]map[string][]prompbmarshal.Label),
	}
	f := func(rule Rule, labels, expLabels [][]prompbmarshal.Label) {
		t.Helper()
		var tss []prompbmarshal.TimeSeries
		for _, l := range labels {
			tss = append(tss, newTimeSeriesPB([]float64{1}, []int64{ts.Unix()}, l))
		}
		staleS := e.getStaleSeries(rule, tss, ts)
		if staleS == nil && expLabels == nil {
			return
		}
		if len(staleS) != len(expLabels) {
			t.Fatalf("expected to get %d stale series, got %d",
				len(expLabels), len(staleS))
		}
		for i, exp := range expLabels {
			got := staleS[i]
			if !reflect.DeepEqual(exp, got.Labels) {
				t.Fatalf("expected to get labels: \n%v;\ngot instead: \n%v",
					exp, got.Labels)
			}
			if len(got.Samples) != 1 {
				t.Fatalf("expected to have 1 sample; got %d", len(got.Samples))
			}
			if !decimal.IsStaleNaN(got.Samples[0].Value) {
				t.Fatalf("expected sample value to be %v; got %v", decimal.StaleNaN, got.Samples[0].Value)
			}
		}
	}

	// warn: the executor holds state between calls, so the sequence of f calls matters

	// single series
	f(&AlertingRule{RuleID: 1},
		[][]prompbmarshal.Label{toPromLabels(t, "__name__", "job:foo", "job", "foo")},
		nil)
	f(&AlertingRule{RuleID: 1},
		[][]prompbmarshal.Label{toPromLabels(t, "__name__", "job:foo", "job", "foo")},
		nil)
	f(&AlertingRule{RuleID: 1},
		nil,
		[][]prompbmarshal.Label{toPromLabels(t, "__name__", "job:foo", "job", "foo")})
	f(&AlertingRule{RuleID: 1},
		nil,
		nil)

	// multiple series
	f(&AlertingRule{RuleID: 1},
		[][]prompbmarshal.Label{
			toPromLabels(t, "__name__", "job:foo", "job", "foo"),
			toPromLabels(t, "__name__", "job:foo", "job", "bar"),
		},
		nil)
	f(&AlertingRule{RuleID: 1},
		[][]prompbmarshal.Label{toPromLabels(t, "__name__", "job:foo", "job", "bar")},
		[][]prompbmarshal.Label{toPromLabels(t, "__name__", "job:foo", "job", "foo")})
	f(&AlertingRule{RuleID: 1},
		[][]prompbmarshal.Label{toPromLabels(t, "__name__", "job:foo", "job", "bar")},
		nil)
	f(&AlertingRule{RuleID: 1},
		nil,
		[][]prompbmarshal.Label{toPromLabels(t, "__name__", "job:foo", "job", "bar")})

	// multiple rules and series
	f(&AlertingRule{RuleID: 1},
		[][]prompbmarshal.Label{
			toPromLabels(t, "__name__", "job:foo", "job", "foo"),
			toPromLabels(t, "__name__", "job:foo", "job", "bar"),
		},
		nil)
	f(&AlertingRule{RuleID: 2},
		[][]prompbmarshal.Label{
			toPromLabels(t, "__name__", "job:foo", "job", "foo"),
			toPromLabels(t, "__name__", "job:foo", "job", "bar"),
		},
		nil)
	f(&AlertingRule{RuleID: 1},
		[][]prompbmarshal.Label{toPromLabels(t, "__name__", "job:foo", "job", "bar")},
		[][]prompbmarshal.Label{toPromLabels(t, "__name__", "job:foo", "job", "foo")})
	f(&AlertingRule{RuleID: 1},
		[][]prompbmarshal.Label{toPromLabels(t, "__name__", "job:foo", "job", "bar")},
		nil)
}