2019-05-22 23:16:55 +02:00
|
|
|
package storage
|
|
|
|
|
|
|
|
import (
|
|
|
|
"fmt"
|
|
|
|
"math/rand"
|
|
|
|
"os"
|
|
|
|
"reflect"
|
2020-11-15 23:42:27 +01:00
|
|
|
"sort"
|
2019-05-22 23:16:55 +02:00
|
|
|
"strings"
|
|
|
|
"testing"
|
|
|
|
"testing/quick"
|
|
|
|
"time"
|
2019-09-24 20:10:22 +02:00
|
|
|
|
2022-11-07 13:04:06 +01:00
|
|
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
|
2019-09-24 20:10:22 +02:00
|
|
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/uint64set"
|
2019-05-22 23:16:55 +02:00
|
|
|
)
|
|
|
|
|
2021-12-14 18:51:46 +01:00
|
|
|
func TestReplaceAlternateRegexpsWithGraphiteWildcards(t *testing.T) {
|
|
|
|
f := func(q, resultExpected string) {
|
|
|
|
t.Helper()
|
|
|
|
result := replaceAlternateRegexpsWithGraphiteWildcards([]byte(q))
|
|
|
|
if string(result) != resultExpected {
|
|
|
|
t.Fatalf("unexpected result for %s\ngot\n%s\nwant\n%s", q, result, resultExpected)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
f("", "")
|
|
|
|
f("foo", "foo")
|
|
|
|
f("foo(bar", "foo(bar")
|
|
|
|
f("foo.(bar|baz", "foo.(bar|baz")
|
|
|
|
f("foo.(bar).x", "foo.{bar}.x")
|
|
|
|
f("foo.(bar|baz).*.{x,y}", "foo.{bar,baz}.*.{x,y}")
|
|
|
|
f("foo.(bar|baz).*.{x,y}(z|aa)", "foo.{bar,baz}.*.{x,y}{z,aa}")
|
|
|
|
f("foo(.*)", "foo*")
|
|
|
|
}
|
|
|
|
|
2021-02-02 23:24:05 +01:00
|
|
|
func TestGetRegexpForGraphiteNodeQuery(t *testing.T) {
|
|
|
|
f := func(q, expectedRegexp string) {
|
|
|
|
t.Helper()
|
2021-02-03 19:12:17 +01:00
|
|
|
re, err := getRegexpForGraphiteQuery(q)
|
2021-02-02 23:24:05 +01:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("unexpected error for query=%q: %s", q, err)
|
|
|
|
}
|
|
|
|
reStr := re.String()
|
|
|
|
if reStr != expectedRegexp {
|
|
|
|
t.Fatalf("unexpected regexp for query %q; got %q want %q", q, reStr, expectedRegexp)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
f(``, `^$`)
|
|
|
|
f(`*`, `^[^.]*$`)
|
|
|
|
f(`foo.`, `^foo\.$`)
|
|
|
|
f(`foo.bar`, `^foo\.bar$`)
|
|
|
|
f(`{foo,b*ar,b[a-z]}`, `^(?:foo|b[^.]*ar|b[a-z])$`)
|
|
|
|
f(`[-a-zx.]`, `^[-a-zx.]$`)
|
|
|
|
f(`**`, `^[^.]*[^.]*$`)
|
|
|
|
f(`a*[de]{x,y}z`, `^a[^.]*[de](?:x|y)z$`)
|
2021-02-03 19:12:17 +01:00
|
|
|
f(`foo{bar`, `^foo\{bar$`)
|
|
|
|
f(`foo{ba,r`, `^foo\{ba,r$`)
|
|
|
|
f(`foo[bar`, `^foo\[bar$`)
|
|
|
|
f(`foo{bar}`, `^foobar$`)
|
|
|
|
f(`foo{bar,,b{{a,b*},z},[x-y]*z}a`, `^foo(?:bar||b(?:(?:a|b[^.]*)|z)|[x-y][^.]*z)a$`)
|
2021-02-02 23:24:05 +01:00
|
|
|
}
|
|
|
|
|
2019-11-11 12:21:05 +01:00
|
|
|
func TestDateMetricIDCacheSerial(t *testing.T) {
|
|
|
|
c := newDateMetricIDCache()
|
|
|
|
if err := testDateMetricIDCache(c, false); err != nil {
|
|
|
|
t.Fatalf("unexpected error: %s", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestDateMetricIDCacheConcurrent(t *testing.T) {
|
|
|
|
c := newDateMetricIDCache()
|
|
|
|
ch := make(chan error, 5)
|
|
|
|
for i := 0; i < 5; i++ {
|
|
|
|
go func() {
|
|
|
|
ch <- testDateMetricIDCache(c, true)
|
|
|
|
}()
|
|
|
|
}
|
|
|
|
for i := 0; i < 5; i++ {
|
|
|
|
select {
|
|
|
|
case err := <-ch:
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("unexpected error: %s", err)
|
|
|
|
}
|
|
|
|
case <-time.After(time.Second * 5):
|
|
|
|
t.Fatalf("timeout")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// testDateMetricIDCache exercises the Set/Has/Reset/EntriesCount methods of c.
// When concurrent is true, the strict Has() assertions are skipped, since other
// goroutines may mutate or reset c between calls.
func testDateMetricIDCache(c *dateMetricIDCache, concurrent bool) error {
	// dmk mirrors the cache key so a shadow map can track expected contents.
	type dmk struct {
		date     uint64
		metricID uint64
	}
	m := make(map[dmk]bool)
	for i := 0; i < 1e5; i++ {
		date := uint64(i) % 3
		metricID := uint64(i) % 1237
		if !concurrent && c.Has(date, metricID) {
			// The cache claims to hold the entry; the shadow map must agree.
			if !m[dmk{date, metricID}] {
				return fmt.Errorf("c.Has(%d, %d) must return false, but returned true", date, metricID)
			}
			continue
		}
		c.Set(date, metricID)
		m[dmk{date, metricID}] = true
		if !concurrent && !c.Has(date, metricID) {
			return fmt.Errorf("c.Has(%d, %d) must return true, but returned false", date, metricID)
		}
		if i%11234 == 0 {
			// Periodically force a sync so both the mutable and the
			// synced (fast-path) parts of the cache get exercised.
			c.mu.Lock()
			c.syncLocked()
			c.mu.Unlock()
		}
		if i%34323 == 0 {
			// Periodically reset both the cache and the shadow map.
			c.Reset()
			m = make(map[dmk]bool)
		}
	}

	// Verify fast path after sync.
	for i := 0; i < 1e5; i++ {
		date := uint64(i) % 3
		metricID := uint64(i) % 123
		c.Set(date, metricID)
	}
	c.mu.Lock()
	c.syncLocked()
	c.mu.Unlock()
	for i := 0; i < 1e5; i++ {
		date := uint64(i) % 3
		metricID := uint64(i) % 123
		if !concurrent && !c.Has(date, metricID) {
			return fmt.Errorf("c.Has(%d, %d) must return true after sync", date, metricID)
		}
	}

	// Verify c.Reset
	if n := c.EntriesCount(); !concurrent && n < 123 {
		return fmt.Errorf("c.EntriesCount must return at least 123; returned %d", n)
	}
	c.Reset()
	if n := c.EntriesCount(); !concurrent && n > 0 {
		return fmt.Errorf("c.EntriesCount must return 0 after reset; returned %d", n)
	}
	return nil
}
|
|
|
|
|
2019-06-09 18:06:53 +02:00
|
|
|
// TestUpdateCurrHourMetricIDs verifies Storage.updateCurrHourMetricIDs for the
// four combinations of (empty/non-empty pendingHourEntries) x (stale/valid
// currHourMetricIDs), plus the new-day rollover case. Since the tests use the
// real wall clock, each subtest tolerates an hour boundary crossing mid-test
// by recomputing the hour before failing.
func TestUpdateCurrHourMetricIDs(t *testing.T) {
	// newStorage returns a minimal Storage with empty curr/prev hour sets.
	newStorage := func() *Storage {
		var s Storage
		s.currHourMetricIDs.Store(&hourMetricIDs{})
		s.prevHourMetricIDs.Store(&hourMetricIDs{})
		return &s
	}
	t.Run("empty_pending_metric_ids_stale_curr_hour", func(t *testing.T) {
		s := newStorage()
		hour := fasttime.UnixHour()
		// The stored hour (123) is stale relative to the current hour, so the
		// update must rotate hmOrig into prevHourMetricIDs.
		hmOrig := &hourMetricIDs{
			m:    &uint64set.Set{},
			hour: 123,
		}
		hmOrig.m.Add(12)
		hmOrig.m.Add(34)
		s.currHourMetricIDs.Store(hmOrig)
		s.updateCurrHourMetricIDs(hour)
		hmCurr := s.currHourMetricIDs.Load().(*hourMetricIDs)
		if hmCurr.hour != hour {
			// It is possible new hour occurred. Update the hour and verify it again.
			hour = uint64(timestampFromTime(time.Now())) / msecPerHour
			if hmCurr.hour != hour {
				t.Fatalf("unexpected hmCurr.hour; got %d; want %d", hmCurr.hour, hour)
			}
		}
		// No pending entries, so the new current hour set must be empty.
		if hmCurr.m.Len() != 0 {
			t.Fatalf("unexpected length of hm.m; got %d; want %d", hmCurr.m.Len(), 0)
		}

		hmPrev := s.prevHourMetricIDs.Load().(*hourMetricIDs)
		if !reflect.DeepEqual(hmPrev, hmOrig) {
			t.Fatalf("unexpected hmPrev; got %v; want %v", hmPrev, hmOrig)
		}

		if len(s.pendingHourEntries) != 0 {
			t.Fatalf("unexpected len(s.pendingHourEntries); got %d; want %d", len(s.pendingHourEntries), 0)
		}
	})
	t.Run("empty_pending_metric_ids_valid_curr_hour", func(t *testing.T) {
		s := newStorage()
		hour := fasttime.UnixHour()
		// The stored hour matches the current hour, so the update must keep
		// hmOrig as the current set and leave prevHourMetricIDs empty.
		hmOrig := &hourMetricIDs{
			m:    &uint64set.Set{},
			hour: hour,
		}
		hmOrig.m.Add(12)
		hmOrig.m.Add(34)
		s.currHourMetricIDs.Store(hmOrig)
		s.updateCurrHourMetricIDs(hour)
		hmCurr := s.currHourMetricIDs.Load().(*hourMetricIDs)
		if hmCurr.hour != hour {
			// It is possible new hour occurred. Update the hour and verify it again.
			hour = uint64(timestampFromTime(time.Now())) / msecPerHour
			if hmCurr.hour != hour {
				t.Fatalf("unexpected hmCurr.hour; got %d; want %d", hmCurr.hour, hour)
			}
			// Do not run other checks, since they may fail.
			return
		}
		if !reflect.DeepEqual(hmCurr, hmOrig) {
			t.Fatalf("unexpected hmCurr; got %v; want %v", hmCurr, hmOrig)
		}

		hmPrev := s.prevHourMetricIDs.Load().(*hourMetricIDs)
		hmEmpty := &hourMetricIDs{}
		if !reflect.DeepEqual(hmPrev, hmEmpty) {
			t.Fatalf("unexpected hmPrev; got %v; want %v", hmPrev, hmEmpty)
		}
		if len(s.pendingHourEntries) != 0 {
			t.Fatalf("unexpected len(s.pendingHourEntries); got %d; want %d", len(s.pendingHourEntries), 0)
		}
	})
	t.Run("nonempty_pending_metric_ids_stale_curr_hour", func(t *testing.T) {
		s := newStorage()
		s.pendingHourEntries = []pendingHourMetricIDEntry{
			{AccountID: 123, ProjectID: 431, MetricID: 343},
			{AccountID: 123, ProjectID: 431, MetricID: 32424},
			{AccountID: 1, ProjectID: 2, MetricID: 8293432},
		}
		// Expected overall metricID set for the new current hour.
		mExpected := &uint64set.Set{}
		for _, e := range s.pendingHourEntries {
			mExpected.Add(e.MetricID)
		}
		// Expected per-tenant (accountID, projectID) metricID sets.
		byTenantExpected := make(map[accountProjectKey]*uint64set.Set)
		for _, e := range s.pendingHourEntries {
			k := accountProjectKey{
				AccountID: e.AccountID,
				ProjectID: e.ProjectID,
			}
			x := byTenantExpected[k]
			if x == nil {
				x = &uint64set.Set{}
				byTenantExpected[k] = x
			}
			x.Add(e.MetricID)
		}

		hour := fasttime.UnixHour()
		// Stale stored hour: hmOrig must be rotated to prev, and the pending
		// entries must form the new current hour set.
		hmOrig := &hourMetricIDs{
			m:    &uint64set.Set{},
			hour: 123,
		}
		hmOrig.m.Add(12)
		hmOrig.m.Add(34)
		s.currHourMetricIDs.Store(hmOrig)
		s.updateCurrHourMetricIDs(hour)
		hmCurr := s.currHourMetricIDs.Load().(*hourMetricIDs)
		if hmCurr.hour != hour {
			// It is possible new hour occurred. Update the hour and verify it again.
			hour = uint64(timestampFromTime(time.Now())) / msecPerHour
			if hmCurr.hour != hour {
				t.Fatalf("unexpected hmCurr.hour; got %d; want %d", hmCurr.hour, hour)
			}
		}
		if !hmCurr.m.Equal(mExpected) {
			t.Fatalf("unexpected hm.m; got %v; want %v", hmCurr.m, mExpected)
		}
		if !reflect.DeepEqual(hmCurr.byTenant, byTenantExpected) {
			t.Fatalf("unexpected hmPrev.byTenant; got %v; want %v", hmCurr.byTenant, byTenantExpected)
		}

		hmPrev := s.prevHourMetricIDs.Load().(*hourMetricIDs)
		if !reflect.DeepEqual(hmPrev, hmOrig) {
			t.Fatalf("unexpected hmPrev; got %v; want %v", hmPrev, hmOrig)
		}
		if len(s.pendingHourEntries) != 0 {
			t.Fatalf("unexpected len(s.pendingHourEntries); got %d; want %d", len(s.pendingHourEntries), 0)
		}
	})
	t.Run("nonempty_pending_metric_ids_valid_curr_hour", func(t *testing.T) {
		s := newStorage()
		s.pendingHourEntries = []pendingHourMetricIDEntry{
			{AccountID: 123, ProjectID: 431, MetricID: 343},
			{AccountID: 123, ProjectID: 431, MetricID: 32424},
			{AccountID: 1, ProjectID: 2, MetricID: 8293432},
		}
		// Expected overall metricID set contributed by the pending entries.
		mExpected := &uint64set.Set{}
		for _, e := range s.pendingHourEntries {
			mExpected.Add(e.MetricID)
		}
		// Expected per-tenant metricID sets built from the pending entries.
		byTenantExpected := make(map[accountProjectKey]*uint64set.Set)
		for _, e := range s.pendingHourEntries {
			k := accountProjectKey{
				AccountID: e.AccountID,
				ProjectID: e.ProjectID,
			}
			x := byTenantExpected[k]
			if x == nil {
				x = &uint64set.Set{}
				byTenantExpected[k] = x
			}
			x.Add(e.MetricID)
		}

		hour := fasttime.UnixHour()
		// Valid stored hour: pending entries must be merged into hmOrig.m and
		// prevHourMetricIDs must stay empty.
		hmOrig := &hourMetricIDs{
			m:    &uint64set.Set{},
			hour: hour,
		}
		hmOrig.m.Add(12)
		hmOrig.m.Add(34)
		s.currHourMetricIDs.Store(hmOrig)
		s.updateCurrHourMetricIDs(hour)
		hmCurr := s.currHourMetricIDs.Load().(*hourMetricIDs)
		if hmCurr.hour != hour {
			// It is possible new hour occurred. Update the hour and verify it again.
			hour = uint64(timestampFromTime(time.Now())) / msecPerHour
			if hmCurr.hour != hour {
				t.Fatalf("unexpected hmCurr.hour; got %d; want %d", hmCurr.hour, hour)
			}
			// Do not run other checks, since they may fail.
			return
		}
		// The expected current set is the union of pending entries and the
		// metricIDs already stored in hmOrig.
		m := mExpected.Clone()
		hmOrig.m.ForEach(func(part []uint64) bool {
			for _, metricID := range part {
				m.Add(metricID)
			}
			return true
		})
		if !hmCurr.m.Equal(m) {
			t.Fatalf("unexpected hm.m; got %v; want %v", hmCurr.m, m)
		}
		if !reflect.DeepEqual(hmCurr.byTenant, byTenantExpected) {
			t.Fatalf("unexpected hmPrev.byTenant; got %v; want %v", hmCurr.byTenant, byTenantExpected)
		}

		hmPrev := s.prevHourMetricIDs.Load().(*hourMetricIDs)
		hmEmpty := &hourMetricIDs{}
		if !reflect.DeepEqual(hmPrev, hmEmpty) {
			t.Fatalf("unexpected hmPrev; got %v; want %v", hmPrev, hmEmpty)
		}
		if len(s.pendingHourEntries) != 0 {
			t.Fatalf("unexpected s.pendingHourEntries.Len(); got %d; want %d", len(s.pendingHourEntries), 0)
		}
	})
	t.Run("nonempty_pending_metric_ids_from_previous_hour_new_day", func(t *testing.T) {
		s := newStorage()

		// Align the hour to a day boundary so the pending entries belong to
		// the previous day and must be dropped rather than carried over.
		hour := fasttime.UnixHour()
		hour -= hour % 24

		s.pendingHourEntries = []pendingHourMetricIDEntry{
			{AccountID: 123, ProjectID: 431, MetricID: 343},
			{AccountID: 123, ProjectID: 431, MetricID: 32424},
			{AccountID: 1, ProjectID: 2, MetricID: 8293432},
		}

		hmOrig := &hourMetricIDs{
			m:    &uint64set.Set{},
			hour: hour - 1,
		}
		s.currHourMetricIDs.Store(hmOrig)
		s.updateCurrHourMetricIDs(hour)
		hmCurr := s.currHourMetricIDs.Load().(*hourMetricIDs)
		if hmCurr.hour != hour {
			t.Fatalf("unexpected hmCurr.hour; got %d; want %d", hmCurr.hour, hour)
		}
		// Pending entries from the previous day must not appear in the new
		// current hour set.
		if hmCurr.m.Len() != 0 {
			t.Fatalf("unexpected non-empty hmCurr.m; got %v", hmCurr.m.AppendTo(nil))
		}
		byTenantExpected := make(map[accountProjectKey]*uint64set.Set)
		if !reflect.DeepEqual(hmCurr.byTenant, byTenantExpected) {
			t.Fatalf("unexpected hmPrev.byTenant; got %v; want %v", hmCurr.byTenant, byTenantExpected)
		}
		hmPrev := s.prevHourMetricIDs.Load().(*hourMetricIDs)
		if !reflect.DeepEqual(hmPrev, hmOrig) {
			t.Fatalf("unexpected hmPrev; got %v; want %v", hmPrev, hmOrig)
		}
		if len(s.pendingHourEntries) != 0 {
			t.Fatalf("unexpected s.pendingHourEntries.Len(); got %d; want %d", len(s.pendingHourEntries), 0)
		}
	})
}
|
|
|
|
|
2019-05-22 23:16:55 +02:00
|
|
|
func TestMetricRowMarshalUnmarshal(t *testing.T) {
|
|
|
|
var buf []byte
|
|
|
|
typ := reflect.TypeOf(&MetricRow{})
|
|
|
|
rnd := rand.New(rand.NewSource(1))
|
|
|
|
|
|
|
|
for i := 0; i < 1000; i++ {
|
|
|
|
v, ok := quick.Value(typ, rnd)
|
|
|
|
if !ok {
|
|
|
|
t.Fatalf("cannot create random MetricRow via quick.Value")
|
|
|
|
}
|
|
|
|
mr1 := v.Interface().(*MetricRow)
|
|
|
|
if mr1 == nil {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
buf = mr1.Marshal(buf[:0])
|
|
|
|
var mr2 MetricRow
|
2021-05-08 16:55:44 +02:00
|
|
|
tail, err := mr2.UnmarshalX(buf)
|
2019-05-22 23:16:55 +02:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("cannot unmarshal mr1=%s: %s", mr1, err)
|
|
|
|
}
|
|
|
|
if len(tail) > 0 {
|
|
|
|
t.Fatalf("non-empty tail returned after MetricRow.Unmarshal for mr1=%s", mr1)
|
|
|
|
}
|
|
|
|
if mr1.MetricNameRaw == nil {
|
|
|
|
mr1.MetricNameRaw = []byte{}
|
|
|
|
}
|
|
|
|
if mr2.MetricNameRaw == nil {
|
|
|
|
mr2.MetricNameRaw = []byte{}
|
|
|
|
}
|
|
|
|
if !reflect.DeepEqual(mr1, &mr2) {
|
|
|
|
t.Fatalf("mr1 should match mr2; got\nmr1=%s\nmr2=%s", mr1, &mr2)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestNextRetentionDuration(t *testing.T) {
|
2021-02-15 13:30:12 +01:00
|
|
|
for retentionMonths := float64(0.1); retentionMonths < 120; retentionMonths += 0.3 {
|
|
|
|
d := nextRetentionDuration(int64(retentionMonths * msecsPerMonth))
|
2019-12-02 13:42:26 +01:00
|
|
|
if d <= 0 {
|
|
|
|
currTime := time.Now().UTC()
|
2019-05-22 23:16:55 +02:00
|
|
|
nextTime := time.Now().UTC().Add(d)
|
2021-12-20 16:39:43 +01:00
|
|
|
t.Fatalf("unexpected retention duration for retentionMonths=%f; got %s; must be %s + %f months", retentionMonths, nextTime, currTime, retentionMonths)
|
2019-05-22 23:16:55 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestStorageOpenClose(t *testing.T) {
|
|
|
|
path := "TestStorageOpenClose"
|
|
|
|
for i := 0; i < 10; i++ {
|
2021-05-20 13:15:19 +02:00
|
|
|
s, err := OpenStorage(path, -1, 1e5, 1e6)
|
2019-05-22 23:16:55 +02:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("cannot open storage: %s", err)
|
|
|
|
}
|
|
|
|
s.MustClose()
|
|
|
|
}
|
|
|
|
if err := os.RemoveAll(path); err != nil {
|
|
|
|
t.Fatalf("cannot remove %q: %s", path, err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestStorageOpenMultipleTimes(t *testing.T) {
|
|
|
|
path := "TestStorageOpenMultipleTimes"
|
2021-05-20 13:15:19 +02:00
|
|
|
s1, err := OpenStorage(path, -1, 0, 0)
|
2019-05-22 23:16:55 +02:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("cannot open storage the first time: %s", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
for i := 0; i < 10; i++ {
|
2021-05-20 13:15:19 +02:00
|
|
|
s2, err := OpenStorage(path, -1, 0, 0)
|
2019-05-22 23:16:55 +02:00
|
|
|
if err == nil {
|
|
|
|
s2.MustClose()
|
|
|
|
t.Fatalf("expecting non-nil error when opening already opened storage")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
s1.MustClose()
|
|
|
|
if err := os.RemoveAll(path); err != nil {
|
|
|
|
t.Fatalf("cannot remove %q: %s", path, err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestStorageRandTimestamps(t *testing.T) {
|
|
|
|
path := "TestStorageRandTimestamps"
|
2020-10-20 13:29:26 +02:00
|
|
|
retentionMsecs := int64(60 * msecsPerMonth)
|
2021-05-20 13:15:19 +02:00
|
|
|
s, err := OpenStorage(path, retentionMsecs, 0, 0)
|
2019-05-22 23:16:55 +02:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("cannot open storage: %s", err)
|
|
|
|
}
|
|
|
|
t.Run("serial", func(t *testing.T) {
|
|
|
|
for i := 0; i < 3; i++ {
|
|
|
|
if err := testStorageRandTimestamps(s); err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
s.MustClose()
|
2021-05-20 13:15:19 +02:00
|
|
|
s, err = OpenStorage(path, retentionMsecs, 0, 0)
|
2019-05-22 23:16:55 +02:00
|
|
|
}
|
|
|
|
})
|
|
|
|
t.Run("concurrent", func(t *testing.T) {
|
|
|
|
ch := make(chan error, 3)
|
|
|
|
for i := 0; i < cap(ch); i++ {
|
|
|
|
go func() {
|
|
|
|
var err error
|
|
|
|
for i := 0; i < 2; i++ {
|
|
|
|
err = testStorageRandTimestamps(s)
|
|
|
|
}
|
|
|
|
ch <- err
|
|
|
|
}()
|
|
|
|
}
|
|
|
|
for i := 0; i < cap(ch); i++ {
|
|
|
|
select {
|
|
|
|
case err := <-ch:
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
case <-time.After(time.Second * 10):
|
|
|
|
t.Fatal("timeout")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
})
|
|
|
|
s.MustClose()
|
|
|
|
if err := os.RemoveAll(path); err != nil {
|
|
|
|
t.Fatalf("cannot remove %q: %s", path, err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func testStorageRandTimestamps(s *Storage) error {
|
|
|
|
const rowsPerAdd = 1e3
|
|
|
|
const addsCount = 2
|
|
|
|
typ := reflect.TypeOf(int64(0))
|
|
|
|
rnd := rand.New(rand.NewSource(1))
|
|
|
|
|
|
|
|
for i := 0; i < addsCount; i++ {
|
|
|
|
var mrs []MetricRow
|
|
|
|
var mn MetricName
|
|
|
|
mn.Tags = []Tag{
|
|
|
|
{[]byte("job"), []byte("webservice")},
|
|
|
|
{[]byte("instance"), []byte("1.2.3.4")},
|
|
|
|
}
|
|
|
|
for j := 0; j < rowsPerAdd; j++ {
|
|
|
|
mn.MetricGroup = []byte(fmt.Sprintf("metric_%d", rand.Intn(100)))
|
|
|
|
metricNameRaw := mn.marshalRaw(nil)
|
|
|
|
timestamp := int64(rnd.NormFloat64() * 1e12)
|
|
|
|
if j%2 == 0 {
|
|
|
|
ts, ok := quick.Value(typ, rnd)
|
|
|
|
if !ok {
|
|
|
|
return fmt.Errorf("cannot create random timestamp via quick.Value")
|
|
|
|
}
|
|
|
|
timestamp = ts.Interface().(int64)
|
|
|
|
}
|
|
|
|
value := rnd.NormFloat64() * 1e12
|
|
|
|
|
|
|
|
mr := MetricRow{
|
|
|
|
MetricNameRaw: metricNameRaw,
|
|
|
|
Timestamp: timestamp,
|
|
|
|
Value: value,
|
|
|
|
}
|
|
|
|
mrs = append(mrs, mr)
|
|
|
|
}
|
|
|
|
if err := s.AddRows(mrs, defaultPrecisionBits); err != nil {
|
2019-08-25 14:28:32 +02:00
|
|
|
errStr := err.Error()
|
|
|
|
if !strings.Contains(errStr, "too big timestamp") && !strings.Contains(errStr, "too small timestamp") {
|
2020-06-30 21:58:18 +02:00
|
|
|
return fmt.Errorf("unexpected error when adding mrs: %w", err)
|
2019-05-22 23:16:55 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Verify the storage contains rows.
|
|
|
|
var m Metrics
|
|
|
|
s.UpdateMetrics(&m)
|
|
|
|
if m.TableMetrics.SmallRowsCount == 0 {
|
|
|
|
return fmt.Errorf("expecting at least one row in the table")
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2022-07-05 22:56:31 +02:00
|
|
|
// TestStorageDeleteSeries verifies DeleteSeries end-to-end: serially with
// close/re-open cycles (so deleted metricID persistence is exercised) and
// concurrently with per-worker tenant isolation. The storage must contain no
// label names before and after the test.
func TestStorageDeleteSeries(t *testing.T) {
	path := "TestStorageDeleteSeries"
	s, err := OpenStorage(path, 0, 0, 0)
	if err != nil {
		t.Fatalf("cannot open storage: %s", err)
	}

	// Verify no label names exist
	lns, err := s.SearchLabelNamesWithFiltersOnTimeRange(nil, 0, 0, nil, TimeRange{}, 1e5, 1e9, noDeadline)
	if err != nil {
		t.Fatalf("error in SearchLabelNamesWithFiltersOnTimeRange() at the start: %s", err)
	}
	if len(lns) != 0 {
		t.Fatalf("found non-empty tag keys at the start: %q", lns)
	}

	t.Run("serial", func(t *testing.T) {
		for i := 0; i < 3; i++ {
			if err = testStorageDeleteSeries(s, 0); err != nil {
				t.Fatalf("unexpected error on iteration %d: %s", i, err)
			}

			// Re-open the storage in order to check how deleted metricIDs
			// are persisted.
			s.MustClose()
			s, err = OpenStorage(path, 0, 0, 0)
			if err != nil {
				t.Fatalf("cannot open storage after closing on iteration %d: %s", i, err)
			}
		}
	})

	t.Run("concurrent", func(t *testing.T) {
		ch := make(chan error, 3)
		for i := 0; i < cap(ch); i++ {
			// workerNum doubles as accountID inside the helper, so each
			// goroutine operates on its own tenant.
			go func(workerNum int) {
				var err error
				for j := 0; j < 2; j++ {
					err = testStorageDeleteSeries(s, workerNum)
					if err != nil {
						break
					}
				}
				ch <- err
			}(i)
		}
		for i := 0; i < cap(ch); i++ {
			select {
			case err := <-ch:
				if err != nil {
					t.Fatalf("unexpected error: %s", err)
				}
			case <-time.After(30 * time.Second):
				t.Fatalf("timeout")
			}
		}
	})

	// Verify no more tag keys exist
	lns, err = s.SearchLabelNamesWithFiltersOnTimeRange(nil, 0, 0, nil, TimeRange{}, 1e5, 1e9, noDeadline)
	if err != nil {
		t.Fatalf("error in SearchLabelNamesWithFiltersOnTimeRange after the test: %s", err)
	}
	if len(lns) != 0 {
		t.Fatalf("found non-empty tag keys after the test: %q", lns)
	}

	s.MustClose()
	if err := os.RemoveAll(path); err != nil {
		t.Fatalf("cannot remove %q: %s", path, err)
	}
}
|
|
|
|
|
2022-07-05 22:56:31 +02:00
|
|
|
// testStorageDeleteSeries writes metricsCount series for the tenant derived
// from workerNum, then deletes them one metric at a time and verifies that
// searches no longer find the deleted data. workerNum is baked into metric
// names, tag keys and the accountID, so concurrent callers do not interfere.
func testStorageDeleteSeries(s *Storage, workerNum int) error {
	const rowsPerMetric = 100
	const metricsCount = 30

	// Per-worker tag key and tenant so concurrent workers stay isolated.
	workerTag := []byte(fmt.Sprintf("workerTag_%d", workerNum))
	accountID := uint32(workerNum)
	projectID := uint32(123)

	// lnsAll accumulates every label name written, for later verification.
	lnsAll := make(map[string]bool)
	lnsAll["__name__"] = true
	for i := 0; i < metricsCount; i++ {
		var mrs []MetricRow
		var mn MetricName
		mn.AccountID = accountID
		mn.ProjectID = projectID
		job := fmt.Sprintf("job_%d_%d", i, workerNum)
		instance := fmt.Sprintf("instance_%d_%d", i, workerNum)
		mn.Tags = []Tag{
			{[]byte("job"), []byte(job)},
			{[]byte("instance"), []byte(instance)},
			{workerTag, []byte("foobar")},
		}
		for i := range mn.Tags {
			lnsAll[string(mn.Tags[i].Key)] = true
		}
		mn.MetricGroup = []byte(fmt.Sprintf("metric_%d_%d", i, workerNum))
		metricNameRaw := mn.marshalRaw(nil)

		for j := 0; j < rowsPerMetric; j++ {
			timestamp := rand.Int63n(1e10)
			value := rand.NormFloat64() * 1e6

			mr := MetricRow{
				MetricNameRaw: metricNameRaw,
				Timestamp:     timestamp,
				Value:         value,
			}
			mrs = append(mrs, mr)
		}
		if err := s.AddRows(mrs, defaultPrecisionBits); err != nil {
			return fmt.Errorf("unexpected error when adding mrs: %w", err)
		}
	}
	// Make the just-added rows visible to searches.
	s.DebugFlush()

	// Verify tag values exist
	tvs, err := s.SearchLabelValuesWithFiltersOnTimeRange(nil, accountID, projectID, string(workerTag), nil, TimeRange{}, 1e5, 1e9, noDeadline)
	if err != nil {
		return fmt.Errorf("error in SearchLabelValuesWithFiltersOnTimeRange before metrics removal: %w", err)
	}
	if len(tvs) == 0 {
		return fmt.Errorf("unexpected empty number of tag values for workerTag")
	}

	// Verify tag keys exist
	lns, err := s.SearchLabelNamesWithFiltersOnTimeRange(nil, accountID, projectID, nil, TimeRange{}, 1e5, 1e9, noDeadline)
	if err != nil {
		return fmt.Errorf("error in SearchLabelNamesWithFiltersOnTimeRange before metrics removal: %w", err)
	}
	if err := checkLabelNames(lns, lnsAll); err != nil {
		return fmt.Errorf("unexpected label names before metrics removal: %w", err)
	}

	var sr Search
	tr := TimeRange{
		MinTimestamp: 0,
		MaxTimestamp: 2e10,
	}
	// metricBlocksCount returns the number of metric blocks matching tfs;
	// it is used to observe the effect of DeleteSeries.
	metricBlocksCount := func(tfs *TagFilters) int {
		// Verify the number of blocks
		n := 0
		sr.Init(nil, s, []*TagFilters{tfs}, tr, 1e5, noDeadline)
		for sr.NextMetricBlock() {
			n++
		}
		sr.MustClose()
		return n
	}
	for i := 0; i < metricsCount; i++ {
		tfs := NewTagFilters(accountID, projectID)
		if err := tfs.Add(nil, []byte("metric_.+"), false, true); err != nil {
			return fmt.Errorf("cannot add regexp tag filter: %w", err)
		}
		job := fmt.Sprintf("job_%d_%d", i, workerNum)
		if err := tfs.Add([]byte("job"), []byte(job), false, false); err != nil {
			return fmt.Errorf("cannot add job tag filter: %w", err)
		}
		if n := metricBlocksCount(tfs); n == 0 {
			return fmt.Errorf("expecting non-zero number of metric blocks for tfs=%s", tfs)
		}
		deletedCount, err := s.DeleteSeries(nil, []*TagFilters{tfs})
		if err != nil {
			return fmt.Errorf("cannot delete metrics: %w", err)
		}
		if deletedCount == 0 {
			return fmt.Errorf("expecting non-zero number of deleted metrics on iteration %d", i)
		}
		if n := metricBlocksCount(tfs); n != 0 {
			return fmt.Errorf("expecting zero metric blocks after DeleteSeries call for tfs=%s; got %d blocks", tfs, n)
		}

		// Try deleting empty tfss
		deletedCount, err = s.DeleteSeries(nil, nil)
		if err != nil {
			return fmt.Errorf("cannot delete empty tfss: %w", err)
		}
		if deletedCount != 0 {
			return fmt.Errorf("expecting zero deleted metrics for empty tfss; got %d", deletedCount)
		}
	}

	// Make sure no more metrics left for the given workerNum
	tfs := NewTagFilters(accountID, projectID)
	if err := tfs.Add(nil, []byte(fmt.Sprintf("metric_.+_%d", workerNum)), false, true); err != nil {
		return fmt.Errorf("cannot add regexp tag filter for worker metrics: %w", err)
	}
	if n := metricBlocksCount(tfs); n != 0 {
		return fmt.Errorf("expecting zero metric blocks after deleting all the metrics; got %d blocks", n)
	}
	tvs, err = s.SearchLabelValuesWithFiltersOnTimeRange(nil, accountID, projectID, string(workerTag), nil, TimeRange{}, 1e5, 1e9, noDeadline)
	if err != nil {
		return fmt.Errorf("error in SearchLabelValuesWithFiltersOnTimeRange after all the metrics are removed: %w", err)
	}
	if len(tvs) != 0 {
		return fmt.Errorf("found non-empty tag values for %q after metrics removal: %q", workerTag, tvs)
	}

	return nil
}
|
|
|
|
|
2022-06-12 03:32:13 +02:00
|
|
|
// checkLabelNames verifies that lns contains every label name listed in
// lnsExpected. It returns an error if lns has fewer entries than
// lnsExpected or if any expected label name is missing from lns.
func checkLabelNames(lns []string, lnsExpected map[string]bool) error {
	if len(lns) < len(lnsExpected) {
		return fmt.Errorf("unexpected number of label names found; got %d; want at least %d; lns=%q, lnsExpected=%v", len(lns), len(lnsExpected), lns, lnsExpected)
	}
	// contains reports whether needle is present in haystack.
	contains := func(needle string, haystack []string) bool {
		for i := range haystack {
			if haystack[i] == needle {
				return true
			}
		}
		return false
	}
	for labelName := range lnsExpected {
		if contains(labelName, lns) {
			continue
		}
		return fmt.Errorf("cannot find %q in label names %q", labelName, lns)
	}
	return nil
}
|
|
|
|
|
2020-11-15 23:42:27 +01:00
|
|
|
func TestStorageRegisterMetricNamesSerial(t *testing.T) {
|
|
|
|
path := "TestStorageRegisterMetricNamesSerial"
|
2021-05-20 13:15:19 +02:00
|
|
|
s, err := OpenStorage(path, 0, 0, 0)
|
2020-11-15 23:42:27 +01:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("cannot open storage: %s", err)
|
|
|
|
}
|
|
|
|
if err := testStorageRegisterMetricNames(s); err != nil {
|
|
|
|
t.Fatalf("unexpected error: %s", err)
|
|
|
|
}
|
|
|
|
s.MustClose()
|
|
|
|
if err := os.RemoveAll(path); err != nil {
|
|
|
|
t.Fatalf("cannot remove %q: %s", path, err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestStorageRegisterMetricNamesConcurrent(t *testing.T) {
|
|
|
|
path := "TestStorageRegisterMetricNamesConcurrent"
|
2021-05-20 13:15:19 +02:00
|
|
|
s, err := OpenStorage(path, 0, 0, 0)
|
2020-11-15 23:42:27 +01:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("cannot open storage: %s", err)
|
|
|
|
}
|
|
|
|
ch := make(chan error, 3)
|
|
|
|
for i := 0; i < cap(ch); i++ {
|
|
|
|
go func() {
|
|
|
|
ch <- testStorageRegisterMetricNames(s)
|
|
|
|
}()
|
|
|
|
}
|
|
|
|
for i := 0; i < cap(ch); i++ {
|
|
|
|
select {
|
|
|
|
case err := <-ch:
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("unexpected error: %s", err)
|
|
|
|
}
|
|
|
|
case <-time.After(10 * time.Second):
|
|
|
|
t.Fatalf("timeout")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
s.MustClose()
|
|
|
|
if err := os.RemoveAll(path); err != nil {
|
|
|
|
t.Fatalf("cannot remove %q: %s", path, err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func testStorageRegisterMetricNames(s *Storage) error {
|
|
|
|
const metricsPerAdd = 1e3
|
|
|
|
const addsCount = 10
|
|
|
|
const accountID = 123
|
|
|
|
const projectID = 421
|
|
|
|
|
|
|
|
addIDsMap := make(map[string]struct{})
|
|
|
|
for i := 0; i < addsCount; i++ {
|
|
|
|
var mrs []MetricRow
|
|
|
|
var mn MetricName
|
|
|
|
addID := fmt.Sprintf("%d", i)
|
|
|
|
addIDsMap[addID] = struct{}{}
|
|
|
|
mn.AccountID = accountID
|
|
|
|
mn.ProjectID = projectID
|
|
|
|
mn.Tags = []Tag{
|
|
|
|
{[]byte("job"), []byte("webservice")},
|
|
|
|
{[]byte("instance"), []byte("1.2.3.4")},
|
|
|
|
{[]byte("add_id"), []byte(addID)},
|
|
|
|
}
|
|
|
|
now := timestampFromTime(time.Now())
|
|
|
|
for j := 0; j < metricsPerAdd; j++ {
|
2020-11-16 12:15:16 +01:00
|
|
|
mn.MetricGroup = []byte(fmt.Sprintf("metric_%d", j))
|
2020-11-15 23:42:27 +01:00
|
|
|
metricNameRaw := mn.marshalRaw(nil)
|
|
|
|
|
|
|
|
mr := MetricRow{
|
|
|
|
MetricNameRaw: metricNameRaw,
|
|
|
|
Timestamp: now,
|
|
|
|
}
|
|
|
|
mrs = append(mrs, mr)
|
|
|
|
}
|
2022-06-27 11:53:46 +02:00
|
|
|
if err := s.RegisterMetricNames(nil, mrs); err != nil {
|
|
|
|
return fmt.Errorf("unexpected error in RegisterMetricNames: %w", err)
|
2020-11-15 23:42:27 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
var addIDsExpected []string
|
|
|
|
for k := range addIDsMap {
|
|
|
|
addIDsExpected = append(addIDsExpected, k)
|
|
|
|
}
|
|
|
|
sort.Strings(addIDsExpected)
|
|
|
|
|
|
|
|
// Verify the storage contains the added metric names.
|
|
|
|
s.DebugFlush()
|
|
|
|
|
2022-06-12 03:32:13 +02:00
|
|
|
// Verify that SearchLabelNamesWithFiltersOnTimeRange returns correct result.
|
|
|
|
lnsExpected := []string{
|
|
|
|
"__name__",
|
2020-11-15 23:42:27 +01:00
|
|
|
"add_id",
|
|
|
|
"instance",
|
|
|
|
"job",
|
|
|
|
}
|
2022-06-12 03:32:13 +02:00
|
|
|
lns, err := s.SearchLabelNamesWithFiltersOnTimeRange(nil, accountID, projectID, nil, TimeRange{}, 100, 1e9, noDeadline)
|
2020-11-15 23:42:27 +01:00
|
|
|
if err != nil {
|
2022-06-12 03:32:13 +02:00
|
|
|
return fmt.Errorf("error in SearchLabelNamesWithFiltersOnTimeRange: %w", err)
|
2020-11-15 23:42:27 +01:00
|
|
|
}
|
2022-06-12 03:32:13 +02:00
|
|
|
sort.Strings(lns)
|
|
|
|
if !reflect.DeepEqual(lns, lnsExpected) {
|
|
|
|
return fmt.Errorf("unexpected label names returned from SearchLabelNamesWithFiltersOnTimeRange;\ngot\n%q\nwant\n%q", lns, lnsExpected)
|
2020-11-15 23:42:27 +01:00
|
|
|
}
|
|
|
|
|
2022-06-12 03:32:13 +02:00
|
|
|
// Verify that SearchLabelNamesWithFiltersOnTimeRange returns empty results for incorrect accountID, projectID
|
|
|
|
lns, err = s.SearchLabelNamesWithFiltersOnTimeRange(nil, accountID+1, projectID+1, nil, TimeRange{}, 100, 1e9, noDeadline)
|
2020-11-16 16:59:01 +01:00
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("error in SearchTagKeys for incorrect accountID, projectID: %w", err)
|
|
|
|
}
|
2022-06-12 03:32:13 +02:00
|
|
|
if len(lns) > 0 {
|
|
|
|
return fmt.Errorf("SearchTagKeys with incorrect accountID, projectID returns unexpected non-empty result:\n%q", lns)
|
2020-11-16 16:59:01 +01:00
|
|
|
}
|
|
|
|
|
2022-06-12 13:17:44 +02:00
|
|
|
// Verify that SearchLabelNamesWithFiltersOnTimeRange with the specified time range returns correct result.
|
2020-11-15 23:42:27 +01:00
|
|
|
now := timestampFromTime(time.Now())
|
|
|
|
start := now - msecPerDay
|
|
|
|
end := now + 60*1000
|
|
|
|
tr := TimeRange{
|
|
|
|
MinTimestamp: start,
|
|
|
|
MaxTimestamp: end,
|
|
|
|
}
|
2022-06-12 03:32:13 +02:00
|
|
|
lns, err = s.SearchLabelNamesWithFiltersOnTimeRange(nil, accountID, projectID, nil, tr, 100, 1e9, noDeadline)
|
2020-11-15 23:42:27 +01:00
|
|
|
if err != nil {
|
2022-06-12 03:32:13 +02:00
|
|
|
return fmt.Errorf("error in SearchLabelNamesWithFiltersOnTimeRange: %w", err)
|
2020-11-15 23:42:27 +01:00
|
|
|
}
|
2022-06-12 03:32:13 +02:00
|
|
|
sort.Strings(lns)
|
|
|
|
if !reflect.DeepEqual(lns, lnsExpected) {
|
|
|
|
return fmt.Errorf("unexpected label names returned from SearchLabelNamesWithFiltersOnTimeRange;\ngot\n%q\nwant\n%q", lns, lnsExpected)
|
2020-11-15 23:42:27 +01:00
|
|
|
}
|
|
|
|
|
2022-06-12 03:32:13 +02:00
|
|
|
// Verify that SearchLabelNamesWithFiltersOnTimeRange with the specified time range returns empty results for incrorrect accountID, projectID
|
|
|
|
lns, err = s.SearchLabelNamesWithFiltersOnTimeRange(nil, accountID+1, projectID+1, nil, tr, 100, 1e9, noDeadline)
|
2020-11-16 16:59:01 +01:00
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("error in SearchTagKeysOnTimeRange for incorrect accountID, projectID: %w", err)
|
|
|
|
}
|
2022-06-12 03:32:13 +02:00
|
|
|
if len(lns) > 0 {
|
|
|
|
return fmt.Errorf("SearchTagKeysOnTimeRange with incorrect accountID, projectID returns unexpected non-empty result:\n%q", lns)
|
2020-11-16 16:59:01 +01:00
|
|
|
}
|
|
|
|
|
2022-06-12 03:32:13 +02:00
|
|
|
// Verify that SearchLabelValuesWithFiltersOnTimeRange returns correct result.
|
|
|
|
addIDs, err := s.SearchLabelValuesWithFiltersOnTimeRange(nil, accountID, projectID, "add_id", nil, TimeRange{}, addsCount+100, 1e9, noDeadline)
|
2020-11-15 23:42:27 +01:00
|
|
|
if err != nil {
|
2022-06-12 03:32:13 +02:00
|
|
|
return fmt.Errorf("error in SearchLabelValuesWithFiltersOnTimeRange: %w", err)
|
2020-11-15 23:42:27 +01:00
|
|
|
}
|
|
|
|
sort.Strings(addIDs)
|
|
|
|
if !reflect.DeepEqual(addIDs, addIDsExpected) {
|
2022-06-12 03:32:13 +02:00
|
|
|
return fmt.Errorf("unexpected tag values returned from SearchLabelValuesWithFiltersOnTimeRange;\ngot\n%q\nwant\n%q", addIDs, addIDsExpected)
|
2020-11-15 23:42:27 +01:00
|
|
|
}
|
|
|
|
|
2022-06-12 03:32:13 +02:00
|
|
|
// Verify that SearchLabelValuesWithFiltersOnTimeRange return empty results for incorrect accountID, projectID
|
|
|
|
addIDs, err = s.SearchLabelValuesWithFiltersOnTimeRange(nil, accountID+1, projectID+1, "add_id", nil, TimeRange{}, addsCount+100, 1e9, noDeadline)
|
2020-11-16 16:59:01 +01:00
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("error in SearchTagValues for incorrect accountID, projectID: %w", err)
|
|
|
|
}
|
|
|
|
if len(addIDs) > 0 {
|
|
|
|
return fmt.Errorf("SearchTagValues with incorrect accountID, projectID returns unexpected non-empty result:\n%q", addIDs)
|
|
|
|
}
|
|
|
|
|
2022-06-12 03:32:13 +02:00
|
|
|
// Verify that SearchLabelValuesWithFiltersOnTimeRange with the specified time range returns correct result.
|
|
|
|
addIDs, err = s.SearchLabelValuesWithFiltersOnTimeRange(nil, accountID, projectID, "add_id", nil, tr, addsCount+100, 1e9, noDeadline)
|
2020-11-15 23:42:27 +01:00
|
|
|
if err != nil {
|
2022-06-12 03:32:13 +02:00
|
|
|
return fmt.Errorf("error in SearchLabelValuesWithFiltersOnTimeRange: %w", err)
|
2020-11-15 23:42:27 +01:00
|
|
|
}
|
|
|
|
sort.Strings(addIDs)
|
|
|
|
if !reflect.DeepEqual(addIDs, addIDsExpected) {
|
2022-06-12 03:32:13 +02:00
|
|
|
return fmt.Errorf("unexpected tag values returned from SearchLabelValuesWithFiltersOnTimeRange;\ngot\n%q\nwant\n%q", addIDs, addIDsExpected)
|
2020-11-15 23:42:27 +01:00
|
|
|
}
|
|
|
|
|
2022-06-12 03:32:13 +02:00
|
|
|
// Verify that SearchLabelValuesWithFiltersOnTimeRange returns empty results for incorrect accountID, projectID
|
|
|
|
addIDs, err = s.SearchLabelValuesWithFiltersOnTimeRange(nil, accountID+1, projectID+1, "addd_id", nil, tr, addsCount+100, 1e9, noDeadline)
|
2020-11-16 16:59:01 +01:00
|
|
|
if err != nil {
|
2022-06-12 03:32:13 +02:00
|
|
|
return fmt.Errorf("error in SearchLabelValuesWithFiltersOnTimeRange for incorrect accoundID, projectID: %w", err)
|
2020-11-16 16:59:01 +01:00
|
|
|
}
|
|
|
|
if len(addIDs) > 0 {
|
2022-06-12 03:32:13 +02:00
|
|
|
return fmt.Errorf("SearchLabelValuesWithFiltersOnTimeRange with incorrect accountID, projectID returns unexpected non-empty result:\n%q", addIDs)
|
2020-11-16 16:59:01 +01:00
|
|
|
}
|
|
|
|
|
2020-11-16 12:15:16 +01:00
|
|
|
// Verify that SearchMetricNames returns correct result.
|
|
|
|
tfs := NewTagFilters(accountID, projectID)
|
|
|
|
if err := tfs.Add([]byte("add_id"), []byte("0"), false, false); err != nil {
|
|
|
|
return fmt.Errorf("unexpected error in TagFilters.Add: %w", err)
|
|
|
|
}
|
2022-06-28 16:36:27 +02:00
|
|
|
metricNames, err := s.SearchMetricNames(nil, []*TagFilters{tfs}, tr, metricsPerAdd*addsCount*100+100, noDeadline)
|
2020-11-16 12:15:16 +01:00
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("error in SearchMetricNames: %w", err)
|
|
|
|
}
|
2022-06-28 16:36:27 +02:00
|
|
|
if len(metricNames) < metricsPerAdd {
|
|
|
|
return fmt.Errorf("unexpected number of metricNames returned from SearchMetricNames; got %d; want at least %d", len(metricNames), int(metricsPerAdd))
|
2020-11-16 12:15:16 +01:00
|
|
|
}
|
2022-06-28 16:36:27 +02:00
|
|
|
var mn MetricName
|
|
|
|
for i, metricName := range metricNames {
|
|
|
|
if err := mn.UnmarshalString(metricName); err != nil {
|
|
|
|
return fmt.Errorf("cannot unmarshal metricName=%q: %w", metricName, err)
|
|
|
|
}
|
2020-11-16 12:15:16 +01:00
|
|
|
addID := mn.GetTagValue("add_id")
|
|
|
|
if string(addID) != "0" {
|
|
|
|
return fmt.Errorf("unexpected addID for metricName #%d; got %q; want %q", i, addID, "0")
|
|
|
|
}
|
|
|
|
job := mn.GetTagValue("job")
|
|
|
|
if string(job) != "webservice" {
|
|
|
|
return fmt.Errorf("unexpected job for metricName #%d; got %q; want %q", i, job, "webservice")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-11-16 16:59:01 +01:00
|
|
|
// Verify that SearchMetricNames returns empty results for incorrect accountID, projectID
|
|
|
|
tfs = NewTagFilters(accountID+1, projectID+1)
|
|
|
|
if err := tfs.Add([]byte("add_id"), []byte("0"), false, false); err != nil {
|
|
|
|
return fmt.Errorf("unexpected error in TagFilters.Add: %w", err)
|
|
|
|
}
|
2022-06-28 16:36:27 +02:00
|
|
|
metricNames, err = s.SearchMetricNames(nil, []*TagFilters{tfs}, tr, metricsPerAdd*addsCount*100+100, noDeadline)
|
2020-11-16 16:59:01 +01:00
|
|
|
if err != nil {
|
|
|
|
return fmt.Errorf("error in SearchMetricNames for incorrect accountID, projectID: %w", err)
|
|
|
|
}
|
2022-06-28 16:36:27 +02:00
|
|
|
if len(metricNames) > 0 {
|
|
|
|
return fmt.Errorf("SearchMetricNames with incorrect accountID, projectID returns unexpected non-empty result:\n%+v", metricNames)
|
2020-11-16 16:59:01 +01:00
|
|
|
}
|
|
|
|
|
2020-11-15 23:42:27 +01:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2020-03-24 21:24:54 +01:00
|
|
|
func TestStorageAddRowsSerial(t *testing.T) {
|
|
|
|
path := "TestStorageAddRowsSerial"
|
2021-05-20 13:15:19 +02:00
|
|
|
s, err := OpenStorage(path, 0, 1e5, 1e5)
|
2019-05-22 23:16:55 +02:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("cannot open storage: %s", err)
|
|
|
|
}
|
2020-03-24 21:24:54 +01:00
|
|
|
if err := testStorageAddRows(s); err != nil {
|
|
|
|
t.Fatalf("unexpected error: %s", err)
|
|
|
|
}
|
|
|
|
s.MustClose()
|
|
|
|
if err := os.RemoveAll(path); err != nil {
|
|
|
|
t.Fatalf("cannot remove %q: %s", path, err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestStorageAddRowsConcurrent(t *testing.T) {
|
|
|
|
path := "TestStorageAddRowsConcurrent"
|
2021-05-20 13:15:19 +02:00
|
|
|
s, err := OpenStorage(path, 0, 1e5, 1e5)
|
2020-03-24 21:24:54 +01:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("cannot open storage: %s", err)
|
|
|
|
}
|
|
|
|
ch := make(chan error, 3)
|
|
|
|
for i := 0; i < cap(ch); i++ {
|
|
|
|
go func() {
|
|
|
|
ch <- testStorageAddRows(s)
|
|
|
|
}()
|
|
|
|
}
|
|
|
|
for i := 0; i < cap(ch); i++ {
|
|
|
|
select {
|
|
|
|
case err := <-ch:
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("unexpected error: %s", err)
|
2019-05-22 23:16:55 +02:00
|
|
|
}
|
2020-03-24 21:24:54 +01:00
|
|
|
case <-time.After(10 * time.Second):
|
|
|
|
t.Fatalf("timeout")
|
2019-05-22 23:16:55 +02:00
|
|
|
}
|
2020-03-24 21:24:54 +01:00
|
|
|
}
|
2019-05-22 23:16:55 +02:00
|
|
|
s.MustClose()
|
|
|
|
if err := os.RemoveAll(path); err != nil {
|
|
|
|
t.Fatalf("cannot remove %q: %s", path, err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
lib/index: reduce read/write load after indexDB rotation (#2177)
* lib/index: reduce read/write load after indexDB rotation
IndexDB in VM is responsible for storing TSID - ID's used for identifying
time series. The index is stored on disk and used by both ingestion and read path.
IndexDB is stored separately to data parts and is global for all stored data.
It can't be deleted partially as VM deletes data parts. Instead, indexDB is
rotated once in `retention` interval.
The rotation procedure means that `current` indexDB becomes `previous`,
and new freshly created indexDB struct becomes `current`. So in any time,
VM holds indexDB for current and previous retention periods.
When time series is ingested or queried, VM checks if its TSID is present
in `current` indexDB. If it is missing, it checks the `previous` indexDB.
If TSID was found, it gets copied to the `current` indexDB. In this way
`current` indexDB stores only series which were active during the retention
period.
To improve indexDB lookups, VM uses a cache layer called `tsidCache`. Both
write and read path consult `tsidCache` and on miss the relad lookup happens.
When rotation happens, VM resets the `tsidCache`. This is needed for ingestion
path to trigger `current` indexDB re-population. Since index re-population
requires additional resources, every index rotation event may cause some extra
load on CPU and disk. While it may be unnoticeable for most of the cases,
for systems with very high number of unique series each rotation may lead
to performance degradation for some period of time.
This PR makes an attempt to smooth out resource usage after the rotation.
The changes are following:
1. `tsidCache` is no longer reset after the rotation;
2. Instead, each entry in `tsidCache` gains a notion of indexDB to which
they belong;
3. On ingestion path after the rotation we check if requested TSID was
found in `tsidCache`. Then we have 3 branches:
3.1 Fast path. It was found, and belongs to the `current` indexDB. Return TSID.
3.2 Slow path. It wasn't found, so we generate it from scratch,
add to `current` indexDB, add it to `tsidCache`.
3.3 Smooth path. It was found but does not belong to the `current` indexDB.
In this case, we add it to the `current` indexDB with some probability.
The probability is based on time passed since the last rotation with some threshold.
The more time has passed since rotation the higher is chance to re-populate `current` indexDB.
The default re-population interval in this PR is set to `1h`, during which entries from
`previous` index supposed to slowly re-populate `current` index.
The new metric `vm_timeseries_repopulated_total` was added to identify how many TSIDs
were moved from `previous` indexDB to the `current` indexDB. This metric supposed to
grow only during the first `1h` after the last rotation.
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1401
Signed-off-by: hagen1778 <roman@victoriametrics.com>
* wip
* wip
Co-authored-by: Aliaksandr Valialkin <valyala@victoriametrics.com>
2022-02-11 23:30:08 +01:00
|
|
|
func testGenerateMetricRows(rows uint64, timestampMin, timestampMax int64) []MetricRow {
|
|
|
|
var mrs []MetricRow
|
|
|
|
var mn MetricName
|
|
|
|
mn.Tags = []Tag{
|
|
|
|
{[]byte("job"), []byte("webservice")},
|
|
|
|
{[]byte("instance"), []byte("1.2.3.4")},
|
|
|
|
}
|
|
|
|
for i := 0; i < int(rows); i++ {
|
|
|
|
mn.AccountID = uint32(rand.Intn(2))
|
|
|
|
mn.ProjectID = uint32(rand.Intn(3))
|
|
|
|
mn.MetricGroup = []byte(fmt.Sprintf("metric_%d", i))
|
|
|
|
metricNameRaw := mn.marshalRaw(nil)
|
|
|
|
timestamp := rand.Int63n(timestampMax-timestampMin) + timestampMin
|
|
|
|
value := rand.NormFloat64() * 1e6
|
|
|
|
|
|
|
|
mr := MetricRow{
|
|
|
|
MetricNameRaw: metricNameRaw,
|
|
|
|
Timestamp: timestamp,
|
|
|
|
Value: value,
|
|
|
|
}
|
|
|
|
mrs = append(mrs, mr)
|
|
|
|
}
|
|
|
|
return mrs
|
|
|
|
}
|
|
|
|
|
2019-05-22 23:16:55 +02:00
|
|
|
func testStorageAddRows(s *Storage) error {
|
|
|
|
const rowsPerAdd = 1e3
|
|
|
|
const addsCount = 10
|
|
|
|
|
|
|
|
for i := 0; i < addsCount; i++ {
|
lib/index: reduce read/write load after indexDB rotation (#2177)
* lib/index: reduce read/write load after indexDB rotation
IndexDB in VM is responsible for storing TSID - ID's used for identifying
time series. The index is stored on disk and used by both ingestion and read path.
IndexDB is stored separately to data parts and is global for all stored data.
It can't be deleted partially as VM deletes data parts. Instead, indexDB is
rotated once in `retention` interval.
The rotation procedure means that `current` indexDB becomes `previous`,
and new freshly created indexDB struct becomes `current`. So in any time,
VM holds indexDB for current and previous retention periods.
When time series is ingested or queried, VM checks if its TSID is present
in `current` indexDB. If it is missing, it checks the `previous` indexDB.
If TSID was found, it gets copied to the `current` indexDB. In this way
`current` indexDB stores only series which were active during the retention
period.
To improve indexDB lookups, VM uses a cache layer called `tsidCache`. Both
write and read path consult `tsidCache` and on miss the relad lookup happens.
When rotation happens, VM resets the `tsidCache`. This is needed for ingestion
path to trigger `current` indexDB re-population. Since index re-population
requires additional resources, every index rotation event may cause some extra
load on CPU and disk. While it may be unnoticeable for most of the cases,
for systems with very high number of unique series each rotation may lead
to performance degradation for some period of time.
This PR makes an attempt to smooth out resource usage after the rotation.
The changes are following:
1. `tsidCache` is no longer reset after the rotation;
2. Instead, each entry in `tsidCache` gains a notion of indexDB to which
they belong;
3. On ingestion path after the rotation we check if requested TSID was
found in `tsidCache`. Then we have 3 branches:
3.1 Fast path. It was found, and belongs to the `current` indexDB. Return TSID.
3.2 Slow path. It wasn't found, so we generate it from scratch,
add to `current` indexDB, add it to `tsidCache`.
3.3 Smooth path. It was found but does not belong to the `current` indexDB.
In this case, we add it to the `current` indexDB with some probability.
The probability is based on time passed since the last rotation with some threshold.
The more time has passed since rotation the higher is chance to re-populate `current` indexDB.
The default re-population interval in this PR is set to `1h`, during which entries from
`previous` index supposed to slowly re-populate `current` index.
The new metric `vm_timeseries_repopulated_total` was added to identify how many TSIDs
were moved from `previous` indexDB to the `current` indexDB. This metric supposed to
grow only during the first `1h` after the last rotation.
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1401
Signed-off-by: hagen1778 <roman@victoriametrics.com>
* wip
* wip
Co-authored-by: Aliaksandr Valialkin <valyala@victoriametrics.com>
2022-02-11 23:30:08 +01:00
|
|
|
mrs := testGenerateMetricRows(rowsPerAdd, 0, 1e10)
|
2019-05-22 23:16:55 +02:00
|
|
|
if err := s.AddRows(mrs, defaultPrecisionBits); err != nil {
|
2020-06-30 21:58:18 +02:00
|
|
|
return fmt.Errorf("unexpected error when adding mrs: %w", err)
|
2019-05-22 23:16:55 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Verify the storage contains rows.
|
lib/index: reduce read/write load after indexDB rotation (#2177)
* lib/index: reduce read/write load after indexDB rotation
IndexDB in VM is responsible for storing TSID - ID's used for identifying
time series. The index is stored on disk and used by both ingestion and read path.
IndexDB is stored separately to data parts and is global for all stored data.
It can't be deleted partially as VM deletes data parts. Instead, indexDB is
rotated once in `retention` interval.
The rotation procedure means that `current` indexDB becomes `previous`,
and new freshly created indexDB struct becomes `current`. So in any time,
VM holds indexDB for current and previous retention periods.
When time series is ingested or queried, VM checks if its TSID is present
in `current` indexDB. If it is missing, it checks the `previous` indexDB.
If TSID was found, it gets copied to the `current` indexDB. In this way
`current` indexDB stores only series which were active during the retention
period.
To improve indexDB lookups, VM uses a cache layer called `tsidCache`. Both
write and read path consult `tsidCache` and on miss the relad lookup happens.
When rotation happens, VM resets the `tsidCache`. This is needed for ingestion
path to trigger `current` indexDB re-population. Since index re-population
requires additional resources, every index rotation event may cause some extra
load on CPU and disk. While it may be unnoticeable for most of the cases,
for systems with very high number of unique series each rotation may lead
to performance degradation for some period of time.
This PR makes an attempt to smooth out resource usage after the rotation.
The changes are following:
1. `tsidCache` is no longer reset after the rotation;
2. Instead, each entry in `tsidCache` gains a notion of indexDB to which
they belong;
3. On ingestion path after the rotation we check if requested TSID was
found in `tsidCache`. Then we have 3 branches:
3.1 Fast path. It was found, and belongs to the `current` indexDB. Return TSID.
3.2 Slow path. It wasn't found, so we generate it from scratch,
add to `current` indexDB, add it to `tsidCache`.
3.3 Smooth path. It was found but does not belong to the `current` indexDB.
In this case, we add it to the `current` indexDB with some probability.
The probability is based on time passed since the last rotation with some threshold.
The more time has passed since rotation the higher is chance to re-populate `current` indexDB.
The default re-population interval in this PR is set to `1h`, during which entries from
`previous` index supposed to slowly re-populate `current` index.
The new metric `vm_timeseries_repopulated_total` was added to identify how many TSIDs
were moved from `previous` indexDB to the `current` indexDB. This metric supposed to
grow only during the first `1h` after the last rotation.
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1401
Signed-off-by: hagen1778 <roman@victoriametrics.com>
* wip
* wip
Co-authored-by: Aliaksandr Valialkin <valyala@victoriametrics.com>
2022-02-11 23:30:08 +01:00
|
|
|
minRowsExpected := uint64(rowsPerAdd * addsCount)
|
2019-05-22 23:16:55 +02:00
|
|
|
var m Metrics
|
|
|
|
s.UpdateMetrics(&m)
|
|
|
|
if m.TableMetrics.SmallRowsCount < minRowsExpected {
|
|
|
|
return fmt.Errorf("expecting at least %d rows in the table; got %d", minRowsExpected, m.TableMetrics.SmallRowsCount)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Try creating a snapshot from the storage.
|
|
|
|
snapshotName, err := s.CreateSnapshot()
|
|
|
|
if err != nil {
|
2020-06-30 21:58:18 +02:00
|
|
|
return fmt.Errorf("cannot create snapshot from the storage: %w", err)
|
2019-05-22 23:16:55 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Verify the snapshot is visible
|
|
|
|
snapshots, err := s.ListSnapshots()
|
|
|
|
if err != nil {
|
2020-06-30 21:58:18 +02:00
|
|
|
return fmt.Errorf("cannot list snapshots: %w", err)
|
2019-05-22 23:16:55 +02:00
|
|
|
}
|
|
|
|
if !containsString(snapshots, snapshotName) {
|
|
|
|
return fmt.Errorf("cannot find snapshot %q in %q", snapshotName, snapshots)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Try opening the storage from snapshot.
|
|
|
|
snapshotPath := s.path + "/snapshots/" + snapshotName
|
2021-05-20 13:15:19 +02:00
|
|
|
s1, err := OpenStorage(snapshotPath, 0, 0, 0)
|
2019-05-22 23:16:55 +02:00
|
|
|
if err != nil {
|
2020-06-30 21:58:18 +02:00
|
|
|
return fmt.Errorf("cannot open storage from snapshot: %w", err)
|
2019-05-22 23:16:55 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Verify the snapshot contains rows
|
|
|
|
var m1 Metrics
|
|
|
|
s1.UpdateMetrics(&m1)
|
|
|
|
if m1.TableMetrics.SmallRowsCount < minRowsExpected {
|
|
|
|
return fmt.Errorf("snapshot %q must contain at least %d rows; got %d", snapshotPath, minRowsExpected, m1.TableMetrics.SmallRowsCount)
|
|
|
|
}
|
|
|
|
|
2020-09-17 11:01:53 +02:00
|
|
|
// Verify that force merge for the snapshot leaves only a single part per partition.
|
|
|
|
if err := s1.ForceMergePartitions(""); err != nil {
|
|
|
|
return fmt.Errorf("error when force merging partitions: %w", err)
|
|
|
|
}
|
|
|
|
ptws := s1.tb.GetPartitions(nil)
|
|
|
|
for _, ptw := range ptws {
|
|
|
|
pws := ptw.pt.GetParts(nil)
|
|
|
|
numParts := len(pws)
|
|
|
|
ptw.pt.PutParts(pws)
|
|
|
|
if numParts != 1 {
|
2021-02-17 13:59:04 +01:00
|
|
|
s1.tb.PutPartitions(ptws)
|
2020-09-17 11:01:53 +02:00
|
|
|
return fmt.Errorf("unexpected number of parts for partition %q after force merge; got %d; want 1", ptw.pt.name, numParts)
|
|
|
|
}
|
|
|
|
}
|
2021-02-17 13:59:04 +01:00
|
|
|
s1.tb.PutPartitions(ptws)
|
2020-09-17 11:01:53 +02:00
|
|
|
|
2019-05-22 23:16:55 +02:00
|
|
|
s1.MustClose()
|
|
|
|
|
|
|
|
// Delete the snapshot and make sure it is no longer visible.
|
|
|
|
if err := s.DeleteSnapshot(snapshotName); err != nil {
|
2020-06-30 21:58:18 +02:00
|
|
|
return fmt.Errorf("cannot delete snapshot %q: %w", snapshotName, err)
|
2019-05-22 23:16:55 +02:00
|
|
|
}
|
|
|
|
snapshots, err = s.ListSnapshots()
|
|
|
|
if err != nil {
|
2020-06-30 21:58:18 +02:00
|
|
|
return fmt.Errorf("cannot list snapshots: %w", err)
|
2019-05-22 23:16:55 +02:00
|
|
|
}
|
|
|
|
if containsString(snapshots, snapshotName) {
|
|
|
|
return fmt.Errorf("snapshot %q must be deleted, but is still visible in %q", snapshotName, snapshots)
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestStorageRotateIndexDB(t *testing.T) {
|
|
|
|
path := "TestStorageRotateIndexDB"
|
2021-05-20 13:15:19 +02:00
|
|
|
s, err := OpenStorage(path, 0, 0, 0)
|
2019-05-22 23:16:55 +02:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("cannot open storage: %s", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Start indexDB rotater in a separate goroutine
|
|
|
|
stopCh := make(chan struct{})
|
|
|
|
rotateDoneCh := make(chan struct{})
|
|
|
|
go func() {
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case <-stopCh:
|
|
|
|
close(rotateDoneCh)
|
|
|
|
return
|
|
|
|
default:
|
|
|
|
time.Sleep(time.Millisecond)
|
|
|
|
s.mustRotateIndexDB()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
|
|
|
// Run concurrent workers that insert / select data from the storage.
|
|
|
|
ch := make(chan error, 3)
|
|
|
|
for i := 0; i < cap(ch); i++ {
|
|
|
|
go func(workerNum int) {
|
|
|
|
ch <- testStorageAddMetrics(s, workerNum)
|
|
|
|
}(i)
|
|
|
|
}
|
|
|
|
for i := 0; i < cap(ch); i++ {
|
|
|
|
select {
|
|
|
|
case err := <-ch:
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("unexpected error: %s", err)
|
|
|
|
}
|
|
|
|
case <-time.After(10 * time.Second):
|
|
|
|
t.Fatalf("timeout")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
close(stopCh)
|
|
|
|
<-rotateDoneCh
|
|
|
|
|
|
|
|
s.MustClose()
|
|
|
|
if err := os.RemoveAll(path); err != nil {
|
|
|
|
t.Fatalf("cannot remove %q: %s", path, err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func testStorageAddMetrics(s *Storage, workerNum int) error {
|
|
|
|
const rowsCount = 1e3
|
|
|
|
|
|
|
|
var mn MetricName
|
|
|
|
mn.Tags = []Tag{
|
|
|
|
{[]byte("job"), []byte(fmt.Sprintf("webservice_%d", workerNum))},
|
|
|
|
{[]byte("instance"), []byte("1.2.3.4")},
|
|
|
|
}
|
|
|
|
for i := 0; i < rowsCount; i++ {
|
2019-05-22 23:23:23 +02:00
|
|
|
mn.AccountID = 123
|
|
|
|
mn.ProjectID = uint32(i % 3)
|
2019-05-22 23:16:55 +02:00
|
|
|
mn.MetricGroup = []byte(fmt.Sprintf("metric_%d_%d", workerNum, rand.Intn(10)))
|
|
|
|
metricNameRaw := mn.marshalRaw(nil)
|
|
|
|
timestamp := rand.Int63n(1e10)
|
|
|
|
value := rand.NormFloat64() * 1e6
|
|
|
|
|
|
|
|
mr := MetricRow{
|
|
|
|
MetricNameRaw: metricNameRaw,
|
|
|
|
Timestamp: timestamp,
|
|
|
|
Value: value,
|
|
|
|
}
|
|
|
|
if err := s.AddRows([]MetricRow{mr}, defaultPrecisionBits); err != nil {
|
2020-06-30 21:58:18 +02:00
|
|
|
return fmt.Errorf("unexpected error when adding mrs: %w", err)
|
2019-05-22 23:16:55 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Verify the storage contains rows.
|
|
|
|
minRowsExpected := uint64(rowsCount)
|
|
|
|
var m Metrics
|
|
|
|
s.UpdateMetrics(&m)
|
|
|
|
if m.TableMetrics.SmallRowsCount < minRowsExpected {
|
|
|
|
return fmt.Errorf("expecting at least %d rows in the table; got %d", minRowsExpected, m.TableMetrics.SmallRowsCount)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2022-05-02 10:00:15 +02:00
|
|
|
func TestStorageDeleteStaleSnapshots(t *testing.T) {
|
|
|
|
path := "TestStorageDeleteStaleSnapshots"
|
|
|
|
s, err := OpenStorage(path, 0, 1e5, 1e5)
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("cannot open storage: %s", err)
|
|
|
|
}
|
|
|
|
const rowsPerAdd = 1e3
|
|
|
|
const addsCount = 10
|
|
|
|
for i := 0; i < addsCount; i++ {
|
|
|
|
mrs := testGenerateMetricRows(rowsPerAdd, 0, 1e10)
|
|
|
|
if err := s.AddRows(mrs, defaultPrecisionBits); err != nil {
|
|
|
|
t.Fatalf("unexpected error when adding mrs: %s", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// Try creating a snapshot from the storage.
|
|
|
|
snapshotName, err := s.CreateSnapshot()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("cannot create snapshot from the storage: %s", err)
|
|
|
|
}
|
|
|
|
// Delete snapshots older than 1 month
|
|
|
|
if err := s.DeleteStaleSnapshots(30 * 24 * time.Hour); err != nil {
|
|
|
|
t.Fatalf("error in DeleteStaleSnapshots(1 month): %s", err)
|
|
|
|
}
|
|
|
|
snapshots, err := s.ListSnapshots()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("cannot list snapshots: %s", err)
|
|
|
|
}
|
|
|
|
if len(snapshots) != 1 {
|
|
|
|
t.Fatalf("expecting one snapshot; got %q", snapshots)
|
|
|
|
}
|
|
|
|
if snapshots[0] != snapshotName {
|
|
|
|
t.Fatalf("snapshot %q is missing in %q", snapshotName, snapshots)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Delete the snapshot which is older than 1 nanoseconds
|
|
|
|
time.Sleep(2 * time.Nanosecond)
|
|
|
|
if err := s.DeleteStaleSnapshots(time.Nanosecond); err != nil {
|
|
|
|
t.Fatalf("cannot delete snapshot %q: %s", snapshotName, err)
|
|
|
|
}
|
|
|
|
snapshots, err = s.ListSnapshots()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("cannot list snapshots: %s", err)
|
|
|
|
}
|
|
|
|
if len(snapshots) != 0 {
|
|
|
|
t.Fatalf("expecting zero snapshots; got %q", snapshots)
|
|
|
|
}
|
|
|
|
s.MustClose()
|
|
|
|
if err := os.RemoveAll(path); err != nil {
|
|
|
|
t.Fatalf("cannot remove %q: %s", path, err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-05-22 23:16:55 +02:00
|
|
|
// containsString reports whether s is present in a.
func containsString(a []string, s string) bool {
	for _, v := range a {
		if v == s {
			return true
		}
	}
	return false
}
|