package storage

import (
	"bytes"
	"fmt"
	"io"
	"math/rand"
	"os"
	"reflect"
	"regexp"
	"sort"
"sync/atomic"
2019-05-22 23:16:55 +02:00
"testing"
"time"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
2019-10-08 15:25:24 +02:00
"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
2021-06-11 11:42:26 +02:00
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
2021-02-21 21:06:45 +01:00
"github.com/VictoriaMetrics/VictoriaMetrics/lib/mergeset"
2020-11-02 18:11:48 +01:00
"github.com/VictoriaMetrics/VictoriaMetrics/lib/uint64set"
2019-08-13 20:35:19 +02:00
"github.com/VictoriaMetrics/VictoriaMetrics/lib/workingsetcache"
"github.com/VictoriaMetrics/fastcache"
2019-05-22 23:16:55 +02:00
)
2022-10-23 11:15:24 +02:00
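
// TestMarshalUnmarshalMetricIDs checks that marshalMetricIDs followed by
// unmarshalMetricIDs round-trips a slice of metricIDs without loss.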
func TestMarshalUnmarshalMetricIDs(t *testing.T) {
	f := func(metricIDs []uint64) {
		t.Helper()
		data := marshalMetricIDs(nil, metricIDs)
		result, err := unmarshalMetricIDs(nil, data)
		if err != nil {
			t.Fatalf("unexpected error: %s", err)
		}
		if !reflect.DeepEqual(result, metricIDs) {
			t.Fatalf("unexpected metricIDs after unmarshaling;\ngot\n%d\nwant\n%d", result, metricIDs)
		}
	}
	f(nil)
	f([]uint64{1})
	f([]uint64{1234, 678932943, 843289893843})
}
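
// TestMergeSortedMetricIDs verifies that mergeSortedMetricIDs returns the
// sorted union of two sorted metricID slices with duplicates removed,
// regardless of argument order. For example, the cases below expect
//
//	mergeSortedMetricIDs([]uint64{1, 2, 3}, []uint64{2, 3}) // -> [1 2 3]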
func TestMergeSortedMetricIDs(t *testing.T) {
	f := func(a, b []uint64) {
		t.Helper()
		m := make(map[uint64]bool)
		var resultExpected []uint64
		for _, v := range a {
			if !m[v] {
				m[v] = true
				resultExpected = append(resultExpected, v)
			}
		}
		for _, v := range b {
			if !m[v] {
				m[v] = true
				resultExpected = append(resultExpected, v)
			}
		}
		sort.Slice(resultExpected, func(i, j int) bool {
			return resultExpected[i] < resultExpected[j]
		})

		result := mergeSortedMetricIDs(a, b)
		if !reflect.DeepEqual(result, resultExpected) {
			t.Fatalf("unexpected result for mergeSortedMetricIDs(%d, %d); got\n%d\nwant\n%d", a, b, result, resultExpected)
		}
		result = mergeSortedMetricIDs(b, a)
		if !reflect.DeepEqual(result, resultExpected) {
			t.Fatalf("unexpected result for mergeSortedMetricIDs(%d, %d); got\n%d\nwant\n%d", b, a, result, resultExpected)
		}
	}
	f(nil, nil)
	f([]uint64{1}, nil)
	f(nil, []uint64{23})
	f([]uint64{1234}, []uint64{0})
	f([]uint64{1}, []uint64{1})
	f([]uint64{1}, []uint64{1, 2, 3})
	f([]uint64{1, 2, 3}, []uint64{1, 2, 3})
	f([]uint64{1, 2, 3}, []uint64{2, 3})
	f([]uint64{0, 1, 7, 8, 9, 13, 20}, []uint64{1, 2, 7, 13, 15})
	f([]uint64{0, 1, 2, 3, 4}, []uint64{5, 6, 7, 8})
	f([]uint64{0, 1, 2, 3, 4}, []uint64{4, 5, 6, 7, 8})
	f([]uint64{0, 1, 2, 3, 4}, []uint64{3, 4, 5, 6, 7, 8})
	f([]uint64{2, 3, 4}, []uint64{1, 5, 6, 7})
	f([]uint64{2, 3, 4}, []uint64{1, 2, 5, 6, 7})
	f([]uint64{2, 3, 4}, []uint64{1, 2, 4, 5, 6, 7})
	f([]uint64{2, 3, 4}, []uint64{1, 2, 3, 4, 5, 6, 7})
	f([]uint64{2, 3, 4, 6}, []uint64{1, 2, 3, 4, 5, 6, 7})
	f([]uint64{2, 3, 4, 6, 7}, []uint64{1, 2, 3, 4, 5, 6, 7})
	f([]uint64{2, 3, 4, 6, 7, 8}, []uint64{1, 2, 3, 4, 5, 6, 7})
	f([]uint64{2, 3, 4, 6, 7, 8, 9}, []uint64{1, 2, 3, 4, 5, 6, 7})
	f([]uint64{1, 2, 3, 4, 6, 7, 8, 9}, []uint64{1, 2, 3, 4, 5, 6, 7})
	f([]uint64{1, 2, 3, 4, 6, 7, 8, 9}, []uint64{2, 3, 4, 5, 6, 7})
	f([]uint64{}, []uint64{1, 2, 3})
	f([]uint64{0}, []uint64{1, 2, 3})
	f([]uint64{1}, []uint64{1, 2, 3})
	f([]uint64{1, 2}, []uint64{3, 4})
}
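
// TestReverseBytes checks that reverseBytes returns the byte-wise reversal
// of the input, e.g. reverseBytes(nil, []byte("foo.bar")) -> "rab.oof".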
func TestReverseBytes(t *testing.T) {
	f := func(s, resultExpected string) {
		t.Helper()
		result := reverseBytes(nil, []byte(s))
		if string(result) != resultExpected {
			t.Fatalf("unexpected result for reverseBytes(%q); got %q; want %q", s, result, resultExpected)
		}
	}
	f("", "")
	f("a", "a")
	f("av", "va")
	f("foo.bar", "rab.oof")
}
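
// TestMergeTagToMetricIDsRows exercises mergeTagToMetricIDsRows: adjacent rows
// sharing the same (nsPrefix, accountID, projectID, tag) prefix are expected to
// be merged into rows with sorted, deduplicated metricIDs, while unrelated items
// such as "\x00asdf" or "xyz" must pass through unchanged. The x and y helpers
// below build rows for the nsPrefixTagToMetricIDs and nsPrefixDateTagToMetricIDs
// namespaces respectively.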
func TestMergeTagToMetricIDsRows(t *testing.T) {
	f := func(items []string, expectedItems []string) {
		t.Helper()
		var data []byte
		var itemsB []mergeset.Item
		for _, item := range items {
			data = append(data, item...)
			itemsB = append(itemsB, mergeset.Item{
				Start: uint32(len(data) - len(item)),
				End:   uint32(len(data)),
			})
		}
		if !checkItemsSorted(data, itemsB) {
			t.Fatalf("source items aren't sorted; items:\n%q", itemsB)
		}

		resultData, resultItemsB := mergeTagToMetricIDsRows(data, itemsB)
		if len(resultItemsB) != len(expectedItems) {
			t.Fatalf("unexpected len(resultItemsB); got %d; want %d", len(resultItemsB), len(expectedItems))
		}
		if !checkItemsSorted(resultData, resultItemsB) {
			t.Fatalf("result items aren't sorted; items:\n%q", resultItemsB)
		}
		buf := resultData
		for i, it := range resultItemsB {
			item := it.Bytes(resultData)
			if !bytes.HasPrefix(buf, item) {
				t.Fatalf("unexpected prefix for resultData #%d;\ngot\n%X\nwant\n%X", i, buf, item)
			}
			buf = buf[len(item):]
		}
		if len(buf) != 0 {
			t.Fatalf("unexpected tail left in resultData: %X", buf)
		}
		var resultItems []string
		for _, it := range resultItemsB {
			resultItems = append(resultItems, string(it.Bytes(resultData)))
		}
		if !reflect.DeepEqual(expectedItems, resultItems) {
			t.Fatalf("unexpected items;\ngot\n%X\nwant\n%X", resultItems, expectedItems)
		}
	}
	xy := func(nsPrefix byte, accountID, projectID uint32, key, value string, metricIDs []uint64) string {
		dst := marshalCommonPrefix(nil, nsPrefix, accountID, projectID)
		if nsPrefix == nsPrefixDateTagToMetricIDs {
			dst = encoding.MarshalUint64(dst, 1234567901233)
		}
		t := &Tag{
			Key:   []byte(key),
			Value: []byte(value),
		}
		dst = t.Marshal(dst)
		for _, metricID := range metricIDs {
			dst = encoding.MarshalUint64(dst, metricID)
		}
		return string(dst)
	}
	x := func(accountID, projectID uint32, key, value string, metricIDs []uint64) string {
		return xy(nsPrefixTagToMetricIDs, accountID, projectID, key, value, metricIDs)
	}
	y := func(accountID, projectID uint32, key, value string, metricIDs []uint64) string {
		return xy(nsPrefixDateTagToMetricIDs, accountID, projectID, key, value, metricIDs)
	}

	f(nil, nil)
	f([]string{}, nil)
	f([]string{"foo"}, []string{"foo"})
	f([]string{"a", "b", "c", "def"}, []string{"a", "b", "c", "def"})
	f([]string{"\x00", "\x00b", "\x00c", "\x00def"}, []string{"\x00", "\x00b", "\x00c", "\x00def"})
	f([]string{
		x(0, 0, "", "", []uint64{0}),
		x(0, 0, "", "", []uint64{0}),
		x(0, 0, "", "", []uint64{0}),
		x(0, 0, "", "", []uint64{0}),
	}, []string{
		x(0, 0, "", "", []uint64{0}),
		x(0, 0, "", "", []uint64{0}),
		x(0, 0, "", "", []uint64{0}),
	})
	f([]string{
		x(0, 0, "", "", []uint64{0}),
		x(0, 0, "", "", []uint64{0}),
		x(0, 0, "", "", []uint64{0}),
		y(0, 0, "", "", []uint64{0}),
		y(0, 0, "", "", []uint64{0}),
		y(0, 0, "", "", []uint64{0}),
	}, []string{
		x(0, 0, "", "", []uint64{0}),
		x(0, 0, "", "", []uint64{0}),
		y(0, 0, "", "", []uint64{0}),
		y(0, 0, "", "", []uint64{0}),
	})
	f([]string{
		x(1, 2, "", "", []uint64{0}),
		x(1, 2, "", "", []uint64{0}),
		x(1, 2, "", "", []uint64{0}),
		x(1, 2, "", "", []uint64{0}),
		"xyz",
	}, []string{
		x(1, 2, "", "", []uint64{0}),
		x(1, 2, "", "", []uint64{0}),
		"xyz",
	})
	f([]string{
		"\x00asdf",
		x(1, 2, "", "", []uint64{0}),
		x(1, 2, "", "", []uint64{0}),
		x(1, 2, "", "", []uint64{0}),
		x(1, 2, "", "", []uint64{0}),
	}, []string{
		"\x00asdf",
		x(1, 2, "", "", []uint64{0}),
		x(1, 2, "", "", []uint64{0}),
	})
	f([]string{
		"\x00asdf",
		y(1, 2, "", "", []uint64{0}),
		y(1, 2, "", "", []uint64{0}),
		y(1, 2, "", "", []uint64{0}),
		y(1, 2, "", "", []uint64{0}),
	}, []string{
		"\x00asdf",
		y(1, 2, "", "", []uint64{0}),
		y(1, 2, "", "", []uint64{0}),
	})
	f([]string{
		"\x00asdf",
		x(3, 1, "", "", []uint64{0}),
		x(3, 1, "", "", []uint64{0}),
		x(3, 1, "", "", []uint64{0}),
		x(3, 1, "", "", []uint64{0}),
		"xyz",
	}, []string{
		"\x00asdf",
		x(3, 1, "", "", []uint64{0}),
		"xyz",
	})
	f([]string{
		"\x00asdf",
		x(3, 1, "", "", []uint64{0}),
		x(3, 1, "", "", []uint64{0}),
		y(3, 1, "", "", []uint64{0}),
		y(3, 1, "", "", []uint64{0}),
		"xyz",
	}, []string{
		"\x00asdf",
		x(3, 1, "", "", []uint64{0}),
		y(3, 1, "", "", []uint64{0}),
		"xyz",
	})
	f([]string{
		"\x00asdf",
		x(4, 2, "", "", []uint64{1}),
		x(4, 2, "", "", []uint64{2}),
		x(4, 2, "", "", []uint64{3}),
		x(4, 2, "", "", []uint64{4}),
		"xyz",
	}, []string{
		"\x00asdf",
		x(4, 2, "", "", []uint64{1, 2, 3, 4}),
		"xyz",
	})
	f([]string{
		"\x00asdf",
		x(1, 1, "", "", []uint64{1}),
		x(1, 1, "", "", []uint64{2}),
		x(1, 1, "", "", []uint64{3}),
		x(1, 1, "", "", []uint64{4}),
	}, []string{
		"\x00asdf",
		x(1, 1, "", "", []uint64{1, 2, 3}),
		x(1, 1, "", "", []uint64{4}),
	})
	f([]string{
		"\x00asdf",
		x(2, 2, "", "", []uint64{1}),
		x(2, 2, "", "", []uint64{2, 3, 4}),
		x(2, 2, "", "", []uint64{2, 3, 4, 5}),
		x(2, 2, "", "", []uint64{3, 5}),
		"foo",
	}, []string{
		"\x00asdf",
		x(2, 2, "", "", []uint64{1, 2, 3, 4, 5}),
		"foo",
	})
	f([]string{
		"\x00asdf",
		x(3, 3, "", "", []uint64{1}),
		x(3, 3, "", "a", []uint64{2, 3, 4}),
		x(3, 3, "", "a", []uint64{2, 3, 4, 5}),
		x(3, 3, "", "b", []uint64{3, 5}),
		"foo",
	}, []string{
		"\x00asdf",
		x(3, 3, "", "", []uint64{1}),
		x(3, 3, "", "a", []uint64{2, 3, 4, 5}),
		x(3, 3, "", "b", []uint64{3, 5}),
		"foo",
	})
	f([]string{
		"\x00asdf",
		x(2, 4, "", "", []uint64{1}),
		x(2, 4, "x", "a", []uint64{2, 3, 4}),
		x(2, 4, "y", "", []uint64{2, 3, 4, 5}),
		x(2, 4, "y", "x", []uint64{3, 5}),
		"foo",
	}, []string{
		"\x00asdf",
		x(2, 4, "", "", []uint64{1}),
		x(2, 4, "x", "a", []uint64{2, 3, 4}),
		x(2, 4, "y", "", []uint64{2, 3, 4, 5}),
		x(2, 4, "y", "x", []uint64{3, 5}),
		"foo",
	})
	f([]string{
		"\x00asdf",
		x(2, 4, "x", "a", []uint64{1}),
		x(2, 5, "x", "a", []uint64{2, 3, 4}),
		x(3, 4, "x", "a", []uint64{2, 3, 4, 5}),
		x(3, 4, "x", "b", []uint64{3, 5}),
		x(3, 4, "x", "b", []uint64{5, 6}),
		"foo",
	}, []string{
		"\x00asdf",
		x(2, 4, "x", "a", []uint64{1}),
		x(2, 5, "x", "a", []uint64{2, 3, 4}),
		x(3, 4, "x", "a", []uint64{2, 3, 4, 5}),
		x(3, 4, "x", "b", []uint64{3, 5, 6}),
		"foo",
	})
	f([]string{
		"\x00asdf",
		x(2, 2, "sdf", "aa", []uint64{1, 1, 3}),
		x(2, 2, "sdf", "aa", []uint64{1, 2}),
		"foo",
	}, []string{
		"\x00asdf",
		x(2, 2, "sdf", "aa", []uint64{1, 2, 3}),
		"foo",
	})
	f([]string{
		"\x00asdf",
		x(3, 2, "sdf", "aa", []uint64{1, 2, 2, 4}),
		x(3, 2, "sdf", "aa", []uint64{1, 2, 3}),
		"foo",
	}, []string{
		"\x00asdf",
		x(3, 2, "sdf", "aa", []uint64{1, 2, 3, 4}),
		"foo",
	})

	// Construct big source chunks.
	var metricIDs []uint64

	metricIDs = metricIDs[:0]
	for i := 0; i < maxMetricIDsPerRow-1; i++ {
		metricIDs = append(metricIDs, uint64(i))
	}
	f([]string{
		"\x00aa",
		x(3, 2, "foo", "bar", metricIDs),
		x(3, 2, "foo", "bar", metricIDs),
		y(2, 3, "foo", "bar", metricIDs),
		y(2, 3, "foo", "bar", metricIDs),
		"x",
	}, []string{
		"\x00aa",
		x(3, 2, "foo", "bar", metricIDs),
		y(2, 3, "foo", "bar", metricIDs),
		"x",
	})

	metricIDs = metricIDs[:0]
	for i := 0; i < maxMetricIDsPerRow; i++ {
		metricIDs = append(metricIDs, uint64(i))
	}
	f([]string{
		"\x00aa",
		x(3, 2, "foo", "bar", metricIDs),
		x(3, 2, "foo", "bar", metricIDs),
		"x",
	}, []string{
		"\x00aa",
		x(3, 2, "foo", "bar", metricIDs),
		x(3, 2, "foo", "bar", metricIDs),
		"x",
	})

	metricIDs = metricIDs[:0]
	for i := 0; i < 3*maxMetricIDsPerRow; i++ {
		metricIDs = append(metricIDs, uint64(i))
	}
	f([]string{
		"\x00aa",
		x(3, 2, "foo", "bar", metricIDs),
		x(3, 2, "foo", "bar", metricIDs),
		"x",
	}, []string{
		"\x00aa",
		x(3, 2, "foo", "bar", metricIDs),
		x(3, 2, "foo", "bar", metricIDs),
		"x",
	})
	f([]string{
		"\x00aa",
		x(3, 2, "foo", "bar", []uint64{0, 0, 1, 2, 3}),
		x(3, 2, "foo", "bar", metricIDs),
		x(3, 2, "foo", "bar", metricIDs),
		"x",
	}, []string{
		"\x00aa",
		x(3, 2, "foo", "bar", []uint64{0, 1, 2, 3}),
		x(3, 2, "foo", "bar", metricIDs),
		x(3, 2, "foo", "bar", metricIDs),
		"x",
	})

	// Check that duplicate metricIDs are removed.
	metricIDs = metricIDs[:0]
	for i := 0; i < maxMetricIDsPerRow-1; i++ {
		metricIDs = append(metricIDs, 123)
	}
	f([]string{
		"\x00aa",
		x(1, 2, "foo", "bar", metricIDs),
		x(1, 2, "foo", "bar", metricIDs),
		y(1, 1, "foo", "bar", metricIDs),
		"x",
	}, []string{
		"\x00aa",
		x(1, 2, "foo", "bar", []uint64{123}),
		y(1, 1, "foo", "bar", []uint64{123}),
		"x",
	})

	// Check the fallback to the original items when merging would result in incorrect ordering.
	metricIDs = metricIDs[:0]
	for i := 0; i < maxMetricIDsPerRow-3; i++ {
		metricIDs = append(metricIDs, uint64(123))
	}
	f([]string{
		"\x00aa",
		x(1, 2, "foo", "bar", metricIDs),
		x(1, 2, "foo", "bar", []uint64{123, 123, 125}),
		x(1, 2, "foo", "bar", []uint64{123, 124}),
		"x",
	}, []string{
		"\x00aa",
		x(1, 2, "foo", "bar", metricIDs),
		x(1, 2, "foo", "bar", []uint64{123, 123, 125}),
		x(1, 2, "foo", "bar", []uint64{123, 124}),
		"x",
	})
	f([]string{
		"\x00aa",
		x(1, 2, "foo", "bar", metricIDs),
		x(1, 2, "foo", "bar", []uint64{123, 123, 125}),
		x(1, 2, "foo", "bar", []uint64{123, 124}),
		y(1, 2, "foo", "bar", []uint64{123, 124}),
	}, []string{
		"\x00aa",
		x(1, 2, "foo", "bar", metricIDs),
		x(1, 2, "foo", "bar", []uint64{123, 123, 125}),
		x(1, 2, "foo", "bar", []uint64{123, 124}),
		y(1, 2, "foo", "bar", []uint64{123, 124}),
	})
	f([]string{
		x(1, 2, "foo", "bar", metricIDs),
		x(1, 2, "foo", "bar", []uint64{123, 123, 125}),
		x(1, 2, "foo", "bar", []uint64{123, 124}),
	}, []string{
		x(1, 2, "foo", "bar", metricIDs),
		x(1, 2, "foo", "bar", []uint64{123, 123, 125}),
		x(1, 2, "foo", "bar", []uint64{123, 124}),
	})
}
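
// TestRemoveDuplicateMetricIDs verifies that removeDuplicateMetricIDs drops
// repeated values from an already sorted metricIDs slice, e.g.
// removeDuplicateMetricIDs([]uint64{0, 1, 1, 2}) -> [0 1 2].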
func TestRemoveDuplicateMetricIDs(t *testing.T) {
	f := func(metricIDs, expectedMetricIDs []uint64) {
		t.Helper()
		a := removeDuplicateMetricIDs(metricIDs)
		if !reflect.DeepEqual(a, expectedMetricIDs) {
			t.Fatalf("unexpected result from removeDuplicateMetricIDs:\ngot\n%d\nwant\n%d", a, expectedMetricIDs)
		}
	}
	f(nil, nil)
	f([]uint64{123}, []uint64{123})
	f([]uint64{123, 123}, []uint64{123})
	f([]uint64{123, 123, 123}, []uint64{123})
	f([]uint64{123, 1234, 1235}, []uint64{123, 1234, 1235})
	f([]uint64{0, 1, 1, 2}, []uint64{0, 1, 2})
	f([]uint64{0, 0, 0, 1, 1, 2}, []uint64{0, 1, 2})
	f([]uint64{0, 1, 1, 2, 2}, []uint64{0, 1, 2})
	f([]uint64{0, 1, 2, 2}, []uint64{0, 1, 2})
}
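
// TestIndexDBOpenClose makes sure the same indexDB table can be opened and
// closed repeatedly and that its files can be removed afterwards.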
func TestIndexDBOpenClose(t *testing.T) {
	s := newTestStorage()
	defer stopTestStorage(s)
	tableName := nextIndexDBTableName()
	for i := 0; i < 5; i++ {
		var isReadOnly uint32
		db := mustOpenIndexDB(tableName, s, 0, &isReadOnly)
		db.MustClose()
	}
	if err := os.RemoveAll(tableName); err != nil {
		t.Fatalf("cannot remove indexDB: %s", err)
	}
}
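
// TestIndexDB exercises the main indexDB workflow for several tenants:
// TSIDs are created for generated metric names and then looked up again by
// name, by metricID and via tag filters, both serially and from concurrent
// goroutines.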
func TestIndexDB(t *testing.T) {
	const accountsCount = 3
	const projectsCount = 2
	const metricGroups = 10

	t.Run("serial", func(t *testing.T) {
		s := newTestStorage()
		defer stopTestStorage(s)
		dbName := nextIndexDBTableName()
		var isReadOnly uint32
		db := mustOpenIndexDB(dbName, s, 0, &isReadOnly)
		defer func() {
			db.MustClose()
			if err := os.RemoveAll(dbName); err != nil {
				t.Fatalf("cannot remove indexDB: %s", err)
			}
		}()

		mns, tsids, tenants, err := testIndexDBGetOrCreateTSIDByName(db, accountsCount, projectsCount, metricGroups)
		if err != nil {
			t.Fatalf("unexpected error: %s", err)
		}
		if err := testIndexDBCheckTSIDByName(db, mns, tsids, tenants, false); err != nil {
			t.Fatalf("unexpected error: %s", err)
		}

		// Re-open the db and verify it works as expected.
		db.MustClose()
		db = mustOpenIndexDB(dbName, s, 0, &isReadOnly)
		if err := testIndexDBCheckTSIDByName(db, mns, tsids, tenants, false); err != nil {
			t.Fatalf("unexpected error: %s", err)
		}
	})

	t.Run("concurrent", func(t *testing.T) {
		s := newTestStorage()
		defer stopTestStorage(s)
		dbName := nextIndexDBTableName()
		var isReadOnly uint32
		db := mustOpenIndexDB(dbName, s, 0, &isReadOnly)
		defer func() {
			db.MustClose()
			if err := os.RemoveAll(dbName); err != nil {
				t.Fatalf("cannot remove indexDB: %s", err)
			}
		}()

		ch := make(chan error, 3)
		for i := 0; i < cap(ch); i++ {
			go func() {
				mns, tsid, tenants, err := testIndexDBGetOrCreateTSIDByName(db, accountsCount, projectsCount, metricGroups)
				if err != nil {
					ch <- err
					return
				}
				if err := testIndexDBCheckTSIDByName(db, mns, tsid, tenants, true); err != nil {
					ch <- err
					return
				}
				ch <- nil
			}()
		}
		var errors []error
		for i := 0; i < cap(ch); i++ {
			select {
			case err := <-ch:
				if err != nil {
					errors = append(errors, fmt.Errorf("unexpected error: %w", err))
				}
			case <-time.After(30 * time.Second):
				t.Fatalf("timeout")
			}
		}
		if len(errors) > 0 {
			t.Fatal(errors[0])
		}
	})
}
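
// testIndexDBGetOrCreateTSIDByName registers 401 generated metric names spread
// across the given numbers of accounts, projects and metric groups. It returns
// the metric names, their TSIDs and the sorted list of "accountID:projectID"
// tenants that were used.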
func testIndexDBGetOrCreateTSIDByName(db *indexDB, accountsCount, projectsCount, metricGroups int) ([]MetricName, []TSID, []string, error) {
	r := rand.New(rand.NewSource(1))

	// Create tsids.
	var mns []MetricName
	var tsids []TSID
	tenants := make(map[string]struct{})

	is := db.getIndexSearch(0, 0, noDeadline)
	defer db.putIndexSearch(is)

	var metricNameBuf []byte
	var metricNameRawBuf []byte
	for i := 0; i < 4e2+1; i++ {
		var mn MetricName
		mn.AccountID = uint32((i + 2) % accountsCount)
		mn.ProjectID = uint32((i + 1) % projectsCount)
		tenant := fmt.Sprintf("%d:%d", mn.AccountID, mn.ProjectID)
		tenants[tenant] = struct{}{}

		// Init MetricGroup.
		mn.MetricGroup = []byte(fmt.Sprintf("metricGroup.%d\x00\x01\x02", i%metricGroups))

		// Init other tags.
		tagsCount := r.Intn(10) + 1
		for j := 0; j < tagsCount; j++ {
			key := fmt.Sprintf("key\x01\x02\x00_%d_%d", i, j)
			value := fmt.Sprintf("val\x01_%d\x00_%d\x02", i, j)
			mn.AddTag(key, value)
		}
		mn.sortTags()

		metricNameBuf = mn.Marshal(metricNameBuf[:0])
		metricNameRawBuf = mn.marshalRaw(metricNameRawBuf[:0])

		// Create tsid for the metricName.
		var tsid TSID
		if err := is.GetOrCreateTSIDByName(&tsid, metricNameBuf, metricNameRawBuf, 0); err != nil {
			return nil, nil, nil, fmt.Errorf("unexpected error when creating tsid for mn:\n%s: %w", &mn, err)
		}
		if tsid.AccountID != mn.AccountID {
			return nil, nil, nil, fmt.Errorf("unexpected TSID.AccountID; got %d; want %d; mn:\n%s\ntsid:\n%+v", tsid.AccountID, mn.AccountID, &mn, &tsid)
		}
		if tsid.ProjectID != mn.ProjectID {
			return nil, nil, nil, fmt.Errorf("unexpected TSID.ProjectID; got %d; want %d; mn:\n%s\ntsid:\n%+v", tsid.ProjectID, mn.ProjectID, &mn, &tsid)
		}

		mns = append(mns, mn)
		tsids = append(tsids, tsid)
	}

	// fill Date -> MetricID cache
	date := uint64(timestampFromTime(time.Now())) / msecPerDay
	for i := range tsids {
		tsid := &tsids[i]
		is.createPerDayIndexes(date, tsid.MetricID, &mns[i])
	}

	// Flush index to disk, so it becomes visible for search
	db.tb.DebugFlush()

	var tenantsList []string
	for tenant := range tenants {
		tenantsList = append(tenantsList, tenant)
	}
	sort.Strings(tenantsList)
	return mns, tsids, tenantsList, nil
}
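
// testIndexDBCheckTSIDByName verifies the data created by
// testIndexDBGetOrCreateTSIDByName: every metric name must resolve back to its
// TSID and metric name, label names, label values and tenants must be
// searchable, and exact, negative and regexp tag filters must select the
// expected TSIDs.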
2022-11-25 19:32:45 +01:00
func testIndexDBCheckTSIDByName ( db * indexDB , mns [ ] MetricName , tsids [ ] TSID , tenants [ ] string , isConcurrent bool ) error {
2022-06-12 03:32:13 +02:00
hasValue := func ( lvs [ ] string , v [ ] byte ) bool {
for _ , lv := range lvs {
if string ( v ) == lv {
2019-05-22 23:16:55 +02:00
return true
}
}
return false
}
2022-06-12 03:32:13 +02:00
allLabelNames := make ( map [ accountProjectKey ] map [ string ] bool )
2019-05-22 23:23:23 +02:00
timeseriesCounters := make ( map [ accountProjectKey ] map [ uint64 ] bool )
2023-05-18 20:28:51 +02:00
var tsidCopy TSID
2019-05-22 23:16:55 +02:00
var metricNameCopy [ ] byte
for i := range mns {
mn := & mns [ i ]
tsid := & tsids [ i ]
2019-05-22 23:23:23 +02:00
apKey := accountProjectKey {
AccountID : tsid . AccountID ,
ProjectID : tsid . ProjectID ,
}
tc := timeseriesCounters [ apKey ]
if tc == nil {
tc = make ( map [ uint64 ] bool )
timeseriesCounters [ apKey ] = tc
}
2019-05-22 23:16:55 +02:00
tc [ tsid . MetricID ] = true
mn . sortTags ( )
metricName := mn . Marshal ( nil )
2023-05-18 20:28:51 +02:00
if err := db . getTSIDByNameNoCreate ( & tsidCopy , metricName ) ; err != nil {
return fmt . Errorf ( "cannot obtain tsid #%d for mn %s: %w" , i , mn , err )
2019-05-22 23:16:55 +02:00
}
if isConcurrent {
// Copy tsid.MetricID, since multiple TSIDs may match
// the same mn in concurrent mode.
2023-05-18 20:28:51 +02:00
tsidCopy . MetricID = tsid . MetricID
2019-05-22 23:16:55 +02:00
}
2023-05-18 20:28:51 +02:00
if ! reflect . DeepEqual ( tsid , & tsidCopy ) {
return fmt . Errorf ( "unexpected tsid for mn:\n%s\ngot\n%+v\nwant\n%+v" , mn , & tsidCopy , tsid )
2019-05-22 23:16:55 +02:00
}
// Search for metric name for the given metricID.
var err error
2023-05-18 20:28:51 +02:00
metricNameCopy , err = db . searchMetricNameWithCache ( metricNameCopy [ : 0 ] , tsidCopy . MetricID , tsidCopy . AccountID , tsidCopy . ProjectID )
2019-05-22 23:16:55 +02:00
if err != nil {
2023-05-18 20:28:51 +02:00
return fmt . Errorf ( "error in searchMetricNameWithCache for metricID=%d; i=%d: %w" , tsidCopy . MetricID , i , err )
2019-05-22 23:16:55 +02:00
}
if ! bytes . Equal ( metricName , metricNameCopy ) {
2023-05-18 20:28:51 +02:00
return fmt . Errorf ( "unexpected mn for metricID=%d;\ngot\n%q\nwant\n%q" , tsidCopy . MetricID , metricNameCopy , metricName )
2019-05-22 23:16:55 +02:00
}
// Try searching metric name for non-existent MetricID.
2021-03-22 21:41:47 +01:00
buf , err := db . searchMetricNameWithCache ( nil , 1 , mn . AccountID , mn . ProjectID )
2019-05-22 23:16:55 +02:00
if err != io . EOF {
return fmt . Errorf ( "expecting io.EOF error when searching for non-existing metricID; got %v" , err )
}
if len ( buf ) > 0 {
return fmt . Errorf ( "expecting empty buf when searching for non-existent metricID; got %X" , buf )
}
2022-06-12 03:32:13 +02:00
// Test SearchLabelValuesWithFiltersOnTimeRange
lvs , err := db . SearchLabelValuesWithFiltersOnTimeRange ( nil , mn . AccountID , mn . ProjectID , "__name__" , nil , TimeRange { } , 1e5 , 1e9 , noDeadline )
2019-05-22 23:16:55 +02:00
if err != nil {
2022-06-12 03:32:13 +02:00
return fmt . Errorf ( "error in SearchLabelValuesWithFiltersOnTimeRange(labelName=%q): %w" , "__name__" , err )
2019-05-22 23:16:55 +02:00
}
2022-06-12 03:32:13 +02:00
if ! hasValue ( lvs , mn . MetricGroup ) {
return fmt . Errorf ( "SearchLabelValuesWithFiltersOnTimeRange(labelName=%q): couldn't find %q; found %q" , "__name__" , mn . MetricGroup , lvs )
2019-05-22 23:16:55 +02:00
}
2022-06-12 03:32:13 +02:00
labelNames := allLabelNames [ apKey ]
if labelNames == nil {
labelNames = make ( map [ string ] bool )
allLabelNames [ apKey ] = labelNames
2019-05-22 23:23:23 +02:00
}
2019-05-22 23:16:55 +02:00
for i := range mn . Tags {
tag := & mn . Tags [ i ]
2022-06-12 03:32:13 +02:00
lvs , err := db . SearchLabelValuesWithFiltersOnTimeRange ( nil , mn . AccountID , mn . ProjectID , string ( tag . Key ) , nil , TimeRange { } , 1e5 , 1e9 , noDeadline )
2019-05-22 23:16:55 +02:00
if err != nil {
2022-06-12 03:32:13 +02:00
return fmt . Errorf ( "error in SearchLabelValuesWithFiltersOnTimeRange(labelName=%q): %w" , tag . Key , err )
2019-05-22 23:16:55 +02:00
}
2022-06-12 03:32:13 +02:00
if ! hasValue ( lvs , tag . Value ) {
return fmt . Errorf ( "SearchLabelValuesWithFiltersOnTimeRange(labelName=%q): couldn't find %q; found %q" , tag . Key , tag . Value , lvs )
2019-05-22 23:16:55 +02:00
}
2022-06-12 03:32:13 +02:00
labelNames [ string ( tag . Key ) ] = true
2019-05-22 23:16:55 +02:00
}
}
2022-06-12 03:32:13 +02:00
// Test SearchLabelNamesWithFiltersOnTimeRange (empty filters, global time range)
for k , labelNames := range allLabelNames {
lns , err := db . SearchLabelNamesWithFiltersOnTimeRange ( nil , k . AccountID , k . ProjectID , nil , TimeRange { } , 1e5 , 1e9 , noDeadline )
2019-05-22 23:23:23 +02:00
if err != nil {
2022-11-25 19:32:45 +01:00
return fmt . Errorf ( "error in SearchLabelNamesWithFiltersOnTimeRange: %w" , err )
2019-05-22 23:23:23 +02:00
}
2022-06-12 03:32:13 +02:00
if ! hasValue ( lns , [ ] byte ( "__name__" ) ) {
return fmt . Errorf ( "cannot find __name__ in %q" , lns )
2019-05-22 23:23:23 +02:00
}
2022-06-12 03:32:13 +02:00
for labelName := range labelNames {
if ! hasValue ( lns , [ ] byte ( labelName ) ) {
return fmt . Errorf ( "cannot find %q in %q" , labelName , lns )
2019-05-22 23:23:23 +02:00
}
2019-05-22 23:16:55 +02:00
}
}
2022-11-25 19:32:45 +01:00
// Test SearchTenants on global time range
tenantsGot , err := db . SearchTenants ( nil , TimeRange { } , noDeadline )
if err != nil {
return fmt . Errorf ( "error in SearchTenants: %w" , err )
}
sort . Strings ( tenantsGot )
if ! reflect . DeepEqual ( tenants , tenantsGot ) {
return fmt . Errorf ( "unexpected tenants got when searching in global time range;\ngot\n%s\nwant\n%s" , tenantsGot , tenants )
}
// Test SearchTenants on specific time range
2023-05-18 20:28:51 +02:00
currentTime := timestampFromTime ( time . Now ( ) )
2022-11-25 19:32:45 +01:00
tr := TimeRange {
MinTimestamp : currentTime - msecPerDay ,
MaxTimestamp : currentTime + msecPerDay ,
}
tenantsGot , err = db . SearchTenants ( nil , tr , noDeadline )
if err != nil {
return fmt . Errorf ( "error in SearchTenants: %w" , err )
}
sort . Strings ( tenantsGot )
if ! reflect . DeepEqual ( tenants , tenantsGot ) {
return fmt . Errorf ( "unexpected tenants got when searching in global time range;\ngot\n%s\nwant\n%s" , tenantsGot , tenants )
}
2019-06-10 11:36:42 +02:00
// Check timerseriesCounters only for serial test.
// Concurrent test may create duplicate timeseries, so GetSeriesCount
// would return more timeseries than needed.
if ! isConcurrent {
for k , tc := range timeseriesCounters {
2020-07-23 19:42:57 +02:00
n , err := db . GetSeriesCount ( k . AccountID , k . ProjectID , noDeadline )
2019-06-10 11:36:42 +02:00
if err != nil {
2020-06-30 21:58:18 +02:00
return fmt . Errorf ( "unexpected error in GetSeriesCount(%v): %w" , k , err )
2019-06-10 11:36:42 +02:00
}
if n != uint64 ( len ( tc ) ) {
return fmt . Errorf ( "unexpected GetSeriesCount(%v); got %d; want %d" , k , n , uint64 ( len ( tc ) ) )
}
}
}
2019-05-22 23:16:55 +02:00
// Try tag filters.
2022-11-25 19:32:45 +01:00
tr = TimeRange {
2020-10-01 18:03:34 +02:00
MinTimestamp : currentTime - msecPerDay ,
MaxTimestamp : currentTime + msecPerDay ,
}
2019-05-22 23:16:55 +02:00
for i := range mns {
mn := & mns [ i ]
tsid := & tsids [ i ]
// Search without regexps.
2019-05-22 23:23:23 +02:00
tfs := NewTagFilters ( mn . AccountID , mn . ProjectID )
2019-05-22 23:16:55 +02:00
if err := tfs . Add ( nil , mn . MetricGroup , false , false ) ; err != nil {
2020-06-30 21:58:18 +02:00
return fmt . Errorf ( "cannot create tag filter for MetricGroup: %w" , err )
2019-05-22 23:16:55 +02:00
}
for j := 0 ; j < len ( mn . Tags ) ; j ++ {
t := & mn . Tags [ j ]
if err := tfs . Add ( t . Key , t . Value , false , false ) ; err != nil {
2020-06-30 21:58:18 +02:00
return fmt . Errorf ( "cannot create tag filter for tag: %w" , err )
2019-05-22 23:16:55 +02:00
}
}
if err := tfs . Add ( nil , [ ] byte ( "foobar" ) , true , false ) ; err != nil {
2020-06-30 21:58:18 +02:00
return fmt . Errorf ( "cannot add negative filter: %w" , err )
2019-05-22 23:16:55 +02:00
}
if err := tfs . Add ( nil , nil , true , false ) ; err != nil {
2020-06-30 21:58:18 +02:00
return fmt . Errorf ( "cannot add no-op negative filter: %w" , err )
2019-05-22 23:16:55 +02:00
}
2022-10-23 11:15:24 +02:00
tsidsFound , err := searchTSIDsInTest ( db , [ ] * TagFilters { tfs } , tr )
2019-05-22 23:16:55 +02:00
if err != nil {
2020-06-30 21:58:18 +02:00
return fmt . Errorf ( "cannot search by exact tag filter: %w" , err )
2019-05-22 23:16:55 +02:00
}
if ! testHasTSID ( tsidsFound , tsid ) {
2019-09-23 19:40:38 +02:00
return fmt . Errorf ( "tsids is missing in exact tsidsFound\ntsid=%+v\ntsidsFound=%+v\ntfs=%s\nmn=%s\ni=%d" , tsid , tsidsFound , tfs , mn , i )
2019-05-22 23:16:55 +02:00
}
// Verify tag cache.
2022-10-23 11:15:24 +02:00
tsidsCached , err := searchTSIDsInTest ( db , [ ] * TagFilters { tfs } , tr )
2019-05-22 23:16:55 +02:00
if err != nil {
2020-06-30 21:58:18 +02:00
return fmt . Errorf ( "cannot search by exact tag filter: %w" , err )
2019-05-22 23:16:55 +02:00
}
if ! reflect . DeepEqual ( tsidsCached , tsidsFound ) {
return fmt . Errorf ( "unexpected tsids returned; got\n%+v; want\n%+v" , tsidsCached , tsidsFound )
}
// Add negative filter for zeroing search results.
if err := tfs . Add ( nil , mn . MetricGroup , true , false ) ; err != nil {
2020-06-30 21:58:18 +02:00
return fmt . Errorf ( "cannot add negative filter for zeroing search results: %w" , err )
2019-05-22 23:16:55 +02:00
}
2022-10-23 11:15:24 +02:00
tsidsFound , err = searchTSIDsInTest ( db , [ ] * TagFilters { tfs } , tr )
2019-05-22 23:16:55 +02:00
if err != nil {
2020-06-30 21:58:18 +02:00
return fmt . Errorf ( "cannot search by exact tag filter with full negative: %w" , err )
2019-05-22 23:16:55 +02:00
}
if testHasTSID ( tsidsFound , tsid ) {
return fmt . Errorf ( "unexpected tsid found for exact negative filter\ntsid=%+v\ntsidsFound=%+v\ntfs=%s\nmn=%s" , tsid , tsidsFound , tfs , mn )
}
2020-05-28 10:58:47 +02:00
// Search for Graphite wildcard
tfs . Reset ( mn . AccountID , mn . ProjectID )
n := bytes . IndexByte ( mn . MetricGroup , '.' )
if n < 0 {
return fmt . Errorf ( "cannot find dot in MetricGroup %q" , mn . MetricGroup )
}
re := "[^.]*" + regexp . QuoteMeta ( string ( mn . MetricGroup [ n : ] ) )
if err := tfs . Add ( nil , [ ] byte ( re ) , false , true ) ; err != nil {
return fmt . Errorf ( "cannot create regexp tag filter for Graphite wildcard" )
}
2022-10-23 11:15:24 +02:00
tsidsFound , err = searchTSIDsInTest ( db , [ ] * TagFilters { tfs } , tr )
2020-05-28 10:58:47 +02:00
if err != nil {
2020-06-30 21:58:18 +02:00
return fmt . Errorf ( "cannot search by regexp tag filter for Graphite wildcard: %w" , err )
2020-05-28 10:58:47 +02:00
}
if ! testHasTSID ( tsidsFound , tsid ) {
return fmt . Errorf ( "tsids is missing in regexp for Graphite wildcard tsidsFound\ntsid=%+v\ntsidsFound=%+v\ntfs=%s\nmn=%s" , tsid , tsidsFound , tfs , mn )
}
2021-09-09 20:09:18 +02:00
// Search with a filter matching empty tag (a single filter)
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1601
tfs . Reset ( mn . AccountID , mn . ProjectID )
if err := tfs . Add ( nil , mn . MetricGroup , false , false ) ; err != nil {
return fmt . Errorf ( "cannot create tag filter for MetricGroup: %w" , err )
}
if err := tfs . Add ( [ ] byte ( "non-existent-tag" ) , [ ] byte ( "foo|" ) , false , true ) ; err != nil {
return fmt . Errorf ( "cannot create regexp tag filter for non-existing tag: %w" , err )
}
2022-10-23 11:15:24 +02:00
tsidsFound , err = searchTSIDsInTest ( db , [ ] * TagFilters { tfs } , tr )
2021-09-09 20:09:18 +02:00
if err != nil {
return fmt . Errorf ( "cannot search with a filter matching empty tag: %w" , err )
}
2021-09-11 09:57:13 +02:00
if ! testHasTSID ( tsidsFound , tsid ) {
return fmt . Errorf ( "tsids is missing when matching a filter with empty tag tsidsFound\ntsid=%+v\ntsidsFound=%+v\ntfs=%s\nmn=%s" , tsid , tsidsFound , tfs , mn )
}
2021-09-09 20:09:18 +02:00
// Search with filters matching empty tags (multiple filters)
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1601
tfs . Reset ( mn . AccountID , mn . ProjectID )
if err := tfs . Add ( nil , mn . MetricGroup , false , false ) ; err != nil {
return fmt . Errorf ( "cannot create tag filter for MetricGroup: %w" , err )
}
if err := tfs . Add ( [ ] byte ( "non-existent-tag1" ) , [ ] byte ( "foo|" ) , false , true ) ; err != nil {
return fmt . Errorf ( "cannot create regexp tag filter for non-existing tag1: %w" , err )
}
if err := tfs . Add ( [ ] byte ( "non-existent-tag2" ) , [ ] byte ( "bar|" ) , false , true ) ; err != nil {
return fmt . Errorf ( "cannot create regexp tag filter for non-existing tag2: %w" , err )
}
2022-10-23 11:15:24 +02:00
tsidsFound , err = searchTSIDsInTest ( db , [ ] * TagFilters { tfs } , tr )
2021-09-09 20:09:18 +02:00
if err != nil {
return fmt . Errorf ( "cannot search with multipel filters matching empty tags: %w" , err )
}
2021-09-11 09:57:13 +02:00
if ! testHasTSID ( tsidsFound , tsid ) {
return fmt . Errorf ( "tsids is missing when matching multiple filters with empty tags tsidsFound\ntsid=%+v\ntsidsFound=%+v\ntfs=%s\nmn=%s" , tsid , tsidsFound , tfs , mn )
}

		// Search with regexps.
		tfs.Reset(mn.AccountID, mn.ProjectID)
		if err := tfs.Add(nil, mn.MetricGroup, false, true); err != nil {
			return fmt.Errorf("cannot create regexp tag filter for MetricGroup: %w", err)
		}
		for j := 0; j < len(mn.Tags); j++ {
			t := &mn.Tags[j]
			if err := tfs.Add(t.Key, append(t.Value, "|foo*."...), false, true); err != nil {
				return fmt.Errorf("cannot create regexp tag filter for tag: %w", err)
			}
			if err := tfs.Add(t.Key, append(t.Value, "|aaa|foo|bar"...), false, true); err != nil {
				return fmt.Errorf("cannot create regexp tag filter for tag: %w", err)
			}
		}
		if err := tfs.Add(nil, []byte("^foobar$"), true, true); err != nil {
			return fmt.Errorf("cannot add negative filter with regexp: %w", err)
		}
		if err := tfs.Add(nil, nil, true, true); err != nil {
			return fmt.Errorf("cannot add no-op negative filter with regexp: %w", err)
		}
		tsidsFound, err = searchTSIDsInTest(db, []*TagFilters{tfs}, tr)
		if err != nil {
			return fmt.Errorf("cannot search by regexp tag filter: %w", err)
		}
		if !testHasTSID(tsidsFound, tsid) {
			return fmt.Errorf("tsids is missing in regexp tsidsFound\ntsid=%+v\ntsidsFound=%+v\ntfs=%s\nmn=%s", tsid, tsidsFound, tfs, mn)
		}
		if err := tfs.Add(nil, mn.MetricGroup, true, true); err != nil {
			return fmt.Errorf("cannot add negative filter for zeroing search results: %w", err)
		}
		tsidsFound, err = searchTSIDsInTest(db, []*TagFilters{tfs}, tr)
		if err != nil {
			return fmt.Errorf("cannot search by regexp tag filter with full negative: %w", err)
		}
		if testHasTSID(tsidsFound, tsid) {
			return fmt.Errorf("unexpected tsid found for regexp negative filter\ntsid=%+v\ntsidsFound=%+v\ntfs=%s\nmn=%s", tsid, tsidsFound, tfs, mn)
		}
		// Search with a filter matching zero results.
		tfs.Reset(mn.AccountID, mn.ProjectID)
		if err := tfs.Add([]byte("non-existing-key"), []byte("foobar"), false, false); err != nil {
			return fmt.Errorf("cannot add non-existing key: %w", err)
		}
		if err := tfs.Add(nil, mn.MetricGroup, false, true); err != nil {
			return fmt.Errorf("cannot create tag filter for MetricGroup matching zero results: %w", err)
		}
		tsidsFound, err = searchTSIDsInTest(db, []*TagFilters{tfs}, tr)
		if err != nil {
			return fmt.Errorf("cannot search by non-existing tag filter: %w", err)
		}
		if len(tsidsFound) > 0 {
			return fmt.Errorf("non-zero tsidsFound for non-existing tag filter: %+v", tsidsFound)
		}
		if isConcurrent {
			// Skip the empty-filter search in concurrent mode, since concurrently added
			// entries may not be visible yet and would make the check unreliable.
			continue
		}

		// Search with empty filter. It should match all the results.
		tfs.Reset(mn.AccountID, mn.ProjectID)
		tsidsFound, err = searchTSIDsInTest(db, []*TagFilters{tfs}, tr)
		if err != nil {
			return fmt.Errorf("cannot search for common prefix: %w", err)
		}
		if !testHasTSID(tsidsFound, tsid) {
			return fmt.Errorf("tsids is missing in common prefix\ntsid=%+v\ntsidsFound=%+v\ntfs=%s\nmn=%s", tsid, tsidsFound, tfs, mn)
		}

		// Search with empty metricGroup. It should match zero results.
		tfs.Reset(mn.AccountID, mn.ProjectID)
		if err := tfs.Add(nil, nil, false, false); err != nil {
			return fmt.Errorf("cannot create tag filter for empty metricGroup: %w", err)
		}
		tsidsFound, err = searchTSIDsInTest(db, []*TagFilters{tfs}, tr)
		if err != nil {
			return fmt.Errorf("cannot search for empty metricGroup: %w", err)
		}
		if len(tsidsFound) != 0 {
			return fmt.Errorf("unexpected non-empty tsids found for empty metricGroup: %v", tsidsFound)
		}
// Search with multiple tfss
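		// The results for the different TagFilters lists are expected to be unioned,
		// so tsid should still be found via tfs2 even though tfs1 (empty metricGroup) matches nothing.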
		tfs1 := NewTagFilters(mn.AccountID, mn.ProjectID)
		if err := tfs1.Add(nil, nil, false, false); err != nil {
			return fmt.Errorf("cannot create tag filter for empty metricGroup: %w", err)
		}
		tfs2 := NewTagFilters(mn.AccountID, mn.ProjectID)
		if err := tfs2.Add(nil, mn.MetricGroup, false, false); err != nil {
			return fmt.Errorf("cannot create tag filter for MetricGroup: %w", err)
		}
		tsidsFound, err = searchTSIDsInTest(db, []*TagFilters{tfs1, tfs2}, tr)
		if err != nil {
			return fmt.Errorf("cannot search for multiple tfss: %w", err)
		}
		if !testHasTSID(tsidsFound, tsid) {
			return fmt.Errorf("tsids is missing when searching for multiple tfss\ntsid=%+v\ntsidsFound=%+v\ntfs=%s\nmn=%s", tsid, tsidsFound, tfs, mn)
		}
		// Verify empty tfss
		tsidsFound, err = searchTSIDsInTest(db, nil, tr)
		if err != nil {
			return fmt.Errorf("cannot search for nil tfss: %w", err)
		}
		if len(tsidsFound) != 0 {
			return fmt.Errorf("unexpected non-empty tsids found for nil tfss")
		}
	}
	return nil
}
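
// searchTSIDsInTest is a test helper: it finds the metricIDs matching tfss on the given
// time range and converts them to TSIDs using the accountID/projectID of the first TagFilters entry.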
func searchTSIDsInTest(db *indexDB, tfss []*TagFilters, tr TimeRange) ([]TSID, error) {
	metricIDs, err := db.searchMetricIDs(nil, tfss, tr, 1e5, noDeadline)
	if err != nil {
		return nil, err
	}
	if len(tfss) == 0 {
		if len(metricIDs) > 0 {
			return nil, fmt.Errorf("expecting empty metricIDs for empty tfss; got %d metricIDs", len(metricIDs))
		}
		return nil, nil
	}
	accountID := tfss[0].accountID
	projectID := tfss[0].projectID
	return db.getTSIDsFromMetricIDs(nil, accountID, projectID, metricIDs, noDeadline)
}
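
// testHasTSID reports whether tsid is present in tsids.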
func testHasTSID(tsids []TSID, tsid *TSID) bool {
	for i := range tsids {
		if tsids[i] == *tsid {
			return true
		}
	}
	return false
}

func TestMatchTagFilters(t *testing.T) {
	var mn MetricName
	mn.AccountID = 123
	mn.ProjectID = 456
	mn.MetricGroup = append(mn.MetricGroup, "foobar_metric"...)
	for i := 0; i < 5; i++ {
		key := fmt.Sprintf("key %d", i)
		value := fmt.Sprintf("value %d", i)
		mn.AddTag(key, value)
	}
	var bb bytesutil.ByteBuffer

	// Verify tag filters for different account / project
	tfs := NewTagFilters(mn.AccountID, mn.ProjectID+1)
	if err := tfs.Add(nil, []byte("foobar_metric"), false, false); err != nil {
		t.Fatalf("cannot add filter: %s", err)
	}
	ok, err := matchTagFilters(&mn, toTFPointers(tfs.tfs), &bb)
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	if ok {
		t.Fatalf("Tag filters shouldn't match for invalid projectID")
	}
	tfs.Reset(mn.AccountID+1, mn.ProjectID)
	if err := tfs.Add(nil, []byte("foobar_metric"), false, false); err != nil {
		t.Fatalf("cannot add filter: %s", err)
	}
	ok, err = matchTagFilters(&mn, toTFPointers(tfs.tfs), &bb)
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	if ok {
		t.Fatalf("Tag filters shouldn't match for invalid accountID")
	}

	// Correct AccountID, ProjectID
	tfs.Reset(mn.AccountID, mn.ProjectID)
	if err := tfs.Add(nil, []byte("foobar_metric"), false, false); err != nil {
		t.Fatalf("cannot add filter: %s", err)
	}
	ok, err = matchTagFilters(&mn, toTFPointers(tfs.tfs), &bb)
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	if !ok {
		t.Fatalf("should match")
	}

	// Empty tag filters should match.
	tfs.Reset(mn.AccountID, mn.ProjectID)
	ok, err = matchTagFilters(&mn, toTFPointers(tfs.tfs), &bb)
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	if !ok {
		t.Fatalf("empty tag filters should match")
	}
	// Negative match by MetricGroup
	tfs.Reset(mn.AccountID, mn.ProjectID)
	if err := tfs.Add(nil, []byte("foobar"), false, false); err != nil {
		t.Fatalf("cannot add no regexp, no negative filter: %s", err)
	}
	ok, err = matchTagFilters(&mn, toTFPointers(tfs.tfs), &bb)
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	if ok {
		t.Fatalf("Shouldn't match")
	}
	tfs.Reset(mn.AccountID, mn.ProjectID)
	if err := tfs.Add(nil, []byte("obar.+"), false, true); err != nil {
		t.Fatalf("cannot add regexp, no negative filter: %s", err)
	}
	ok, err = matchTagFilters(&mn, toTFPointers(tfs.tfs), &bb)
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	if ok {
		t.Fatalf("Shouldn't match")
	}
	tfs.Reset(mn.AccountID, mn.ProjectID)
	if err := tfs.Add(nil, []byte("foobar_metric"), true, false); err != nil {
		t.Fatalf("cannot add no regexp, negative filter: %s", err)
	}
	ok, err = matchTagFilters(&mn, toTFPointers(tfs.tfs), &bb)
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	if ok {
		t.Fatalf("Shouldn't match")
	}
	tfs.Reset(mn.AccountID, mn.ProjectID)
	if err := tfs.Add(nil, []byte("foob.+metric"), true, true); err != nil {
		t.Fatalf("cannot add regexp, negative filter: %s", err)
	}
	ok, err = matchTagFilters(&mn, toTFPointers(tfs.tfs), &bb)
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	if ok {
		t.Fatalf("Shouldn't match")
	}
	tfs.Reset(mn.AccountID, mn.ProjectID)
	if err := tfs.Add(nil, []byte(".+"), true, true); err != nil {
		t.Fatalf("cannot add regexp, negative filter: %s", err)
	}
	ok, err = matchTagFilters(&mn, toTFPointers(tfs.tfs), &bb)
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	if ok {
		t.Fatalf("Shouldn't match")
	}

	// Positive match by MetricGroup
	tfs.Reset(mn.AccountID, mn.ProjectID)
	if err := tfs.Add(nil, []byte("foobar_metric"), false, false); err != nil {
		t.Fatalf("cannot add no regexp, no negative filter: %s", err)
	}
	ok, err = matchTagFilters(&mn, toTFPointers(tfs.tfs), &bb)
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	if !ok {
		t.Fatalf("Should match")
	}
	tfs.Reset(mn.AccountID, mn.ProjectID)
	if err := tfs.Add(nil, []byte("foobar.+etric"), false, true); err != nil {
		t.Fatalf("cannot add regexp, no negative filter: %s", err)
	}
	ok, err = matchTagFilters(&mn, toTFPointers(tfs.tfs), &bb)
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	if !ok {
		t.Fatalf("Should match")
	}
	tfs.Reset(mn.AccountID, mn.ProjectID)
	if err := tfs.Add(nil, []byte("obar_metric"), true, false); err != nil {
		t.Fatalf("cannot add no regexp, negative filter: %s", err)
	}
	ok, err = matchTagFilters(&mn, toTFPointers(tfs.tfs), &bb)
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	if !ok {
		t.Fatalf("Should match")
	}
	tfs.Reset(mn.AccountID, mn.ProjectID)
	if err := tfs.Add(nil, []byte("ob.+metric"), true, true); err != nil {
		t.Fatalf("cannot add regexp, negative filter: %s", err)
	}
	ok, err = matchTagFilters(&mn, toTFPointers(tfs.tfs), &bb)
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	if !ok {
		t.Fatalf("Should match")
	}
	tfs.Reset(mn.AccountID, mn.ProjectID)
	if err := tfs.Add(nil, []byte(".+"), false, true); err != nil {
		t.Fatalf("cannot add regexp, positive filter: %s", err)
	}
	ok, err = matchTagFilters(&mn, toTFPointers(tfs.tfs), &bb)
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	if !ok {
		t.Fatalf("Should match")
	}

	// Positive empty match by non-existing tag
	tfs.Reset(mn.AccountID, mn.ProjectID)
	if err := tfs.Add([]byte("non-existing-tag"), []byte("foobar|"), false, true); err != nil {
		t.Fatalf("cannot add regexp, positive filter: %s", err)
	}
	ok, err = matchTagFilters(&mn, toTFPointers(tfs.tfs), &bb)
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	if !ok {
		t.Fatalf("Should match")
	}

	// Negative match by non-existing tag
	tfs.Reset(mn.AccountID, mn.ProjectID)
	if err := tfs.Add([]byte("non-existing-tag"), []byte("foobar"), false, false); err != nil {
		t.Fatalf("cannot add no regexp, no negative filter: %s", err)
	}
	ok, err = matchTagFilters(&mn, toTFPointers(tfs.tfs), &bb)
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	if ok {
		t.Fatalf("Shouldn't match")
	}
	tfs.Reset(mn.AccountID, mn.ProjectID)
	if err := tfs.Add([]byte("non-existing-tag"), []byte("obar.+"), false, true); err != nil {
		t.Fatalf("cannot add regexp, no negative filter: %s", err)
	}
	ok, err = matchTagFilters(&mn, toTFPointers(tfs.tfs), &bb)
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	if ok {
		t.Fatalf("Shouldn't match")
	}
	tfs.Reset(mn.AccountID, mn.ProjectID)
	if err := tfs.Add([]byte("non-existing-tag"), []byte("foobar_metric"), true, false); err != nil {
		t.Fatalf("cannot add no regexp, negative filter: %s", err)
	}
	ok, err = matchTagFilters(&mn, toTFPointers(tfs.tfs), &bb)
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	if !ok {
		t.Fatalf("Should match")
	}
	tfs.Reset(mn.AccountID, mn.ProjectID)
	if err := tfs.Add([]byte("non-existing-tag"), []byte("foob.+metric"), true, true); err != nil {
		t.Fatalf("cannot add regexp, negative filter: %s", err)
	}
	ok, err = matchTagFilters(&mn, toTFPointers(tfs.tfs), &bb)
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	if !ok {
		t.Fatalf("Should match")
	}
	tfs.Reset(mn.AccountID, mn.ProjectID)
	if err := tfs.Add([]byte("non-existing-tag"), []byte(".+"), true, true); err != nil {
		t.Fatalf("cannot add regexp, negative filter: %s", err)
	}
	ok, err = matchTagFilters(&mn, toTFPointers(tfs.tfs), &bb)
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	if !ok {
		t.Fatalf("Should match")
	}
	tfs.Reset(mn.AccountID, mn.ProjectID)
	if err := tfs.Add([]byte("non-existing-tag"), []byte(".+"), false, true); err != nil {
		t.Fatalf("cannot add regexp, non-negative filter: %s", err)
	}
	ok, err = matchTagFilters(&mn, toTFPointers(tfs.tfs), &bb)
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	if ok {
		t.Fatalf("Shouldn't match")
	}

	// Negative match by existing tag
	tfs.Reset(mn.AccountID, mn.ProjectID)
	if err := tfs.Add([]byte("key 0"), []byte("foobar"), false, false); err != nil {
		t.Fatalf("cannot add no regexp, no negative filter: %s", err)
	}
	ok, err = matchTagFilters(&mn, toTFPointers(tfs.tfs), &bb)
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	if ok {
		t.Fatalf("Shouldn't match")
	}
	tfs.Reset(mn.AccountID, mn.ProjectID)
	if err := tfs.Add([]byte("key 1"), []byte("obar.+"), false, true); err != nil {
		t.Fatalf("cannot add regexp, no negative filter: %s", err)
	}
	ok, err = matchTagFilters(&mn, toTFPointers(tfs.tfs), &bb)
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	if ok {
		t.Fatalf("Shouldn't match")
	}
	tfs.Reset(mn.AccountID, mn.ProjectID)
	if err := tfs.Add([]byte("key 2"), []byte("value 2"), true, false); err != nil {
		t.Fatalf("cannot add no regexp, negative filter: %s", err)
	}
	ok, err = matchTagFilters(&mn, toTFPointers(tfs.tfs), &bb)
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	if ok {
		t.Fatalf("Shouldn't match")
	}
	tfs.Reset(mn.AccountID, mn.ProjectID)
	if err := tfs.Add([]byte("key 3"), []byte("v.+lue 3"), true, true); err != nil {
		t.Fatalf("cannot add regexp, negative filter: %s", err)
	}
	ok, err = matchTagFilters(&mn, toTFPointers(tfs.tfs), &bb)
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	if ok {
		t.Fatalf("Shouldn't match")
	}
	tfs.Reset(mn.AccountID, mn.ProjectID)
	if err := tfs.Add([]byte("key 3"), []byte(".+"), true, true); err != nil {
		t.Fatalf("cannot add regexp, negative filter: %s", err)
	}
	ok, err = matchTagFilters(&mn, toTFPointers(tfs.tfs), &bb)
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	if ok {
		t.Fatalf("Shouldn't match")
	}

	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/546
	tfs.Reset(mn.AccountID, mn.ProjectID)
	if err := tfs.Add([]byte("key 3"), []byte("|value 3"), true, true); err != nil {
		t.Fatalf("cannot add regexp, negative filter: %s", err)
	}
	ok, err = matchTagFilters(&mn, toTFPointers(tfs.tfs), &bb)
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	if ok {
		t.Fatalf("Shouldn't match")
	}
	tfs.Reset(mn.AccountID, mn.ProjectID)
	if err := tfs.Add([]byte("key 3"), []byte("|value 2"), true, true); err != nil {
		t.Fatalf("cannot add regexp, negative filter: %s", err)
	}
	ok, err = matchTagFilters(&mn, toTFPointers(tfs.tfs), &bb)
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	if !ok {
		t.Fatalf("Should match")
	}

	// Positive match by existing tag
	tfs.Reset(mn.AccountID, mn.ProjectID)
	if err := tfs.Add([]byte("key 0"), []byte("value 0"), false, false); err != nil {
		t.Fatalf("cannot add no regexp, no negative filter: %s", err)
	}
	ok, err = matchTagFilters(&mn, toTFPointers(tfs.tfs), &bb)
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	if !ok {
		t.Fatalf("Should match")
	}
	tfs.Reset(mn.AccountID, mn.ProjectID)
	if err := tfs.Add([]byte("key 1"), []byte(".+lue 1"), false, true); err != nil {
		t.Fatalf("cannot add regexp, no negative filter: %s", err)
	}
	ok, err = matchTagFilters(&mn, toTFPointers(tfs.tfs), &bb)
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	if !ok {
		t.Fatalf("Should match")
	}
	tfs.Reset(mn.AccountID, mn.ProjectID)
	if err := tfs.Add([]byte("key 2"), []byte("value 3"), true, false); err != nil {
		t.Fatalf("cannot add no regexp, negative filter: %s", err)
	}
	ok, err = matchTagFilters(&mn, toTFPointers(tfs.tfs), &bb)
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	if !ok {
		t.Fatalf("Should match")
	}
	tfs.Reset(mn.AccountID, mn.ProjectID)
	if err := tfs.Add([]byte("key 3"), []byte("v.+lue 2|"), true, true); err != nil {
		t.Fatalf("cannot add regexp, negative filter: %s", err)
	}
	ok, err = matchTagFilters(&mn, toTFPointers(tfs.tfs), &bb)
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	if !ok {
		t.Fatalf("Should match")
	}
	tfs.Reset(mn.AccountID, mn.ProjectID)
	if err := tfs.Add([]byte("key 3"), []byte(""), true, false); err != nil {
		t.Fatalf("cannot add no regexp, negative filter: %s", err)
	}
	ok, err = matchTagFilters(&mn, toTFPointers(tfs.tfs), &bb)
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	if !ok {
		t.Fatalf("Should match")
	}
	tfs.Reset(mn.AccountID, mn.ProjectID)
	if err := tfs.Add([]byte("key 3"), []byte(".+"), false, true); err != nil {
		t.Fatalf("cannot add regexp, non-negative filter: %s", err)
	}
	ok, err = matchTagFilters(&mn, toTFPointers(tfs.tfs), &bb)
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	if !ok {
		t.Fatalf("Should match")
	}

	// Positive match by multiple tags and MetricGroup
	tfs.Reset(mn.AccountID, mn.ProjectID)
	if err := tfs.Add([]byte("key 0"), []byte("value 0"), false, false); err != nil {
		t.Fatalf("cannot add no regexp, no negative filter: %s", err)
	}
	if err := tfs.Add([]byte("key 2"), []byte("value [0-9]"), false, true); err != nil {
		t.Fatalf("cannot add regexp, no negative filter: %s", err)
	}
	if err := tfs.Add([]byte("key 3"), []byte("value 23"), true, false); err != nil {
		t.Fatalf("cannot add no regexp, negative filter: %s", err)
	}
	if err := tfs.Add([]byte("key 2"), []byte("lue.+43"), true, true); err != nil {
		t.Fatalf("cannot add regexp, negative filter: %s", err)
	}
	if err := tfs.Add(nil, []byte("foobar_metric"), false, false); err != nil {
		t.Fatalf("cannot add no regexp, no negative filter: %s", err)
	}
	if err := tfs.Add(nil, []byte("foo.+metric"), false, true); err != nil {
		t.Fatalf("cannot add regexp, no negative filter: %s", err)
	}
	if err := tfs.Add(nil, []byte("sdfdsf"), true, false); err != nil {
		t.Fatalf("cannot add no regexp, negative filter: %s", err)
	}
	if err := tfs.Add(nil, []byte("o.+metr"), true, true); err != nil {
		t.Fatalf("cannot add regexp, negative filter: %s", err)
	}
	ok, err = matchTagFilters(&mn, toTFPointers(tfs.tfs), &bb)
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	if !ok {
		t.Fatalf("Should match")
	}
	// Negative match by multiple tags and MetricGroup
	tfs.Reset(mn.AccountID, mn.ProjectID)
	// Positive matches
	if err := tfs.Add([]byte("key 0"), []byte("value 0"), false, false); err != nil {
		t.Fatalf("cannot add no regexp, no negative filter: %s", err)
	}
	if err := tfs.Add([]byte("key 2"), []byte("value [0-9]"), false, true); err != nil {
		t.Fatalf("cannot add regexp, no negative filter: %s", err)
	}
	if err := tfs.Add([]byte("key 3"), []byte("value 23"), true, false); err != nil {
		t.Fatalf("cannot add no regexp, negative filter: %s", err)
	}
	// Negative matches
	if err := tfs.Add([]byte("key 2"), []byte("v.+2"), true, true); err != nil {
		t.Fatalf("cannot add regexp, negative filter: %s", err)
	}
	if err := tfs.Add(nil, []byte("obar_metric"), false, false); err != nil {
		t.Fatalf("cannot add no regexp, no negative filter: %s", err)
	}
	if err := tfs.Add(nil, []byte("oo.+metric"), false, true); err != nil {
		t.Fatalf("cannot add regexp, no negative filter: %s", err)
	}
	// Positive matches
	if err := tfs.Add(nil, []byte("sdfdsf"), true, false); err != nil {
		t.Fatalf("cannot add no regexp, negative filter: %s", err)
	}
	if err := tfs.Add(nil, []byte("o.+metr"), true, true); err != nil {
		t.Fatalf("cannot add regexp, negative filter: %s", err)
	}
	ok, err = matchTagFilters(&mn, toTFPointers(tfs.tfs), &bb)
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	if ok {
		t.Fatalf("Shouldn't match")
	}

	// Negative match for multiple non-regexp positive filters
	tfs.Reset(mn.AccountID, mn.ProjectID)
	if err := tfs.Add(nil, []byte("foobar_metric"), false, false); err != nil {
		t.Fatalf("cannot add non-regexp positive filter for MetricGroup: %s", err)
	}
	if err := tfs.Add([]byte("non-existing-metric"), []byte("foobar"), false, false); err != nil {
		t.Fatalf("cannot add non-regexp positive filter for non-existing tag: %s", err)
	}
	ok, err = matchTagFilters(&mn, toTFPointers(tfs.tfs), &bb)
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	if ok {
		t.Fatalf("Shouldn't match")
	}
}

func TestIndexDBRepopulateAfterRotation(t *testing.T) {
	r := rand.New(rand.NewSource(1))
path := "TestIndexRepopulateAfterRotation"
2023-04-15 08:01:20 +02:00
s := MustOpenStorage ( path , msecsPerMonth , 1e5 , 1e5 )
	defer func() {
		s.MustClose()
		if err := os.RemoveAll(path); err != nil {
			t.Fatalf("cannot remove %q: %s", path, err)
		}
	}()

	db := s.idb()
	if db.generation == 0 {
		t.Fatalf("expected indexDB generation to be not 0")
	}

	const metricRowsN = 1000

	// use min-max timestamps spanning a 1 month range to create a smaller number of partitions
	timeMin, timeMax := time.Now().Add(-730*time.Hour), time.Now()
	mrs := testGenerateMetricRows(r, metricRowsN, timeMin.UnixMilli(), timeMax.UnixMilli())
	if err := s.AddRows(mrs, defaultPrecisionBits); err != nil {
		t.Fatalf("unexpected error when adding mrs: %s", err)
	}
	s.DebugFlush()

	// verify the storage contains rows.
	var m Metrics
	s.UpdateMetrics(&m)
	if rowsCount := m.TableMetrics.TotalRowsCount(); rowsCount < uint64(metricRowsN) {
		t.Fatalf("expecting at least %d rows in the table; got %d", metricRowsN, rowsCount)
	}

	// check new series were registered in indexDB
	added := atomic.LoadUint64(&db.newTimeseriesCreated)
	if added != metricRowsN {
		t.Fatalf("expected indexDB to contain %d rows; got %d", metricRowsN, added)
	}

	// check new series were added to cache
	var cs fastcache.Stats
	s.tsidCache.UpdateStats(&cs)
	if cs.EntriesCount != metricRowsN {
		t.Fatalf("expected tsidCache to contain %d rows; got %d", metricRowsN, cs.EntriesCount)
	}

	// check if cache entries do belong to current indexDB generation
	var genTSID generationTSID
	for _, mr := range mrs {
		s.getTSIDFromCache(&genTSID, mr.MetricNameRaw)
		if genTSID.generation != db.generation {
			t.Fatalf("expected all entries in tsidCache to have the same indexDB generation: %d; got %d", db.generation, genTSID.generation)
		}
	}
	prevGeneration := db.generation

	// force index rotation
	s.mustRotateIndexDB()

	// check tsidCache wasn't reset after the rotation
	var cs2 fastcache.Stats
	s.tsidCache.UpdateStats(&cs2)
	if cs2.EntriesCount != metricRowsN {
		t.Fatalf("expected tsidCache after rotation to contain %d rows; got %d", metricRowsN, cs2.EntriesCount)
	}
	dbNew := s.idb()
	if dbNew.generation == 0 {
		t.Fatalf("expected new indexDB generation to be not 0")
	}
	if dbNew.generation == prevGeneration {
		t.Fatalf("expected new indexDB generation %d to be different from prev indexDB", dbNew.generation)
	}

	// Re-insert rows again and verify that entries belong to prevGeneration and dbNew.generation,
	// while the majority of entries remain at prevGeneration.
	if err := s.AddRows(mrs, defaultPrecisionBits); err != nil {
		t.Fatalf("unexpected error when adding mrs: %s", err)
	}
	s.DebugFlush()
	entriesByGeneration := make(map[uint64]int)
	for _, mr := range mrs {
		s.getTSIDFromCache(&genTSID, mr.MetricNameRaw)
		entriesByGeneration[genTSID.generation]++
	}
	if len(entriesByGeneration) > 2 {
t . Fatalf ( "expecting two generations; got %d" , entriesByGeneration )
}
prevEntries := entriesByGeneration [ prevGeneration ]
currEntries := entriesByGeneration [ dbNew . generation ]
totalEntries := prevEntries + currEntries
2023-05-18 20:28:51 +02:00
if totalEntries != metricRowsN {
t . Fatalf ( "unexpected number of entries in tsid cache; got %d; want %d" , totalEntries , metricRowsN )
}
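	// Only a small share of the cached entries is expected to move to the new generation
	// right away, since entries from the previous index are re-populated gradually after
	// the rotation rather than all at once.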
	if float64(currEntries)/float64(totalEntries) > 0.1 {
		t.Fatalf("too big share of entries in the new generation; currEntries=%d, prevEntries=%d", currEntries, prevEntries)
	}
}

func TestSearchTSIDWithTimeRange(t *testing.T) {
	s := newTestStorage()
	defer stopTestStorage(s)

	dbName := nextIndexDBTableName()
	var isReadOnly uint32
	db := mustOpenIndexDB(dbName, s, 0, &isReadOnly)
	defer func() {
		db.MustClose()
		if err := os.RemoveAll(dbName); err != nil {
			t.Fatalf("cannot remove indexDB: %s", err)
		}
	}()

	// Create a bunch of per-day time series
	const accountID = 12345
	const projectID = 85453
	is := db.getIndexSearch(accountID, projectID, noDeadline)
	defer db.putIndexSearch(is)

	const days = 5
	const metricsPerDay = 1000
	theDay := time.Date(2019, time.October, 15, 5, 1, 0, 0, time.UTC)
	now := uint64(timestampFromTime(theDay))
	baseDate := now / msecPerDay
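	// baseDate is the per-day index key (days since the Unix epoch) for theDay;
	// the loop below registers each day's series under baseDate-day so every day gets its own index.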
	var metricNameBuf []byte
	var metricNameRawBuf []byte
	perDayMetricIDs := make(map[uint64]*uint64set.Set)
	var allMetricIDs uint64set.Set
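	// perDayMetricIDs collects the metricIDs registered for each day and allMetricIDs their
	// union; both are compared against the per-day and global indexes further below.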
	labelNames := []string{
		"__name__", "constant", "day", "UniqueId", "some_unique_id",
	}
	labelValues := []string{
		"testMetric",
	}
	sort.Strings(labelNames)
	for day := 0; day < days; day++ {
		var tsids []TSID
		var mns []MetricName
		for metric := 0; metric < metricsPerDay; metric++ {
			var mn MetricName
			mn.AccountID = accountID
			mn.ProjectID = projectID
			mn.MetricGroup = []byte("testMetric")
			mn.AddTag(
				"constant",
				"const",
			)
			mn.AddTag(
				"day",
				fmt.Sprintf("%v", day),
			)
			mn.AddTag(
				"UniqueId",
				fmt.Sprintf("%v", metric),
			)
			mn.AddTag(
				"some_unique_id",
				fmt.Sprintf("%v", day),
			)
			mn.sortTags()
			metricNameBuf = mn.Marshal(metricNameBuf[:0])
			metricNameRawBuf = mn.marshalRaw(metricNameRawBuf[:0])
			var tsid TSID
			if err := is.GetOrCreateTSIDByName(&tsid, metricNameBuf, metricNameRawBuf, 0); err != nil {
				t.Fatalf("unexpected error when creating tsid for mn:\n%s: %s", &mn, err)
			}
			if tsid.AccountID != accountID {
				t.Fatalf("unexpected accountID; got %d; want %d", tsid.AccountID, accountID)
			}
			if tsid.ProjectID != projectID {
				t.Fatalf("unexpected projectID; got %d; want %d", tsid.ProjectID, projectID)
			}
			mns = append(mns, mn)
			tsids = append(tsids, tsid)
		}

		// Add the metrics to the per-day stores
		date := baseDate - uint64(day)
		var metricIDs uint64set.Set
		for i := range tsids {
			tsid := &tsids[i]
			metricIDs.Add(tsid.MetricID)
			is.createPerDayIndexes(date, tsid.MetricID, &mns[i])
		}
		allMetricIDs.Union(&metricIDs)
		perDayMetricIDs[date] = &metricIDs
	}

	// Flush index to disk, so it becomes visible for search
	db.tb.DebugFlush()

	is2 := db.getIndexSearch(accountID, projectID, noDeadline)
	defer db.putIndexSearch(is2)

	// Check that all the metrics are found for all the days.
	for date := baseDate - days + 1; date <= baseDate; date++ {
		metricIDs, err := is2.getMetricIDsForDate(date, metricsPerDay)
		if err != nil {
			t.Fatalf("unexpected error: %s", err)
		}
		if !perDayMetricIDs[date].Equal(metricIDs) {
			t.Fatalf("unexpected metricIDs found;\ngot\n%d\nwant\n%d", metricIDs.AppendTo(nil), perDayMetricIDs[date].AppendTo(nil))
		}
	}

	// Check that all the metrics are found in global index
	metricIDs, err := is2.getMetricIDsForDate(0, metricsPerDay*days)
	if err != nil {
		t.Fatalf("unexpected error: %s", err)
	}
	if !allMetricIDs.Equal(metricIDs) {
		t.Fatalf("unexpected metricIDs found;\ngot\n%d\nwant\n%d", metricIDs.AppendTo(nil), allMetricIDs.AppendTo(nil))
	}

	// Check SearchLabelNamesWithFiltersOnTimeRange with the specified time range.
	tr := TimeRange{
		MinTimestamp: int64(now) - msecPerDay,
		MaxTimestamp: int64(now),
	}
	lns, err := db.SearchLabelNamesWithFiltersOnTimeRange(nil, accountID, projectID, nil, tr, 10000, 1e9, noDeadline)
	if err != nil {
		t.Fatalf("unexpected error in SearchLabelNamesWithFiltersOnTimeRange(timeRange=%s): %s", &tr, err)
	}
	sort.Strings(lns)
	if !reflect.DeepEqual(lns, labelNames) {
		t.Fatalf("unexpected labelNames; got\n%s\nwant\n%s", lns, labelNames)
	}

	// Check SearchLabelValuesWithFiltersOnTimeRange with the specified time range.
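	// The empty label name in the call below selects metric name values
	// (the MetricGroup), so "testMetric" is the only expected value.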
	lvs, err := db.SearchLabelValuesWithFiltersOnTimeRange(nil, accountID, projectID, "", nil, tr, 10000, 1e9, noDeadline)
	if err != nil {
		t.Fatalf("unexpected error in SearchLabelValuesWithFiltersOnTimeRange(timeRange=%s): %s", &tr, err)
	}
	sort.Strings(lvs)
	if !reflect.DeepEqual(lvs, labelValues) {
		t.Fatalf("unexpected labelValues; got\n%s\nwant\n%s", lvs, labelValues)
	}

	// Create a filter that will match series that occur across multiple days
	tfs := NewTagFilters(accountID, projectID)
	if err := tfs.Add([]byte("constant"), []byte("const"), false, false); err != nil {
		t.Fatalf("cannot add filter: %s", err)
	}

	// Perform a search within a day.
	// This should return the metrics for the day
	tr = TimeRange{
		MinTimestamp: int64(now - 2*msecPerHour - 1),
		MaxTimestamp: int64(now),
	}
	matchedTSIDs, err := searchTSIDsInTest(db, []*TagFilters{tfs}, tr)
	if err != nil {
		t.Fatalf("error searching tsids: %v", err)
	}
	if len(matchedTSIDs) != metricsPerDay {
		t.Fatalf("expected %d time series for current day, got %d time series", metricsPerDay, len(matchedTSIDs))
	}

	// Check SearchLabelNamesWithFiltersOnTimeRange with the specified filter.
	lns, err = db.SearchLabelNamesWithFiltersOnTimeRange(nil, accountID, projectID, []*TagFilters{tfs}, TimeRange{}, 10000, 1e9, noDeadline)
	if err != nil {
		t.Fatalf("unexpected error in SearchLabelNamesWithFiltersOnTimeRange(filters=%s): %s", tfs, err)
	}
	sort.Strings(lns)
	if !reflect.DeepEqual(lns, labelNames) {
		t.Fatalf("unexpected labelNames; got\n%s\nwant\n%s", lns, labelNames)
	}

	// Check SearchLabelNamesWithFiltersOnTimeRange with the specified filter and time range.
	lns, err = db.SearchLabelNamesWithFiltersOnTimeRange(nil, accountID, projectID, []*TagFilters{tfs}, tr, 10000, 1e9, noDeadline)
	if err != nil {
		t.Fatalf("unexpected error in SearchLabelNamesWithFiltersOnTimeRange(filters=%s, timeRange=%s): %s", tfs, &tr, err)
	}
	sort.Strings(lns)
	if !reflect.DeepEqual(lns, labelNames) {
		t.Fatalf("unexpected labelNames; got\n%s\nwant\n%s", lns, labelNames)
	}

	// Check SearchLabelValuesWithFiltersOnTimeRange with the specified filter.
	lvs, err = db.SearchLabelValuesWithFiltersOnTimeRange(nil, accountID, projectID, "", []*TagFilters{tfs}, TimeRange{}, 10000, 1e9, noDeadline)
	if err != nil {
		t.Fatalf("unexpected error in SearchLabelValuesWithFiltersOnTimeRange(filters=%s): %s", tfs, err)
	}
	sort.Strings(lvs)
	if !reflect.DeepEqual(lvs, labelValues) {
		t.Fatalf("unexpected labelValues; got\n%s\nwant\n%s", lvs, labelValues)
	}

	// Check SearchLabelValuesWithFiltersOnTimeRange with the specified filter and time range.
	lvs, err = db.SearchLabelValuesWithFiltersOnTimeRange(nil, accountID, projectID, "", []*TagFilters{tfs}, tr, 10000, 1e9, noDeadline)
	if err != nil {
		t.Fatalf("unexpected error in SearchLabelValuesWithFiltersOnTimeRange(filters=%s, timeRange=%s): %s", tfs, &tr, err)
	}
	sort.Strings(lvs)
	if !reflect.DeepEqual(lvs, labelValues) {
		t.Fatalf("unexpected labelValues; got\n%s\nwant\n%s", lvs, labelValues)
	}

	// Perform a search across all the days; this should match all the metrics.
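	// Series differ across days (the "day" and "some_unique_id" tag values change),
	// so metricsPerDay*days distinct TSIDs are expected.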
	tr = TimeRange{
		MinTimestamp: int64(now - msecPerDay*days),
		MaxTimestamp: int64(now),
	}
	matchedTSIDs, err = searchTSIDsInTest(db, []*TagFilters{tfs}, tr)
	if err != nil {
		t.Fatalf("error searching tsids: %v", err)
	}
	if len(matchedTSIDs) != metricsPerDay*days {
		t.Fatalf("expected %d time series for all days, got %d time series", metricsPerDay*days, len(matchedTSIDs))
	}

	// Check GetTSDBStatus with nil filters.
	status, err := db.GetTSDBStatus(nil, accountID, projectID, nil, baseDate, "day", 5, 1e6, noDeadline)
	if err != nil {
		t.Fatalf("error in GetTSDBStatus with nil filters: %s", err)
	}
	if !status.hasEntries() {
		t.Fatalf("expecting non-empty TSDB status")
	}
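	// The expectations below cover the cardinality stats for baseDate: top series
	// counts by metric name, label name and focus label value ("day"), plus label
	// value counts, series counts per label=value pair, and the totals.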
	expectedSeriesCountByMetricName := []TopHeapEntry{
		{
			Name:  "testMetric",
			Count: 1000,
		},
	}
	if !reflect.DeepEqual(status.SeriesCountByMetricName, expectedSeriesCountByMetricName) {
		t.Fatalf("unexpected SeriesCountByMetricName;\ngot\n%v\nwant\n%v", status.SeriesCountByMetricName, expectedSeriesCountByMetricName)
	}
	expectedSeriesCountByLabelName := []TopHeapEntry{
		{
			Name:  "UniqueId",
			Count: 1000,
		},
		{
			Name:  "__name__",
			Count: 1000,
		},
		{
			Name:  "constant",
			Count: 1000,
		},
		{
			Name:  "day",
			Count: 1000,
		},
		{
			Name:  "some_unique_id",
			Count: 1000,
		},
	}
	if !reflect.DeepEqual(status.SeriesCountByLabelName, expectedSeriesCountByLabelName) {
		t.Fatalf("unexpected SeriesCountByLabelName;\ngot\n%v\nwant\n%v", status.SeriesCountByLabelName, expectedSeriesCountByLabelName)
	}
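	// Only day=0 series were registered for baseDate, so the focus label "day"
	// has a single value there.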
	expectedSeriesCountByFocusLabelValue := []TopHeapEntry{
		{
			Name:  "0",
			Count: 1000,
		},
	}
	if !reflect.DeepEqual(status.SeriesCountByFocusLabelValue, expectedSeriesCountByFocusLabelValue) {
		t.Fatalf("unexpected SeriesCountByFocusLabelValue;\ngot\n%v\nwant\n%v", status.SeriesCountByFocusLabelValue, expectedSeriesCountByFocusLabelValue)
	}
	expectedLabelValueCountByLabelName := []TopHeapEntry{
		{
			Name:  "UniqueId",
			Count: 1000,
		},
		{
			Name:  "__name__",
			Count: 1,
		},
		{
			Name:  "constant",
			Count: 1,
		},
		{
			Name:  "day",
			Count: 1,
		},
		{
			Name:  "some_unique_id",
			Count: 1,
		},
	}
	if !reflect.DeepEqual(status.LabelValueCountByLabelName, expectedLabelValueCountByLabelName) {
		t.Fatalf("unexpected LabelValueCountByLabelName;\ngot\n%v\nwant\n%v", status.LabelValueCountByLabelName, expectedLabelValueCountByLabelName)
	}
	expectedSeriesCountByLabelValuePair := []TopHeapEntry{
		{
			Name:  "__name__=testMetric",
			Count: 1000,
		},
		{
			Name:  "constant=const",
			Count: 1000,
		},
		{
			Name:  "day=0",
			Count: 1000,
		},
		{
			Name:  "some_unique_id=0",
			Count: 1000,
		},
		{
			Name:  "UniqueId=1",
			Count: 1,
		},
	}
	if !reflect.DeepEqual(status.SeriesCountByLabelValuePair, expectedSeriesCountByLabelValuePair) {
		t.Fatalf("unexpected SeriesCountByLabelValuePair;\ngot\n%v\nwant\n%v", status.SeriesCountByLabelValuePair, expectedSeriesCountByLabelValuePair)
	}
	expectedTotalSeries := uint64(1000)
	if status.TotalSeries != expectedTotalSeries {
		t.Fatalf("unexpected TotalSeries; got %d; want %d", status.TotalSeries, expectedTotalSeries)
	}
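	// Each of the 1000 series carries 5 label=value pairs
	// (__name__, constant, day, UniqueId, some_unique_id), hence 5000 pairs in total.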
	expectedLabelValuePairs := uint64(5000)
	if status.TotalLabelValuePairs != expectedLabelValuePairs {
		t.Fatalf("unexpected TotalLabelValuePairs; got %d; want %d", status.TotalLabelValuePairs, expectedLabelValuePairs)
	}

	// Check GetTSDBStatus with non-nil filter, which matches all the series
	tfs = NewTagFilters(accountID, projectID)
	if err := tfs.Add([]byte("day"), []byte("0"), false, false); err != nil {
		t.Fatalf("cannot add filter: %s", err)
	}
	status, err = db.GetTSDBStatus(nil, accountID, projectID, []*TagFilters{tfs}, baseDate, "", 5, 1e6, noDeadline)
	if err != nil {
		t.Fatalf("error in GetTSDBStatus: %s", err)
	}
	if !status.hasEntries() {
		t.Fatalf("expecting non-empty TSDB status")
	}
	expectedSeriesCountByMetricName = []TopHeapEntry{
		{
			Name:  "testMetric",
			Count: 1000,
		},
	}
	if !reflect.DeepEqual(status.SeriesCountByMetricName, expectedSeriesCountByMetricName) {
		t.Fatalf("unexpected SeriesCountByMetricName;\ngot\n%v\nwant\n%v", status.SeriesCountByMetricName, expectedSeriesCountByMetricName)
	}
	expectedTotalSeries = 1000
	if status.TotalSeries != expectedTotalSeries {
		t.Fatalf("unexpected TotalSeries; got %d; want %d", status.TotalSeries, expectedTotalSeries)
	}
	expectedLabelValuePairs = 5000
	if status.TotalLabelValuePairs != expectedLabelValuePairs {
		t.Fatalf("unexpected TotalLabelValuePairs; got %d; want %d", status.TotalLabelValuePairs, expectedLabelValuePairs)
	}

	// Check GetTSDBStatus with nil filters on the global time range, which matches all the series
	status, err = db.GetTSDBStatus(nil, accountID, projectID, nil, 0, "day", 5, 1e6, noDeadline)
	if err != nil {
		t.Fatalf("error in GetTSDBStatus: %s", err)
	}
	if !status.hasEntries() {
		t.Fatalf("expecting non-empty TSDB status")
	}
	expectedSeriesCountByMetricName = []TopHeapEntry{
		{
			Name:  "testMetric",
			Count: 5000,
		},
	}
	if !reflect.DeepEqual(status.SeriesCountByMetricName, expectedSeriesCountByMetricName) {
		t.Fatalf("unexpected SeriesCountByMetricName;\ngot\n%v\nwant\n%v", status.SeriesCountByMetricName, expectedSeriesCountByMetricName)
	}
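	// The global time range aggregates all 5 days: 5*1000 = 5000 series,
	// each carrying 5 label=value pairs, i.e. 25000 pairs in total.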
	expectedTotalSeries = 5000
	if status.TotalSeries != expectedTotalSeries {
		t.Fatalf("unexpected TotalSeries; got %d; want %d", status.TotalSeries, expectedTotalSeries)
	}
	expectedLabelValuePairs = 25000
	if status.TotalLabelValuePairs != expectedLabelValuePairs {
		t.Fatalf("unexpected TotalLabelValuePairs; got %d; want %d", status.TotalLabelValuePairs, expectedLabelValuePairs)
	}
	expectedSeriesCountByFocusLabelValue = []TopHeapEntry{
		{
			Name:  "0",
			Count: 1000,
		},
		{
			Name:  "1",
			Count: 1000,
		},
		{
			Name:  "2",
			Count: 1000,
		},
		{
			Name:  "3",
			Count: 1000,
		},
		{
			Name:  "4",
			Count: 1000,
		},
	}
	if !reflect.DeepEqual(status.SeriesCountByFocusLabelValue, expectedSeriesCountByFocusLabelValue) {
		t.Fatalf("unexpected SeriesCountByFocusLabelValue;\ngot\n%v\nwant\n%v", status.SeriesCountByFocusLabelValue, expectedSeriesCountByFocusLabelValue)
	}

	// Check GetTSDBStatus with non-nil filter, which matches only 3 series
	tfs = NewTagFilters(accountID, projectID)
	if err := tfs.Add([]byte("UniqueId"), []byte("0|1|3"), false, true); err != nil {
		t.Fatalf("cannot add filter: %s", err)
	}
	status, err = db.GetTSDBStatus(nil, accountID, projectID, []*TagFilters{tfs}, baseDate, "", 5, 1e6, noDeadline)
	if err != nil {
		t.Fatalf("error in GetTSDBStatus: %s", err)
	}
	if !status.hasEntries() {
		t.Fatalf("expecting non-empty TSDB status")
	}
	expectedSeriesCountByMetricName = []TopHeapEntry{
		{
			Name:  "testMetric",
			Count: 3,
		},
	}
	if !reflect.DeepEqual(status.SeriesCountByMetricName, expectedSeriesCountByMetricName) {
		t.Fatalf("unexpected SeriesCountByMetricName;\ngot\n%v\nwant\n%v", status.SeriesCountByMetricName, expectedSeriesCountByMetricName)
	}
	expectedTotalSeries = 3
	if status.TotalSeries != expectedTotalSeries {
		t.Fatalf("unexpected TotalSeries; got %d; want %d", status.TotalSeries, expectedTotalSeries)
	}
	expectedLabelValuePairs = 15
	if status.TotalLabelValuePairs != expectedLabelValuePairs {
		t.Fatalf("unexpected TotalLabelValuePairs; got %d; want %d", status.TotalLabelValuePairs, expectedLabelValuePairs)
	}

	// Check GetTSDBStatus with non-nil filter on global time range, which matches only 15 series
	status, err = db.GetTSDBStatus(nil, accountID, projectID, []*TagFilters{tfs}, 0, "", 5, 1e6, noDeadline)
	if err != nil {
		t.Fatalf("error in GetTSDBStatus: %s", err)
	}
	if !status.hasEntries() {
		t.Fatalf("expecting non-empty TSDB status")
	}
	expectedSeriesCountByMetricName = []TopHeapEntry{
		{
			Name:  "testMetric",
			Count: 15,
		},
	}
	if !reflect.DeepEqual(status.SeriesCountByMetricName, expectedSeriesCountByMetricName) {
		t.Fatalf("unexpected SeriesCountByMetricName;\ngot\n%v\nwant\n%v", status.SeriesCountByMetricName, expectedSeriesCountByMetricName)
	}
	expectedTotalSeries = 15
	if status.TotalSeries != expectedTotalSeries {
		t.Fatalf("unexpected TotalSeries; got %d; want %d", status.TotalSeries, expectedTotalSeries)
	}
	expectedLabelValuePairs = 75
	if status.TotalLabelValuePairs != expectedLabelValuePairs {
		t.Fatalf("unexpected TotalLabelValuePairs; got %d; want %d", status.TotalLabelValuePairs, expectedLabelValuePairs)
	}
}
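
// toTFPointers converts a slice of tagFilter values into a slice of pointers to its elements.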
func toTFPointers(tfs []tagFilter) []*tagFilter {
	tfps := make([]*tagFilter, len(tfs))
	for i := range tfs {
		tfps[i] = &tfs[i]
	}
	return tfps
}
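
// newTestStorage returns a minimal Storage with just the caches needed by the
// indexDB tests initialized. A typical caller pairs it with stopTestStorage:
//
//	s := newTestStorage()
//	defer stopTestStorage(s)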
func newTestStorage() *Storage {
	s := &Storage{
		cachePath:         "test-storage-cache",
		metricIDCache:     workingsetcache.New(1234),
		metricNameCache:   workingsetcache.New(1234),
		tsidCache:         workingsetcache.New(1234),
		dateMetricIDCache: newDateMetricIDCache(),
		retentionMsecs:    maxRetentionMsecs,
	}
	s.setDeletedMetricIDs(&uint64set.Set{})
	var idb *indexDB
	s.idbCurr.Store(idb)
	return s
}
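
// stopTestStorage releases the caches allocated by newTestStorage and removes
// its on-disk cache directory.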
func stopTestStorage(s *Storage) {
	s.metricIDCache.Stop()
	s.metricNameCache.Stop()
	s.tsidCache.Stop()
	fs.MustRemoveDirAtomic(s.cachePath)
}