package logstorage

import (
	"context"
	"fmt"
	"math"
	"slices"
	"sort"
	"strings"
	"sync"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
)

// genericSearchOptions contains options used for the search.
type genericSearchOptions struct {
	// tenantIDs must contain the list of tenantIDs for the search.
	tenantIDs []TenantID

	// filter is the filter to use for the search
	filter filter

	// neededColumnNames contains names of columns to return in the result
	neededColumnNames []string

	// unneededColumnNames contains names of columns, which mustn't be returned in the result.
	//
	// This list is consulted only if needAllColumns=true.
	unneededColumnNames []string

	// needAllColumns is set to true when all the columns except unneededColumnNames must be returned in the result.
	needAllColumns bool
}

type searchOptions struct {
	// Optional sorted list of tenantIDs for the search.
	// If it is empty, then the search is performed by streamIDs.
	tenantIDs []TenantID

	// Optional sorted list of streamIDs for the search.
	// If it is empty, then the search is performed by tenantIDs.
	streamIDs []streamID

	// minTimestamp is the minimum timestamp for the search
	minTimestamp int64

	// maxTimestamp is the maximum timestamp for the search
	maxTimestamp int64

	// filter is the filter to use for the search
	filter filter

	// neededColumnNames contains names of columns to return in the result
	neededColumnNames []string

	// unneededColumnNames contains names of columns, which mustn't be returned in the result.
	//
	// This list is consulted when needAllColumns=true.
	unneededColumnNames []string

	// needAllColumns is set to true when all the columns except unneededColumnNames must be returned in the result.
	needAllColumns bool
}

// WriteBlockFunc must write a block with the given timestamps and columns.
//
// WriteBlockFunc cannot hold references to timestamps and columns after returning.
type WriteBlockFunc func(workerID uint, timestamps []int64, columns []BlockColumn)

// RunQuery runs the given q and calls writeBlock for results.
func (s *Storage) RunQuery(ctx context.Context, tenantIDs []TenantID, q *Query, writeBlock WriteBlockFunc) error {
	qNew, err := s.initFilterInValues(ctx, tenantIDs, q)
	if err != nil {
		return err
	}

	writeBlockResult := func(workerID uint, br *blockResult) {
		if len(br.timestamps) == 0 {
			return
		}

		brs := getBlockRows()
		csDst := brs.cs

		cs := br.getColumns()
		for _, c := range cs {
			values := c.getValues(br)
			csDst = append(csDst, BlockColumn{
				Name:   c.name,
				Values: values,
			})
		}
		writeBlock(workerID, br.timestamps, csDst)

		brs.cs = csDst
		putBlockRows(brs)
	}

	return s.runQuery(ctx, tenantIDs, qNew, writeBlockResult)
}
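
// runQueryCollectRows is a minimal illustrative sketch (a hypothetical helper,
// not part of the original API) of how a caller can implement WriteBlockFunc.
// RunQuery may invoke the callback concurrently from multiple workers, and the
// passed timestamps and columns are reused after the callback returns, so the
// callback must copy any data it wants to keep and must synchronize access to
// shared state.
func runQueryCollectRows(ctx context.Context, s *Storage, tenantIDs []TenantID, q *Query) ([][]Field, error) {
	var rowsLock sync.Mutex
	var rows [][]Field
	writeBlock := func(_ uint, timestamps []int64, columns []BlockColumn) {
		for i := range timestamps {
			// Copy the values, since they cannot be referenced after the callback returns.
			row := make([]Field, len(columns))
			for j, c := range columns {
				row[j] = Field{
					Name:  strings.Clone(c.Name),
					Value: strings.Clone(c.Values[i]),
				}
			}
			rowsLock.Lock()
			rows = append(rows, row)
			rowsLock.Unlock()
		}
	}
	if err := s.RunQuery(ctx, tenantIDs, q, writeBlock); err != nil {
		return nil, err
	}
	return rows, nil
}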

func (s *Storage) runQuery(ctx context.Context, tenantIDs []TenantID, q *Query, writeBlockResultFunc func(workerID uint, br *blockResult)) error {
	neededColumnNames, unneededColumnNames := q.getNeededColumns()
	so := &genericSearchOptions{
		tenantIDs:           tenantIDs,
		filter:              q.f,
		neededColumnNames:   neededColumnNames,
		unneededColumnNames: unneededColumnNames,
		needAllColumns:      slices.Contains(neededColumnNames, "*"),
	}

	workersCount := cgroup.AvailableCPUs()

	ppMain := newDefaultPipeProcessor(writeBlockResultFunc)
	pp := ppMain
	stopCh := ctx.Done()
	cancels := make([]func(), len(q.pipes))
	pps := make([]pipeProcessor, len(q.pipes))
	for i := len(q.pipes) - 1; i >= 0; i-- {
		p := q.pipes[i]
		ctxChild, cancel := context.WithCancel(ctx)
		pp = p.newPipeProcessor(workersCount, stopCh, cancel, pp)
		stopCh = ctxChild.Done()
		ctx = ctxChild

		cancels[i] = cancel
		pps[i] = pp
	}

	s.search(workersCount, so, stopCh, pp.writeBlock)

	var errFlush error
	for i, pp := range pps {
		if err := pp.flush(); err != nil && errFlush == nil {
			errFlush = err
		}
		cancel := cancels[i]
		cancel()
	}
	if err := ppMain.flush(); err != nil && errFlush == nil {
		errFlush = err
	}
	return errFlush
}

// GetFieldNames returns field names from q results for the given tenantIDs.
func (s *Storage) GetFieldNames(ctx context.Context, tenantIDs []TenantID, q *Query) ([]ValueWithHits, error) {
	pipes := append([]pipe{}, q.pipes...)
	pipeStr := "field_names"
	lex := newLexer(pipeStr)

	pf, err := parsePipeFieldNames(lex)
	if err != nil {
		logger.Panicf("BUG: unexpected error when parsing 'field_names' pipe at [%s]: %s", pipeStr, err)
	}
	pf.isFirstPipe = len(pipes) == 0

	if !lex.isEnd() {
		logger.Panicf("BUG: unexpected tail left after parsing pipes [%s]: %q", pipeStr, lex.s)
	}

	pipes = append(pipes, pf)

	q = &Query{
		f:     q.f,
		pipes: pipes,
	}

	return s.runValuesWithHitsQuery(ctx, tenantIDs, q)
}

func (s *Storage) getFieldValuesNoHits(ctx context.Context, tenantIDs []TenantID, q *Query, fieldName string) ([]string, error) {
	pipes := append([]pipe{}, q.pipes...)
	quotedFieldName := quoteTokenIfNeeded(fieldName)
	pipeStr := fmt.Sprintf("uniq by (%s)", quotedFieldName)
	lex := newLexer(pipeStr)

	pu, err := parsePipeUniq(lex)
	if err != nil {
		logger.Panicf("BUG: unexpected error when parsing 'uniq' pipe at [%s]: %s", pipeStr, err)
	}

	if !lex.isEnd() {
		logger.Panicf("BUG: unexpected tail left after parsing pipes [%s]: %q", pipeStr, lex.s)
	}

	pipes = append(pipes, pu)

	q = &Query{
		f:     q.f,
		pipes: pipes,
	}

	var values []string
	var valuesLock sync.Mutex
	writeBlockResult := func(_ uint, br *blockResult) {
		if len(br.timestamps) == 0 {
			return
		}

		cs := br.getColumns()
		if len(cs) != 1 {
			logger.Panicf("BUG: expecting one column; got %d columns", len(cs))
		}

		columnValues := cs[0].getValues(br)

		columnValuesCopy := make([]string, len(columnValues))
		for i := range columnValues {
			columnValuesCopy[i] = strings.Clone(columnValues[i])
		}

		valuesLock.Lock()
		values = append(values, columnValuesCopy...)
		valuesLock.Unlock()
	}

	if err := s.runQuery(ctx, tenantIDs, q, writeBlockResult); err != nil {
		return nil, err
	}

	return values, nil
}

// GetFieldValues returns unique values with the number of hits for the given fieldName returned by q for the given tenantIDs.
//
// If limit > 0, then up to limit unique values are returned.
func (s *Storage) GetFieldValues(ctx context.Context, tenantIDs []TenantID, q *Query, fieldName string, limit uint64) ([]ValueWithHits, error) {
	pipes := append([]pipe{}, q.pipes...)
	quotedFieldName := quoteTokenIfNeeded(fieldName)
	pipeStr := fmt.Sprintf("uniq by (%s) with hits limit %d", quotedFieldName, limit)
	lex := newLexer(pipeStr)

	pu, err := parsePipeUniq(lex)
	if err != nil {
		logger.Panicf("BUG: unexpected error when parsing 'uniq' pipe at [%s]: %s", pipeStr, err)
	}

	if !lex.isEnd() {
		logger.Panicf("BUG: unexpected tail left after parsing pipes [%s]: %q", pipeStr, lex.s)
	}

	pipes = append(pipes, pu)

	q = &Query{
		f:     q.f,
		pipes: pipes,
	}

	return s.runValuesWithHitsQuery(ctx, tenantIDs, q)
}
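
// An illustrative usage sketch (hypothetical field name and limit): fetch the
// top 10 unique values of the "level" field matching q, together with their hit counts.
//
//	values, err := s.GetFieldValues(ctx, tenantIDs, q, "level", 10)
//	if err != nil {
//		return err
//	}
//	for _, v := range values {
//		fmt.Printf("%s: %d hits\n", v.Value, v.Hits)
//	}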

// ValueWithHits contains value and hits.
type ValueWithHits struct {
	Value string
	Hits  uint64
}

func toValuesWithHits(m map[string]*uint64) []ValueWithHits {
	results := make([]ValueWithHits, 0, len(m))
	for k, pHits := range m {
		results = append(results, ValueWithHits{
			Value: k,
			Hits:  *pHits,
		})
	}
	sortValuesWithHits(results)
	return results
}

func sortValuesWithHits(results []ValueWithHits) {
	slices.SortFunc(results, func(a, b ValueWithHits) int {
		if a.Hits == b.Hits {
			if a.Value == b.Value {
				return 0
			}
			if lessString(a.Value, b.Value) {
				return -1
			}
			return 1
		}
		// Sort in descending order of hits
		if a.Hits < b.Hits {
			return 1
		}
		return -1
	})
}

// GetStreamFieldNames returns stream field names from q results for the given tenantIDs.
func (s *Storage) GetStreamFieldNames(ctx context.Context, tenantIDs []TenantID, q *Query) ([]ValueWithHits, error) {
	streams, err := s.GetStreams(ctx, tenantIDs, q, math.MaxUint64)
	if err != nil {
		return nil, err
	}

	m := make(map[string]*uint64)
	forEachStreamField(streams, func(f Field, hits uint64) {
		pHits, ok := m[f.Name]
		if !ok {
			nameCopy := strings.Clone(f.Name)
			hitsLocal := uint64(0)
			pHits = &hitsLocal
			m[nameCopy] = pHits
		}
		*pHits += hits
	})
	names := toValuesWithHits(m)
	return names, nil
}

// GetStreamFieldValues returns stream field values for the given fieldName from q results for the given tenantIDs.
//
// If limit > 0, then up to limit unique values are returned.
func (s *Storage) GetStreamFieldValues(ctx context.Context, tenantIDs []TenantID, q *Query, fieldName string, limit uint64) ([]ValueWithHits, error) {
	streams, err := s.GetStreams(ctx, tenantIDs, q, math.MaxUint64)
	if err != nil {
		return nil, err
	}

	m := make(map[string]*uint64)
	forEachStreamField(streams, func(f Field, hits uint64) {
		if f.Name != fieldName {
			return
		}
		pHits, ok := m[f.Value]
		if !ok {
			valueCopy := strings.Clone(f.Value)
			hitsLocal := uint64(0)
			pHits = &hitsLocal
			m[valueCopy] = pHits
		}
		*pHits += hits
	})
	values := toValuesWithHits(m)
	if limit > 0 && uint64(len(values)) > limit {
		values = values[:limit]
	}
	return values, nil
}

// GetStreams returns streams from q results for the given tenantIDs.
//
// If limit > 0, then up to limit unique streams are returned.
func (s *Storage) GetStreams(ctx context.Context, tenantIDs []TenantID, q *Query, limit uint64) ([]ValueWithHits, error) {
	return s.GetFieldValues(ctx, tenantIDs, q, "_stream", limit)
}

func (s *Storage) runValuesWithHitsQuery(ctx context.Context, tenantIDs []TenantID, q *Query) ([]ValueWithHits, error) {
	var results []ValueWithHits
	var resultsLock sync.Mutex
	writeBlockResult := func(_ uint, br *blockResult) {
		if len(br.timestamps) == 0 {
			return
		}

		cs := br.getColumns()
		if len(cs) != 2 {
			logger.Panicf("BUG: expecting two columns; got %d columns", len(cs))
		}

		columnValues := cs[0].getValues(br)
		columnHits := cs[1].getValues(br)

		valuesWithHits := make([]ValueWithHits, len(columnValues))
		for i := range columnValues {
			x := &valuesWithHits[i]
			hits, _ := tryParseUint64(columnHits[i])
			x.Value = strings.Clone(columnValues[i])
			x.Hits = hits
		}

		resultsLock.Lock()
		results = append(results, valuesWithHits...)
		resultsLock.Unlock()
	}

	err := s.runQuery(ctx, tenantIDs, q, writeBlockResult)
	if err != nil {
		return nil, err
	}
	sortValuesWithHits(results)

	return results, nil
}

func (s *Storage) initFilterInValues(ctx context.Context, tenantIDs []TenantID, q *Query) (*Query, error) {
	if !hasFilterInWithQueryForFilter(q.f) && !hasFilterInWithQueryForPipes(q.pipes) {
		return q, nil
	}

	getFieldValues := func(q *Query, fieldName string) ([]string, error) {
		return s.getFieldValuesNoHits(ctx, tenantIDs, q, fieldName)
	}
	cache := make(map[string][]string)
	fNew, err := initFilterInValuesForFilter(cache, q.f, getFieldValues)
	if err != nil {
		return nil, err
	}
	pipesNew, err := initFilterInValuesForPipes(cache, q.pipes, getFieldValues)
	if err != nil {
		return nil, err
	}
	qNew := &Query{
		f:     fNew,
		pipes: pipesNew,
	}
	return qNew, nil
}
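
// An illustrative LogsQL example (hypothetical query): a filter such as
//
//	user_id:in(_time:5m error | fields user_id)
//
// contains a subquery, which must be executed before the main search.
// initFilterInValues runs each such subquery once via getFieldValuesNoHits
// (deduplicated through the cache keyed by the subquery string) and substitutes
// the resulting values into the corresponding in(...) filter.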

func (iff *ifFilter) hasFilterInWithQuery() bool {
	if iff == nil {
		return false
	}
	return hasFilterInWithQueryForFilter(iff.f)
}

func hasFilterInWithQueryForFilter(f filter) bool {
	if f == nil {
		return false
	}
	visitFunc := func(f filter) bool {
		fi, ok := f.(*filterIn)
		return ok && fi.needExecuteQuery
	}
	return visitFilter(f, visitFunc)
}

func hasFilterInWithQueryForPipes(pipes []pipe) bool {
	for _, p := range pipes {
		if p.hasFilterInWithQuery() {
			return true
		}
	}
	return false
}

type getFieldValuesFunc func(q *Query, fieldName string) ([]string, error)

func (iff *ifFilter) initFilterInValues(cache map[string][]string, getFieldValuesFunc getFieldValuesFunc) (*ifFilter, error) {
	if iff == nil {
		return nil, nil
	}

	f, err := initFilterInValuesForFilter(cache, iff.f, getFieldValuesFunc)
	if err != nil {
		return nil, err
	}

	iffNew := *iff
	iffNew.f = f
	return &iffNew, nil
}

func initFilterInValuesForFilter(cache map[string][]string, f filter, getFieldValuesFunc getFieldValuesFunc) (filter, error) {
	if f == nil {
		return nil, nil
	}

	visitFunc := func(f filter) bool {
		fi, ok := f.(*filterIn)
		return ok && fi.needExecuteQuery
	}
	copyFunc := func(f filter) (filter, error) {
		fi := f.(*filterIn)

		qStr := fi.q.String()
		values, ok := cache[qStr]
		if !ok {
			vs, err := getFieldValuesFunc(fi.q, fi.qFieldName)
			if err != nil {
				return nil, fmt.Errorf("cannot obtain unique values for %s: %w", fi, err)
			}
			cache[qStr] = vs
			values = vs
		}

		fiNew := &filterIn{
			fieldName: fi.fieldName,
			q:         fi.q,
			values:    values,
		}
		return fiNew, nil
	}
	return copyFilter(f, visitFunc, copyFunc)
}

func initFilterInValuesForPipes(cache map[string][]string, pipes []pipe, getFieldValuesFunc getFieldValuesFunc) ([]pipe, error) {
	pipesNew := make([]pipe, len(pipes))
	for i, p := range pipes {
		pNew, err := p.initFilterInValues(cache, getFieldValuesFunc)
		if err != nil {
			return nil, err
		}
		pipesNew[i] = pNew
	}
	return pipesNew, nil
}

type blockRows struct {
	cs []BlockColumn
}

func (brs *blockRows) reset() {
	cs := brs.cs
	for i := range cs {
		cs[i].reset()
	}
	brs.cs = cs[:0]
}

func getBlockRows() *blockRows {
	v := blockRowsPool.Get()
	if v == nil {
		return &blockRows{}
	}
	return v.(*blockRows)
}

func putBlockRows(brs *blockRows) {
	brs.reset()
	blockRowsPool.Put(brs)
}

var blockRowsPool sync.Pool

// BlockColumn is a single column of a block of data
type BlockColumn struct {
	// Name is the column name
	Name string

	// Values contains the column values
	Values []string
}

func (c *BlockColumn) reset() {
	c.Name = ""
	c.Values = nil
}

// searchResultFunc must process br.
//
// The callback is called at the worker with the given workerID.
type searchResultFunc func(workerID uint, br *blockResult)

// search searches for the matching rows according to so.
//
// It calls processBlockResult for each matching block.
func (s *Storage) search(workersCount int, so *genericSearchOptions, stopCh <-chan struct{}, processBlockResult searchResultFunc) {
	// Spin up workers
	var wgWorkers sync.WaitGroup
	workCh := make(chan *blockSearchWorkBatch, workersCount)
	wgWorkers.Add(workersCount)
	for i := 0; i < workersCount; i++ {
		go func(workerID uint) {
			bs := getBlockSearch()
			bm := getBitmap(0)
			for bswb := range workCh {
				bsws := bswb.bsws
				for i := range bsws {
					bsw := &bsws[i]
					if needStop(stopCh) {
						// The search has been canceled. Just skip all the scheduled work in order to save CPU time.
						bsw.reset()
						continue
					}

					bs.search(bsw, bm)
					if len(bs.br.timestamps) > 0 {
						processBlockResult(workerID, &bs.br)
					}
					bsw.reset()
				}
				bswb.bsws = bswb.bsws[:0]
				putBlockSearchWorkBatch(bswb)
			}
			putBlockSearch(bs)
			putBitmap(bm)
			wgWorkers.Done()
		}(uint(i))
	}

	// Obtain time range from so.filter
	f := so.filter
	minTimestamp, maxTimestamp := getFilterTimeRange(f)

	// Select partitions according to the selected time range
	s.partitionsLock.Lock()
	ptws := s.partitions
	minDay := minTimestamp / nsecPerDay
	n := sort.Search(len(ptws), func(i int) bool {
		return ptws[i].day >= minDay
	})
	ptws = ptws[n:]
	maxDay := maxTimestamp / nsecPerDay
	n = sort.Search(len(ptws), func(i int) bool {
		return ptws[i].day > maxDay
	})
	ptws = ptws[:n]
	for _, ptw := range ptws {
		ptw.incRef()
	}
	s.partitionsLock.Unlock()

	// Obtain common filterStream from f
	var sf *StreamFilter
	sf, f = getCommonStreamFilter(f)

	// Schedule concurrent search across matching partitions.
	psfs := make([]partitionSearchFinalizer, len(ptws))
	var wgSearchers sync.WaitGroup
	for i, ptw := range ptws {
		partitionSearchConcurrencyLimitCh <- struct{}{}
		wgSearchers.Add(1)
		go func(idx int, pt *partition) {
			psfs[idx] = pt.search(minTimestamp, maxTimestamp, sf, f, so, workCh, stopCh)
			wgSearchers.Done()
			<-partitionSearchConcurrencyLimitCh
		}(i, ptw.pt)
	}
	wgSearchers.Wait()

	// Wait until workers finish their work
	close(workCh)
	wgWorkers.Wait()

	// Finalize partition search
	for _, psf := range psfs {
		psf()
	}

	// Decrement references to partitions
	for _, ptw := range ptws {
		ptw.decRef()
	}
}

// partitionSearchConcurrencyLimitCh limits the number of concurrent searches in partitions.
//
// This is needed for limiting memory usage under high load.
var partitionSearchConcurrencyLimitCh = make(chan struct{}, cgroup.AvailableCPUs())
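
// A sketch of the semaphore idiom used with partitionSearchConcurrencyLimitCh
// (explanatory note, not original code): sending into the channel acquires a slot
// and blocks when all slots are busy, while receiving from it releases the slot.
// This caps the number of concurrently searched partitions at the number of
// available CPU cores:
//
//	partitionSearchConcurrencyLimitCh <- struct{}{} // acquire
//	go func() {
//		defer func() { <-partitionSearchConcurrencyLimitCh }() // release
//		// search a single partition ...
//	}()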

type partitionSearchFinalizer func()

func (pt *partition) search(minTimestamp, maxTimestamp int64, sf *StreamFilter, f filter, so *genericSearchOptions, workCh chan<- *blockSearchWorkBatch, stopCh <-chan struct{}) partitionSearchFinalizer {
	if needStop(stopCh) {
		// Do not spend CPU time on search, since it is already stopped.
		return func() {}
	}

	tenantIDs := so.tenantIDs
	var streamIDs []streamID
	if sf != nil {
		streamIDs = pt.idb.searchStreamIDs(tenantIDs, sf)
		tenantIDs = nil
	}
	if hasStreamFilters(f) {
		f = initStreamFilters(tenantIDs, pt.idb, f)
	}
	soInternal := &searchOptions{
		tenantIDs:           tenantIDs,
		streamIDs:           streamIDs,
		minTimestamp:        minTimestamp,
		maxTimestamp:        maxTimestamp,
		filter:              f,
		neededColumnNames:   so.neededColumnNames,
		unneededColumnNames: so.unneededColumnNames,
		needAllColumns:      so.needAllColumns,
	}
	return pt.ddb.search(soInternal, workCh, stopCh)
}

func hasStreamFilters(f filter) bool {
	visitFunc := func(f filter) bool {
		_, ok := f.(*filterStream)
		return ok
	}
	return visitFilter(f, visitFunc)
}

func initStreamFilters(tenantIDs []TenantID, idb *indexdb, f filter) filter {
	visitFunc := func(f filter) bool {
		_, ok := f.(*filterStream)
		return ok
	}
	copyFunc := func(f filter) (filter, error) {
		fs := f.(*filterStream)
		fsNew := &filterStream{
			f:         fs.f,
			tenantIDs: tenantIDs,
			idb:       idb,
		}
		return fsNew, nil
	}
	f, err := copyFilter(f, visitFunc, copyFunc)
	if err != nil {
		logger.Panicf("BUG: unexpected error: %s", err)
	}
	return f
}

func (ddb *datadb) search(so *searchOptions, workCh chan<- *blockSearchWorkBatch, stopCh <-chan struct{}) partitionSearchFinalizer {
	// Select parts with data for the given time range
	ddb.partsLock.Lock()
	pws := appendPartsInTimeRange(nil, ddb.bigParts, so.minTimestamp, so.maxTimestamp)
	pws = appendPartsInTimeRange(pws, ddb.smallParts, so.minTimestamp, so.maxTimestamp)
	pws = appendPartsInTimeRange(pws, ddb.inmemoryParts, so.minTimestamp, so.maxTimestamp)

	// Increase references to the searched parts, so they aren't deleted during search.
	// References to the searched parts must be decremented by calling the returned partitionSearchFinalizer.
	for _, pw := range pws {
		pw.incRef()
	}
	ddb.partsLock.Unlock()

	// Apply search to matching parts
	for _, pw := range pws {
		pw.p.search(so, workCh, stopCh)
	}

	return func() {
		for _, pw := range pws {
			pw.decRef()
		}
	}
}

func (p *part) search(so *searchOptions, workCh chan<- *blockSearchWorkBatch, stopCh <-chan struct{}) {
	bhss := getBlockHeaders()
	if len(so.tenantIDs) > 0 {
		p.searchByTenantIDs(so, bhss, workCh, stopCh)
	} else {
		p.searchByStreamIDs(so, bhss, workCh, stopCh)
	}
	putBlockHeaders(bhss)
}

func getBlockHeaders() *blockHeaders {
	v := blockHeadersPool.Get()
	if v == nil {
		return &blockHeaders{}
	}
	return v.(*blockHeaders)
}

func putBlockHeaders(bhss *blockHeaders) {
	bhss.reset()
	blockHeadersPool.Put(bhss)
}

var blockHeadersPool sync.Pool

type blockHeaders struct {
	bhs []blockHeader
}

func (bhss *blockHeaders) reset() {
	bhs := bhss.bhs
	for i := range bhs {
		bhs[i].reset()
	}
	bhss.bhs = bhs[:0]
}

func (p *part) searchByTenantIDs(so *searchOptions, bhss *blockHeaders, workCh chan<- *blockSearchWorkBatch, stopCh <-chan struct{}) {
	// it is assumed that tenantIDs are sorted
	tenantIDs := so.tenantIDs

	bswb := getBlockSearchWorkBatch()
	scheduleBlockSearch := func(bh *blockHeader) bool {
		if bswb.appendBlockSearchWork(p, so, bh) {
			return true
		}
		select {
		case <-stopCh:
			return false
		case workCh <- bswb:
			bswb = getBlockSearchWorkBatch()
			return true
		}
	}

	// it is assumed that ibhs are sorted
	ibhs := p.indexBlockHeaders
	for len(ibhs) > 0 && len(tenantIDs) > 0 {
		if needStop(stopCh) {
			return
		}

		// locate tenantID equal or bigger than the tenantID in ibhs[0]
		tenantID := &tenantIDs[0]
		if tenantID.less(&ibhs[0].streamID.tenantID) {
			tenantID = &ibhs[0].streamID.tenantID
			n := sort.Search(len(tenantIDs), func(i int) bool {
				return !tenantIDs[i].less(tenantID)
			})
			if n == len(tenantIDs) {
				tenantIDs = nil
				break
			}
			tenantID = &tenantIDs[n]
			tenantIDs = tenantIDs[n:]
		}

		// locate indexBlockHeader with equal or bigger tenantID than the given tenantID
		n := 0
		if ibhs[0].streamID.tenantID.less(tenantID) {
			n = sort.Search(len(ibhs), func(i int) bool {
				return !ibhs[i].streamID.tenantID.less(tenantID)
			})
			// The end of ibhs[n-1] may contain blocks for the given tenantID, so move it backwards
			n--
		}
		ibh := &ibhs[n]
		ibhs = ibhs[n+1:]

		if so.minTimestamp > ibh.maxTimestamp || so.maxTimestamp < ibh.minTimestamp {
			// Skip the ibh, since it doesn't contain entries on the requested time range
			continue
		}

		bhss.bhs = ibh.mustReadBlockHeaders(bhss.bhs[:0], p)

		bhs := bhss.bhs
		for len(bhs) > 0 {
			// search for blocks with the given tenantID
			n = sort.Search(len(bhs), func(i int) bool {
				return !bhs[i].streamID.tenantID.less(tenantID)
			})
			bhs = bhs[n:]
			for len(bhs) > 0 && bhs[0].streamID.tenantID.equal(tenantID) {
				bh := &bhs[0]
				bhs = bhs[1:]
				th := &bh.timestampsHeader
				if so.minTimestamp > th.maxTimestamp || so.maxTimestamp < th.minTimestamp {
					continue
				}
				if !scheduleBlockSearch(bh) {
					return
				}
			}
			if len(bhs) == 0 {
				break
			}

			// search for the next tenantID, which can potentially match tenantID from bhs[0]
			tenantID = &bhs[0].streamID.tenantID
			n = sort.Search(len(tenantIDs), func(i int) bool {
				return !tenantIDs[i].less(tenantID)
			})
			if n == len(tenantIDs) {
				tenantIDs = nil
				break
			}
			tenantID = &tenantIDs[n]
			tenantIDs = tenantIDs[n:]
		}
	}

	// Flush the remaining work
	select {
	case <-stopCh:
	case workCh <- bswb:
	}
}

func (p *part) searchByStreamIDs(so *searchOptions, bhss *blockHeaders, workCh chan<- *blockSearchWorkBatch, stopCh <-chan struct{}) {
	// it is assumed that streamIDs are sorted
	streamIDs := so.streamIDs

	bswb := getBlockSearchWorkBatch()
	scheduleBlockSearch := func(bh *blockHeader) bool {
		if bswb.appendBlockSearchWork(p, so, bh) {
			return true
		}
		select {
		case <-stopCh:
			return false
		case workCh <- bswb:
			bswb = getBlockSearchWorkBatch()
			return true
		}
	}

	// it is assumed that ibhs are sorted
	ibhs := p.indexBlockHeaders

	for len(ibhs) > 0 && len(streamIDs) > 0 {
		if needStop(stopCh) {
			return
		}

		// locate streamID equal or bigger than the streamID in ibhs[0]
		streamID := &streamIDs[0]
		if streamID.less(&ibhs[0].streamID) {
			streamID = &ibhs[0].streamID
			n := sort.Search(len(streamIDs), func(i int) bool {
				return !streamIDs[i].less(streamID)
			})
			if n == len(streamIDs) {
				streamIDs = nil
				break
			}
			streamID = &streamIDs[n]
			streamIDs = streamIDs[n:]
		}

		// locate indexBlockHeader with equal or bigger streamID than the given streamID
		n := 0
		if ibhs[0].streamID.less(streamID) {
			n = sort.Search(len(ibhs), func(i int) bool {
				return !ibhs[i].streamID.less(streamID)
			})
			// The end of ibhs[n-1] may contain blocks for the given streamID, so move it backwards.
			n--
		}
		ibh := &ibhs[n]
		ibhs = ibhs[n+1:]

		if so.minTimestamp > ibh.maxTimestamp || so.maxTimestamp < ibh.minTimestamp {
			// Skip the ibh, since it doesn't contain entries on the requested time range
			continue
		}

		bhss.bhs = ibh.mustReadBlockHeaders(bhss.bhs[:0], p)

		bhs := bhss.bhs
		for len(bhs) > 0 {
			// search for blocks with the given streamID
			n = sort.Search(len(bhs), func(i int) bool {
				return !bhs[i].streamID.less(streamID)
			})
			bhs = bhs[n:]
			for len(bhs) > 0 && bhs[0].streamID.equal(streamID) {
				bh := &bhs[0]
				bhs = bhs[1:]
				th := &bh.timestampsHeader
				if so.minTimestamp > th.maxTimestamp || so.maxTimestamp < th.minTimestamp {
					continue
				}
				if !scheduleBlockSearch(bh) {
					return
				}
			}
			if len(bhs) == 0 {
				break
			}

			// search for the next streamID, which can potentially match streamID from bhs[0]
			streamID = &bhs[0].streamID
			n = sort.Search(len(streamIDs), func(i int) bool {
				return !streamIDs[i].less(streamID)
			})
			if n == len(streamIDs) {
				streamIDs = nil
				break
			}
			streamID = &streamIDs[n]
			streamIDs = streamIDs[n:]
		}
	}

	// Flush the remaining work
	select {
	case <-stopCh:
	case workCh <- bswb:
	}
}

func appendPartsInTimeRange(dst, src []*partWrapper, minTimestamp, maxTimestamp int64) []*partWrapper {
	for _, pw := range src {
		if maxTimestamp < pw.p.ph.MinTimestamp || minTimestamp > pw.p.ph.MaxTimestamp {
			continue
		}
		dst = append(dst, pw)
	}
	return dst
}

func getCommonStreamFilter(f filter) (*StreamFilter, filter) {
	switch t := f.(type) {
	case *filterAnd:
		filters := t.filters
		for i, filter := range filters {
			sf, ok := filter.(*filterStream)
			if ok && !sf.f.isEmpty() {
				// Remove sf from filters, since it is applied separately via the stream index
				// and wouldn't filter out anything if left in place.
				fa := &filterAnd{
					filters: append(filters[:i:i], filters[i+1:]...),
				}
				return sf.f, fa
			}
		}
	case *filterStream:
		return t.f, &filterNoop{}
	}
	return nil, f
}
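
// An illustrative example (hypothetical query): for `_stream:{app="foo"} error`,
// getCommonStreamFilter extracts the stream filter {app="foo"}, so it can be
// resolved once via the stream index, and returns the remaining `error` filter,
// which is then applied only to blocks belonging to the matching streams.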

func getFilterTimeRange(f filter) (int64, int64) {
	switch t := f.(type) {
	case *filterAnd:
		minTimestamp := int64(math.MinInt64)
		maxTimestamp := int64(math.MaxInt64)
		for _, filter := range t.filters {
			ft, ok := filter.(*filterTime)
			if ok {
				if ft.minTimestamp > minTimestamp {
					minTimestamp = ft.minTimestamp
				}
				if ft.maxTimestamp < maxTimestamp {
					maxTimestamp = ft.maxTimestamp
				}
			}
		}
		return minTimestamp, maxTimestamp
	case *filterTime:
		return t.minTimestamp, t.maxTimestamp
	}
	return math.MinInt64, math.MaxInt64
}

func forEachStreamField(streams []ValueWithHits, f func(f Field, hits uint64)) {
	var fields []Field
	for i := range streams {
		var err error
		fields, err = parseStreamFields(fields[:0], streams[i].Value)
		if err != nil {
			continue
		}
		hits := streams[i].Hits
		for j := range fields {
			f(fields[j], hits)
		}
	}
}

func parseStreamFields(dst []Field, s string) ([]Field, error) {
	if len(s) == 0 || s[0] != '{' {
		return dst, fmt.Errorf("missing '{' at the beginning of stream name")
	}
	s = s[1:]
	if len(s) == 0 || s[len(s)-1] != '}' {
		return dst, fmt.Errorf("missing '}' at the end of stream name")
	}
	s = s[:len(s)-1]
	if len(s) == 0 {
		return dst, nil
	}

	for {
		n := strings.Index(s, `="`)
		if n < 0 {
			return dst, fmt.Errorf("cannot find field value in double quotes at [%s]", s)
		}
		name := s[:n]
		s = s[n+1:]

		value, nOffset := tryUnquoteString(s, "")
		if nOffset < 0 {
			return dst, fmt.Errorf("cannot parse field value in double quotes at [%s]", s)
		}
		s = s[nOffset:]

		dst = append(dst, Field{
			Name:  name,
			Value: value,
		})

		if len(s) == 0 {
			return dst, nil
		}
		if s[0] != ',' {
			return dst, fmt.Errorf("missing ',' after %s=%q", name, value)
		}
		s = s[1:]
	}
}
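
// parseStreamFieldsExample is an illustrative sketch (not part of the original
// file) showing the input format expected by parseStreamFields: a stream name
// such as {job="nginx",instance="host-1"} is decoded into its label Fields.
func parseStreamFieldsExample() {
	fields, err := parseStreamFields(nil, `{job="nginx",instance="host-1"}`)
	if err != nil {
		logger.Panicf("BUG: cannot parse stream name: %s", err)
	}
	for _, f := range fields {
		fmt.Printf("%s=%q\n", f.Name, f.Value)
	}
}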