2019-05-22 23:16:55 +02:00
|
|
|
package storage
|
|
|
|
|
|
|
|
import (
	"errors"
	"fmt"
	"io"
	"strings"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/querytracer"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/stringsutil"
)
|
|
|
|
|
2020-04-27 07:13:41 +02:00
|
|
|
// BlockRef references a Block.
//
// BlockRef is valid only until the corresponding Search is valid,
// i.e. it becomes invalid after Search.MustClose is called.
type BlockRef struct {
	// p is the part the referenced block is stored in.
	p *part

	// bh is a copy of the header describing the block inside p
	// (offsets and sizes of the timestamps and values payloads).
	bh blockHeader
}
|
|
|
|
|
|
|
|
// reset clears br, so it no longer references any part or block.
func (br *BlockRef) reset() {
	br.p = nil
	br.bh = blockHeader{}
}
|
|
|
|
|
|
|
|
// init makes br reference the block described by bh inside the part p.
//
// The blockHeader is copied by value, so the caller may reuse bh afterwards.
func (br *BlockRef) init(p *part, bh *blockHeader) {
	br.p = p
	br.bh = *bh
}
|
|
|
|
|
|
|
|
// MustReadBlock reads block from br to dst.
func (br *BlockRef) MustReadBlock(dst *Block) {
	dst.Reset()
	// Copy the block header so dst knows how to interpret the raw payloads.
	dst.bh = br.bh

	// Read the raw timestamps payload at its offset in the part's timestamps file.
	// ResizeNoCopyMayOverallocate reuses dst's buffer capacity without copying old contents.
	dst.timestampsData = bytesutil.ResizeNoCopyMayOverallocate(dst.timestampsData, int(br.bh.TimestampsBlockSize))
	br.p.timestampsFile.MustReadAt(dst.timestampsData, int64(br.bh.TimestampsBlockOffset))

	// Read the raw values payload at its offset in the part's values file.
	dst.valuesData = bytesutil.ResizeNoCopyMayOverallocate(dst.valuesData, int(br.bh.ValuesBlockSize))
	br.p.valuesFile.MustReadAt(dst.valuesData, int64(br.bh.ValuesBlockOffset))
}
|
|
|
|
|
|
|
|
// MetricBlockRef contains reference to time series block for a single metric.
type MetricBlockRef struct {
	// The metric name
	MetricName []byte

	// The block reference. Call BlockRef.MustReadBlock in order to obtain the block.
	BlockRef *BlockRef
}
|
|
|
|
|
2019-05-22 23:16:55 +02:00
|
|
|
// MetricBlock is a time series block for a single metric.
type MetricBlock struct {
	// MetricName is metric name for the given Block.
	MetricName []byte

	// Block is a block for the given MetricName
	Block Block
}
|
|
|
|
|
|
|
|
// Marshal marshals MetricBlock to dst
|
|
|
|
func (mb *MetricBlock) Marshal(dst []byte) []byte {
|
|
|
|
dst = encoding.MarshalBytes(dst, mb.MetricName)
|
2020-04-27 07:13:41 +02:00
|
|
|
return MarshalBlock(dst, &mb.Block)
|
2019-05-22 23:16:55 +02:00
|
|
|
}
|
|
|
|
|
2022-07-06 12:19:45 +02:00
|
|
|
// CopyFrom copies src to mb.
|
|
|
|
func (mb *MetricBlock) CopyFrom(src *MetricBlock) {
|
|
|
|
mb.MetricName = append(mb.MetricName[:0], src.MetricName...)
|
|
|
|
mb.Block.CopyFrom(&src.Block)
|
|
|
|
}
|
|
|
|
|
2019-05-22 23:16:55 +02:00
|
|
|
// MarshalBlock marshals b to dst.
|
|
|
|
//
|
|
|
|
// b.MarshalData must be called on b before calling MarshalBlock.
|
|
|
|
func MarshalBlock(dst []byte, b *Block) []byte {
|
|
|
|
dst = b.bh.Marshal(dst)
|
|
|
|
dst = encoding.MarshalBytes(dst, b.timestampsData)
|
|
|
|
dst = encoding.MarshalBytes(dst, b.valuesData)
|
|
|
|
return dst
|
|
|
|
}
|
|
|
|
|
|
|
|
// Unmarshal unmarshals MetricBlock from src
|
|
|
|
func (mb *MetricBlock) Unmarshal(src []byte) ([]byte, error) {
|
2020-04-27 07:13:41 +02:00
|
|
|
mb.Block.Reset()
|
2019-05-22 23:16:55 +02:00
|
|
|
tail, mn, err := encoding.UnmarshalBytes(src)
|
|
|
|
if err != nil {
|
2020-06-30 21:58:18 +02:00
|
|
|
return tail, fmt.Errorf("cannot unmarshal MetricName: %w", err)
|
2019-05-22 23:16:55 +02:00
|
|
|
}
|
|
|
|
mb.MetricName = append(mb.MetricName[:0], mn...)
|
|
|
|
src = tail
|
|
|
|
|
2020-04-27 07:13:41 +02:00
|
|
|
return UnmarshalBlock(&mb.Block, src)
|
2019-05-22 23:16:55 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// UnmarshalBlock unmarshal Block from src to dst.
|
|
|
|
//
|
|
|
|
// dst.UnmarshalData isn't called on the block.
|
|
|
|
func UnmarshalBlock(dst *Block, src []byte) ([]byte, error) {
|
|
|
|
tail, err := dst.bh.Unmarshal(src)
|
|
|
|
if err != nil {
|
2020-06-30 21:58:18 +02:00
|
|
|
return tail, fmt.Errorf("cannot unmarshal blockHeader: %w", err)
|
2019-05-22 23:16:55 +02:00
|
|
|
}
|
|
|
|
src = tail
|
|
|
|
|
|
|
|
tail, tds, err := encoding.UnmarshalBytes(src)
|
|
|
|
if err != nil {
|
2020-06-30 21:58:18 +02:00
|
|
|
return tail, fmt.Errorf("cannot unmarshal timestampsData: %w", err)
|
2019-05-22 23:16:55 +02:00
|
|
|
}
|
|
|
|
dst.timestampsData = append(dst.timestampsData[:0], tds...)
|
|
|
|
src = tail
|
|
|
|
|
|
|
|
tail, vd, err := encoding.UnmarshalBytes(src)
|
|
|
|
if err != nil {
|
2020-06-30 21:58:18 +02:00
|
|
|
return tail, fmt.Errorf("cannot unmarshal valuesData: %w", err)
|
2019-05-22 23:16:55 +02:00
|
|
|
}
|
|
|
|
dst.valuesData = append(dst.valuesData[:0], vd...)
|
|
|
|
src = tail
|
|
|
|
|
|
|
|
return src, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Search is a search for time series.
type Search struct {
	// MetricBlockRef is updated with each Search.NextMetricBlock call.
	MetricBlockRef MetricBlockRef

	// idb is used for MetricName lookup for the found data blocks.
	idb *indexDB

	// retentionDeadline is used for filtering out blocks outside the configured retention.
	retentionDeadline int64

	// ts iterates over data blocks in the table for the found series.
	ts tableSearch

	// tr contains time range used in the search.
	tr TimeRange

	// tfss contains tag filters used in the search.
	tfss []*TagFilters

	// deadline in unix timestamp seconds for the current search.
	deadline uint64

	// err holds the first error encountered; it is set to io.EOF
	// when the search is exhausted.
	err error

	// needClosing is set by Init and cleared by MustClose;
	// it guards against misuse of the Init/MustClose pairing.
	needClosing bool

	// loops counts block iterations; used for periodic deadline checks
	// in NextMetricBlock.
	loops int

	// prevMetricID is the MetricID of the previously returned block;
	// it is used for skipping repeated MetricName lookups for
	// consecutive blocks of the same series.
	prevMetricID uint64
}
|
|
|
|
|
|
|
|
// reset returns s to its initial state.
//
// The MetricName buffer capacity is retained (truncated via [:0]) so
// subsequent searches can reuse it without re-allocating.
func (s *Search) reset() {
	s.MetricBlockRef.MetricName = s.MetricBlockRef.MetricName[:0]
	s.MetricBlockRef.BlockRef = nil

	s.idb = nil
	s.retentionDeadline = 0
	s.ts.reset()
	s.tr = TimeRange{}
	s.tfss = nil
	s.deadline = 0
	s.err = nil
	s.needClosing = false
	s.loops = 0
	s.prevMetricID = 0
}
|
|
|
|
|
|
|
|
// Init initializes s from the given storage, tfss and tr.
//
// MustClose must be called when the search is done.
//
// Init returns the upper bound on the number of found time series.
func (s *Search) Init(qt *querytracer.Tracer, storage *Storage, tfss []*TagFilters, tr TimeRange, maxMetrics int, deadline uint64) int {
	qt = qt.NewChild("init series search: filters=%s, timeRange=%s", tfss, &tr)
	defer qt.Done()
	// Calling Init twice without an intervening MustClose is a programmer bug.
	if s.needClosing {
		logger.Panicf("BUG: missing MustClose call before the next call to Init")
	}
	// Blocks with MaxTimestamp below this deadline are outside the retention
	// and are skipped by NextMetricBlock.
	retentionDeadline := int64(fasttime.UnixTimestamp()*1e3) - storage.retentionMsecs

	s.reset()
	s.idb = storage.idb()
	s.retentionDeadline = retentionDeadline
	s.tr = tr
	s.tfss = tfss
	s.deadline = deadline
	s.needClosing = true

	// Resolve tag filters to metricIDs, then to TSIDs, and prefetch the
	// metric names so NextMetricBlock lookups hit the cache.
	// NOTE(review): accountID/projectID are taken from tfss[0]; presumably all
	// entries in tfss share the same tenant — confirm with callers.
	var tsids []TSID
	metricIDs, err := s.idb.searchMetricIDs(qt, tfss, tr, maxMetrics, deadline)
	if err == nil && len(metricIDs) > 0 && len(tfss) > 0 {
		accountID := tfss[0].accountID
		projectID := tfss[0].projectID
		tsids, err = s.idb.getTSIDsFromMetricIDs(qt, accountID, projectID, metricIDs, deadline)
		if err == nil {
			err = storage.prefetchMetricNames(qt, accountID, projectID, metricIDs, deadline)
		}
	}
	// It is ok to call Init on non-nil err.
	// Init must be called before returning because it will fail
	// on Search.MustClose otherwise.
	s.ts.Init(storage.tb, tsids, tr)
	qt.Printf("search for parts with data for %d series", len(tsids))
	if err != nil {
		s.err = err
		return 0
	}
	return len(tsids)
}
|
|
|
|
|
|
|
|
// MustClose closes the Search.
func (s *Search) MustClose() {
	// Closing a Search that was never initialized is a programmer bug.
	if !s.needClosing {
		logger.Panicf("BUG: missing Init call before MustClose")
	}
	s.ts.MustClose()
	// reset clears needClosing, so Init may be called again on s.
	s.reset()
}
|
|
|
|
|
|
|
|
// Error returns the last error from s.
|
|
|
|
func (s *Search) Error() error {
|
2020-08-10 12:36:00 +02:00
|
|
|
if s.err == io.EOF || s.err == nil {
|
2019-05-22 23:16:55 +02:00
|
|
|
return nil
|
|
|
|
}
|
2020-08-10 12:45:44 +02:00
|
|
|
return fmt.Errorf("error when searching for tagFilters=%s on the time range %s: %w", s.tfss, s.tr.String(), s.err)
|
2019-05-22 23:16:55 +02:00
|
|
|
}
|
|
|
|
|
2020-04-27 07:13:41 +02:00
|
|
|
// NextMetricBlock proceeds to the next MetricBlockRef.
//
// It returns false when the search is exhausted (s.err is set to io.EOF)
// or on error (retrievable via Error).
func (s *Search) NextMetricBlock() bool {
	if s.err != nil {
		return false
	}
	for s.ts.NextBlock() {
		// Check the search deadline only once per (mask+1) iterations
		// in order to keep the per-block overhead low.
		if s.loops&paceLimiterSlowIterationsMask == 0 {
			if err := checkSearchDeadlineAndPace(s.deadline); err != nil {
				s.err = err
				return false
			}
		}
		s.loops++
		tsid := &s.ts.BlockRef.bh.TSID
		// Blocks for the same MetricID arrive consecutively, so the
		// MetricName lookup is performed only when the MetricID changes.
		if tsid.MetricID != s.prevMetricID {
			if s.ts.BlockRef.bh.MaxTimestamp < s.retentionDeadline {
				// Skip the block, since it contains only data outside the configured retention.
				continue
			}
			var ok bool
			s.MetricBlockRef.MetricName, ok = s.idb.searchMetricNameWithCache(s.MetricBlockRef.MetricName[:0], tsid.MetricID, tsid.AccountID, tsid.ProjectID)
			if !ok {
				// Skip missing metricName for tsid.MetricID.
				// It should be automatically fixed. See indexDB.searchMetricNameWithCache for details.
				continue
			}
			s.prevMetricID = tsid.MetricID
		}
		s.MetricBlockRef.BlockRef = s.ts.BlockRef
		return true
	}
	// Propagate any iteration error from the underlying table search.
	if err := s.ts.Error(); err != nil {
		s.err = err
		return false
	}

	// io.EOF marks a successfully completed search; Error() maps it to nil.
	s.err = io.EOF
	return false
}
|
|
|
|
|
|
|
|
// SearchQuery is used for sending search queries from vmselect to vmstorage.
type SearchQuery struct {
	// AccountID and ProjectID identify the tenant the query is executed for.
	AccountID uint32
	ProjectID uint32

	// The time range for searching time series
	MinTimestamp int64
	MaxTimestamp int64

	// Tag filters for the search query
	TagFilterss [][]TagFilter

	// The maximum number of time series the search query can return.
	MaxMetrics int
}
|
|
|
|
|
2022-07-05 23:53:03 +02:00
|
|
|
// GetTimeRange returns time range for the given sq.
|
|
|
|
func (sq *SearchQuery) GetTimeRange() TimeRange {
|
|
|
|
return TimeRange{
|
|
|
|
MinTimestamp: sq.MinTimestamp,
|
|
|
|
MaxTimestamp: sq.MaxTimestamp,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-11-16 17:00:50 +01:00
|
|
|
// NewSearchQuery creates new search query for the given args.
|
2022-03-26 09:17:37 +01:00
|
|
|
func NewSearchQuery(accountID, projectID uint32, start, end int64, tagFilterss [][]TagFilter, maxMetrics int) *SearchQuery {
|
2024-01-21 22:41:11 +01:00
|
|
|
if start < 0 {
|
|
|
|
// This is needed for https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5553
|
|
|
|
start = 0
|
|
|
|
}
|
2022-03-26 09:17:37 +01:00
|
|
|
if maxMetrics <= 0 {
|
|
|
|
maxMetrics = 2e9
|
|
|
|
}
|
2020-11-16 17:00:50 +01:00
|
|
|
return &SearchQuery{
|
|
|
|
AccountID: accountID,
|
|
|
|
ProjectID: projectID,
|
|
|
|
MinTimestamp: start,
|
|
|
|
MaxTimestamp: end,
|
|
|
|
TagFilterss: tagFilterss,
|
2022-03-26 09:17:37 +01:00
|
|
|
MaxMetrics: maxMetrics,
|
2020-11-16 17:00:50 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-05-22 23:16:55 +02:00
|
|
|
// TagFilter represents a single tag filter from SearchQuery.
type TagFilter struct {
	// Key is the tag name; an empty Key denotes the metric name (__name__).
	Key []byte
	// Value is the tag value (or regexp when IsRegexp is set) to match against.
	Value []byte
	// IsNegative inverts the match (!= / !~).
	IsNegative bool
	// IsRegexp makes Value a regular expression match (=~ / !~).
	IsRegexp bool
}
|
|
|
|
|
|
|
|
// String returns string representation of tf.
|
|
|
|
func (tf *TagFilter) String() string {
|
2022-06-01 01:31:40 +02:00
|
|
|
op := tf.getOp()
|
2023-11-11 12:30:08 +01:00
|
|
|
value := stringsutil.LimitStringLen(string(tf.Value), 60)
|
2022-06-01 01:31:40 +02:00
|
|
|
if len(tf.Key) == 0 {
|
2022-06-30 17:17:07 +02:00
|
|
|
return fmt.Sprintf("__name__%s%q", op, value)
|
2022-06-01 01:31:40 +02:00
|
|
|
}
|
2022-06-30 17:17:07 +02:00
|
|
|
return fmt.Sprintf("%s%s%q", tf.Key, op, value)
|
2022-06-01 01:31:40 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
func (tf *TagFilter) getOp() string {
|
|
|
|
if tf.IsNegative {
|
|
|
|
if tf.IsRegexp {
|
|
|
|
return "!~"
|
|
|
|
}
|
|
|
|
return "!="
|
|
|
|
}
|
|
|
|
if tf.IsRegexp {
|
|
|
|
return "=~"
|
|
|
|
}
|
|
|
|
return "="
|
2019-05-22 23:16:55 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Marshal appends marshaled tf to dst and returns the result.
|
|
|
|
func (tf *TagFilter) Marshal(dst []byte) []byte {
|
|
|
|
dst = encoding.MarshalBytes(dst, tf.Key)
|
|
|
|
dst = encoding.MarshalBytes(dst, tf.Value)
|
|
|
|
|
|
|
|
x := 0
|
|
|
|
if tf.IsNegative {
|
|
|
|
x = 2
|
|
|
|
}
|
|
|
|
if tf.IsRegexp {
|
|
|
|
x |= 1
|
|
|
|
}
|
|
|
|
dst = append(dst, byte(x))
|
|
|
|
|
|
|
|
return dst
|
|
|
|
}
|
|
|
|
|
|
|
|
// Unmarshal unmarshals tf from src and returns the tail.
|
|
|
|
func (tf *TagFilter) Unmarshal(src []byte) ([]byte, error) {
|
|
|
|
tail, k, err := encoding.UnmarshalBytes(src)
|
|
|
|
if err != nil {
|
2020-06-30 21:58:18 +02:00
|
|
|
return tail, fmt.Errorf("cannot unmarshal Key: %w", err)
|
2019-05-22 23:16:55 +02:00
|
|
|
}
|
|
|
|
tf.Key = append(tf.Key[:0], k...)
|
|
|
|
src = tail
|
|
|
|
|
|
|
|
tail, v, err := encoding.UnmarshalBytes(src)
|
|
|
|
if err != nil {
|
2020-06-30 21:58:18 +02:00
|
|
|
return tail, fmt.Errorf("cannot unmarshal Value: %w", err)
|
2019-05-22 23:16:55 +02:00
|
|
|
}
|
|
|
|
tf.Value = append(tf.Value[:0], v...)
|
|
|
|
src = tail
|
|
|
|
|
|
|
|
if len(src) < 1 {
|
|
|
|
return src, fmt.Errorf("cannot unmarshal IsNegative+IsRegexp from empty src")
|
|
|
|
}
|
|
|
|
x := src[0]
|
|
|
|
switch x {
|
|
|
|
case 0:
|
|
|
|
tf.IsNegative = false
|
|
|
|
tf.IsRegexp = false
|
|
|
|
case 1:
|
|
|
|
tf.IsNegative = false
|
|
|
|
tf.IsRegexp = true
|
|
|
|
case 2:
|
|
|
|
tf.IsNegative = true
|
|
|
|
tf.IsRegexp = false
|
|
|
|
case 3:
|
|
|
|
tf.IsNegative = true
|
|
|
|
tf.IsRegexp = true
|
|
|
|
default:
|
|
|
|
return src, fmt.Errorf("unexpected value for IsNegative+IsRegexp: %d; must be in the range [0..3]", x)
|
|
|
|
}
|
|
|
|
src = src[1:]
|
|
|
|
|
|
|
|
return src, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// String returns string representation of the search query.
|
|
|
|
func (sq *SearchQuery) String() string {
|
2022-06-01 01:31:40 +02:00
|
|
|
a := make([]string, len(sq.TagFilterss))
|
|
|
|
for i, tfs := range sq.TagFilterss {
|
|
|
|
a[i] = tagFiltersToString(tfs)
|
|
|
|
}
|
2022-06-27 12:32:47 +02:00
|
|
|
start := TimestampToHumanReadableFormat(sq.MinTimestamp)
|
|
|
|
end := TimestampToHumanReadableFormat(sq.MaxTimestamp)
|
|
|
|
return fmt.Sprintf("accountID=%d, projectID=%d, filters=%s, timeRange=[%s..%s]", sq.AccountID, sq.ProjectID, a, start, end)
|
2022-06-01 01:31:40 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
func tagFiltersToString(tfs []TagFilter) string {
|
|
|
|
a := make([]string, len(tfs))
|
|
|
|
for i, tf := range tfs {
|
|
|
|
a[i] = tf.String()
|
2019-05-22 23:16:55 +02:00
|
|
|
}
|
2022-06-01 01:31:40 +02:00
|
|
|
return "{" + strings.Join(a, ",") + "}"
|
2019-05-22 23:16:55 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Marshal appends marshaled sq to dst and returns the result.
|
|
|
|
func (sq *SearchQuery) Marshal(dst []byte) []byte {
|
2019-05-22 23:23:23 +02:00
|
|
|
dst = encoding.MarshalUint32(dst, sq.AccountID)
|
|
|
|
dst = encoding.MarshalUint32(dst, sq.ProjectID)
|
2019-05-22 23:16:55 +02:00
|
|
|
dst = encoding.MarshalVarInt64(dst, sq.MinTimestamp)
|
|
|
|
dst = encoding.MarshalVarInt64(dst, sq.MaxTimestamp)
|
|
|
|
dst = encoding.MarshalVarUint64(dst, uint64(len(sq.TagFilterss)))
|
|
|
|
for _, tagFilters := range sq.TagFilterss {
|
|
|
|
dst = encoding.MarshalVarUint64(dst, uint64(len(tagFilters)))
|
|
|
|
for i := range tagFilters {
|
|
|
|
dst = tagFilters[i].Marshal(dst)
|
|
|
|
}
|
|
|
|
}
|
2022-03-26 09:17:37 +01:00
|
|
|
dst = encoding.MarshalUint32(dst, uint32(sq.MaxMetrics))
|
2019-05-22 23:16:55 +02:00
|
|
|
return dst
|
|
|
|
}
|
|
|
|
|
|
|
|
// Unmarshal unmarshals sq from src and returns the tail.
//
// The wire layout mirrors Marshal: AccountID, ProjectID (fixed uint32),
// MinTimestamp, MaxTimestamp (varint), the tag filter groups (each
// length-prefixed), and finally MaxMetrics (fixed uint32).
func (sq *SearchQuery) Unmarshal(src []byte) ([]byte, error) {
	if len(src) < 4 {
		return src, fmt.Errorf("cannot unmarshal AccountID: too short src len: %d; must be at least %d bytes", len(src), 4)
	}
	sq.AccountID = encoding.UnmarshalUint32(src)
	src = src[4:]

	if len(src) < 4 {
		return src, fmt.Errorf("cannot unmarshal ProjectID: too short src len: %d; must be at least %d bytes", len(src), 4)
	}
	sq.ProjectID = encoding.UnmarshalUint32(src)
	src = src[4:]

	tail, minTs, err := encoding.UnmarshalVarInt64(src)
	if err != nil {
		return src, fmt.Errorf("cannot unmarshal MinTimestamp: %w", err)
	}
	sq.MinTimestamp = minTs
	src = tail

	tail, maxTs, err := encoding.UnmarshalVarInt64(src)
	if err != nil {
		return src, fmt.Errorf("cannot unmarshal MaxTimestamp: %w", err)
	}
	sq.MaxTimestamp = maxTs
	src = tail

	tail, tfssCount, err := encoding.UnmarshalVarUint64(src)
	if err != nil {
		return src, fmt.Errorf("cannot unmarshal the count of TagFilterss: %w", err)
	}
	// Grow TagFilterss up to tfssCount while reusing existing capacity.
	if n := int(tfssCount) - cap(sq.TagFilterss); n > 0 {
		sq.TagFilterss = append(sq.TagFilterss[:cap(sq.TagFilterss)], make([][]TagFilter, n)...)
	}
	sq.TagFilterss = sq.TagFilterss[:tfssCount]
	src = tail

	for i := 0; i < int(tfssCount); i++ {
		// NOTE: tail and err here intentionally shadow the outer variables.
		tail, tfsCount, err := encoding.UnmarshalVarUint64(src)
		if err != nil {
			return src, fmt.Errorf("cannot unmarshal the count of TagFilters: %w", err)
		}
		src = tail

		// Grow the per-group slice while reusing existing capacity.
		tagFilters := sq.TagFilterss[i]
		if n := int(tfsCount) - cap(tagFilters); n > 0 {
			tagFilters = append(tagFilters[:cap(tagFilters)], make([]TagFilter, n)...)
		}
		tagFilters = tagFilters[:tfsCount]
		for j := 0; j < int(tfsCount); j++ {
			tail, err := tagFilters[j].Unmarshal(src)
			if err != nil {
				return tail, fmt.Errorf("cannot unmarshal TagFilter #%d: %w", j, err)
			}
			src = tail
		}
		sq.TagFilterss[i] = tagFilters
	}

	if len(src) < 4 {
		return src, fmt.Errorf("cannot unmarshal MaxMetrics: too short src len: %d; must be at least %d bytes", len(src), 4)
	}
	sq.MaxMetrics = int(encoding.UnmarshalUint32(src))
	src = src[4:]

	return src, nil
}
|
2020-07-23 19:42:57 +02:00
|
|
|
|
|
|
|
func checkSearchDeadlineAndPace(deadline uint64) error {
|
|
|
|
if fasttime.UnixTimestamp() > deadline {
|
2020-08-10 12:17:12 +02:00
|
|
|
return ErrDeadlineExceeded
|
2020-07-23 19:42:57 +02:00
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
2020-08-07 07:37:33 +02:00
|
|
|
|
|
|
|
// Pace limiter masks: a deadline check is performed only when
// (loopCounter & mask) == 0, i.e. once per (mask+1) iterations.
// The bigger the mask, the cheaper a single iteration is assumed to be;
// e.g. NextMetricBlock uses the slow mask (every 4096 iterations).
const (
	paceLimiterFastIterationsMask = 1<<16 - 1
	paceLimiterMediumIterationsMask = 1<<14 - 1
	paceLimiterSlowIterationsMask = 1<<12 - 1
)
|