VictoriaMetrics/lib/storage/part_search.go

package storage
import (
"fmt"
"io"
"os"
"sort"
"strings"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/blockcache"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
)
// partSearch represents a stream of blocks for the search args passed to Init.
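//
// Typical usage (a sketch inferred from the exported methods below;
// processBlockRef is a hypothetical caller-side callback, not part of this file):
//
//	var ps partSearch
//	ps.Init(p, tsids, tr)
//	for ps.NextBlock() {
//		processBlockRef(&ps.BlockRef)
//	}
//	if err := ps.Error(); err != nil {
//		// handle the search error
//	}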
type partSearch struct {
// BlockRef contains the reference to the found block after NextBlock call.
BlockRef BlockRef
// p is the part to search.
p *part
// tsids contains sorted tsids to search.
tsids []TSID
// tsidIdx points to the currently searched tsid in tsids.
tsidIdx int
// tr is a time range to search.
tr TimeRange
	// metaindex contains the remaining metaindex rows to scan.
	metaindex []metaindexRow
	// bhs contains block headers from the currently loaded index block.
	bhs []blockHeader
	// compressedIndexBuf and indexBuf are reusable buffers for reading
	// and decompressing index blocks.
	compressedIndexBuf []byte
	indexBuf           []byte
	// err is the last error; io.EOF means the search is over.
	err error
}
func (ps *partSearch) reset() {
ps.BlockRef.reset()
ps.p = nil
ps.tsids = nil
ps.tsidIdx = 0
ps.metaindex = nil
ps.bhs = nil
ps.compressedIndexBuf = ps.compressedIndexBuf[:0]
ps.indexBuf = ps.indexBuf[:0]
ps.err = nil
}
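// isInTest is set to true when the code runs from a test binary built by `go test`
// (such binaries have the `.test` suffix).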
var isInTest = func() bool {
return strings.HasSuffix(os.Args[0], ".test")
}()
// Init initializes the ps with the given p, tsids and tr.
//
// tsids must be sorted.
// tsids cannot be modified after the Init call, since it is owned by ps.
func (ps *partSearch) Init(p *part, tsids []TSID, tr TimeRange) {
ps.reset()
ps.p = p
if p.ph.MinTimestamp <= tr.MaxTimestamp && p.ph.MaxTimestamp >= tr.MinTimestamp {
if isInTest && !sort.SliceIsSorted(tsids, func(i, j int) bool { return tsids[i].Less(&tsids[j]) }) {
logger.Panicf("BUG: tsids must be sorted; got %+v", tsids)
}
// take ownership of tsids.
ps.tsids = tsids
}
ps.tr = tr
ps.metaindex = p.metaindex
// Advance to the first tsid. There is no need to check
// the returned result, since it will be checked in NextBlock.
ps.nextTSID()
}
// NextBlock advances to the next BlockRef.
//
// Returns true on success.
//
// The blocks are sorted by (TSID, MinTimestamp). Two subsequent blocks
// for the same TSID may contain overlapping time ranges.
func (ps *partSearch) NextBlock() bool {
for {
if ps.err != nil {
return false
}
if len(ps.bhs) == 0 {
if !ps.nextBHS() {
return false
}
}
if ps.searchBHS() {
return true
}
}
}
// Error returns the last error.
func (ps *partSearch) Error() error {
if ps.err == io.EOF {
return nil
}
return ps.err
}
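// nextTSID advances to the next tsid from ps.tsids and stores it
// in ps.BlockRef.bh.TSID.
//
// It returns false and sets ps.err to io.EOF when all the tsids are exhausted.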
func (ps *partSearch) nextTSID() bool {
if ps.tsidIdx >= len(ps.tsids) {
ps.err = io.EOF
return false
}
ps.BlockRef.bh.TSID = ps.tsids[ps.tsidIdx]
ps.tsidIdx++
return true
}
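// nextBHS locates the next index block, which may contain blocks for the current
// tsid and the search time range, reads it (via ibCache when possible) and puts
// its block headers into ps.bhs.
//
// It returns false and sets ps.err when no such index block exists or it cannot be read.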
func (ps *partSearch) nextBHS() bool {
for len(ps.metaindex) > 0 {
// Optimization: skip tsid values smaller than the minimum value
// from ps.metaindex.
for ps.BlockRef.bh.TSID.Less(&ps.metaindex[0].TSID) {
if !ps.nextTSID() {
return false
}
}
// Invariant: ps.BlockRef.bh.TSID >= ps.metaindex[0].TSID
ps.metaindex = skipSmallMetaindexRows(ps.metaindex, &ps.BlockRef.bh.TSID)
// Invariant: len(ps.metaindex) > 0 && ps.BlockRef.bh.TSID >= ps.metaindex[0].TSID
mr := &ps.metaindex[0]
ps.metaindex = ps.metaindex[1:]
if ps.BlockRef.bh.TSID.Less(&mr.TSID) {
logger.Panicf("BUG: invariant violation: ps.BlockRef.bh.TSID cannot be smaller than mr.TSID; got %+v vs %+v", &ps.BlockRef.bh.TSID, &mr.TSID)
}
if mr.MaxTimestamp < ps.tr.MinTimestamp {
// Skip mr with too small timestamps.
continue
}
if mr.MinTimestamp > ps.tr.MaxTimestamp {
// Skip mr with too big timestamps.
continue
}
// Found the index block which may contain the required data
// for the ps.BlockRef.bh.TSID and the given timestamp range.
indexBlockKey := blockcache.Key{
Part: ps.p,
Offset: mr.IndexBlockOffset,
}
b := ibCache.GetBlock(indexBlockKey)
if b == nil {
// Slow path - actually read and unpack the index block.
ib, err := ps.readIndexBlock(mr)
if err != nil {
ps.err = fmt.Errorf("cannot read index block for part %q at offset %d with size %d: %w",
&ps.p.ph, mr.IndexBlockOffset, mr.IndexBlockSize, err)
return false
}
b = ib
ibCache.PutBlock(indexBlockKey, b)
}
ib := b.(*indexBlock)
ps.bhs = ib.bhs
return true
}
// No more metaindex rows to search.
ps.err = io.EOF
return false
}
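// skipSmallMetaindexRows skips the leading metaindex rows, which cannot contain
// blocks for the given tsid, and returns the remaining rows.
//
// The caller must ensure len(metaindex) > 0 and tsid >= metaindex[0].TSID.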
func skipSmallMetaindexRows(metaindex []metaindexRow, tsid *TSID) []metaindexRow {
// Invariant: len(metaindex) > 0 && tsid >= metaindex[0].TSID.
if tsid.Less(&metaindex[0].TSID) {
logger.Panicf("BUG: invariant violation: tsid cannot be smaller than metaindex[0]; got %+v vs %+v", tsid, &metaindex[0].TSID)
}
if tsid.MetricID == metaindex[0].TSID.MetricID {
return metaindex
}
// Invariant: tsid > metaindex[0].TSID, so sort.Search cannot return 0.
n := sort.Search(len(metaindex), func(i int) bool {
return !metaindex[i].TSID.Less(tsid)
})
if n == 0 {
logger.Panicf("BUG: invariant violation: sort.Search returned 0 for tsid > metaindex[0].TSID; tsid=%+v; metaindex[0].TSID=%+v",
tsid, &metaindex[0].TSID)
}
// The given tsid may be located in the previous metaindex row,
// so go to the previous row.
// Suppose the following metaindex rows exist [tsid10, tsid20, tsid30].
// The following table contains the rows to start the search at
// for the given tsid values greater than tsid10:
//
// * tsid11 -> tsid10
// * tsid20 -> tsid10, since tsid20 items may be present in the index block [tsid10...tsid20]
// * tsid21 -> tsid20
// * tsid30 -> tsid20
// * tsid99 -> tsid30, since tsid99 items may be present in the index block [tsid30...tsidInf]
return metaindex[n-1:]
}
func (ps *partSearch) readIndexBlock(mr *metaindexRow) (*indexBlock, error) {
ps.compressedIndexBuf = bytesutil.ResizeNoCopyMayOverallocate(ps.compressedIndexBuf, int(mr.IndexBlockSize))
ps.p.indexFile.MustReadAt(ps.compressedIndexBuf, int64(mr.IndexBlockOffset))
var err error
ps.indexBuf, err = encoding.DecompressZSTD(ps.indexBuf[:0], ps.compressedIndexBuf)
if err != nil {
return nil, fmt.Errorf("cannot decompress index block: %w", err)
}
ib := &indexBlock{}
ib.bhs, err = unmarshalBlockHeaders(ib.bhs[:0], ps.indexBuf, int(mr.BlockHeadersCount))
if err != nil {
return nil, fmt.Errorf("cannot unmarshal index block: %w", err)
}
return ib, nil
}
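// searchBHS scans ps.bhs for the next block matching the current tsid
// and the search time range. On success it initializes ps.BlockRef and returns true.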
func (ps *partSearch) searchBHS() bool {
bhs := ps.bhs
for len(bhs) > 0 {
// Skip block headers with tsids smaller than the given tsid.
tsid := &ps.BlockRef.bh.TSID
n := sort.Search(len(bhs), func(i int) bool {
return !bhs[i].TSID.Less(tsid)
})
if n == len(bhs) {
// Nothing found.
break
}
bhs = bhs[n:]
// Invariant: tsid <= bh.TSID
bh := &bhs[0]
if bh.TSID.MetricID != tsid.MetricID {
// tsid < bh.TSID: no more blocks with the given tsid.
// Proceed to the next (bigger) tsid.
if !ps.nextTSID() {
return false
}
continue
}
// Found the block with the given tsid. Verify timestamp range.
// While blocks for the same TSID are sorted by MinTimestamp,
// they may contain overlapping time ranges.
// So use linear search instead of binary search.
if bh.MaxTimestamp < ps.tr.MinTimestamp {
// Skip the block with too small timestamps.
bhs = bhs[1:]
continue
}
if bh.MinTimestamp > ps.tr.MaxTimestamp {
// Proceed to the next tsid, since the remaining blocks
// for the current tsid contain too big timestamps.
if !ps.nextTSID() {
return false
}
continue
}
// Found the tsid block with the matching timestamp range.
// Read it.
ps.BlockRef.init(ps.p, bh)
ps.bhs = bhs[1:]
return true
}
ps.bhs = nil
return false
}