package storage

import (
	"bytes"
	"path/filepath"
	"sync"
	"sync/atomic"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/filestream"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
)

// blockStreamWriter represents a block stream writer, which writes a part as
// four streams: timestamps, values, index and metaindex data.
type blockStreamWriter struct {
	compressLevel int

	timestampsWriter filestream.WriteCloser
	valuesWriter     filestream.WriteCloser
	indexWriter      filestream.WriteCloser
	metaindexWriter  filestream.WriteCloser

	mr metaindexRow

	timestampsBlockOffset uint64
	valuesBlockOffset     uint64
	indexBlockOffset      uint64

	indexData           []byte
	compressedIndexData []byte

	metaindexData           []byte
	compressedMetaindexData []byte

	// The prevTimestamps* fields are used as an optimization for reducing disk space usage
	// when serially written blocks have identical timestamps.
	// This is usually the case when adjacent blocks contain metrics scraped from the same target,
	// since such metrics have identical timestamps.
	// See the worked example after WriteExternalBlock below.
	prevTimestampsData        []byte
	prevTimestampsBlockOffset uint64
}
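
// Data flow through the writer (a summary inferred from the methods below,
// not original API documentation): each written block sends its timestamps
// and values to timestampsWriter and valuesWriter, while its marshaled header
// is appended to indexData. Once indexData reaches maxBlockSize, it is
// ZSTD-compressed and flushed to indexWriter, and a metaindexRow describing
// the flushed chunk is appended to metaindexData, which is in turn compressed
// and written to metaindexWriter on MustClose.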

// reset resets bsw, so it can be re-used.
func (bsw *blockStreamWriter) reset() {
	bsw.compressLevel = 0

	bsw.timestampsWriter = nil
	bsw.valuesWriter = nil
	bsw.indexWriter = nil
	bsw.metaindexWriter = nil

	bsw.mr.Reset()

	bsw.timestampsBlockOffset = 0
	bsw.valuesBlockOffset = 0
	bsw.indexBlockOffset = 0

	bsw.indexData = bsw.indexData[:0]
	bsw.compressedIndexData = bsw.compressedIndexData[:0]

	bsw.metaindexData = bsw.metaindexData[:0]
	bsw.compressedMetaindexData = bsw.compressedMetaindexData[:0]

	bsw.prevTimestampsData = bsw.prevTimestampsData[:0]
	bsw.prevTimestampsBlockOffset = 0
}

// MustInitFromInmemoryPart initializes bsw from the given inmemory part mp.
func (bsw *blockStreamWriter) MustInitFromInmemoryPart(mp *inmemoryPart, compressLevel int) {
	bsw.reset()

	bsw.compressLevel = compressLevel

	bsw.timestampsWriter = &mp.timestampsData
	bsw.valuesWriter = &mp.valuesData
	bsw.indexWriter = &mp.indexData
	bsw.metaindexWriter = &mp.metaindexData
}
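
// A minimal usage sketch (illustrative only; mp, blocks, ph and the compress
// level 5 are placeholders, not values taken from real callers):
//
//	bsw := getBlockStreamWriter()
//	bsw.MustInitFromInmemoryPart(mp, 5)
//	var rowsMerged uint64
//	for _, b := range blocks {
//		bsw.WriteExternalBlock(b, ph, &rowsMerged)
//	}
//	bsw.MustClose()
//	putBlockStreamWriter(bsw)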

// MustInitFromFilePart initializes bsw from a file-based part created at the given path.
//
// The bsw doesn't pollute OS page cache if nocache is set.
func (bsw *blockStreamWriter) MustInitFromFilePart(path string, nocache bool, compressLevel int) {
	path = filepath.Clean(path)

	// Create the directory for the part.
	fs.MustMkdirFailIfExist(path)

	// Create part files in the directory.
	timestampsPath := filepath.Join(path, timestampsFilename)
	timestampsFile := filestream.MustCreate(timestampsPath, nocache)

	valuesPath := filepath.Join(path, valuesFilename)
	valuesFile := filestream.MustCreate(valuesPath, nocache)

	indexPath := filepath.Join(path, indexFilename)
	indexFile := filestream.MustCreate(indexPath, nocache)

	// Always cache the metaindex file in OS page cache, since it is immediately
	// read after the merge.
	metaindexPath := filepath.Join(path, metaindexFilename)
	metaindexFile := filestream.MustCreate(metaindexPath, false)

	bsw.reset()
	bsw.compressLevel = compressLevel

	bsw.timestampsWriter = timestampsFile
	bsw.valuesWriter = valuesFile
	bsw.indexWriter = indexFile
	bsw.metaindexWriter = metaindexFile
}
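
// Design note (an inference from the comments above, not original
// documentation): nocache is applied to the timestamps, values and index
// files so a large merge doesn't evict hotter data from the OS page cache,
// while the metaindex file is always cached because it is read back
// immediately after the merge completes.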

// MustClose closes the bsw.
//
// It closes the *Writer files passed to MustInit*.
func (bsw *blockStreamWriter) MustClose() {
	// Flush the remaining data.
	bsw.flushIndexData()

	// Write metaindex data.
	bsw.compressedMetaindexData = encoding.CompressZSTDLevel(bsw.compressedMetaindexData[:0], bsw.metaindexData, bsw.compressLevel)
	fs.MustWriteData(bsw.metaindexWriter, bsw.compressedMetaindexData)

	// Close writers.
	bsw.timestampsWriter.MustClose()
	bsw.valuesWriter.MustClose()
	bsw.indexWriter.MustClose()
	bsw.metaindexWriter.MustClose()

	bsw.reset()
}
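
// Note the asymmetry (an observation about the code above, not original
// documentation): index data is compressed and flushed in chunks of up to
// maxBlockSize via flushIndexData, while metaindex data, which presumably
// stays small, is compressed and written as a single ZSTD block on close.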

// WriteExternalBlock writes b to bsw and updates ph and rowsMerged.
func (bsw *blockStreamWriter) WriteExternalBlock(b *Block, ph *partHeader, rowsMerged *uint64) {
	atomic.AddUint64(rowsMerged, uint64(b.rowsCount()))
	b.deduplicateSamplesDuringMerge()
	headerData, timestampsData, valuesData := b.MarshalData(bsw.timestampsBlockOffset, bsw.valuesBlockOffset)

	usePrevTimestamps := len(bsw.prevTimestampsData) > 0 && bytes.Equal(timestampsData, bsw.prevTimestampsData)
	if usePrevTimestamps {
		// The current timestamps block equals the previous timestamps block.
		// Update headerData so it points to the previous timestamps block. This saves disk space.
		headerData, timestampsData, valuesData = b.MarshalData(bsw.prevTimestampsBlockOffset, bsw.valuesBlockOffset)
		atomic.AddUint64(&timestampsBlocksMerged, 1)
		atomic.AddUint64(&timestampsBytesSaved, uint64(len(timestampsData)))
	}

	bsw.indexData = append(bsw.indexData, headerData...)
	bsw.mr.RegisterBlockHeader(&b.bh)
	if len(bsw.indexData) >= maxBlockSize {
		bsw.flushIndexData()
	}

	if !usePrevTimestamps {
		bsw.prevTimestampsData = append(bsw.prevTimestampsData[:0], timestampsData...)
		bsw.prevTimestampsBlockOffset = bsw.timestampsBlockOffset
		fs.MustWriteData(bsw.timestampsWriter, timestampsData)
		bsw.timestampsBlockOffset += uint64(len(timestampsData))
	}

	fs.MustWriteData(bsw.valuesWriter, valuesData)
	bsw.valuesBlockOffset += uint64(len(valuesData))
	updatePartHeader(b, ph)
}
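
// Worked example with hypothetical numbers: suppose block b1 writes 100 bytes
// of timestamps at offset 0, and the next block b2 marshals to an identical
// timestamps blob. Then b2's header is re-marshaled to point at
// prevTimestampsBlockOffset (0), no new timestamps are written,
// timestampsBytesSaved grows by 100, and timestampsBlockOffset stays at 100.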

var (
	timestampsBlocksMerged uint64
	timestampsBytesSaved   uint64
)

func updatePartHeader(b *Block, ph *partHeader) {
	ph.BlocksCount++
	ph.RowsCount += uint64(b.bh.RowsCount)
	if b.bh.MinTimestamp < ph.MinTimestamp {
		ph.MinTimestamp = b.bh.MinTimestamp
	}
	if b.bh.MaxTimestamp > ph.MaxTimestamp {
		ph.MaxTimestamp = b.bh.MaxTimestamp
	}
}

func (bsw *blockStreamWriter) flushIndexData() {
	if len(bsw.indexData) == 0 {
		return
	}

	// Write the compressed index block to the index file.
	bsw.compressedIndexData = encoding.CompressZSTDLevel(bsw.compressedIndexData[:0], bsw.indexData, bsw.compressLevel)
	indexBlockSize := len(bsw.compressedIndexData)
	if uint64(indexBlockSize) >= 1<<32 {
		logger.Panicf("BUG: indexBlock size must fit uint32; got %d", indexBlockSize)
	}
	fs.MustWriteData(bsw.indexWriter, bsw.compressedIndexData)

	// Write the metaindex row for the flushed index block to metaindex data.
	bsw.mr.IndexBlockOffset = bsw.indexBlockOffset
	bsw.mr.IndexBlockSize = uint32(indexBlockSize)
	bsw.metaindexData = bsw.mr.Marshal(bsw.metaindexData)

	// Update offsets.
	bsw.indexBlockOffset += uint64(indexBlockSize)

	bsw.indexData = bsw.indexData[:0]
	bsw.mr.Reset()
}
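
// Note (an inference from the code above, not original documentation): each
// metaindexRow records the offset and size of one compressed index block, so
// a reader can locate and decompress a single index block without scanning
// the whole index file; the explicit size check guards the narrowing
// conversion to uint32.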

func getBlockStreamWriter() *blockStreamWriter {
	v := bswPool.Get()
	if v == nil {
		return &blockStreamWriter{}
	}
	return v.(*blockStreamWriter)
}

func putBlockStreamWriter(bsw *blockStreamWriter) {
	bsw.reset()
	bswPool.Put(bsw)
}
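
// Design note (an inference, not original documentation): pooling writers
// lets repeated merges re-use the byte slices grown inside blockStreamWriter
// (indexData, metaindexData, prevTimestampsData and the compressed buffers),
// since reset truncates them to zero length while keeping their capacity.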

// bswPool is a pool of blockStreamWriter structs re-used by
// getBlockStreamWriter and putBlockStreamWriter.
var bswPool sync.Pool