2019-05-22 23:16:55 +02:00
|
|
|
package storage
|
|
|
|
|
|
|
|
import (
|
2023-03-19 09:36:05 +01:00
|
|
|
"encoding/json"
|
2020-09-17 02:02:35 +02:00
|
|
|
"errors"
|
2019-05-22 23:16:55 +02:00
|
|
|
"fmt"
|
|
|
|
"os"
|
|
|
|
"path/filepath"
|
|
|
|
"sort"
|
|
|
|
"strings"
|
|
|
|
"sync"
|
|
|
|
"sync/atomic"
|
|
|
|
"time"
|
|
|
|
"unsafe"
|
|
|
|
|
2020-12-08 19:49:32 +01:00
|
|
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
|
2019-05-22 23:16:55 +02:00
|
|
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
|
2020-05-14 21:01:51 +02:00
|
|
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
|
2019-05-22 23:16:55 +02:00
|
|
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
|
|
|
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
|
|
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/memory"
|
2022-12-06 00:15:00 +01:00
|
|
|
"github.com/VictoriaMetrics/VictoriaMetrics/lib/mergeset"
|
2019-05-22 23:16:55 +02:00
|
|
|
)
|
|
|
|
|
2021-08-25 08:35:03 +02:00
|
|
|
// The maximum size of big part.
//
// This number limits the maximum time required for building big part.
// This time shouldn't exceed a few days.
const maxBigPartSize = 1e12

// The maximum number of inmemory parts in the partition.
//
// If the number of inmemory parts reaches this value, then assisted merge runs during data ingestion.
const maxInmemoryPartsPerPartition = 20

// The maximum number of small parts in the partition.
//
// If the number of small parts reaches this value, then assisted merge runs during data ingestion.
const maxSmallPartsPerPartition = 30

// Default number of parts to merge at once.
//
// This number has been obtained empirically - it gives the lowest possible overhead.
// See appendPartsToMerge tests for details.
const defaultPartsToMerge = 15

// The final number of parts to merge at once.
//
// It must be smaller than defaultPartsToMerge.
// Lower value improves select performance at the cost of increased
// write amplification.
const finalPartsToMerge = 3

// The number of shards for rawRow entries per partition.
//
// Higher number of shards reduces CPU contention and increases the max bandwidth on multi-core systems.
var rawRowsShardsPerPartition = (cgroup.AvailableCPUs() + 1) / 2

// The interval for flushing buffered rows into parts, so they become visible to search.
const pendingRowsFlushInterval = time.Second

// The interval for guaranteed flush of recently ingested data from memory to on-disk parts,
// so they survive process crash.
var dataFlushInterval = 5 * time.Second
|
|
|
|
|
|
|
|
// SetDataFlushInterval sets the interval for guaranteed flush of recently ingested data from memory to disk.
|
|
|
|
//
|
|
|
|
// The data can be flushed from memory to disk more frequently if it doesn't fit the memory limit.
|
|
|
|
//
|
|
|
|
// This function must be called before initializing the storage.
|
|
|
|
func SetDataFlushInterval(d time.Duration) {
|
|
|
|
if d > pendingRowsFlushInterval {
|
|
|
|
dataFlushInterval = d
|
|
|
|
mergeset.SetDataFlushInterval(d)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-06-11 09:52:14 +02:00
|
|
|
// getMaxRawRowsPerShard returns the maximum number of rows that haven't been converted into parts yet.
|
|
|
|
func getMaxRawRowsPerShard() int {
|
2019-05-22 23:16:55 +02:00
|
|
|
maxRawRowsPerPartitionOnce.Do(func() {
|
2021-06-11 09:49:02 +02:00
|
|
|
n := memory.Allowed() / rawRowsShardsPerPartition / 256 / int(unsafe.Sizeof(rawRow{}))
|
2019-05-22 23:16:55 +02:00
|
|
|
if n < 1e4 {
|
|
|
|
n = 1e4
|
|
|
|
}
|
|
|
|
if n > 500e3 {
|
|
|
|
n = 500e3
|
|
|
|
}
|
2020-01-04 18:49:30 +01:00
|
|
|
maxRawRowsPerPartition = n
|
2019-05-22 23:16:55 +02:00
|
|
|
})
|
|
|
|
return maxRawRowsPerPartition
|
|
|
|
}
|
|
|
|
|
|
|
|
var (
|
|
|
|
maxRawRowsPerPartition int
|
|
|
|
maxRawRowsPerPartitionOnce sync.Once
|
|
|
|
)
|
|
|
|
|
|
|
|
// partition represents a partition.
type partition struct {
	// Put atomic counters to the top of struct, so they are aligned to 8 bytes on 32-bit arch.
	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/212

	// The number of currently running merges per part category. Updated atomically.
	activeInmemoryMerges uint64
	activeSmallMerges    uint64
	activeBigMerges      uint64

	// The total number of merges performed per part category. Updated atomically.
	inmemoryMergesCount uint64
	smallMergesCount    uint64
	bigMergesCount      uint64

	// The total number of rows merged per part category. Updated atomically.
	inmemoryRowsMerged uint64
	smallRowsMerged    uint64
	bigRowsMerged      uint64

	// The total number of rows deleted during merges per part category. Updated atomically.
	inmemoryRowsDeleted uint64
	smallRowsDeleted    uint64
	bigRowsDeleted      uint64

	// The number of assisted merges performed on the ingestion path. Updated atomically.
	inmemoryAssistedMerges uint64
	smallAssistedMerges    uint64

	// mergeIdx is an atomically updated counter seeded from the current time
	// in newPartition; presumably used for generating unique merge/part names - confirm at call sites.
	mergeIdx uint64

	smallPartsPath string
	bigPartsPath   string

	// The parent storage.
	s *Storage

	// name is the name of the partition in the form YYYY_MM.
	name string

	// The time range for the partition. Usually this is a whole month.
	tr TimeRange

	// rawRows contains recently added rows that haven't been converted into parts yet.
	// rawRows are periodically converted into inmemoryParts.
	// rawRows aren't used in search for performance reasons.
	rawRows rawRowsShards

	// partsLock protects inmemoryParts, smallParts and bigParts.
	partsLock sync.Mutex

	// Contains inmemory parts with recently ingested data.
	inmemoryParts []*partWrapper

	// Contains file-based parts with small number of items.
	smallParts []*partWrapper

	// Contains file-based parts with big number of items.
	bigParts []*partWrapper

	// This channel is used for signaling the background mergers that there are parts,
	// which may need to be merged.
	needMergeCh chan struct{}

	// stopCh signals background workers to stop.
	stopCh chan struct{}

	// wg tracks background workers, so they can be awaited on shutdown.
	wg sync.WaitGroup
}
|
|
|
|
|
|
|
|
// partWrapper is a wrapper for the part.
//
// It adds reference counting and deferred deletion on top of the wrapped part.
type partWrapper struct {
	// The number of references to the part. Updated atomically.
	refCount uint32

	// The flag, which is set when the part must be deleted after refCount reaches zero.
	// This field should be updated only after partWrapper
	// was removed from the list of active parts.
	mustBeDeleted uint32

	// The part itself.
	p *part

	// non-nil if the part is inmemoryPart.
	mp *inmemoryPart

	// Whether the part is in merge now.
	isInMerge bool

	// The deadline when in-memory part must be flushed to disk.
	flushToDiskDeadline time.Time
}
|
|
|
|
|
|
|
|
// incRef increments the reference counter for pw.
func (pw *partWrapper) incRef() {
	atomic.AddUint32(&pw.refCount, 1)
}
|
|
|
|
|
|
|
|
// decRef decrements the reference counter for pw.
//
// When the counter reaches zero, the wrapped part is closed and,
// if mustBeDeleted is set, its on-disk directory is removed.
func (pw *partWrapper) decRef() {
	// Adding ^uint32(0) is an atomic decrement by one.
	n := atomic.AddUint32(&pw.refCount, ^uint32(0))
	if int32(n) < 0 {
		logger.Panicf("BUG: pw.refCount must be bigger than 0; got %d", int32(n))
	}
	if n > 0 {
		return
	}

	// Capture the path to remove before the part is closed below.
	// Only file-based parts (pw.mp == nil) have an on-disk path to remove.
	deletePath := ""
	if pw.mp == nil && atomic.LoadUint32(&pw.mustBeDeleted) != 0 {
		deletePath = pw.p.path
	}
	if pw.mp != nil {
		putInmemoryPart(pw.mp)
		pw.mp = nil
	}
	pw.p.MustClose()
	pw.p = nil

	if deletePath != "" {
		fs.MustRemoveAll(deletePath)
	}
}
|
|
|
|
|
2023-04-14 07:11:56 +02:00
|
|
|
// mustCreatePartition creates new partition for the given timestamp and the given paths
// to small and big partitions.
func mustCreatePartition(timestamp int64, smallPartitionsPath, bigPartitionsPath string, s *Storage) *partition {
	name := timestampToPartitionName(timestamp)
	smallPartsPath := filepath.Join(filepath.Clean(smallPartitionsPath), name)
	bigPartsPath := filepath.Join(filepath.Clean(bigPartitionsPath), name)
	logger.Infof("creating a partition %q with smallPartsPath=%q, bigPartsPath=%q", name, smallPartsPath, bigPartsPath)

	// The directories must not exist yet - the caller is expected to create a fresh partition.
	fs.MustMkdirFailIfExist(smallPartsPath)
	fs.MustMkdirFailIfExist(bigPartsPath)

	pt := newPartition(name, smallPartsPath, bigPartsPath, s)
	pt.tr.fromPartitionTimestamp(timestamp)
	pt.startBackgroundWorkers()

	logger.Infof("partition %q has been created", name)

	return pt
}
|
|
|
|
|
2022-12-04 09:01:04 +01:00
|
|
|
// startBackgroundWorkers starts goroutines performing background merges,
// periodic flushing of in-memory data and stale parts removal for pt.
func (pt *partition) startBackgroundWorkers() {
	pt.startMergeWorkers()
	pt.startInmemoryPartsFlusher()
	pt.startPendingRowsFlusher()
	pt.startStalePartsRemover()
}
|
|
|
|
|
2019-05-22 23:16:55 +02:00
|
|
|
// Drop drops all the data on the storage for the given pt.
//
// The pt must be detached from table before calling pt.Drop.
func (pt *partition) Drop() {
	logger.Infof("dropping partition %q at smallPartsPath=%q, bigPartsPath=%q", pt.name, pt.smallPartsPath, pt.bigPartsPath)

	// Atomic removal protects from leaving partially removed directories on crash.
	fs.MustRemoveDirAtomic(pt.smallPartsPath)
	fs.MustRemoveDirAtomic(pt.bigPartsPath)
	logger.Infof("partition %q has been dropped", pt.name)
}
|
|
|
|
|
2023-04-15 08:01:20 +02:00
|
|
|
// mustOpenPartition opens the existing partition from the given paths.
func mustOpenPartition(smallPartsPath, bigPartsPath string, s *Storage) *partition {
	smallPartsPath = filepath.Clean(smallPartsPath)
	bigPartsPath = filepath.Clean(bigPartsPath)

	// Both paths must end with the same partition name (YYYY_MM).
	name := filepath.Base(smallPartsPath)
	if !strings.HasSuffix(bigPartsPath, name) {
		logger.Panicf("FATAL: partition name in bigPartsPath %q doesn't match smallPartsPath %q; want %q", bigPartsPath, smallPartsPath, name)
	}

	partNamesSmall, partNamesBig := mustReadPartNames(smallPartsPath, bigPartsPath)

	smallParts := mustOpenParts(smallPartsPath, partNamesSmall)
	bigParts := mustOpenParts(bigPartsPath, partNamesBig)

	partNamesPath := filepath.Join(smallPartsPath, partsFilename)
	if !fs.IsPathExist(partNamesPath) {
		// Create parts.json file if it doesn't exist yet.
		// This should protect from possible crashloops just after the migration from versions below v1.90.0
		// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4336
		mustWritePartNames(smallParts, bigParts, smallPartsPath)
	}

	pt := newPartition(name, smallPartsPath, bigPartsPath, s)
	pt.smallParts = smallParts
	pt.bigParts = bigParts
	if err := pt.tr.fromPartitionName(name); err != nil {
		logger.Panicf("FATAL: cannot obtain partition time range from smallPartsPath %q: %s", smallPartsPath, err)
	}
	pt.startBackgroundWorkers()

	// Wake up a single background merger, so it could start merging parts if needed.
	pt.notifyBackgroundMergers()

	return pt
}
|
|
|
|
|
2022-10-24 00:30:50 +02:00
|
|
|
// newPartition creates a partition struct with the given name, part paths and parent storage.
//
// The caller is responsible for initializing pt.tr and starting background workers.
func newPartition(name, smallPartsPath, bigPartsPath string, s *Storage) *partition {
	p := &partition{
		name:           name,
		smallPartsPath: smallPartsPath,
		bigPartsPath:   bigPartsPath,

		s: s,

		// Seed mergeIdx from the wall clock so it keeps growing across restarts.
		mergeIdx: uint64(time.Now().UnixNano()),
		// One pending notification per available CPU matches the number of background mergers.
		needMergeCh: make(chan struct{}, cgroup.AvailableCPUs()),

		stopCh: make(chan struct{}),
	}
	p.rawRows.init()
	return p
}
|
|
|
|
|
|
|
|
// partitionMetrics contains essential metrics for the partition.
type partitionMetrics struct {
	// PendingRows is the number of rows buffered in memory, which aren't visible to search yet.
	PendingRows uint64

	// Metrics for the index blocks cache.
	IndexBlocksCacheSize         uint64
	IndexBlocksCacheSizeBytes    uint64
	IndexBlocksCacheSizeMaxBytes uint64
	IndexBlocksCacheRequests     uint64
	IndexBlocksCacheMisses       uint64

	// Sizes per part category.
	InmemorySizeBytes uint64
	SmallSizeBytes    uint64
	BigSizeBytes      uint64

	// Row counts per part category.
	InmemoryRowsCount uint64
	SmallRowsCount    uint64
	BigRowsCount      uint64

	// Block counts per part category.
	InmemoryBlocksCount uint64
	SmallBlocksCount    uint64
	BigBlocksCount      uint64

	// Part counts per part category.
	InmemoryPartsCount uint64
	SmallPartsCount    uint64
	BigPartsCount      uint64

	// Currently running merges per part category.
	ActiveInmemoryMerges uint64
	ActiveSmallMerges    uint64
	ActiveBigMerges      uint64

	// Total merges performed per part category.
	InmemoryMergesCount uint64
	SmallMergesCount    uint64
	BigMergesCount      uint64

	// Total rows merged per part category.
	InmemoryRowsMerged uint64
	SmallRowsMerged    uint64
	BigRowsMerged      uint64

	// Total rows deleted during merges per part category.
	InmemoryRowsDeleted uint64
	SmallRowsDeleted    uint64
	BigRowsDeleted      uint64

	// Sums of part reference counters per part category.
	InmemoryPartsRefCount uint64
	SmallPartsRefCount    uint64
	BigPartsRefCount      uint64

	// Assisted merges performed on the ingestion path.
	InmemoryAssistedMerges uint64
	SmallAssistedMerges    uint64
}
|
2020-09-29 20:47:40 +02:00
|
|
|
|
2022-12-06 00:15:00 +01:00
|
|
|
// TotalRowsCount returns total number of rows in pm, including pending rows.
func (pm *partitionMetrics) TotalRowsCount() uint64 {
	return pm.PendingRows + pm.InmemoryRowsCount + pm.SmallRowsCount + pm.BigRowsCount
}
|
|
|
|
|
|
|
|
// UpdateMetrics updates m with metrics from pt.
//
// Per-partition metrics are accumulated into m via +=, so the same m
// may be passed across multiple partitions. Cache metrics are assigned
// with = - presumably because ibCache is shared; confirm at its declaration.
func (pt *partition) UpdateMetrics(m *partitionMetrics) {
	m.PendingRows += uint64(pt.rawRows.Len())

	// Take a consistent snapshot of the part lists under partsLock.
	pt.partsLock.Lock()

	for _, pw := range pt.inmemoryParts {
		p := pw.p
		m.InmemoryRowsCount += p.ph.RowsCount
		m.InmemoryBlocksCount += p.ph.BlocksCount
		m.InmemorySizeBytes += p.size
		m.InmemoryPartsRefCount += uint64(atomic.LoadUint32(&pw.refCount))
	}
	for _, pw := range pt.smallParts {
		p := pw.p
		m.SmallRowsCount += p.ph.RowsCount
		m.SmallBlocksCount += p.ph.BlocksCount
		m.SmallSizeBytes += p.size
		m.SmallPartsRefCount += uint64(atomic.LoadUint32(&pw.refCount))
	}
	for _, pw := range pt.bigParts {
		p := pw.p
		m.BigRowsCount += p.ph.RowsCount
		m.BigBlocksCount += p.ph.BlocksCount
		m.BigSizeBytes += p.size
		m.BigPartsRefCount += uint64(atomic.LoadUint32(&pw.refCount))
	}

	m.InmemoryPartsCount += uint64(len(pt.inmemoryParts))
	m.SmallPartsCount += uint64(len(pt.smallParts))
	m.BigPartsCount += uint64(len(pt.bigParts))

	pt.partsLock.Unlock()

	m.IndexBlocksCacheSize = uint64(ibCache.Len())
	m.IndexBlocksCacheSizeBytes = uint64(ibCache.SizeBytes())
	m.IndexBlocksCacheSizeMaxBytes = uint64(ibCache.SizeMaxBytes())
	m.IndexBlocksCacheRequests = ibCache.Requests()
	m.IndexBlocksCacheMisses = ibCache.Misses()

	m.ActiveInmemoryMerges += atomic.LoadUint64(&pt.activeInmemoryMerges)
	m.ActiveSmallMerges += atomic.LoadUint64(&pt.activeSmallMerges)
	m.ActiveBigMerges += atomic.LoadUint64(&pt.activeBigMerges)

	m.InmemoryMergesCount += atomic.LoadUint64(&pt.inmemoryMergesCount)
	m.SmallMergesCount += atomic.LoadUint64(&pt.smallMergesCount)
	m.BigMergesCount += atomic.LoadUint64(&pt.bigMergesCount)

	m.InmemoryRowsMerged += atomic.LoadUint64(&pt.inmemoryRowsMerged)
	m.SmallRowsMerged += atomic.LoadUint64(&pt.smallRowsMerged)
	m.BigRowsMerged += atomic.LoadUint64(&pt.bigRowsMerged)

	m.InmemoryRowsDeleted += atomic.LoadUint64(&pt.inmemoryRowsDeleted)
	m.SmallRowsDeleted += atomic.LoadUint64(&pt.smallRowsDeleted)
	m.BigRowsDeleted += atomic.LoadUint64(&pt.bigRowsDeleted)

	m.InmemoryAssistedMerges += atomic.LoadUint64(&pt.inmemoryAssistedMerges)
	m.SmallAssistedMerges += atomic.LoadUint64(&pt.smallAssistedMerges)
}
|
|
|
|
|
|
|
|
// AddRows adds the given rows to the partition pt.
//
// All the rows must fit the partition by timestamp range
// and must have valid PrecisionBits.
func (pt *partition) AddRows(rows []rawRow) {
	if len(rows) == 0 {
		return
	}

	if isDebug {
		// Validate all the rows.
		for i := range rows {
			r := &rows[i]
			if !pt.HasTimestamp(r.Timestamp) {
				logger.Panicf("BUG: row %+v has Timestamp outside partition %q range %+v", r, pt.smallPartsPath, &pt.tr)
			}
			if err := encoding.CheckPrecisionBits(r.PrecisionBits); err != nil {
				logger.Panicf("BUG: row %+v has invalid PrecisionBits: %s", r, err)
			}
		}
	}

	pt.rawRows.addRows(pt, rows)
}

// isDebug enables expensive per-row validation in AddRows when set to true.
var isDebug = false
|
|
|
|
|
2019-12-19 17:12:02 +01:00
|
|
|
// rawRowsShards holds per-partition buffers for recently added rows.
type rawRowsShards struct {
	// shardIdx is incremented atomically for round-robin selection of the shard in addRows.
	shardIdx uint32

	// Shards reduce lock contention when adding rows on multi-CPU systems.
	shards []rawRowsShard
}
|
|
|
|
|
2021-04-27 14:36:31 +02:00
|
|
|
// init allocates the shards for rrss according to rawRowsShardsPerPartition.
func (rrss *rawRowsShards) init() {
	rrss.shards = make([]rawRowsShard, rawRowsShardsPerPartition)
}
|
|
|
|
|
2021-04-27 14:36:31 +02:00
|
|
|
func (rrss *rawRowsShards) addRows(pt *partition, rows []rawRow) {
|
|
|
|
shards := rrss.shards
|
2022-12-06 00:15:00 +01:00
|
|
|
shardsLen := uint32(len(shards))
|
|
|
|
for len(rows) > 0 {
|
|
|
|
n := atomic.AddUint32(&rrss.shardIdx, 1)
|
|
|
|
idx := n % shardsLen
|
|
|
|
rows = shards[idx].addRows(pt, rows)
|
|
|
|
}
|
2019-12-19 17:12:02 +01:00
|
|
|
}
|
|
|
|
|
2021-04-27 14:36:31 +02:00
|
|
|
func (rrss *rawRowsShards) Len() int {
|
2019-12-19 17:12:02 +01:00
|
|
|
n := 0
|
2021-04-27 14:36:31 +02:00
|
|
|
for i := range rrss.shards[:] {
|
|
|
|
n += rrss.shards[i].Len()
|
2019-12-19 17:12:02 +01:00
|
|
|
}
|
|
|
|
return n
|
|
|
|
}
|
|
|
|
|
2022-10-20 15:17:09 +02:00
|
|
|
// rawRowsShardNopad contains the payload of a rawRowsShard without the anti-false-sharing padding.
type rawRowsShardNopad struct {
	// Put lastFlushTime to the top in order to avoid unaligned memory access on 32-bit architectures
	lastFlushTime uint64

	// mu protects rows.
	mu sync.Mutex
	// rows is the buffer of recently added rows; its capacity is set by newRawRows.
	rows []rawRow
}

// rawRowsShard is a mutex-protected buffer of recently added rows.
type rawRowsShard struct {
	rawRowsShardNopad

	// The padding prevents false sharing on widespread platforms with
	// 128 mod (cache line size) = 0 .
	_ [128 - unsafe.Sizeof(rawRowsShardNopad{})%128]byte
}
|
|
|
|
|
|
|
|
func (rrs *rawRowsShard) Len() int {
|
2021-04-27 14:36:31 +02:00
|
|
|
rrs.mu.Lock()
|
2019-12-19 17:12:02 +01:00
|
|
|
n := len(rrs.rows)
|
2021-04-27 14:36:31 +02:00
|
|
|
rrs.mu.Unlock()
|
2019-12-19 17:12:02 +01:00
|
|
|
return n
|
|
|
|
}
|
|
|
|
|
2022-12-06 00:15:00 +01:00
|
|
|
// addRows appends rows to the shard buffer.
//
// When the buffer overflows, its contents are swapped into a pooled rawRowsBlock
// and flushed to an in-memory part outside the shard lock.
// The rows that didn't fit into the freshly emptied buffer are returned to the caller.
func (rrs *rawRowsShard) addRows(pt *partition, rows []rawRow) []rawRow {
	var rrb *rawRowsBlock

	rrs.mu.Lock()
	if cap(rrs.rows) == 0 {
		// Lazily allocate the buffer on first use.
		rrs.rows = newRawRows()
	}
	// Copy as many rows as fit into the remaining buffer capacity.
	n := copy(rrs.rows[len(rrs.rows):cap(rrs.rows)], rows)
	rrs.rows = rrs.rows[:len(rrs.rows)+n]
	rows = rows[n:]
	if len(rows) > 0 {
		// The buffer is full - swap it with an empty pooled block for flushing,
		// then continue filling the fresh buffer.
		rrb = getRawRowsBlock()
		rrb.rows, rrs.rows = rrs.rows, rrb.rows
		n = copy(rrs.rows[:cap(rrs.rows)], rows)
		rrs.rows = rrs.rows[:n]
		rows = rows[n:]
		atomic.StoreUint64(&rrs.lastFlushTime, fasttime.UnixTimestamp())
	}
	rrs.mu.Unlock()

	if rrb != nil {
		// Flush the swapped-out rows outside the lock, so concurrent adds aren't blocked.
		pt.flushRowsToParts(rrb.rows)
		putRawRowsBlock(rrb)

		// Run assisted merges if needed.
		flushConcurrencyCh <- struct{}{}
		pt.assistedMergeForInmemoryParts()
		pt.assistedMergeForSmallParts()
		// There is no need in assisted merges for big parts,
		// since the bottleneck is possible only at inmemory and small parts.
		<-flushConcurrencyCh
	}

	return rows
}
|
|
|
|
|
|
|
|
// rawRowsBlock is a pooled container for a batch of rows being flushed to parts.
type rawRowsBlock struct {
	rows []rawRow
}
|
|
|
|
|
2023-01-18 09:01:03 +01:00
|
|
|
func newRawRows() []rawRow {
|
2022-10-21 13:46:06 +02:00
|
|
|
n := getMaxRawRowsPerShard()
|
|
|
|
return make([]rawRow, 0, n)
|
|
|
|
}
|
|
|
|
|
2022-12-06 00:15:00 +01:00
|
|
|
func getRawRowsBlock() *rawRowsBlock {
|
|
|
|
v := rawRowsBlockPool.Get()
|
|
|
|
if v == nil {
|
|
|
|
return &rawRowsBlock{
|
2023-01-18 09:01:03 +01:00
|
|
|
rows: newRawRows(),
|
2022-12-06 00:15:00 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return v.(*rawRowsBlock)
|
|
|
|
}
|
|
|
|
|
|
|
|
// putRawRowsBlock returns rrb to the pool after truncating its rows,
// so the backing array is reused by subsequent getRawRowsBlock calls.
func putRawRowsBlock(rrb *rawRowsBlock) {
	rrb.rows = rrb.rows[:0]
	rawRowsBlockPool.Put(rrb)
}

var rawRowsBlockPool sync.Pool
|
|
|
|
|
2021-06-17 12:42:32 +02:00
|
|
|
// flushRowsToParts converts rows into in-memory parts and registers them in pt.
//
// The conversion runs concurrently in chunks of at most getMaxRawRowsPerShard() rows;
// the concurrency is bounded by flushConcurrencyCh.
func (pt *partition) flushRowsToParts(rows []rawRow) {
	if len(rows) == 0 {
		return
	}
	maxRows := getMaxRawRowsPerShard()
	var pwsLock sync.Mutex
	// Pre-size pws for the number of chunks (ceiling division).
	pws := make([]*partWrapper, 0, (len(rows)+maxRows-1)/maxRows)
	wg := getWaitGroup()
	for len(rows) > 0 {
		n := maxRows
		if n > len(rows) {
			n = len(rows)
		}
		wg.Add(1)
		flushConcurrencyCh <- struct{}{}
		go func(rowsChunk []rawRow) {
			defer func() {
				<-flushConcurrencyCh
				wg.Done()
			}()
			pw := pt.createInmemoryPart(rowsChunk)
			if pw == nil {
				return
			}
			pwsLock.Lock()
			pws = append(pws, pw)
			pwsLock.Unlock()
		}(rows[:n])
		rows = rows[n:]
	}
	wg.Wait()
	putWaitGroup(wg)

	pt.partsLock.Lock()
	pt.inmemoryParts = append(pt.inmemoryParts, pws...)
	// Wake up one background merger per added part until the notification channel is full.
	for range pws {
		if !pt.notifyBackgroundMergers() {
			break
		}
	}
	pt.partsLock.Unlock()
}
|
|
|
|
|
2023-01-18 10:09:03 +01:00
|
|
|
// notifyBackgroundMergers performs a non-blocking wakeup of a background merger.
//
// It returns false when the notification channel is already full.
func (pt *partition) notifyBackgroundMergers() bool {
	select {
	case pt.needMergeCh <- struct{}{}:
		return true
	default:
		return false
	}
}
|
|
|
|
|
2023-02-11 21:06:18 +01:00
|
|
|
// flushConcurrencyLimit bounds the number of concurrent flush and assisted-merge operations.
var flushConcurrencyLimit = func() int {
	n := cgroup.AvailableCPUs()
	if n < 3 {
		// Allow at least 3 concurrent flushers on systems with a single CPU core
		// in order to guarantee that in-memory data flushes and background merges can be continued
		// when a single flusher is busy with the long merge of big parts,
		// while another flusher is busy with the long merge of small parts.
		n = 3
	}
	return n
}()

// flushConcurrencyCh is the semaphore enforcing flushConcurrencyLimit.
var flushConcurrencyCh = make(chan struct{}, flushConcurrencyLimit)
|
2022-12-06 00:15:00 +01:00
|
|
|
|
2022-12-28 23:32:18 +01:00
|
|
|
func needAssistedMerge(pws []*partWrapper, maxParts int) bool {
|
|
|
|
if len(pws) < maxParts {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
return getNotInMergePartsCount(pws) >= defaultPartsToMerge
|
|
|
|
}
|
|
|
|
|
2022-12-06 00:15:00 +01:00
|
|
|
// assistedMergeForInmemoryParts merges in-memory parts on the data ingestion path
// when their number reached maxInmemoryPartsPerPartition.
func (pt *partition) assistedMergeForInmemoryParts() {
	pt.partsLock.Lock()
	needMerge := needAssistedMerge(pt.inmemoryParts, maxInmemoryPartsPerPartition)
	pt.partsLock.Unlock()
	if !needMerge {
		return
	}

	atomic.AddUint64(&pt.inmemoryAssistedMerges, 1)
	err := pt.mergeInmemoryParts()
	if err == nil {
		return
	}
	if errors.Is(err, errNothingToMerge) || errors.Is(err, errForciblyStopped) {
		// Nothing to merge or shutdown in progress - both are benign here.
		return
	}
	logger.Panicf("FATAL: cannot merge inmemory parts: %s", err)
}
|
|
|
|
|
2022-12-13 01:49:21 +01:00
|
|
|
// assistedMergeForSmallParts merges small parts on the data ingestion path
// when their number reached maxSmallPartsPerPartition.
func (pt *partition) assistedMergeForSmallParts() {
	pt.partsLock.Lock()
	needMerge := needAssistedMerge(pt.smallParts, maxSmallPartsPerPartition)
	pt.partsLock.Unlock()
	if !needMerge {
		return
	}

	atomic.AddUint64(&pt.smallAssistedMerges, 1)
	err := pt.mergeExistingParts(false)
	if err == nil {
		return
	}
	if errors.Is(err, errNothingToMerge) || errors.Is(err, errForciblyStopped) || errors.Is(err, errReadOnlyMode) {
		// Nothing to merge, shutdown in progress or read-only mode - all benign here.
		return
	}
	logger.Panicf("FATAL: cannot merge small parts: %s", err)
}
|
|
|
|
|
2022-12-06 00:15:00 +01:00
|
|
|
func getNotInMergePartsCount(pws []*partWrapper) int {
|
|
|
|
n := 0
|
|
|
|
for _, pw := range pws {
|
|
|
|
if !pw.isInMerge {
|
|
|
|
n++
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return n
|
2019-05-22 23:16:55 +02:00
|
|
|
}
|
|
|
|
|
2022-04-06 12:34:00 +02:00
|
|
|
// getWaitGroup returns a reusable WaitGroup from the pool,
// allocating a fresh one when the pool is empty.
func getWaitGroup() *sync.WaitGroup {
	if v := wgPool.Get(); v != nil {
		return v.(*sync.WaitGroup)
	}
	return &sync.WaitGroup{}
}

// putWaitGroup returns wg to the pool for reuse by getWaitGroup.
// The wg counter must be zero, i.e. wg.Wait() must have returned.
func putWaitGroup(wg *sync.WaitGroup) {
	wgPool.Put(wg)
}

// wgPool recycles WaitGroups across flush operations.
var wgPool sync.Pool
|
|
|
|
|
2022-12-06 00:15:00 +01:00
|
|
|
func (pt *partition) createInmemoryPart(rows []rawRow) *partWrapper {
|
2019-05-22 23:16:55 +02:00
|
|
|
if len(rows) == 0 {
|
2022-12-06 00:15:00 +01:00
|
|
|
return nil
|
2019-05-22 23:16:55 +02:00
|
|
|
}
|
|
|
|
mp := getInmemoryPart()
|
|
|
|
mp.InitFromRows(rows)
|
|
|
|
|
|
|
|
// Make sure the part may be added.
|
|
|
|
if mp.ph.MinTimestamp > mp.ph.MaxTimestamp {
|
|
|
|
logger.Panicf("BUG: the part %q cannot be added to partition %q because its MinTimestamp exceeds MaxTimestamp; %d vs %d",
|
|
|
|
&mp.ph, pt.smallPartsPath, mp.ph.MinTimestamp, mp.ph.MaxTimestamp)
|
|
|
|
}
|
|
|
|
if mp.ph.MinTimestamp < pt.tr.MinTimestamp {
|
|
|
|
logger.Panicf("BUG: the part %q cannot be added to partition %q because of too small MinTimestamp; got %d; want at least %d",
|
|
|
|
&mp.ph, pt.smallPartsPath, mp.ph.MinTimestamp, pt.tr.MinTimestamp)
|
|
|
|
}
|
|
|
|
if mp.ph.MaxTimestamp > pt.tr.MaxTimestamp {
|
|
|
|
logger.Panicf("BUG: the part %q cannot be added to partition %q because of too big MaxTimestamp; got %d; want at least %d",
|
|
|
|
&mp.ph, pt.smallPartsPath, mp.ph.MaxTimestamp, pt.tr.MaxTimestamp)
|
|
|
|
}
|
2022-12-06 00:15:00 +01:00
|
|
|
flushToDiskDeadline := time.Now().Add(dataFlushInterval)
|
|
|
|
return newPartWrapperFromInmemoryPart(mp, flushToDiskDeadline)
|
|
|
|
}
|
2019-05-22 23:16:55 +02:00
|
|
|
|
2022-12-06 00:15:00 +01:00
|
|
|
func newPartWrapperFromInmemoryPart(mp *inmemoryPart, flushToDiskDeadline time.Time) *partWrapper {
|
2023-04-15 00:46:09 +02:00
|
|
|
p := mp.NewPart()
|
2019-05-22 23:16:55 +02:00
|
|
|
pw := &partWrapper{
|
2022-12-06 00:15:00 +01:00
|
|
|
p: p,
|
|
|
|
mp: mp,
|
|
|
|
refCount: 1,
|
|
|
|
flushToDiskDeadline: flushToDiskDeadline,
|
2019-05-22 23:16:55 +02:00
|
|
|
}
|
2022-12-06 00:15:00 +01:00
|
|
|
return pw
|
2019-05-22 23:16:55 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// HasTimestamp returns true if the pt contains the given timestamp.
|
|
|
|
func (pt *partition) HasTimestamp(timestamp int64) bool {
|
|
|
|
return timestamp >= pt.tr.MinTimestamp && timestamp <= pt.tr.MaxTimestamp
|
|
|
|
}
|
|
|
|
|
|
|
|
// GetParts appends parts snapshot to dst and returns it.
|
|
|
|
//
|
|
|
|
// The appended parts must be released with PutParts.
|
2023-02-01 18:54:21 +01:00
|
|
|
func (pt *partition) GetParts(dst []*partWrapper, addInMemory bool) []*partWrapper {
|
2019-05-22 23:16:55 +02:00
|
|
|
pt.partsLock.Lock()
|
2023-02-01 18:54:21 +01:00
|
|
|
if addInMemory {
|
2023-03-19 09:36:05 +01:00
|
|
|
incRefForParts(pt.inmemoryParts)
|
2023-02-01 18:54:21 +01:00
|
|
|
dst = append(dst, pt.inmemoryParts...)
|
2022-12-06 00:15:00 +01:00
|
|
|
}
|
2023-03-19 09:36:05 +01:00
|
|
|
incRefForParts(pt.smallParts)
|
2019-05-22 23:16:55 +02:00
|
|
|
dst = append(dst, pt.smallParts...)
|
2023-03-19 09:36:05 +01:00
|
|
|
incRefForParts(pt.bigParts)
|
2019-05-22 23:16:55 +02:00
|
|
|
dst = append(dst, pt.bigParts...)
|
|
|
|
pt.partsLock.Unlock()
|
|
|
|
|
|
|
|
return dst
|
|
|
|
}
|
|
|
|
|
|
|
|
// PutParts releases the given pws obtained via GetParts.
|
|
|
|
func (pt *partition) PutParts(pws []*partWrapper) {
|
|
|
|
for _, pw := range pws {
|
|
|
|
pw.decRef()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-03-19 09:36:05 +01:00
|
|
|
func incRefForParts(pws []*partWrapper) {
|
|
|
|
for _, pw := range pws {
|
|
|
|
pw.incRef()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-05-22 23:16:55 +02:00
|
|
|
// MustClose closes the pt, so the app may safely exit.
//
// The pt must be detached from table before calling pt.MustClose.
func (pt *partition) MustClose() {
	// Notify the background workers (flushers and merge workers) to stop.
	close(pt.stopCh)

	// Waiting for service workers to stop
	pt.wg.Wait()

	// Persist the remaining pending rows and in-memory parts to disk.
	pt.flushInmemoryRows()

	// Remove references from inmemoryParts, smallParts and bigParts, so they may be eventually closed
	// after all the searches are done.
	pt.partsLock.Lock()
	inmemoryParts := pt.inmemoryParts
	smallParts := pt.smallParts
	bigParts := pt.bigParts
	pt.inmemoryParts = nil
	pt.smallParts = nil
	pt.bigParts = nil
	pt.partsLock.Unlock()

	// decRef outside the lock; the parts are closed when the last reference
	// (possibly held by an in-flight search) is released.
	for _, pw := range inmemoryParts {
		pw.decRef()
	}
	for _, pw := range smallParts {
		pw.decRef()
	}
	for _, pw := range bigParts {
		pw.decRef()
	}
}
|
|
|
|
|
2022-12-06 00:15:00 +01:00
|
|
|
func (pt *partition) startInmemoryPartsFlusher() {
|
|
|
|
pt.wg.Add(1)
|
|
|
|
go func() {
|
|
|
|
pt.inmemoryPartsFlusher()
|
|
|
|
pt.wg.Done()
|
|
|
|
}()
|
|
|
|
}
|
|
|
|
|
|
|
|
func (pt *partition) startPendingRowsFlusher() {
|
2022-12-04 08:03:05 +01:00
|
|
|
pt.wg.Add(1)
|
2019-05-22 23:16:55 +02:00
|
|
|
go func() {
|
2022-12-06 00:15:00 +01:00
|
|
|
pt.pendingRowsFlusher()
|
2022-12-04 08:03:05 +01:00
|
|
|
pt.wg.Done()
|
2019-05-22 23:16:55 +02:00
|
|
|
}()
|
|
|
|
}
|
|
|
|
|
2022-12-06 00:15:00 +01:00
|
|
|
func (pt *partition) inmemoryPartsFlusher() {
|
|
|
|
ticker := time.NewTicker(dataFlushInterval)
|
|
|
|
defer ticker.Stop()
|
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case <-pt.stopCh:
|
|
|
|
return
|
|
|
|
case <-ticker.C:
|
|
|
|
pt.flushInmemoryParts(false)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (pt *partition) pendingRowsFlusher() {
|
|
|
|
ticker := time.NewTicker(pendingRowsFlushInterval)
|
2020-02-13 11:55:58 +01:00
|
|
|
defer ticker.Stop()
|
2022-12-06 00:15:00 +01:00
|
|
|
var rows []rawRow
|
2019-05-22 23:16:55 +02:00
|
|
|
for {
|
|
|
|
select {
|
|
|
|
case <-pt.stopCh:
|
|
|
|
return
|
2020-02-13 11:55:58 +01:00
|
|
|
case <-ticker.C:
|
2022-12-06 00:15:00 +01:00
|
|
|
rows = pt.flushPendingRows(rows[:0], false)
|
2019-05-22 23:16:55 +02:00
|
|
|
}
|
2019-12-19 17:12:02 +01:00
|
|
|
}
|
|
|
|
}
|
2019-05-22 23:16:55 +02:00
|
|
|
|
2022-12-06 00:15:00 +01:00
|
|
|
// flushPendingRows moves pending raw rows from all the shards to parts.
//
// The updated dst is returned, so the caller can reuse its capacity.
func (pt *partition) flushPendingRows(dst []rawRow, isFinal bool) []rawRow {
	return pt.rawRows.flush(pt, dst, isFinal)
}
|
|
|
|
|
|
|
|
// flushInmemoryRows converts all the pending raw rows to in-memory parts
// and then flushes all the in-memory parts to disk.
func (pt *partition) flushInmemoryRows() {
	pt.rawRows.flush(pt, nil, true)
	pt.flushInmemoryParts(true)
}
|
|
|
|
|
|
|
|
// flushInmemoryParts merges in-memory parts to disk-based parts.
//
// If isFinal is set, then all the in-memory parts are flushed; otherwise
// only the parts with expired flushToDiskDeadline are flushed.
func (pt *partition) flushInmemoryParts(isFinal bool) {
	currentTime := time.Now()
	var pws []*partWrapper

	// Collect the in-memory parts, which are ready for flushing, and mark them
	// as being merged, so concurrent merges do not pick them up.
	pt.partsLock.Lock()
	for _, pw := range pt.inmemoryParts {
		if !pw.isInMerge && (isFinal || pw.flushToDiskDeadline.Before(currentTime)) {
			pw.isInMerge = true
			pws = append(pws, pw)
		}
	}
	pt.partsLock.Unlock()

	// mergePartsOptimal resets the isInMerge flag on the collected parts.
	if err := pt.mergePartsOptimal(pws, nil); err != nil {
		logger.Panicf("FATAL: cannot merge in-memory parts: %s", err)
	}
}
|
|
|
|
|
2022-12-06 00:15:00 +01:00
|
|
|
// flush appends pending raw rows from all the shards to dst and converts them to parts.
//
// The updated dst is returned, so the caller can reuse its capacity.
func (rrss *rawRowsShards) flush(pt *partition, dst []rawRow, isFinal bool) []rawRow {
	for i := range rrss.shards {
		dst = rrss.shards[i].appendRawRowsToFlush(dst, isFinal)
	}
	pt.flushRowsToParts(dst)
	return dst
}
|
|
|
|
|
2023-09-01 09:34:16 +02:00
|
|
|
// appendRawRowsToFlush appends raw rows from rrs to dst if they are due for flushing.
//
// Rows are flushed when isFinal is set or when pendingRowsFlushInterval has passed
// since the previous flush of this shard. Returns the updated dst.
func (rrs *rawRowsShard) appendRawRowsToFlush(dst []rawRow, isFinal bool) []rawRow {
	currentTime := fasttime.UnixTimestamp()
	flushSeconds := int64(pendingRowsFlushInterval.Seconds())
	if flushSeconds <= 0 {
		// Guard against sub-second flush intervals, which truncate to zero seconds.
		flushSeconds = 1
	}
	lastFlushTime := atomic.LoadUint64(&rrs.lastFlushTime)
	if !isFinal && currentTime < lastFlushTime+uint64(flushSeconds) {
		// Fast path - nothing to flush
		return dst
	}
	// Slow path - move rrs.rows to dst.
	rrs.mu.Lock()
	dst = append(dst, rrs.rows...)
	rrs.rows = rrs.rows[:0]
	atomic.StoreUint64(&rrs.lastFlushTime, currentTime)
	rrs.mu.Unlock()
	return dst
}
|
|
|
|
|
2022-12-06 00:15:00 +01:00
|
|
|
// mergePartsOptimal merges pws in chunks of up to defaultPartsToMerge parts.
//
// All the parts in pws must have the isInMerge flag set; it is reset before returning.
// The merge is interrupted when stopCh is closed.
func (pt *partition) mergePartsOptimal(pws []*partWrapper, stopCh <-chan struct{}) error {
	sortPartsForOptimalMerge(pws)
	for len(pws) > 0 {
		n := defaultPartsToMerge
		if n > len(pws) {
			n = len(pws)
		}
		pwsChunk := pws[:n]
		pws = pws[n:]
		err := pt.mergeParts(pwsChunk, stopCh, true)
		if err == nil {
			continue
		}
		// mergeParts has already released pwsChunk; release the remaining parts too.
		pt.releasePartsToMerge(pws)
		if errors.Is(err, errForciblyStopped) {
			// Graceful shutdown isn't an error.
			return nil
		}
		return fmt.Errorf("cannot merge parts optimally: %w", err)
	}
	return nil
}
|
|
|
|
|
2022-12-06 00:15:00 +01:00
|
|
|
// ForceMergeAllParts runs merge for all the parts in pt.
//
// The merge is skipped (with a throttled warning) if there isn't enough free disk
// space or if some parts are already being merged.
func (pt *partition) ForceMergeAllParts() error {
	pws := pt.getAllPartsForMerge()
	if len(pws) == 0 {
		// Nothing to merge.
		return nil
	}

	// Check whether there is enough disk space for merging pws.
	newPartSize := getPartsSize(pws)
	maxOutBytes := fs.MustGetFreeSpace(pt.bigPartsPath)
	if newPartSize > maxOutBytes {
		freeSpaceNeededBytes := newPartSize - maxOutBytes
		forceMergeLogger.Warnf("cannot initiate force merge for the partition %s; additional space needed: %d bytes", pt.name, freeSpaceNeededBytes)
		// Release the isInMerge flag set by getAllPartsForMerge.
		pt.releasePartsToMerge(pws)
		return nil
	}

	// If len(pws) == 1, then the merge must run anyway.
	// This allows applying the configured retention, removing the deleted series
	// and performing de-duplication if needed.
	if err := pt.mergePartsOptimal(pws, pt.stopCh); err != nil {
		return fmt.Errorf("cannot force merge %d parts from partition %q: %w", len(pws), pt.name, err)
	}

	return nil
}
|
|
|
|
|
2022-06-27 11:31:16 +02:00
|
|
|
// forceMergeLogger throttles "not enough disk space" warnings emitted by ForceMergeAllParts.
var forceMergeLogger = logger.WithThrottler("forceMerge", time.Minute)
|
|
|
|
|
2022-12-06 00:15:00 +01:00
|
|
|
func (pt *partition) getAllPartsForMerge() []*partWrapper {
|
|
|
|
var pws []*partWrapper
|
|
|
|
pt.partsLock.Lock()
|
|
|
|
if !hasActiveMerges(pt.inmemoryParts) && !hasActiveMerges(pt.smallParts) && !hasActiveMerges(pt.bigParts) {
|
|
|
|
pws = appendAllPartsForMerge(pws, pt.inmemoryParts)
|
|
|
|
pws = appendAllPartsForMerge(pws, pt.smallParts)
|
|
|
|
pws = appendAllPartsForMerge(pws, pt.bigParts)
|
|
|
|
}
|
|
|
|
pt.partsLock.Unlock()
|
|
|
|
return pws
|
|
|
|
}
|
|
|
|
|
|
|
|
// appendAllPartsForMerge marks every part in src as being merged and appends it to dst.
//
// Panics if some part in src already participates in a merge.
func appendAllPartsForMerge(dst, src []*partWrapper) []*partWrapper {
	for _, pw := range src {
		if pw.isInMerge {
			logger.Panicf("BUG: part %q is already in merge", pw.p.path)
		}
		pw.isInMerge = true
		dst = append(dst, pw)
	}
	return dst
}
|
|
|
|
|
|
|
|
func hasActiveMerges(pws []*partWrapper) bool {
|
|
|
|
for _, pw := range pws {
|
|
|
|
if pw.isInMerge {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2023-04-14 08:36:06 +02:00
|
|
|
// mergeWorkersLimitCh limits the number of concurrent part merges across all the partitions.
var mergeWorkersLimitCh = make(chan struct{}, getDefaultMergeConcurrency(16))
|
2022-12-06 00:15:00 +01:00
|
|
|
|
2022-06-07 13:55:09 +02:00
|
|
|
func getDefaultMergeConcurrency(max int) int {
|
|
|
|
v := (cgroup.AvailableCPUs() + 1) / 2
|
|
|
|
if v > max {
|
|
|
|
v = max
|
|
|
|
}
|
2023-04-14 08:36:06 +02:00
|
|
|
return adjustMergeWorkersLimit(v)
|
2022-06-07 13:55:09 +02:00
|
|
|
}
|
|
|
|
|
2022-12-06 00:15:00 +01:00
|
|
|
// SetMergeWorkersCount sets the maximum number of concurrent mergers for parts.
|
2019-10-31 15:16:53 +01:00
|
|
|
//
|
|
|
|
// The function must be called before opening or creating any storage.
|
2022-12-06 00:15:00 +01:00
|
|
|
func SetMergeWorkersCount(n int) {
|
2019-10-31 15:16:53 +01:00
|
|
|
if n <= 0 {
|
|
|
|
// Do nothing
|
|
|
|
return
|
|
|
|
}
|
2023-02-11 21:06:18 +01:00
|
|
|
n = adjustMergeWorkersLimit(n)
|
2022-12-06 00:15:00 +01:00
|
|
|
mergeWorkersLimitCh = make(chan struct{}, n)
|
2019-10-31 15:16:53 +01:00
|
|
|
}
|
2019-10-29 11:45:19 +01:00
|
|
|
|
2023-02-11 21:06:18 +01:00
|
|
|
// adjustMergeWorkersLimit returns n raised to at least 4.
//
// Allow at least 4 merge workers on systems with small CPUs count
// in order to guarantee that background merges can be continued
// when multiple workers are busy with big merges.
func adjustMergeWorkersLimit(n int) int {
	if n >= 4 {
		return n
	}
	return 4
}
|
|
|
|
|
2019-05-22 23:16:55 +02:00
|
|
|
func (pt *partition) startMergeWorkers() {
|
2022-12-13 01:49:21 +01:00
|
|
|
// The actual number of concurrent merges is limited inside mergeWorker() below.
|
2023-04-14 08:36:06 +02:00
|
|
|
for i := 0; i < cap(mergeWorkersLimitCh); i++ {
|
2022-12-04 08:03:05 +01:00
|
|
|
pt.wg.Add(1)
|
2019-05-22 23:16:55 +02:00
|
|
|
go func() {
|
2022-12-06 00:15:00 +01:00
|
|
|
pt.mergeWorker()
|
2022-12-04 08:03:05 +01:00
|
|
|
pt.wg.Done()
|
2019-05-22 23:16:55 +02:00
|
|
|
}()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-12-06 00:15:00 +01:00
|
|
|
// mergeWorker continuously merges parts in pt until pt.stopCh is closed.
func (pt *partition) mergeWorker() {
	var lastMergeTime uint64
	isFinal := false
	for {
		// Limit the number of concurrent calls to mergeExistingParts, since the total number of merge workers
		// across partitions may exceed the cap(mergeWorkersLimitCh).
		mergeWorkersLimitCh <- struct{}{}
		err := pt.mergeExistingParts(isFinal)
		<-mergeWorkersLimitCh
		if err == nil {
			// Try merging additional parts.
			lastMergeTime = fasttime.UnixTimestamp()
			isFinal = false
			continue
		}
		if errors.Is(err, errForciblyStopped) {
			// The merger has been stopped.
			return
		}
		if !errors.Is(err, errNothingToMerge) && !errors.Is(err, errReadOnlyMode) {
			// Unexpected error.
			logger.Panicf("FATAL: unrecoverable error when merging parts in the partition (%q, %q): %s", pt.smallPartsPath, pt.bigPartsPath, err)
		}
		if finalMergeDelaySeconds > 0 && fasttime.UnixTimestamp()-lastMergeTime > finalMergeDelaySeconds {
			// We have free time for merging into bigger parts.
			// This should improve select performance.
			lastMergeTime = fasttime.UnixTimestamp()
			isFinal = true
			continue
		}

		// Nothing to merge. Wait for the notification of new merge.
		select {
		case <-pt.stopCh:
			return
		case <-pt.needMergeCh:
		}
	}
}
|
|
|
|
|
2021-01-07 23:09:00 +01:00
|
|
|
// finalMergeDelaySeconds is the delay in seconds before running the final merge
// for partitions without newly ingested data.
//
// Disable final merge by default, since it may lead to high disk IO and CPU usage
// at the beginning of every month when merging data for the previous month.
var finalMergeDelaySeconds = uint64(0)
|
2020-10-07 16:35:42 +02:00
|
|
|
|
|
|
|
// SetFinalMergeDelay sets the delay before doing final merge for partitions without newly ingested data.
//
// This function may be called only before Storage initialization.
func SetFinalMergeDelay(delay time.Duration) {
	if delay <= 0 {
		// Keep the final merge disabled.
		return
	}
	// +1 second compensates for the truncation in delay.Seconds().
	finalMergeDelaySeconds = uint64(delay.Seconds() + 1)
	mergeset.SetFinalMergeDelay(delay)
}
|
|
|
|
|
|
|
|
func getMaxInmemoryPartSize() uint64 {
|
|
|
|
// Allocate 10% of allowed memory for in-memory parts.
|
|
|
|
n := uint64(0.1 * float64(memory.Allowed()) / maxInmemoryPartsPerPartition)
|
|
|
|
if n < 1e6 {
|
|
|
|
n = 1e6
|
|
|
|
}
|
|
|
|
return n
|
|
|
|
}
|
|
|
|
|
|
|
|
// getMaxSmallPartSize returns the maximum size in bytes for a small part in pt.
func (pt *partition) getMaxSmallPartSize() uint64 {
	// Small parts are cached in the OS page cache,
	// so limit their size by the remaining free RAM.
	mem := memory.Remaining()
	// It is expected no more than defaultPartsToMerge/2 parts exist
	// in the OS page cache before they are merged into bigger part.
	// Half of the remaining RAM must be left for lib/mergeset parts,
	// so the maxItems is calculated using the below code:
	n := uint64(mem) / defaultPartsToMerge
	if n < 10e6 {
		// Enforce a 10MB floor so small parts aren't pathologically tiny on low-RAM systems.
		n = 10e6
	}
	// Make sure the output part fits available disk space for small parts.
	sizeLimit := getMaxOutBytes(pt.smallPartsPath, cap(mergeWorkersLimitCh))
	if n > sizeLimit {
		n = sizeLimit
	}
	return n
}
|
|
|
|
|
|
|
|
// getMaxBigPartSize returns the maximum size in bytes for a big part in pt.
func (pt *partition) getMaxBigPartSize() uint64 {
	// Always use 4 workers for big merges due to historical reasons.
	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4915#issuecomment-1733922830
	workersCount := 4
	return getMaxOutBytes(pt.bigPartsPath, workersCount)
}
|
|
|
|
|
2021-08-25 08:35:03 +02:00
|
|
|
func getMaxOutBytes(path string, workersCount int) uint64 {
|
|
|
|
n := fs.MustGetFreeSpace(path)
|
2022-12-13 01:49:21 +01:00
|
|
|
// Do not subtract freeDiskSpaceLimitBytes from n before calculating the maxOutBytes,
|
2021-12-01 09:56:21 +01:00
|
|
|
// since this will result in sub-optimal merges - e.g. many small parts will be left unmerged.
|
|
|
|
|
2022-12-13 01:49:21 +01:00
|
|
|
// Divide free space by the max number of concurrent merges.
|
2021-08-25 08:35:03 +02:00
|
|
|
maxOutBytes := n / uint64(workersCount)
|
|
|
|
if maxOutBytes > maxBigPartSize {
|
|
|
|
maxOutBytes = maxBigPartSize
|
2019-08-25 13:10:43 +02:00
|
|
|
}
|
2021-08-25 08:35:03 +02:00
|
|
|
return maxOutBytes
|
2019-05-22 23:16:55 +02:00
|
|
|
}
|
|
|
|
|
2022-06-01 13:21:12 +02:00
|
|
|
// canBackgroundMerge returns whether background merges are allowed,
// i.e. whether the storage isn't in read-only mode.
func (pt *partition) canBackgroundMerge() bool {
	return atomic.LoadUint32(&pt.s.isReadOnly) == 0
}
|
|
|
|
|
2022-08-09 11:17:00 +02:00
|
|
|
// errReadOnlyMode is returned from merge attempts when the storage is in read-only mode.
// Use errors.New instead of fmt.Errorf, since the message contains no format verbs.
var errReadOnlyMode = errors.New("storage is in readonly mode")
|
|
|
|
|
2022-12-06 00:15:00 +01:00
|
|
|
// mergeInmemoryParts merges the next batch of in-memory parts.
func (pt *partition) mergeInmemoryParts() error {
	maxOutBytes := pt.getMaxBigPartSize()

	// getPartsToMerge marks the selected parts with the isInMerge flag under the lock.
	pt.partsLock.Lock()
	pws := getPartsToMerge(pt.inmemoryParts, maxOutBytes, false)
	pt.partsLock.Unlock()

	return pt.mergeParts(pws, pt.stopCh, false)
}
|
|
|
|
|
2022-12-06 00:15:00 +01:00
|
|
|
// mergeExistingParts merges the next batch of parts selected across
// in-memory, small and big parts.
//
// If isFinal is set, then the resulting part is saved to disk.
func (pt *partition) mergeExistingParts(isFinal bool) error {
	if !pt.canBackgroundMerge() {
		// Do not perform merge in read-only mode, since this may result in disk space shortage.
		// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2603
		return errReadOnlyMode
	}
	maxOutBytes := pt.getMaxBigPartSize()

	// getPartsToMerge marks the selected parts with the isInMerge flag under the lock.
	pt.partsLock.Lock()
	dst := make([]*partWrapper, 0, len(pt.inmemoryParts)+len(pt.smallParts)+len(pt.bigParts))
	dst = append(dst, pt.inmemoryParts...)
	dst = append(dst, pt.smallParts...)
	dst = append(dst, pt.bigParts...)
	pws := getPartsToMerge(dst, maxOutBytes, isFinal)
	pt.partsLock.Unlock()

	return pt.mergeParts(pws, pt.stopCh, isFinal)
}
|
|
|
|
|
2023-10-02 08:04:59 +02:00
|
|
|
func assertIsInMerge(pws []*partWrapper) {
|
|
|
|
for _, pw := range pws {
|
|
|
|
if !pw.isInMerge {
|
|
|
|
logger.Panicf("BUG: partWrapper.isInMerge unexpectedly set to false")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-12-18 22:14:35 +01:00
|
|
|
// releasePartsToMerge clears the isInMerge flag on all the parts in pws,
// making them available for subsequent merges.
//
// Panics if some part doesn't have the flag set.
func (pt *partition) releasePartsToMerge(pws []*partWrapper) {
	pt.partsLock.Lock()
	for _, pw := range pws {
		if !pw.isInMerge {
			logger.Panicf("BUG: missing isInMerge flag on the part %q", pw.p.path)
		}
		pw.isInMerge = false
	}
	pt.partsLock.Unlock()
}
|
|
|
|
|
2019-05-22 23:16:55 +02:00
|
|
|
// errNothingToMerge is returned from mergeParts when there are no parts to merge.
// Use errors.New instead of fmt.Errorf, since the message contains no format verbs.
var errNothingToMerge = errors.New("nothing to merge")
|
|
|
|
|
2021-12-15 14:58:27 +01:00
|
|
|
// runFinalDedup applies the configured dedup interval to all the data in pt
// by force-merging all its parts.
func (pt *partition) runFinalDedup() error {
	requiredDedupInterval, actualDedupInterval := pt.getRequiredDedupInterval()
	t := time.Now()
	logger.Infof("starting final dedup for partition %s using requiredDedupInterval=%d ms, since the partition has smaller actualDedupInterval=%d ms",
		pt.bigPartsPath, requiredDedupInterval, actualDedupInterval)
	if err := pt.ForceMergeAllParts(); err != nil {
		return fmt.Errorf("cannot perform final dedup for partition %s: %w", pt.bigPartsPath, err)
	}
	logger.Infof("final dedup for partition %s has been finished in %.3f seconds", pt.bigPartsPath, time.Since(t).Seconds())
	return nil
}
|
|
|
|
|
2022-12-20 19:11:38 +01:00
|
|
|
// isFinalDedupNeeded returns whether the final dedup must be run for pt,
// i.e. whether its parts were deduplicated with a smaller interval than the configured one.
func (pt *partition) isFinalDedupNeeded() bool {
	requiredDedupInterval, actualDedupInterval := pt.getRequiredDedupInterval()
	return requiredDedupInterval > actualDedupInterval
}
|
|
|
|
|
2021-12-17 19:11:15 +01:00
|
|
|
// getRequiredDedupInterval returns the configured dedup interval together with
// the smallest dedup interval already applied to file-based parts in pt.
func (pt *partition) getRequiredDedupInterval() (int64, int64) {
	// In-memory parts are excluded from the snapshot (addInMemory=false).
	pws := pt.GetParts(nil, false)
	defer pt.PutParts(pws)
	dedupInterval := GetDedupInterval()
	minDedupInterval := getMinDedupInterval(pws)
	return dedupInterval, minDedupInterval
}
|
|
|
|
|
|
|
|
func getMinDedupInterval(pws []*partWrapper) int64 {
|
|
|
|
if len(pws) == 0 {
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
dMin := pws[0].p.ph.MinDedupInterval
|
|
|
|
for _, pw := range pws[1:] {
|
|
|
|
d := pw.p.ph.MinDedupInterval
|
|
|
|
if d < dMin {
|
|
|
|
dMin = d
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return dMin
|
2021-12-15 14:58:27 +01:00
|
|
|
}
|
|
|
|
|
2022-12-06 00:15:00 +01:00
|
|
|
// mergeParts merges pws to a single resulting part.
//
// Merging is immediately stopped if stopCh is closed.
//
// if isFinal is set, then the resulting part will be saved to disk.
//
// All the parts inside pws must have isInMerge field set to true.
// The isInMerge field inside pws parts is set to false before returning from the function.
func (pt *partition) mergeParts(pws []*partWrapper, stopCh <-chan struct{}, isFinal bool) error {
	if len(pws) == 0 {
		// Nothing to merge.
		return errNothingToMerge
	}

	assertIsInMerge(pws)
	defer pt.releasePartsToMerge(pws)

	startTime := time.Now()

	// Initialize destination paths.
	dstPartType := pt.getDstPartType(pws, isFinal)
	mergeIdx := pt.nextMergeIdx()
	dstPartPath := pt.getDstPartPath(dstPartType, mergeIdx)

	if !isDedupEnabled() && isFinal && len(pws) == 1 && pws[0].mp != nil {
		// Fast path: flush a single in-memory part to disk.
		mp := pws[0].mp
		mp.MustStoreToDisk(dstPartPath)
		pwNew := pt.openCreatedPart(&mp.ph, pws, nil, dstPartPath)
		pt.swapSrcWithDstParts(pws, pwNew, dstPartType)
		return nil
	}

	// Prepare BlockStreamReaders for source parts.
	bsrs := mustOpenBlockStreamReaders(pws)

	// Prepare BlockStreamWriter for destination part.
	srcSize := uint64(0)
	srcRowsCount := uint64(0)
	srcBlocksCount := uint64(0)
	for _, pw := range pws {
		srcSize += pw.p.size
		srcRowsCount += pw.p.ph.RowsCount
		srcBlocksCount += pw.p.ph.BlocksCount
	}
	// Choose the compression level according to the average rows per block.
	rowsPerBlock := float64(srcRowsCount) / float64(srcBlocksCount)
	compressLevel := getCompressLevel(rowsPerBlock)
	bsw := getBlockStreamWriter()
	var mpNew *inmemoryPart
	if dstPartType == partInmemory {
		mpNew = getInmemoryPart()
		bsw.MustInitFromInmemoryPart(mpNew, compressLevel)
	} else {
		if dstPartPath == "" {
			logger.Panicf("BUG: dstPartPath must be non-empty")
		}
		// Bypass the OS page cache when writing big parts.
		nocache := dstPartType == partBig
		bsw.MustInitFromFilePart(dstPartPath, nocache, compressLevel)
	}

	// Merge source parts to destination part.
	ph, err := pt.mergePartsInternal(dstPartPath, bsw, bsrs, dstPartType, stopCh)
	putBlockStreamWriter(bsw)
	for _, bsr := range bsrs {
		putBlockStreamReader(bsr)
	}
	if err != nil {
		return err
	}
	if mpNew != nil {
		// Update partHeader for destination inmemory part after the merge.
		mpNew.ph = *ph
	} else {
		// Make sure the created part directory listing is synced.
		fs.MustSyncPath(dstPartPath)
	}

	// Atomically swap the source parts with the newly created part.
	pwNew := pt.openCreatedPart(ph, pws, mpNew, dstPartPath)

	// Collect stats for the resulting part; pwNew may be nil when the result is empty.
	dstRowsCount := uint64(0)
	dstBlocksCount := uint64(0)
	dstSize := uint64(0)
	if pwNew != nil {
		pDst := pwNew.p
		dstRowsCount = pDst.ph.RowsCount
		dstBlocksCount = pDst.ph.BlocksCount
		dstSize = pDst.size
	}

	pt.swapSrcWithDstParts(pws, pwNew, dstPartType)

	d := time.Since(startTime)
	if d <= 30*time.Second {
		return nil
	}

	// Log stats for long merges.
	durationSecs := d.Seconds()
	rowsPerSec := int(float64(srcRowsCount) / durationSecs)
	logger.Infof("merged (%d parts, %d rows, %d blocks, %d bytes) into (1 part, %d rows, %d blocks, %d bytes) in %.3f seconds at %d rows/sec to %q",
		len(pws), srcRowsCount, srcBlocksCount, srcSize, dstRowsCount, dstBlocksCount, dstSize, durationSecs, rowsPerSec, dstPartPath)

	return nil
}
|
|
|
|
|
|
|
|
func getFlushToDiskDeadline(pws []*partWrapper) time.Time {
|
2023-04-14 08:17:10 +02:00
|
|
|
d := time.Now().Add(dataFlushInterval)
|
|
|
|
for _, pw := range pws {
|
|
|
|
if pw.mp != nil && pw.flushToDiskDeadline.Before(d) {
|
2022-12-06 00:15:00 +01:00
|
|
|
d = pw.flushToDiskDeadline
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return d
|
|
|
|
}
|
|
|
|
|
|
|
|
// partType identifies where a part is stored: in memory, in the small-parts
// directory or in the big-parts directory.
type partType int

var (
	partInmemory = partType(0)
	partSmall    = partType(1)
	partBig      = partType(2)
)
|
|
|
|
|
|
|
|
// getDstPartType returns the type of the part resulting from merging pws.
//
// The check order matters: size limits are applied before the durability check.
func (pt *partition) getDstPartType(pws []*partWrapper, isFinal bool) partType {
	dstPartSize := getPartsSize(pws)
	if dstPartSize > pt.getMaxSmallPartSize() {
		return partBig
	}
	if isFinal || dstPartSize > getMaxInmemoryPartSize() {
		return partSmall
	}
	if !areAllInmemoryParts(pws) {
		// If at least a single source part is located in file,
		// then the destination part must be in file for durability reasons.
		return partSmall
	}
	return partInmemory
}
|
|
|
|
|
2023-03-19 09:36:05 +01:00
|
|
|
func (pt *partition) getDstPartPath(dstPartType partType, mergeIdx uint64) string {
|
2022-12-06 00:15:00 +01:00
|
|
|
ptPath := ""
|
|
|
|
switch dstPartType {
|
|
|
|
case partSmall:
|
|
|
|
ptPath = pt.smallPartsPath
|
|
|
|
case partBig:
|
2019-05-22 23:16:55 +02:00
|
|
|
ptPath = pt.bigPartsPath
|
2022-12-06 00:15:00 +01:00
|
|
|
case partInmemory:
|
|
|
|
ptPath = pt.smallPartsPath
|
|
|
|
default:
|
|
|
|
logger.Panicf("BUG: unknown partType=%d", dstPartType)
|
2019-05-22 23:16:55 +02:00
|
|
|
}
|
2023-03-19 09:36:05 +01:00
|
|
|
dstPartPath := ""
|
2022-12-06 00:15:00 +01:00
|
|
|
if dstPartType != partInmemory {
|
2023-03-25 22:33:54 +01:00
|
|
|
dstPartPath = filepath.Join(ptPath, fmt.Sprintf("%016X", mergeIdx))
|
2019-05-22 23:16:55 +02:00
|
|
|
}
|
2023-03-19 09:36:05 +01:00
|
|
|
return dstPartPath
|
2022-12-06 00:15:00 +01:00
|
|
|
}
|
2019-05-22 23:16:55 +02:00
|
|
|
|
2023-04-15 00:46:09 +02:00
|
|
|
func mustOpenBlockStreamReaders(pws []*partWrapper) []*blockStreamReader {
|
2022-12-06 00:15:00 +01:00
|
|
|
bsrs := make([]*blockStreamReader, 0, len(pws))
|
|
|
|
for _, pw := range pws {
|
|
|
|
bsr := getBlockStreamReader()
|
|
|
|
if pw.mp != nil {
|
2023-04-15 00:46:09 +02:00
|
|
|
bsr.MustInitFromInmemoryPart(pw.mp)
|
2022-12-06 00:15:00 +01:00
|
|
|
} else {
|
2023-04-15 00:46:09 +02:00
|
|
|
bsr.MustInitFromFilePart(pw.p.path)
|
2022-12-06 00:15:00 +01:00
|
|
|
}
|
|
|
|
bsrs = append(bsrs, bsr)
|
|
|
|
}
|
2023-04-15 00:46:09 +02:00
|
|
|
return bsrs
|
2022-12-06 00:15:00 +01:00
|
|
|
}
|
|
|
|
|
2023-03-19 09:36:05 +01:00
|
|
|
// mergePartsInternal merges the source parts read from bsrs into the destination
// part written via bsw and returns the header of the resulting part.
//
// dstPartPath is empty for in-memory destination parts; for file-based parts
// the resulting metadata is written there.
func (pt *partition) mergePartsInternal(dstPartPath string, bsw *blockStreamWriter, bsrs []*blockStreamReader, dstPartType partType, stopCh <-chan struct{}) (*partHeader, error) {
	var ph partHeader
	var rowsMerged *uint64
	var rowsDeleted *uint64
	var mergesCount *uint64
	var activeMerges *uint64
	// Select the per-type counters to update during the merge.
	switch dstPartType {
	case partInmemory:
		rowsMerged = &pt.inmemoryRowsMerged
		rowsDeleted = &pt.inmemoryRowsDeleted
		mergesCount = &pt.inmemoryMergesCount
		activeMerges = &pt.activeInmemoryMerges
	case partSmall:
		rowsMerged = &pt.smallRowsMerged
		rowsDeleted = &pt.smallRowsDeleted
		mergesCount = &pt.smallMergesCount
		activeMerges = &pt.activeSmallMerges
	case partBig:
		rowsMerged = &pt.bigRowsMerged
		rowsDeleted = &pt.bigRowsDeleted
		mergesCount = &pt.bigMergesCount
		activeMerges = &pt.activeBigMerges
	default:
		logger.Panicf("BUG: unknown partType=%d", dstPartType)
	}
	// Rows older than retentionDeadline are dropped during the merge.
	retentionDeadline := timestampFromTime(time.Now()) - pt.s.retentionMsecs
	atomic.AddUint64(activeMerges, 1)
	err := mergeBlockStreams(&ph, bsw, bsrs, stopCh, pt.s, retentionDeadline, rowsMerged, rowsDeleted)
	// Adding ^uint64(0) decrements the counter atomically.
	atomic.AddUint64(activeMerges, ^uint64(0))
	atomic.AddUint64(mergesCount, 1)
	if err != nil {
		return nil, fmt.Errorf("cannot merge %d parts to %s: %w", len(bsrs), dstPartPath, err)
	}
	if dstPartPath != "" {
		ph.MinDedupInterval = GetDedupInterval()
		ph.MustWriteMetadata(dstPartPath)
	}
	return &ph, nil
}
|
2019-05-22 23:16:55 +02:00
|
|
|
|
2023-03-19 09:36:05 +01:00
|
|
|
func (pt *partition) openCreatedPart(ph *partHeader, pws []*partWrapper, mpNew *inmemoryPart, dstPartPath string) *partWrapper {
|
2022-12-06 00:15:00 +01:00
|
|
|
// Open the created part.
|
|
|
|
if ph.RowsCount == 0 {
|
2023-03-19 09:36:05 +01:00
|
|
|
// The created part is empty. Remove it
|
|
|
|
if mpNew == nil {
|
|
|
|
fs.MustRemoveAll(dstPartPath)
|
|
|
|
}
|
|
|
|
return nil
|
2019-05-22 23:16:55 +02:00
|
|
|
}
|
2022-12-06 00:15:00 +01:00
|
|
|
if mpNew != nil {
|
|
|
|
// Open the created part from memory.
|
|
|
|
flushToDiskDeadline := getFlushToDiskDeadline(pws)
|
|
|
|
pwNew := newPartWrapperFromInmemoryPart(mpNew, flushToDiskDeadline)
|
2023-03-19 09:36:05 +01:00
|
|
|
return pwNew
|
2019-05-22 23:16:55 +02:00
|
|
|
}
|
2022-12-06 00:15:00 +01:00
|
|
|
// Open the created part from disk.
|
2023-04-15 00:46:09 +02:00
|
|
|
pNew := mustOpenFilePart(dstPartPath)
|
2022-12-06 00:15:00 +01:00
|
|
|
pwNew := &partWrapper{
|
|
|
|
p: pNew,
|
|
|
|
refCount: 1,
|
2019-05-22 23:16:55 +02:00
|
|
|
}
|
2023-03-19 09:36:05 +01:00
|
|
|
return pwNew
|
2022-12-06 00:15:00 +01:00
|
|
|
}
|
2019-05-22 23:16:55 +02:00
|
|
|
|
2022-12-06 00:15:00 +01:00
|
|
|
func areAllInmemoryParts(pws []*partWrapper) bool {
|
|
|
|
for _, pw := range pws {
|
|
|
|
if pw.mp == nil {
|
|
|
|
return false
|
2019-05-22 23:16:55 +02:00
|
|
|
}
|
|
|
|
}
|
2022-12-06 00:15:00 +01:00
|
|
|
return true
|
|
|
|
}
|
2019-05-22 23:16:55 +02:00
|
|
|
|
2022-12-06 00:15:00 +01:00
|
|
|
// swapSrcWithDstParts atomically replaces the source parts pws with the single
// destination part pwNew (of the given dstPartType) inside pt.
//
// pwNew may be nil - then pws are just removed without a replacement
// (e.g. when stale parts are dropped). The removed parts are marked for
// deletion and released via decRef, so they are closed and deleted once
// all the readers finish with them.
func (pt *partition) swapSrcWithDstParts(pws []*partWrapper, pwNew *partWrapper, dstPartType partType) {
	// Atomically unregister old parts and add new part to pt.
	m := make(map[*partWrapper]bool, len(pws))
	for _, pw := range pws {
		m[pw] = true
	}
	if len(m) != len(pws) {
		logger.Panicf("BUG: %d duplicate parts found when merging %d parts", len(pws)-len(m), len(pws))
	}
	removedInmemoryParts := 0
	removedSmallParts := 0
	removedBigParts := 0

	pt.partsLock.Lock()

	// The source parts may live in any of the three lists; remove them from all.
	pt.inmemoryParts, removedInmemoryParts = removeParts(pt.inmemoryParts, m)
	pt.smallParts, removedSmallParts = removeParts(pt.smallParts, m)
	pt.bigParts, removedBigParts = removeParts(pt.bigParts, m)
	if pwNew != nil {
		switch dstPartType {
		case partInmemory:
			pt.inmemoryParts = append(pt.inmemoryParts, pwNew)
		case partSmall:
			pt.smallParts = append(pt.smallParts, pwNew)
		case partBig:
			pt.bigParts = append(pt.bigParts, pwNew)
		default:
			logger.Panicf("BUG: unknown partType=%d", dstPartType)
		}
		// Wake up background mergers, since the new part may be merged further.
		pt.notifyBackgroundMergers()
	}

	// Atomically store the updated list of file-based parts on disk.
	// This must be performed under partsLock in order to prevent from races
	// when multiple concurrently running goroutines update the list.
	if removedSmallParts > 0 || removedBigParts > 0 || pwNew != nil && (dstPartType == partSmall || dstPartType == partBig) {
		mustWritePartNames(pt.smallParts, pt.bigParts, pt.smallPartsPath)
	}

	pt.partsLock.Unlock()

	// Every source part must have been found in exactly one of the lists.
	removedParts := removedInmemoryParts + removedSmallParts + removedBigParts
	if removedParts != len(m) {
		logger.Panicf("BUG: unexpected number of parts removed; got %d, want %d", removedParts, len(m))
	}

	// Mark old parts as must be deleted and decrement reference count,
	// so they are eventually closed and deleted.
	for _, pw := range pws {
		atomic.StoreUint32(&pw.mustBeDeleted, 1)
		pw.decRef()
	}
}
|
|
|
|
|
2022-12-04 08:10:16 +01:00
|
|
|
// getCompressLevel returns the compression level to use for blocks
// containing rowsPerBlock rows on average. Sparse blocks get negative
// (faster, lighter) levels, dense blocks get higher levels.
//
// See https://github.com/facebook/zstd/releases/tag/v1.3.4 about negative compression levels.
func getCompressLevel(rowsPerBlock float64) int {
	switch {
	case rowsPerBlock <= 10:
		return -5
	case rowsPerBlock <= 50:
		return -2
	case rowsPerBlock <= 200:
		return -1
	case rowsPerBlock <= 500:
		return 1
	case rowsPerBlock <= 1000:
		return 2
	case rowsPerBlock <= 2000:
		return 3
	case rowsPerBlock <= 4000:
		return 4
	default:
		return 5
	}
}
|
|
|
|
|
|
|
|
// nextMergeIdx returns the next unique index used for naming merge output parts.
func (pt *partition) nextMergeIdx() uint64 {
	return atomic.AddUint64(&pt.mergeIdx, 1)
}
|
|
|
|
|
2022-12-06 00:15:00 +01:00
|
|
|
func removeParts(pws []*partWrapper, partsToRemove map[*partWrapper]bool) ([]*partWrapper, int) {
|
2019-05-22 23:16:55 +02:00
|
|
|
dst := pws[:0]
|
|
|
|
for _, pw := range pws {
|
2020-09-17 01:05:54 +02:00
|
|
|
if !partsToRemove[pw] {
|
|
|
|
dst = append(dst, pw)
|
2019-05-22 23:16:55 +02:00
|
|
|
}
|
|
|
|
}
|
2022-12-06 00:15:00 +01:00
|
|
|
for i := len(dst); i < len(pws); i++ {
|
|
|
|
pws[i] = nil
|
|
|
|
}
|
|
|
|
return dst, len(pws) - len(dst)
|
2019-05-22 23:16:55 +02:00
|
|
|
}
|
|
|
|
|
2020-12-22 18:48:27 +01:00
|
|
|
// startStalePartsRemover starts a background goroutine, which periodically
// drops parts containing only data outside the configured retention.
//
// The goroutine is registered in pt.wg and exits when pt.stopCh is closed.
func (pt *partition) startStalePartsRemover() {
	pt.wg.Add(1)
	go func() {
		pt.stalePartsRemover()
		pt.wg.Done()
	}()
}
|
|
|
|
|
|
|
|
// stalePartsRemover periodically removes stale parts until pt.stopCh is closed.
func (pt *partition) stalePartsRemover() {
	// The 7-minute period is a trade-off between the timeliness of stale parts
	// removal and the overhead it induces.
	ticker := time.NewTicker(7 * time.Minute)
	defer ticker.Stop()
	for {
		select {
		case <-pt.stopCh:
			return
		case <-ticker.C:
			pt.removeStaleParts()
		}
	}
}
|
|
|
|
|
|
|
|
func (pt *partition) removeStaleParts() {
|
|
|
|
startTime := time.Now()
|
2022-10-24 00:30:50 +02:00
|
|
|
retentionDeadline := timestampFromTime(startTime) - pt.s.retentionMsecs
|
2020-12-22 18:48:27 +01:00
|
|
|
|
2023-03-19 09:36:05 +01:00
|
|
|
var pws []*partWrapper
|
2020-12-22 18:48:27 +01:00
|
|
|
pt.partsLock.Lock()
|
2022-12-06 00:15:00 +01:00
|
|
|
for _, pw := range pt.inmemoryParts {
|
2020-12-24 07:50:10 +01:00
|
|
|
if !pw.isInMerge && pw.p.ph.MaxTimestamp < retentionDeadline {
|
2022-12-06 00:15:00 +01:00
|
|
|
atomic.AddUint64(&pt.inmemoryRowsDeleted, pw.p.ph.RowsCount)
|
2023-03-19 09:36:05 +01:00
|
|
|
pw.isInMerge = true
|
|
|
|
pws = append(pws, pw)
|
2020-12-22 18:48:27 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
for _, pw := range pt.smallParts {
|
2020-12-24 07:50:10 +01:00
|
|
|
if !pw.isInMerge && pw.p.ph.MaxTimestamp < retentionDeadline {
|
2020-12-22 18:48:27 +01:00
|
|
|
atomic.AddUint64(&pt.smallRowsDeleted, pw.p.ph.RowsCount)
|
2023-03-19 09:36:05 +01:00
|
|
|
pw.isInMerge = true
|
|
|
|
pws = append(pws, pw)
|
2020-12-22 18:48:27 +01:00
|
|
|
}
|
|
|
|
}
|
2022-12-06 00:15:00 +01:00
|
|
|
for _, pw := range pt.bigParts {
|
|
|
|
if !pw.isInMerge && pw.p.ph.MaxTimestamp < retentionDeadline {
|
|
|
|
atomic.AddUint64(&pt.bigRowsDeleted, pw.p.ph.RowsCount)
|
2023-03-19 09:36:05 +01:00
|
|
|
pw.isInMerge = true
|
|
|
|
pws = append(pws, pw)
|
2022-12-06 00:15:00 +01:00
|
|
|
}
|
|
|
|
}
|
2020-12-22 18:48:27 +01:00
|
|
|
pt.partsLock.Unlock()
|
|
|
|
|
2023-03-19 09:36:05 +01:00
|
|
|
pt.swapSrcWithDstParts(pws, nil, partSmall)
|
2020-12-22 18:48:27 +01:00
|
|
|
}
|
|
|
|
|
2019-05-22 23:16:55 +02:00
|
|
|
// getPartsToMerge returns optimal parts to merge from pws.
|
|
|
|
//
|
2021-08-25 08:35:03 +02:00
|
|
|
// The summary size of the returned parts must be smaller than maxOutBytes.
|
2023-09-25 16:52:37 +02:00
|
|
|
func getPartsToMerge(pws []*partWrapper, maxOutBytes uint64, isFinal bool) []*partWrapper {
|
2019-05-22 23:16:55 +02:00
|
|
|
pwsRemaining := make([]*partWrapper, 0, len(pws))
|
|
|
|
for _, pw := range pws {
|
|
|
|
if !pw.isInMerge {
|
|
|
|
pwsRemaining = append(pwsRemaining, pw)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
maxPartsToMerge := defaultPartsToMerge
|
|
|
|
var pms []*partWrapper
|
|
|
|
if isFinal {
|
|
|
|
for len(pms) == 0 && maxPartsToMerge >= finalPartsToMerge {
|
2023-09-25 16:52:37 +02:00
|
|
|
pms = appendPartsToMerge(pms[:0], pwsRemaining, maxPartsToMerge, maxOutBytes)
|
2019-05-22 23:16:55 +02:00
|
|
|
maxPartsToMerge--
|
|
|
|
}
|
|
|
|
} else {
|
2023-09-25 16:52:37 +02:00
|
|
|
pms = appendPartsToMerge(pms[:0], pwsRemaining, maxPartsToMerge, maxOutBytes)
|
2019-05-22 23:16:55 +02:00
|
|
|
}
|
|
|
|
for _, pw := range pms {
|
|
|
|
if pw.isInMerge {
|
|
|
|
logger.Panicf("BUG: partWrapper.isInMerge cannot be set")
|
|
|
|
}
|
|
|
|
pw.isInMerge = true
|
|
|
|
}
|
2023-09-25 16:52:37 +02:00
|
|
|
return pms
|
2019-05-22 23:16:55 +02:00
|
|
|
}
|
|
|
|
|
2021-08-25 08:35:03 +02:00
|
|
|
// minMergeMultiplier is the minimum multiplier for the size of the output part
// compared to the size of the maximum input part for the merge.
//
// Higher value reduces write amplification (disk write IO induced by the merge),
// while increases the number of unmerged parts.
// The 1.7 is good enough for production workloads.
// See appendPartsToMerge, which uses this constant for both input filtering
// and for rejecting merges with too low write-amplification gain.
const minMergeMultiplier = 1.7
|
|
|
|
|
2023-09-25 16:52:37 +02:00
|
|
|
// appendPartsToMerge finds optimal parts to merge from src, appends them to dst and returns the result.
func appendPartsToMerge(dst, src []*partWrapper, maxPartsToMerge int, maxOutBytes uint64) []*partWrapper {
	if len(src) < 2 {
		// There is no need in merging zero or one part :)
		return dst
	}
	if maxPartsToMerge < 2 {
		logger.Panicf("BUG: maxPartsToMerge cannot be smaller than 2; got %d", maxPartsToMerge)
	}

	// Filter out too big parts.
	// This should reduce N for O(N^2) algorithm below.
	maxInPartBytes := uint64(float64(maxOutBytes) / minMergeMultiplier)
	tmp := make([]*partWrapper, 0, len(src))
	for _, pw := range src {
		if pw.p.size > maxInPartBytes {
			continue
		}
		tmp = append(tmp, pw)
	}
	src = tmp

	// After the sort src is ordered by ascending part size,
	// so every candidate window src[j:j+i] below is a run of similar-sized parts.
	sortPartsForOptimalMerge(src)

	maxSrcParts := maxPartsToMerge
	if maxSrcParts > len(src) {
		maxSrcParts = len(src)
	}
	minSrcParts := (maxSrcParts + 1) / 2
	if minSrcParts < 2 {
		minSrcParts = 2
	}

	// Exhaustive search for parts giving the lowest write amplification when merged.
	var pws []*partWrapper
	maxM := float64(0)
	for i := minSrcParts; i <= maxSrcParts; i++ {
		for j := 0; j <= len(src)-i; j++ {
			a := src[j : j+i]
			if a[0].p.size*uint64(len(a)) < a[len(a)-1].p.size {
				// Do not merge parts with too big difference in size,
				// since this results in unbalanced merges.
				continue
			}
			outSize := getPartsSize(a)
			if outSize > maxOutBytes {
				// There is no need in verifying remaining parts with bigger sizes.
				break
			}
			// m is the merge "gain": output size relative to the biggest input part.
			// Bigger m means more data merged per byte of the largest rewritten part.
			m := float64(outSize) / float64(a[len(a)-1].p.size)
			if m < maxM {
				continue
			}
			maxM = m
			pws = a
		}
	}

	minM := float64(maxPartsToMerge) / 2
	if minM < minMergeMultiplier {
		minM = minMergeMultiplier
	}
	if maxM < minM {
		// There is no sense in merging parts with too small m,
		// since this leads to high disk write IO.
		return dst
	}
	return append(dst, pws...)
}
|
|
|
|
|
2022-12-06 00:15:00 +01:00
|
|
|
func sortPartsForOptimalMerge(pws []*partWrapper) {
|
|
|
|
// Sort src parts by size and backwards timestamp.
|
|
|
|
// This should improve adjanced points' locality in the merged parts.
|
|
|
|
sort.Slice(pws, func(i, j int) bool {
|
|
|
|
a := pws[i].p
|
|
|
|
b := pws[j].p
|
|
|
|
if a.size == b.size {
|
|
|
|
return a.ph.MinTimestamp > b.ph.MinTimestamp
|
|
|
|
}
|
|
|
|
return a.size < b.size
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2021-08-25 08:35:03 +02:00
|
|
|
func getPartsSize(pws []*partWrapper) uint64 {
|
2020-12-18 22:14:35 +01:00
|
|
|
n := uint64(0)
|
|
|
|
for _, pw := range pws {
|
2021-08-25 08:35:03 +02:00
|
|
|
n += pw.p.size
|
2020-12-18 22:14:35 +01:00
|
|
|
}
|
|
|
|
return n
|
|
|
|
}
|
|
|
|
|
2023-04-15 07:08:43 +02:00
|
|
|
// mustOpenParts opens the file-based parts listed in partNames under path
// and returns their wrappers with refCount=1.
//
// Directories under path, which aren't listed in partNames, are removed -
// they may be left after unclean shutdown or after an upgrade from versions
// prior to v1.90.0. A part listed in partNames but missing on disk is a
// fatal, non-recoverable state.
func mustOpenParts(path string, partNames []string) []*partWrapper {
	// The path can be missing after restoring from backup, so create it if needed.
	fs.MustMkdirIfNotExist(path)
	fs.MustRemoveTemporaryDirs(path)

	// Remove txn and tmp directories, which may be left after the upgrade
	// to v1.90.0 and newer versions.
	fs.MustRemoveAll(filepath.Join(path, "txn"))
	fs.MustRemoveAll(filepath.Join(path, "tmp"))

	// Remove dirs missing in partNames. These dirs may be left after unclean shutdown
	// or after the update from versions prior to v1.90.0.
	des := fs.MustReadDir(path)
	m := make(map[string]struct{}, len(partNames))
	for _, partName := range partNames {
		// Make sure the partName exists on disk.
		// If it is missing, then manual action from the user is needed,
		// since this is unexpected state, which cannot occur under normal operation,
		// including unclean shutdown.
		partPath := filepath.Join(path, partName)
		if !fs.IsPathExist(partPath) {
			partsFile := filepath.Join(path, partsFilename)
			logger.Panicf("FATAL: part %q is listed in %q, but is missing on disk; "+
				"ensure %q contents is not corrupted; remove %q to rebuild its' content from the list of existing parts",
				partPath, partsFile, partsFile, partsFile)
		}

		m[partName] = struct{}{}
	}
	for _, de := range des {
		if !fs.IsDirOrSymlink(de) {
			// Skip non-directories.
			continue
		}
		fn := de.Name()
		if _, ok := m[fn]; !ok {
			// The directory isn't listed in partNames - remove it.
			deletePath := filepath.Join(path, fn)
			fs.MustRemoveAll(deletePath)
		}
	}
	// Persist the removals, so they survive a possible crash.
	fs.MustSyncPath(path)

	// Open parts
	var pws []*partWrapper
	for _, partName := range partNames {
		partPath := filepath.Join(path, partName)
		p := mustOpenFilePart(partPath)
		pw := &partWrapper{
			p:        p,
			refCount: 1,
		}
		pws = append(pws, pw)
	}

	return pws
}
|
|
|
|
|
2023-04-14 08:02:55 +02:00
|
|
|
// MustCreateSnapshotAt creates pt snapshot at the given smallPath and bigPath dirs.
//
// Snapshot is created using linux hard links, so it is usually created very quickly.
func (pt *partition) MustCreateSnapshotAt(smallPath, bigPath string) {
	logger.Infof("creating partition snapshot of %q and %q...", pt.smallPartsPath, pt.bigPartsPath)
	startTime := time.Now()

	// Flush inmemory data to disk.
	pt.flushInmemoryRows()

	// Grab references to the current file-based parts, so they cannot be
	// deleted while the snapshot is being created.
	pt.partsLock.Lock()
	incRefForParts(pt.smallParts)
	pwsSmall := append([]*partWrapper{}, pt.smallParts...)
	incRefForParts(pt.bigParts)
	pwsBig := append([]*partWrapper{}, pt.bigParts...)
	pt.partsLock.Unlock()

	// Release the references once the snapshot is created.
	defer func() {
		pt.PutParts(pwsSmall)
		pt.PutParts(pwsBig)
	}()

	fs.MustMkdirFailIfExist(smallPath)
	fs.MustMkdirFailIfExist(bigPath)

	// Create a file with part names at smallPath
	mustWritePartNames(pwsSmall, pwsBig, smallPath)

	pt.mustCreateSnapshot(pt.smallPartsPath, smallPath, pwsSmall)
	pt.mustCreateSnapshot(pt.bigPartsPath, bigPath, pwsBig)

	logger.Infof("created partition snapshot of %q and %q at %q and %q in %.3f seconds",
		pt.smallPartsPath, pt.bigPartsPath, smallPath, bigPath, time.Since(startTime).Seconds())
}
|
|
|
|
|
2023-04-14 08:02:55 +02:00
|
|
|
// mustCreateSnapshot creates a snapshot from srcDir to dstDir.
|
|
|
|
func (pt *partition) mustCreateSnapshot(srcDir, dstDir string, pws []*partWrapper) {
|
2023-03-19 09:36:05 +01:00
|
|
|
// Make hardlinks for pws at dstDir
|
|
|
|
for _, pw := range pws {
|
|
|
|
srcPartPath := pw.p.path
|
2023-03-25 22:33:54 +01:00
|
|
|
dstPartPath := filepath.Join(dstDir, filepath.Base(srcPartPath))
|
2023-04-14 07:48:05 +02:00
|
|
|
fs.MustHardLinkFiles(srcPartPath, dstPartPath)
|
2019-05-22 23:16:55 +02:00
|
|
|
}
|
|
|
|
|
2023-03-25 22:33:54 +01:00
|
|
|
// Copy the appliedRetentionFilename to dstDir.
|
2023-03-19 09:36:05 +01:00
|
|
|
// This file can be created by VictoriaMetrics enterprise.
|
|
|
|
// See https://docs.victoriametrics.com/#retention-filters .
|
|
|
|
// Do not make hard link to this file, since it can be modified over time.
|
2023-03-25 22:33:54 +01:00
|
|
|
srcPath := filepath.Join(srcDir, appliedRetentionFilename)
|
2023-03-19 09:36:05 +01:00
|
|
|
if fs.IsPathExist(srcPath) {
|
2023-03-25 22:33:54 +01:00
|
|
|
dstPath := filepath.Join(dstDir, filepath.Base(srcPath))
|
2023-04-14 08:02:55 +02:00
|
|
|
fs.MustCopyFile(srcPath, dstPath)
|
2019-05-22 23:16:55 +02:00
|
|
|
}
|
|
|
|
|
2023-03-19 09:36:05 +01:00
|
|
|
fs.MustSyncPath(dstDir)
|
|
|
|
parentDir := filepath.Dir(dstDir)
|
|
|
|
fs.MustSyncPath(parentDir)
|
2019-05-22 23:16:55 +02:00
|
|
|
}
|
|
|
|
|
2023-03-19 09:36:05 +01:00
|
|
|
// partNamesJSON is the JSON format of the partsFilename file, which holds
// the authoritative list of file-based part names in the partition.
type partNamesJSON struct {
	// Small holds the names of small parts.
	Small []string
	// Big holds the names of big parts.
	Big []string
}
|
2019-05-22 23:16:55 +02:00
|
|
|
|
2023-03-19 09:36:05 +01:00
|
|
|
func mustWritePartNames(pwsSmall, pwsBig []*partWrapper, dstDir string) {
|
|
|
|
partNamesSmall := getPartNames(pwsSmall)
|
|
|
|
partNamesBig := getPartNames(pwsBig)
|
|
|
|
partNames := &partNamesJSON{
|
|
|
|
Small: partNamesSmall,
|
|
|
|
Big: partNamesBig,
|
2019-05-22 23:16:55 +02:00
|
|
|
}
|
2023-03-19 09:36:05 +01:00
|
|
|
data, err := json.Marshal(partNames)
|
|
|
|
if err != nil {
|
|
|
|
logger.Panicf("BUG: cannot marshal partNames to JSON: %s", err)
|
2019-05-22 23:16:55 +02:00
|
|
|
}
|
2023-03-25 22:33:54 +01:00
|
|
|
partNamesPath := filepath.Join(dstDir, partsFilename)
|
2023-04-14 07:41:12 +02:00
|
|
|
fs.MustWriteAtomic(partNamesPath, data, true)
|
2023-03-19 09:36:05 +01:00
|
|
|
}
|
2019-05-22 23:16:55 +02:00
|
|
|
|
2023-03-19 09:36:05 +01:00
|
|
|
func getPartNames(pws []*partWrapper) []string {
|
|
|
|
partNames := make([]string, 0, len(pws))
|
|
|
|
for _, pw := range pws {
|
|
|
|
if pw.mp != nil {
|
|
|
|
// Skip in-memory parts
|
|
|
|
continue
|
2019-05-22 23:16:55 +02:00
|
|
|
}
|
2023-03-19 09:36:05 +01:00
|
|
|
partName := filepath.Base(pw.p.path)
|
|
|
|
partNames = append(partNames, partName)
|
2019-05-22 23:16:55 +02:00
|
|
|
}
|
2023-03-19 09:36:05 +01:00
|
|
|
sort.Strings(partNames)
|
|
|
|
return partNames
|
2019-05-22 23:16:55 +02:00
|
|
|
}
|
|
|
|
|
2023-03-19 09:36:05 +01:00
|
|
|
func mustReadPartNames(smallPartsPath, bigPartsPath string) ([]string, []string) {
|
2023-03-25 22:33:54 +01:00
|
|
|
partNamesPath := filepath.Join(smallPartsPath, partsFilename)
|
2023-04-15 08:16:26 +02:00
|
|
|
if fs.IsPathExist(partNamesPath) {
|
|
|
|
data, err := os.ReadFile(partNamesPath)
|
|
|
|
if err != nil {
|
|
|
|
logger.Panicf("FATAL: cannot read %s file: %s", partsFilename, err)
|
|
|
|
}
|
2023-03-19 09:36:05 +01:00
|
|
|
var partNames partNamesJSON
|
|
|
|
if err := json.Unmarshal(data, &partNames); err != nil {
|
|
|
|
logger.Panicf("FATAL: cannot parse %s: %s", partNamesPath, err)
|
|
|
|
}
|
|
|
|
return partNames.Small, partNames.Big
|
2019-05-22 23:16:55 +02:00
|
|
|
}
|
2023-03-25 22:33:54 +01:00
|
|
|
// The partsFilename is missing. This is the upgrade from versions previous to v1.90.0.
|
2023-03-19 09:36:05 +01:00
|
|
|
// Read part names from smallPartsPath and bigPartsPath directories
|
|
|
|
partNamesSmall := mustReadPartNamesFromDir(smallPartsPath)
|
|
|
|
partNamesBig := mustReadPartNamesFromDir(bigPartsPath)
|
|
|
|
return partNamesSmall, partNamesBig
|
|
|
|
}
|
2019-05-22 23:16:55 +02:00
|
|
|
|
2023-03-19 09:36:05 +01:00
|
|
|
func mustReadPartNamesFromDir(srcDir string) []string {
|
2023-04-15 07:08:43 +02:00
|
|
|
if !fs.IsPathExist(srcDir) {
|
|
|
|
return nil
|
2019-05-22 23:16:55 +02:00
|
|
|
}
|
2023-04-15 07:08:43 +02:00
|
|
|
des := fs.MustReadDir(srcDir)
|
2023-03-19 09:36:05 +01:00
|
|
|
var partNames []string
|
|
|
|
for _, de := range des {
|
|
|
|
if !fs.IsDirOrSymlink(de) {
|
|
|
|
// Skip non-directories.
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
partName := de.Name()
|
|
|
|
if isSpecialDir(partName) {
|
|
|
|
// Skip special dirs.
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
partNames = append(partNames, partName)
|
2019-05-22 23:16:55 +02:00
|
|
|
}
|
2023-03-19 09:36:05 +01:00
|
|
|
return partNames
|
2019-05-22 23:16:55 +02:00
|
|
|
}
|
|
|
|
|
2023-03-19 09:36:05 +01:00
|
|
|
func isSpecialDir(name string) bool {
|
2023-03-25 22:33:54 +01:00
|
|
|
return name == "tmp" || name == "txn" || name == snapshotsDirname || fs.IsScheduledForRemoval(name)
|
2019-05-22 23:16:55 +02:00
|
|
|
}
|