mirror of
https://github.com/VictoriaMetrics/VictoriaMetrics.git
synced 2024-12-15 16:30:55 +01:00
lib/storage: try generating initial parts from in-memory rows with identical sizes under high ingestion rate
This should slightly improve the background merge rate under high load.
This commit is contained in:
parent
4d71023eb9
commit
edf3b7be47
@@ -479,14 +479,24 @@ func (rrs *rawRowsShard) addRows(pt *partition, rows []rawRow) {
|
||||
maxRowsCount := cap(rrs.rows)
|
||||
capacity := maxRowsCount - len(rrs.rows)
|
||||
if capacity >= len(rows) {
|
||||
// Fast path - rows fit capacity.
|
||||
// Fast path - rows fit rrs.rows capacity.
|
||||
rrs.rows = append(rrs.rows, rows...)
|
||||
} else {
|
||||
// Slow path - rows don't fit capacity.
|
||||
// Put rrs.rows and rows to rowsToFlush and convert it to a part.
|
||||
rowsToFlush = append(rowsToFlush, rrs.rows...)
|
||||
rowsToFlush = append(rowsToFlush, rows...)
|
||||
rrs.rows = rrs.rows[:0]
|
||||
// Slow path - rows don't fit rrs.rows capacity.
|
||||
// Fill rrs.rows with rows until capacity,
|
||||
// then put rrs.rows to rowsToFlush and convert it to a part.
|
||||
n := copy(rrs.rows[:cap(rrs.rows)], rows)
|
||||
rows = rows[n:]
|
||||
rowsToFlush = rrs.rows
|
||||
n = getMaxRawRowsPerShard()
|
||||
rrs.rows = make([]rawRow, 0, n)
|
||||
if len(rows) <= n {
|
||||
rrs.rows = append(rrs.rows[:0], rows...)
|
||||
} else {
|
||||
// The slowest path - rows do not fit rrs.rows capacity.
|
||||
// So append them directly to rowsToFlush.
|
||||
rowsToFlush = append(rowsToFlush, rows...)
|
||||
}
|
||||
atomic.StoreUint64(&rrs.lastFlushTime, fasttime.UnixTimestamp())
|
||||
}
|
||||
rrs.mu.Unlock()
|
||||
|
Loading…
Reference in New Issue
Block a user