mirror of
https://github.com/VictoriaMetrics/VictoriaMetrics.git
synced 2024-12-15 08:23:34 +01:00
lib/storage: fix inmemory inverted index issues found in v1.29
Issues fixed: - Slow startup times. Now the index is loaded from cache during start. - High memory usage related to superfluous index copies every 10 seconds.
This commit is contained in:
parent
87b39222be
commit
f1620ba7c0
@ -355,6 +355,9 @@ func registerStorageMetrics(strg *storage.Storage) {
|
||||
metrics.NewGauge(`vm_recent_hour_inverted_index_entries`, func() float64 {
|
||||
return float64(m().RecentHourInvertedIndexSize)
|
||||
})
|
||||
metrics.NewGauge(`vm_recent_hour_inverted_index_size_bytes`, func() float64 {
|
||||
return float64(m().RecentHourInvertedIndexSizeBytes)
|
||||
})
|
||||
metrics.NewGauge(`vm_recent_hour_inverted_index_unique_tag_pairs`, func() float64 {
|
||||
return float64(m().RecentHourInvertedIndexUniqueTagPairsSize)
|
||||
})
|
||||
|
@ -5,7 +5,9 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"sync"
|
||||
"unsafe"
|
||||
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
|
||||
"github.com/VictoriaMetrics/VictoriaMetrics/lib/uint64set"
|
||||
)
|
||||
@ -16,6 +18,121 @@ type inmemoryInvertedIndex struct {
|
||||
pendingEntries []pendingHourMetricIDEntry
|
||||
}
|
||||
|
||||
func (iidx *inmemoryInvertedIndex) Marshal(dst []byte) []byte {
|
||||
iidx.mu.RLock()
|
||||
defer iidx.mu.RUnlock()
|
||||
|
||||
// Marshal iidx.m
|
||||
var metricIDs []uint64
|
||||
dst = encoding.MarshalUint64(dst, uint64(len(iidx.m)))
|
||||
for k, v := range iidx.m {
|
||||
dst = encoding.MarshalBytes(dst, []byte(k))
|
||||
metricIDs = v.AppendTo(metricIDs[:0])
|
||||
dst = marshalMetricIDs(dst, metricIDs)
|
||||
}
|
||||
|
||||
// Marshal iidx.pendingEntries
|
||||
dst = encoding.MarshalUint64(dst, uint64(len(iidx.pendingEntries)))
|
||||
for _, e := range iidx.pendingEntries {
|
||||
dst = encoding.MarshalUint32(dst, e.AccountID)
|
||||
dst = encoding.MarshalUint32(dst, e.ProjectID)
|
||||
dst = encoding.MarshalUint64(dst, e.MetricID)
|
||||
}
|
||||
|
||||
return dst
|
||||
}
|
||||
|
||||
// Unmarshal restores the index contents from src, which must have been
// produced by Marshal. It returns the unread tail of src and an error if
// the data is truncated or malformed.
//
// iidx.m is fully replaced, while decoded pending entries are appended to
// iidx.pendingEntries.
// NOTE(review): pendingEntries isn't reset before appending — this assumes
// the receiver is a freshly created index; confirm at call sites.
func (iidx *inmemoryInvertedIndex) Unmarshal(src []byte) ([]byte, error) {
	iidx.mu.Lock()
	defer iidx.mu.Unlock()

	// Unmarshal iidx.m: a uint64 count followed by (key, metricIDs) pairs.
	if len(src) < 8 {
		return src, fmt.Errorf("cannot read len(iidx.m) from %d bytes; want at least 8 bytes", len(src))
	}
	mLen := int(encoding.UnmarshalUint64(src))
	src = src[8:]
	m := make(map[string]*uint64set.Set, mLen)
	// metricIDs is reused across iterations to reduce allocations.
	var metricIDs []uint64
	for i := 0; i < mLen; i++ {
		tail, k, err := encoding.UnmarshalBytes(src)
		if err != nil {
			return tail, fmt.Errorf("cannot unmarshal key #%d for iidx.m: %s", i, err)
		}
		src = tail
		tail, metricIDs, err = unmarshalMetricIDs(metricIDs[:0], src)
		if err != nil {
			return tail, fmt.Errorf("cannot unmarshal value #%d for iidx.m: %s", i, err)
		}
		src = tail
		// Rebuild the set from the decoded ids.
		var v uint64set.Set
		for _, metricID := range metricIDs {
			v.Add(metricID)
		}
		m[string(k)] = &v
	}
	iidx.m = m

	// Unmarshal iidx.pendingEntries: a uint64 count followed by fixed-width
	// (AccountID uint32, ProjectID uint32, MetricID uint64) records.
	if len(src) < 8 {
		return src, fmt.Errorf("cannot unmarshal pendingEntriesLen from %d bytes; want at least %d bytes", len(src), 8)
	}
	pendingEntriesLen := int(encoding.UnmarshalUint64(src))
	src = src[8:]
	// Each entry occupies exactly 16 bytes (4+4+8); validate the total up front
	// so the decode loop below needs no per-field bounds checks.
	if len(src) < pendingEntriesLen*16 {
		return src, fmt.Errorf("cannot unmarshal %d pending entries from %d bytes; want at least %d bytes", pendingEntriesLen, len(src), pendingEntriesLen*16)
	}
	for i := 0; i < pendingEntriesLen; i++ {
		var e pendingHourMetricIDEntry
		e.AccountID = encoding.UnmarshalUint32(src)
		src = src[4:]
		e.ProjectID = encoding.UnmarshalUint32(src)
		src = src[4:]
		e.MetricID = encoding.UnmarshalUint64(src)
		src = src[8:]
		iidx.pendingEntries = append(iidx.pendingEntries, e)
	}

	return src, nil
}
|
||||
|
||||
func marshalMetricIDs(dst []byte, metricIDs []uint64) []byte {
|
||||
dst = encoding.MarshalUint64(dst, uint64(len(metricIDs)))
|
||||
for _, metricID := range metricIDs {
|
||||
dst = encoding.MarshalUint64(dst, metricID)
|
||||
}
|
||||
return dst
|
||||
}
|
||||
|
||||
func unmarshalMetricIDs(dst []uint64, src []byte) ([]byte, []uint64, error) {
|
||||
if len(src) < 8 {
|
||||
return src, dst, fmt.Errorf("cannot unmarshal metricIDs len from %d bytes; want at least 8 bytes", len(src))
|
||||
}
|
||||
metricIDsLen := int(encoding.UnmarshalUint64(src))
|
||||
src = src[8:]
|
||||
if len(src) < 8*metricIDsLen {
|
||||
return src, dst, fmt.Errorf("not enough bytes for unmarshaling %d metricIDs; want %d bytes; got %d bytes", metricIDsLen, 8*metricIDsLen, len(src))
|
||||
}
|
||||
for i := 0; i < metricIDsLen; i++ {
|
||||
metricID := encoding.UnmarshalUint64(src)
|
||||
src = src[8:]
|
||||
dst = append(dst, metricID)
|
||||
}
|
||||
return src, dst, nil
|
||||
}
|
||||
|
||||
func (iidx *inmemoryInvertedIndex) SizeBytes() uint64 {
|
||||
n := uint64(0)
|
||||
iidx.mu.RLock()
|
||||
for k, v := range iidx.m {
|
||||
n += uint64(len(k))
|
||||
n += v.SizeBytes()
|
||||
}
|
||||
n += uint64(len(iidx.pendingEntries)) * uint64(unsafe.Sizeof(pendingHourMetricIDEntry{}))
|
||||
iidx.mu.RUnlock()
|
||||
return n
|
||||
}
|
||||
|
||||
func (iidx *inmemoryInvertedIndex) GetUniqueTagPairsLen() int {
|
||||
if iidx == nil {
|
||||
return 0
|
||||
@ -55,23 +172,6 @@ func newInmemoryInvertedIndex() *inmemoryInvertedIndex {
|
||||
}
|
||||
}
|
||||
|
||||
func (iidx *inmemoryInvertedIndex) Clone() *inmemoryInvertedIndex {
|
||||
if iidx == nil {
|
||||
return newInmemoryInvertedIndex()
|
||||
}
|
||||
iidx.mu.RLock()
|
||||
mCopy := make(map[string]*uint64set.Set, len(iidx.m))
|
||||
for k, v := range iidx.m {
|
||||
mCopy[k] = v.Clone()
|
||||
}
|
||||
pendingEntries := append([]pendingHourMetricIDEntry{}, iidx.pendingEntries...)
|
||||
iidx.mu.RUnlock()
|
||||
return &inmemoryInvertedIndex{
|
||||
m: mCopy,
|
||||
pendingEntries: pendingEntries,
|
||||
}
|
||||
}
|
||||
|
||||
func (iidx *inmemoryInvertedIndex) MustUpdate(idb *indexDB, byTenant map[accountProjectKey]*uint64set.Set) {
|
||||
var entries []pendingHourMetricIDEntry
|
||||
var metricIDs []uint64
|
||||
|
47
lib/storage/inmemory_inverted_index_test.go
Normal file
47
lib/storage/inmemory_inverted_index_test.go
Normal file
@ -0,0 +1,47 @@
|
||||
package storage
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestInmemoryInvertedIndexMarshalUnmarshal(t *testing.T) {
|
||||
iidx := newInmemoryInvertedIndex()
|
||||
const keysCount = 100
|
||||
const metricIDsCount = 10000
|
||||
for i := 0; i < metricIDsCount; i++ {
|
||||
k := fmt.Sprintf("key %d", i%keysCount)
|
||||
iidx.addMetricIDLocked([]byte(k), uint64(i))
|
||||
}
|
||||
for i := 0; i < 10; i++ {
|
||||
var e pendingHourMetricIDEntry
|
||||
e.AccountID = uint32(i)
|
||||
e.ProjectID = uint32(i + 324)
|
||||
e.MetricID = uint64(i * 43)
|
||||
iidx.pendingEntries = append(iidx.pendingEntries, e)
|
||||
}
|
||||
|
||||
data := iidx.Marshal(nil)
|
||||
|
||||
iidx2 := newInmemoryInvertedIndex()
|
||||
tail, err := iidx2.Unmarshal(data)
|
||||
if err != nil {
|
||||
t.Fatalf("cannot unmarshal iidx: %s", err)
|
||||
}
|
||||
if len(tail) != 0 {
|
||||
t.Fatalf("unexpected tail left after iidx unmarshaling: %d bytes", len(tail))
|
||||
}
|
||||
if len(iidx.m) != len(iidx2.m) {
|
||||
t.Fatalf("unexpected len(iidx2.m); got %d; want %d", len(iidx2.m), len(iidx.m))
|
||||
}
|
||||
if !reflect.DeepEqual(iidx.pendingEntries, iidx2.pendingEntries) {
|
||||
t.Fatalf("unexpected pendingMetricIDs; got\n%v;\nwant\n%v", iidx2.pendingEntries, iidx.pendingEntries)
|
||||
}
|
||||
for k, v := range iidx.m {
|
||||
v2 := iidx2.m[k]
|
||||
if !v.Equal(v2) {
|
||||
t.Fatalf("unexpected set for key %q", k)
|
||||
}
|
||||
}
|
||||
}
|
@ -150,13 +150,6 @@ func OpenStorage(path string, retentionMonths int) (*Storage, error) {
|
||||
idbCurr.SetExtDB(idbPrev)
|
||||
s.idbCurr.Store(idbCurr)
|
||||
|
||||
// Initialize iidx. hmCurr and hmPrev shouldn't be used till now,
|
||||
// so it should be safe initializing it inplace.
|
||||
hmPrev.iidx = newInmemoryInvertedIndex()
|
||||
hmPrev.iidx.MustUpdate(s.idb(), hmPrev.byTenant)
|
||||
hmCurr.iidx = newInmemoryInvertedIndex()
|
||||
hmCurr.iidx.MustUpdate(s.idb(), hmCurr.byTenant)
|
||||
|
||||
// Load data
|
||||
tablePath := path + "/data"
|
||||
tb, err := openTable(tablePath, retentionMonths, s.getDeletedMetricIDs)
|
||||
@ -329,6 +322,7 @@ type Metrics struct {
|
||||
HourMetricIDCacheSize uint64
|
||||
|
||||
RecentHourInvertedIndexSize uint64
|
||||
RecentHourInvertedIndexSizeBytes uint64
|
||||
RecentHourInvertedIndexUniqueTagPairsSize uint64
|
||||
RecentHourInvertedIndexPendingMetricIDsSize uint64
|
||||
|
||||
@ -391,6 +385,9 @@ func (s *Storage) UpdateMetrics(m *Metrics) {
|
||||
m.RecentHourInvertedIndexSize += uint64(hmPrev.iidx.GetEntriesCount())
|
||||
m.RecentHourInvertedIndexSize += uint64(hmCurr.iidx.GetEntriesCount())
|
||||
|
||||
m.RecentHourInvertedIndexSizeBytes += hmPrev.iidx.SizeBytes()
|
||||
m.RecentHourInvertedIndexSizeBytes += hmCurr.iidx.SizeBytes()
|
||||
|
||||
m.RecentHourInvertedIndexUniqueTagPairsSize += uint64(hmPrev.iidx.GetUniqueTagPairsLen())
|
||||
m.RecentHourInvertedIndexUniqueTagPairsSize += uint64(hmCurr.iidx.GetUniqueTagPairsLen())
|
||||
|
||||
@ -511,6 +508,7 @@ func (s *Storage) mustLoadHourMetricIDs(hour uint64, name string) *hourMetricIDs
|
||||
if !fs.IsPathExist(path) {
|
||||
logger.Infof("nothing to load from %q", path)
|
||||
return &hourMetricIDs{
|
||||
iidx: newInmemoryInvertedIndex(),
|
||||
hour: hour,
|
||||
}
|
||||
}
|
||||
@ -522,6 +520,7 @@ func (s *Storage) mustLoadHourMetricIDs(hour uint64, name string) *hourMetricIDs
|
||||
if len(src) < 24 {
|
||||
logger.Errorf("discarding %s, since it has broken header; got %d bytes; want %d bytes", path, len(src), 24)
|
||||
return &hourMetricIDs{
|
||||
iidx: newInmemoryInvertedIndex(),
|
||||
hour: hour,
|
||||
}
|
||||
}
|
||||
@ -534,6 +533,7 @@ func (s *Storage) mustLoadHourMetricIDs(hour uint64, name string) *hourMetricIDs
|
||||
if hourLoaded != hour {
|
||||
logger.Infof("discarding %s, since it contains outdated hour; got %d; want %d", name, hourLoaded, hour)
|
||||
return &hourMetricIDs{
|
||||
iidx: newInmemoryInvertedIndex(),
|
||||
hour: hour,
|
||||
}
|
||||
}
|
||||
@ -542,8 +542,9 @@ func (s *Storage) mustLoadHourMetricIDs(hour uint64, name string) *hourMetricIDs
|
||||
hmLen := encoding.UnmarshalUint64(src)
|
||||
src = src[8:]
|
||||
if uint64(len(src)) < 8*hmLen {
|
||||
logger.Errorf("discarding %s, since it has broken hm.m data; got %d bytes; want %d bytes", path, len(src), 8*hmLen)
|
||||
logger.Errorf("discarding %s, since it has broken hm.m data; got %d bytes; want at least %d bytes", path, len(src), 8*hmLen)
|
||||
return &hourMetricIDs{
|
||||
iidx: newInmemoryInvertedIndex(),
|
||||
hour: hour,
|
||||
}
|
||||
}
|
||||
@ -590,9 +591,28 @@ func (s *Storage) mustLoadHourMetricIDs(hour uint64, name string) *hourMetricIDs
|
||||
byTenant[k] = m
|
||||
}
|
||||
|
||||
// Unmarshal hm.iidx
|
||||
iidx := newInmemoryInvertedIndex()
|
||||
tail, err := iidx.Unmarshal(src)
|
||||
if err != nil {
|
||||
logger.Errorf("discarding %s, since it has broken hm.iidx data: %s", path, err)
|
||||
return &hourMetricIDs{
|
||||
iidx: newInmemoryInvertedIndex(),
|
||||
hour: hour,
|
||||
}
|
||||
}
|
||||
if len(tail) > 0 {
|
||||
logger.Errorf("discarding %s, since it contains superflouos %d bytes of data", path, len(tail))
|
||||
return &hourMetricIDs{
|
||||
iidx: newInmemoryInvertedIndex(),
|
||||
hour: hour,
|
||||
}
|
||||
}
|
||||
|
||||
logger.Infof("loaded %s from %q in %s; entriesCount: %d; sizeBytes: %d", name, path, time.Since(startTime), hmLen, srcOrigLen)
|
||||
return &hourMetricIDs{
|
||||
m: m,
|
||||
iidx: iidx,
|
||||
byTenant: byTenant,
|
||||
hour: hourLoaded,
|
||||
isFull: isFull != 0,
|
||||
@ -631,6 +651,10 @@ func (s *Storage) mustSaveHourMetricIDs(hm *hourMetricIDs, name string) {
|
||||
dst = encoding.MarshalUint64(dst, metricID)
|
||||
}
|
||||
}
|
||||
|
||||
// Marshal hm.iidx
|
||||
dst = hm.iidx.Marshal(dst)
|
||||
|
||||
if err := ioutil.WriteFile(path, dst, 0644); err != nil {
|
||||
logger.Panicf("FATAL: cannot write %d bytes to %q: %s", len(dst), path, err)
|
||||
}
|
||||
@ -1000,7 +1024,7 @@ func (s *Storage) updatePerDateData(rows []rawRow, lastError error) error {
|
||||
continue
|
||||
}
|
||||
|
||||
// Slow path: store the entry (date, metricID) entry in the indexDB.
|
||||
// Slow path: store the (date, metricID) entry in the indexDB.
|
||||
// It is OK if the (date, metricID) entry is added multiple times to db
|
||||
// by concurrent goroutines.
|
||||
if err := idb.storeDateMetricID(date, metricID, r.TSID.AccountID, r.TSID.ProjectID); err != nil {
|
||||
@ -1174,7 +1198,7 @@ func (s *Storage) updateCurrHourMetricIDs() {
|
||||
isFull := hm.isFull
|
||||
if hm.hour == hour {
|
||||
m = hm.m.Clone()
|
||||
iidx = hm.iidx.Clone()
|
||||
iidx = hm.iidx
|
||||
byTenant = make(map[accountProjectKey]*uint64set.Set, len(hm.byTenant))
|
||||
for k, e := range hm.byTenant {
|
||||
byTenant[k] = e.Clone()
|
||||
|
@ -3,6 +3,7 @@ package uint64set
|
||||
import (
|
||||
"math/bits"
|
||||
"sort"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// Set is a fast set for uint64.
|
||||
@ -47,6 +48,19 @@ func (s *Set) Clone() *Set {
|
||||
return &dst
|
||||
}
|
||||
|
||||
// SizeBytes returns an estimate size of s in RAM.
//
// It is safe to call on a nil Set, which reports 0.
func (s *Set) SizeBytes() uint64 {
	if s == nil {
		return 0
	}
	// Start with the Set header itself.
	n := uint64(unsafe.Sizeof(*s))
	for _, b := range s.buckets {
		// Account for the slice element plus the bucket's own estimate.
		// NOTE(review): if s.buckets stores bucket values (not pointers),
		// unsafe.Sizeof(b) duplicates part of b.sizeBytes() — confirm the
		// bucket slice's element type.
		n += uint64(unsafe.Sizeof(b))
		n += b.sizeBytes()
	}
	return n
}
|
||||
|
||||
// Len returns the number of distinct uint64 values in s.
|
||||
func (s *Set) Len() int {
|
||||
if s == nil {
|
||||
@ -259,6 +273,16 @@ type bucket32 struct {
|
||||
smallPool [14]uint32
|
||||
}
|
||||
|
||||
func (b *bucket32) sizeBytes() uint64 {
|
||||
n := uint64(unsafe.Sizeof(*b))
|
||||
n += 2 * uint64(len(b.b16his))
|
||||
for _, b := range b.buckets {
|
||||
n += uint64(unsafe.Sizeof(b))
|
||||
n += b.sizeBytes()
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (b *bucket32) clone() *bucket32 {
|
||||
var dst bucket32
|
||||
dst.skipSmallPool = b.skipSmallPool
|
||||
@ -463,6 +487,10 @@ type bucket16 struct {
|
||||
bits [wordsPerBucket]uint64
|
||||
}
|
||||
|
||||
func (b *bucket16) sizeBytes() uint64 {
|
||||
return uint64(unsafe.Sizeof(*b))
|
||||
}
|
||||
|
||||
func (b *bucket16) clone() *bucket16 {
|
||||
var dst bucket16
|
||||
copy(dst.bits[:], b.bits[:])
|
||||
|
@ -25,6 +25,9 @@ func testSetBasicOps(t *testing.T, itemsCount int) {
|
||||
// Verify operations on nil set
|
||||
{
|
||||
var sNil *Set
|
||||
if n := sNil.SizeBytes(); n != 0 {
|
||||
t.Fatalf("sNil.SizeBytes must return 0; got %d", n)
|
||||
}
|
||||
if sNil.Has(123) {
|
||||
t.Fatalf("sNil shouldn't contain any item; found 123")
|
||||
}
|
||||
@ -73,6 +76,9 @@ func testSetBasicOps(t *testing.T, itemsCount int) {
|
||||
if n := s.Len(); n != itemsCount/2 {
|
||||
t.Fatalf("unexpected s.Len() after forward Add; got %d; want %d", n, itemsCount/2)
|
||||
}
|
||||
if n := s.SizeBytes(); n == 0 {
|
||||
t.Fatalf("s.SizeBytes() must be greater than 0")
|
||||
}
|
||||
|
||||
// Verify backward Add
|
||||
for i := 0; i < itemsCount/2; i++ {
|
||||
@ -307,6 +313,9 @@ func testSetSparseItems(t *testing.T, itemsCount int) {
|
||||
if n := s.Len(); n != len(m) {
|
||||
t.Fatalf("unexpected Len(); got %d; want %d", n, len(m))
|
||||
}
|
||||
if n := s.SizeBytes(); n == 0 {
|
||||
t.Fatalf("SizeBytes() must return value greater than 0")
|
||||
}
|
||||
|
||||
// Check Has
|
||||
for x := range m {
|
||||
|
Loading…
Reference in New Issue
Block a user