app/vmstorage: export missing vm_cache_size_bytes metrics for indexdb and data caches

parent 62574c478a
commit e8ee9fa7fe
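
The gauges added below follow the callback style of the github.com/VictoriaMetrics/metrics package that vmstorage already uses: the closure is evaluated on every scrape, so the exported value always reflects the current cache size. A minimal, self-contained sketch of that pattern; the cache type and its SizeBytes method here are hypothetical stand-ins, not the repository's own types:

package main

import (
    "log"
    "net/http"

    "github.com/VictoriaMetrics/metrics"
)

// fakeCache stands in for an internal cache that can report its size in bytes.
type fakeCache struct{ sizeBytes uint64 }

func (c *fakeCache) SizeBytes() uint64 { return c.sizeBytes }

func main() {
    c := &fakeCache{sizeBytes: 4096}

    // The callback runs on every scrape of /metrics, so no background
    // goroutine is needed to keep the gauge up to date.
    metrics.NewGauge(`vm_cache_size_bytes{type="example/cache"}`, func() float64 {
        return float64(c.SizeBytes())
    })

    http.HandleFunc("/metrics", func(w http.ResponseWriter, r *http.Request) {
        metrics.WritePrometheus(w, false)
    })
    log.Fatal(http.ListenAndServe(":8428", nil))
}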
@@ -526,6 +526,18 @@ func registerStorageMetrics(strg *storage.Storage) {
 	metrics.NewGauge(`vm_cache_size_bytes{type="storage/metricName"}`, func() float64 {
 		return float64(m().MetricNameCacheSizeBytes)
 	})
+	metrics.NewGauge(`vm_cache_size_bytes{type="storage/bigIndexBlocks"}`, func() float64 {
+		return float64(tm().BigIndexBlocksCacheSizeBytes)
+	})
+	metrics.NewGauge(`vm_cache_size_bytes{type="storage/smallIndexBlocks"}`, func() float64 {
+		return float64(tm().SmallIndexBlocksCacheSizeBytes)
+	})
+	metrics.NewGauge(`vm_cache_size_bytes{type="indexdb/dataBlocks"}`, func() float64 {
+		return float64(idbm().DataBlocksCacheSizeBytes)
+	})
+	metrics.NewGauge(`vm_cache_size_bytes{type="indexdb/indexBlocks"}`, func() float64 {
+		return float64(idbm().IndexBlocksCacheSizeBytes)
+	})
 	metrics.NewGauge(`vm_cache_size_bytes{type="storage/date_metricID"}`, func() float64 {
 		return float64(m().DateMetricIDCacheSizeBytes)
 	})
@@ -6,6 +6,7 @@ import (
 	"sort"
 	"strings"
 	"sync"
+	"unsafe"

 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding"
@@ -28,6 +29,10 @@ type inmemoryBlock struct {
 	items byteSliceSorter
 }

+func (ib *inmemoryBlock) SizeBytes() int {
+	return int(unsafe.Sizeof(*ib)) + cap(ib.commonPrefix) + cap(ib.data) + cap(ib.items)*int(unsafe.Sizeof([]byte{}))
+}
+
 func (ib *inmemoryBlock) Reset() {
 	ib.commonPrefix = ib.commonPrefix[:0]
 	ib.data = ib.data[:0]
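
The SizeBytes method added above estimates memory without walking the data: the fixed struct size (unsafe.Sizeof(*ib)) plus the capacities of the backing buffers, where cap(ib.items)*Sizeof([]byte{}) counts only the slice headers stored in items, because the bytes they point at already live in ib.data. A rough standalone illustration of the same accounting, with a hypothetical struct:

package main

import (
    "fmt"
    "unsafe"
)

// block mimics the shape of a buffer-plus-index structure: raw bytes in data,
// and a slice of sub-slices (headers only) pointing into data.
type block struct {
    data  []byte
    items [][]byte
}

// sizeBytes estimates the memory held by b: the struct itself, the capacity of
// the byte buffer, and one slice header per slot in items. The bytes referenced
// by items are not counted again, since they alias data.
func (b *block) sizeBytes() int {
    return int(unsafe.Sizeof(*b)) + cap(b.data) + cap(b.items)*int(unsafe.Sizeof([]byte{}))
}

func main() {
    b := &block{data: make([]byte, 0, 64*1024)}
    for i := 0; i < 100; i++ {
        b.data = append(b.data, "item"...)
        b.items = append(b.items, b.data[len(b.data)-4:])
    }
    fmt.Println("estimated size:", b.sizeBytes(), "bytes")
}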
@@ -6,6 +6,7 @@ import (
 	"sync"
 	"sync/atomic"
 	"time"
+	"unsafe"

 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/filestream"
@@ -136,6 +137,10 @@ type indexBlock struct {
 	bhs []blockHeader
 }

+func (idxb *indexBlock) SizeBytes() int {
+	return cap(idxb.bhs) * int(unsafe.Sizeof(blockHeader{}))
+}
+
 func getIndexBlock() *indexBlock {
 	v := indexBlockPool.Get()
 	if v == nil {
@@ -200,7 +205,7 @@ func (idxbc *indexBlockCache) MustClose() {

 // cleaner periodically cleans least recently used items.
 func (idxbc *indexBlockCache) cleaner() {
-	ticker := time.NewTicker(5 * time.Second)
+	ticker := time.NewTicker(30 * time.Second)
 	defer ticker.Stop()
 	for {
 		select {
@@ -216,8 +221,8 @@ func (idxbc *indexBlockCache) cleanByTimeout() {
 	currentTime := fasttime.UnixTimestamp()
 	idxbc.mu.Lock()
 	for k, idxbe := range idxbc.m {
-		// Delete items accessed more than a minute ago.
-		if currentTime-atomic.LoadUint64(&idxbe.lastAccessTime) > 60 {
+		// Delete items accessed more than two minutes ago.
+		if currentTime-atomic.LoadUint64(&idxbe.lastAccessTime) > 2*60 {
 			delete(idxbc.m, k)
 		}
 	}
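
Together with the ticker change above (5s to 30s), these two hunks describe a simple idle-timeout eviction loop: a background goroutine wakes on each tick and removes entries whose lastAccessTime, kept as an atomically updated Unix timestamp, is more than two minutes old. A hedged sketch of that pattern with generic names; the real code uses fasttime.UnixTimestamp() instead of time.Now() to keep the timestamp cheap:

package main

import (
    "sync"
    "sync/atomic"
    "time"
)

type entry struct {
    value          []byte
    lastAccessTime uint64 // Unix seconds, updated atomically on every lookup.
}

type cache struct {
    mu     sync.Mutex
    m      map[string]*entry
    stopCh chan struct{}
}

// cleaner periodically evicts entries that have not been accessed recently.
func (c *cache) cleaner() {
    ticker := time.NewTicker(30 * time.Second)
    defer ticker.Stop()
    for {
        select {
        case <-ticker.C:
            c.cleanByTimeout()
        case <-c.stopCh:
            return
        }
    }
}

// cleanByTimeout deletes entries accessed more than two minutes ago.
func (c *cache) cleanByTimeout() {
    currentTime := uint64(time.Now().Unix())
    c.mu.Lock()
    for k, e := range c.m {
        if currentTime-atomic.LoadUint64(&e.lastAccessTime) > 2*60 {
            delete(c.m, k)
        }
    }
    c.mu.Unlock()
}

func main() {
    c := &cache{m: map[string]*entry{}, stopCh: make(chan struct{})}
    go c.cleaner()
    // ... use the cache ...
    close(c.stopCh)
}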
@@ -276,6 +281,16 @@ func (idxbc *indexBlockCache) Len() uint64 {
 	return uint64(n)
 }

+func (idxbc *indexBlockCache) SizeBytes() uint64 {
+	n := 0
+	idxbc.mu.RLock()
+	for _, e := range idxbc.m {
+		n += e.idxb.SizeBytes()
+	}
+	idxbc.mu.RUnlock()
+	return uint64(n)
+}
+
 func (idxbc *indexBlockCache) Requests() uint64 {
 	return atomic.LoadUint64(&idxbc.requests)
 }
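
The new SizeBytes method walks the whole map under the read lock, so its cost grows with the number of cached blocks, but it never blocks concurrent lookups that also take only RLock; writers (inserts and the cleaner) still serialize on the write lock. A small self-contained sketch of that read-locked aggregation, with illustrative names:

package main

import (
    "fmt"
    "sync"
)

// rwCache is an illustrative cache guarded by an RWMutex: lookups and size
// accounting take the read lock, mutations take the write lock.
type rwCache struct {
    mu sync.RWMutex
    m  map[string][]byte
}

// sizeBytes sums the size of all cached values. The read lock is sufficient
// because nothing is mutated, so concurrent readers proceed in parallel.
func (c *rwCache) sizeBytes() uint64 {
    n := 0
    c.mu.RLock()
    for _, v := range c.m {
        n += len(v)
    }
    c.mu.RUnlock()
    return uint64(n)
}

func main() {
    c := &rwCache{m: map[string][]byte{"a": make([]byte, 100), "b": make([]byte, 200)}}
    fmt.Println(c.sizeBytes()) // 300
}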
@@ -347,7 +362,7 @@ func (ibc *inmemoryBlockCache) MustClose() {

 // cleaner periodically cleans least recently used items.
 func (ibc *inmemoryBlockCache) cleaner() {
-	ticker := time.NewTicker(5 * time.Second)
+	ticker := time.NewTicker(30 * time.Second)
 	defer ticker.Stop()
 	for {
 		select {
@@ -363,8 +378,10 @@ func (ibc *inmemoryBlockCache) cleanByTimeout() {
 	currentTime := fasttime.UnixTimestamp()
 	ibc.mu.Lock()
 	for k, ibe := range ibc.m {
-		// Delete items accessed more than a minute ago.
-		if currentTime-atomic.LoadUint64(&ibe.lastAccessTime) > 60 {
+		// Delete items accessed more than two minutes ago.
+		if currentTime-atomic.LoadUint64(&ibe.lastAccessTime) > 2*60 {
+			// do not call putInmemoryBlock(ibc.m[k]), since it
+			// may be used by concurrent goroutines.
 			delete(ibc.m, k)
 		}
 	}
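
The new comment in this hunk records a subtle ownership rule: an evicted block must not be returned to its free pool, because a concurrent reader may have fetched it from the cache just before eviction and may still be using it. A hedged sketch of why that would be unsafe, with hypothetical names (blockPool here is not the repository's putInmemoryBlock pool):

package main

import (
    "fmt"
    "sync"
)

type block struct{ data []byte }

var blockPool = sync.Pool{New: func() interface{} { return &block{} }}

func main() {
    // Take a block from the pool and publish it through a cache.
    b := blockPool.Get().(*block)
    b.data = append(b.data[:0], "live value"...)
    cache := map[string]*block{"k": b}

    // A reader grabs the block from the cache and keeps using it.
    reader := cache["k"]

    // The cleaner evicts the entry. Returning b to the pool here would be
    // unsafe: another goroutine could Get() the same block and overwrite
    // b.data while the reader still holds it. So the cleaner only deletes
    // the map entry and lets the garbage collector reclaim the block once
    // the last reference is gone.
    delete(cache, "k")
    // blockPool.Put(b) // intentionally NOT done, see the comment above

    fmt.Println(string(reader.data))
}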
@@ -424,6 +441,16 @@ func (ibc *inmemoryBlockCache) Len() uint64 {
 	return uint64(n)
 }

+func (ibc *inmemoryBlockCache) SizeBytes() uint64 {
+	n := 0
+	ibc.mu.RLock()
+	for _, e := range ibc.m {
+		n += e.ib.SizeBytes()
+	}
+	ibc.mu.RUnlock()
+	return uint64(n)
+}
+
 func (ibc *inmemoryBlockCache) Requests() uint64 {
 	return atomic.LoadUint64(&ibc.requests)
 }
@@ -294,13 +294,15 @@ type TableMetrics struct {
 	ItemsCount uint64
 	SizeBytes  uint64

-	DataBlocksCacheSize     uint64
-	DataBlocksCacheRequests uint64
-	DataBlocksCacheMisses   uint64
+	DataBlocksCacheSize      uint64
+	DataBlocksCacheSizeBytes uint64
+	DataBlocksCacheRequests  uint64
+	DataBlocksCacheMisses    uint64

-	IndexBlocksCacheSize     uint64
-	IndexBlocksCacheRequests uint64
-	IndexBlocksCacheMisses   uint64
+	IndexBlocksCacheSize      uint64
+	IndexBlocksCacheSizeBytes uint64
+	IndexBlocksCacheRequests  uint64
+	IndexBlocksCacheMisses    uint64

 	PartsRefCount uint64
 }
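
The new *CacheSizeBytes fields follow the existing accumulator pattern of TableMetrics: UpdateMetrics (next hunk) adds each part's cache statistics into the struct with +=, and vmstorage then exposes the totals through the gauges registered in the first hunk. A minimal sketch of that accumulate-then-read flow, with hypothetical names:

package main

import "fmt"

// partCache is a stand-in for a per-part cache that can report its stats.
type partCache struct{ length, sizeBytes, requests, misses uint64 }

func (c *partCache) Len() uint64       { return c.length }
func (c *partCache) SizeBytes() uint64 { return c.sizeBytes }
func (c *partCache) Requests() uint64  { return c.requests }
func (c *partCache) Misses() uint64    { return c.misses }

// tableMetrics mirrors the shape of the exported counters: one
// Size/SizeBytes/Requests/Misses group per cache type.
type tableMetrics struct {
    DataBlocksCacheSize      uint64
    DataBlocksCacheSizeBytes uint64
    DataBlocksCacheRequests  uint64
    DataBlocksCacheMisses    uint64
}

// updateMetrics accumulates per-part stats; callers may pass the same struct
// across several tables, so every field is += rather than =.
func updateMetrics(m *tableMetrics, parts []*partCache) {
    for _, c := range parts {
        m.DataBlocksCacheSize += c.Len()
        m.DataBlocksCacheSizeBytes += c.SizeBytes()
        m.DataBlocksCacheRequests += c.Requests()
        m.DataBlocksCacheMisses += c.Misses()
    }
}

func main() {
    var m tableMetrics
    updateMetrics(&m, []*partCache{
        {length: 10, sizeBytes: 4096, requests: 100, misses: 3},
        {length: 5, sizeBytes: 2048, requests: 40, misses: 1},
    })
    fmt.Printf("%+v\n", m)
}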
@@ -328,10 +330,12 @@ func (tb *Table) UpdateMetrics(m *TableMetrics) {
 		m.SizeBytes += p.size

 		m.DataBlocksCacheSize += p.ibCache.Len()
+		m.DataBlocksCacheSizeBytes += p.ibCache.SizeBytes()
 		m.DataBlocksCacheRequests += p.ibCache.Requests()
 		m.DataBlocksCacheMisses += p.ibCache.Misses()

 		m.IndexBlocksCacheSize += p.idxbCache.Len()
+		m.IndexBlocksCacheSizeBytes += p.idxbCache.SizeBytes()
 		m.IndexBlocksCacheRequests += p.idxbCache.Requests()
 		m.IndexBlocksCacheMisses += p.idxbCache.Misses()

@@ -6,6 +6,7 @@ import (
 	"sync"
 	"sync/atomic"
 	"time"
+	"unsafe"

 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/filestream"
@@ -140,6 +141,10 @@ type indexBlock struct {
 	bhs []blockHeader
 }

+func (idxb *indexBlock) SizeBytes() int {
+	return cap(idxb.bhs) * int(unsafe.Sizeof(blockHeader{}))
+}
+
 func getIndexBlock() *indexBlock {
 	v := indexBlockPool.Get()
 	if v == nil {
@@ -204,7 +209,7 @@ func (ibc *indexBlockCache) MustClose(isBig bool) {

 // cleaner periodically cleans least recently used items.
 func (ibc *indexBlockCache) cleaner() {
-	ticker := time.NewTicker(5 * time.Second)
+	ticker := time.NewTicker(30 * time.Second)
 	defer ticker.Stop()
 	for {
 		select {
@@ -220,8 +225,8 @@ func (ibc *indexBlockCache) cleanByTimeout() {
 	currentTime := fasttime.UnixTimestamp()
 	ibc.mu.Lock()
 	for k, ibe := range ibc.m {
-		// Delete items accessed more than a minute ago.
-		if currentTime-atomic.LoadUint64(&ibe.lastAccessTime) > 60 {
+		// Delete items accessed more than two minutes ago.
+		if currentTime-atomic.LoadUint64(&ibe.lastAccessTime) > 2*60 {
 			delete(ibc.m, k)
 		}
 	}
@@ -286,3 +291,13 @@ func (ibc *indexBlockCache) Len() uint64 {
 	ibc.mu.Unlock()
 	return n
 }
+
+func (ibc *indexBlockCache) SizeBytes() uint64 {
+	n := 0
+	ibc.mu.Lock()
+	for _, e := range ibc.m {
+		n += e.ib.SizeBytes()
+	}
+	ibc.mu.Unlock()
+	return uint64(n)
+}
@@ -307,13 +307,15 @@ func newPartition(name, smallPartsPath, bigPartsPath string, getDeletedMetricIDs
 type partitionMetrics struct {
 	PendingRows uint64

-	BigIndexBlocksCacheSize     uint64
-	BigIndexBlocksCacheRequests uint64
-	BigIndexBlocksCacheMisses   uint64
+	BigIndexBlocksCacheSize      uint64
+	BigIndexBlocksCacheSizeBytes uint64
+	BigIndexBlocksCacheRequests  uint64
+	BigIndexBlocksCacheMisses    uint64

-	SmallIndexBlocksCacheSize     uint64
-	SmallIndexBlocksCacheRequests uint64
-	SmallIndexBlocksCacheMisses   uint64
+	SmallIndexBlocksCacheSize      uint64
+	SmallIndexBlocksCacheSizeBytes uint64
+	SmallIndexBlocksCacheRequests  uint64
+	SmallIndexBlocksCacheMisses    uint64

 	BigSizeBytes   uint64
 	SmallSizeBytes uint64
@@ -360,6 +362,7 @@ func (pt *partition) UpdateMetrics(m *partitionMetrics) {
 		p := pw.p

 		m.BigIndexBlocksCacheSize += p.ibCache.Len()
+		m.BigIndexBlocksCacheSizeBytes += p.ibCache.SizeBytes()
 		m.BigIndexBlocksCacheRequests += p.ibCache.Requests()
 		m.BigIndexBlocksCacheMisses += p.ibCache.Misses()
 		m.BigRowsCount += p.ph.RowsCount
@@ -372,6 +375,7 @@ func (pt *partition) UpdateMetrics(m *partitionMetrics) {
 		p := pw.p

 		m.SmallIndexBlocksCacheSize += p.ibCache.Len()
+		m.SmallIndexBlocksCacheSizeBytes += p.ibCache.SizeBytes()
 		m.SmallIndexBlocksCacheRequests += p.ibCache.Requests()
 		m.SmallIndexBlocksCacheMisses += p.ibCache.Misses()
 		m.SmallRowsCount += p.ph.RowsCount