package main

import (
	"flag"
	"fmt"
	"net/http"
	"strings"
	"sync"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmstorage/transport"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/procutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
	"github.com/VictoriaMetrics/metrics"
)

var (
	httpListenAddr  = flag.String("httpListenAddr", ":8482", "Address to listen for http connections")
	retentionPeriod = flag.Int("retentionPeriod", 1, "Retention period in months")
	storageDataPath = flag.String("storageDataPath", "vmstorage-data", "Path to storage data")
	vminsertAddr    = flag.String("vminsertAddr", ":8400", "TCP address to accept connections from vminsert services")
	vmselectAddr    = flag.String("vmselectAddr", ":8401", "TCP address to accept connections from vmselect services")
	snapshotAuthKey = flag.String("snapshotAuthKey", "", "authKey, which must be passed in query string to /snapshot* pages")

	bigMergeConcurrency   = flag.Int("bigMergeConcurrency", 0, "The maximum number of CPU cores to use for big merges. Default value is used if set to 0")
	smallMergeConcurrency = flag.Int("smallMergeConcurrency", 0, "The maximum number of CPU cores to use for small merges. Default value is used if set to 0")
)
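
// main initializes the vmstorage node: it opens the storage at -storageDataPath,
// registers the storage metrics, starts the vminsert/vmselect transport servers
// and the HTTP server, then waits for SIGTERM and shuts everything down gracefully.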
func main() {
	flag.Parse()
	buildinfo.Init()
	logger.Init()

	storage.SetBigMergeWorkersCount(*bigMergeConcurrency)
	storage.SetSmallMergeWorkersCount(*smallMergeConcurrency)

	logger.Infof("opening storage at %q with retention period %d months", *storageDataPath, *retentionPeriod)
	startTime := time.Now()
	strg, err := storage.OpenStorage(*storageDataPath, *retentionPeriod)
	if err != nil {
		logger.Fatalf("cannot open a storage at %s with retention period %d months: %s", *storageDataPath, *retentionPeriod, err)
	}

	var m storage.Metrics
	strg.UpdateMetrics(&m)
	tm := &m.TableMetrics
	partsCount := tm.SmallPartsCount + tm.BigPartsCount
	blocksCount := tm.SmallBlocksCount + tm.BigBlocksCount
	rowsCount := tm.SmallRowsCount + tm.BigRowsCount
	sizeBytes := tm.SmallSizeBytes + tm.BigSizeBytes
	logger.Infof("successfully opened storage %q in %s; partsCount: %d; blocksCount: %d; rowsCount: %d; sizeBytes: %d",
		*storageDataPath, time.Since(startTime), partsCount, blocksCount, rowsCount, sizeBytes)

	registerStorageMetrics(strg)

	srv, err := transport.NewServer(*vminsertAddr, *vmselectAddr, strg)
	if err != nil {
		logger.Fatalf("cannot create a server with vminsertAddr=%s, vmselectAddr=%s: %s", *vminsertAddr, *vmselectAddr, err)
	}

	go srv.RunVMInsert()
	go srv.RunVMSelect()

	requestHandler := newRequestHandler(strg)
	go func() {
		httpserver.Serve(*httpListenAddr, requestHandler)
	}()

	sig := procutil.WaitForSigterm()
	logger.Infof("service received signal %s", sig)

	logger.Infof("gracefully shutting down the service")
	startTime = time.Now()
	srv.MustClose()
	logger.Infof("successfully shut down the service in %s", time.Since(startTime))

	logger.Infof("gracefully closing the storage at %s", *storageDataPath)
	startTime = time.Now()
	strg.MustClose()
	logger.Infof("successfully closed the storage in %s", time.Since(startTime))

	fs.MustStopDirRemover()

	logger.Infof("the vmstorage has been stopped")
}
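
// newRequestHandler returns an httpserver.RequestHandler that serves the
// /snapshot* endpoints backed by the given storage.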
func newRequestHandler(strg *storage.Storage) httpserver.RequestHandler {
	return func(w http.ResponseWriter, r *http.Request) bool {
		return requestHandler(w, r, strg)
	}
}
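
// requestHandler serves /snapshot/create, /snapshot/list, /snapshot/delete and
// /snapshot/delete_all, reporting the result as JSON. It returns false for any
// other path so the caller can fall through to the default handler.
//
// Example usage (a sketch assuming the default -httpListenAddr of :8482 and an
// empty -snapshotAuthKey; otherwise append an authKey query arg):
//
//	curl http://localhost:8482/snapshot/create
//	curl http://localhost:8482/snapshot/list
//	curl 'http://localhost:8482/snapshot/delete?snapshot=<name>'
//	curl http://localhost:8482/snapshot/delete_all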
func requestHandler(w http.ResponseWriter, r *http.Request, strg *storage.Storage) bool {
	path := r.URL.Path
	if !strings.HasPrefix(path, "/snapshot") {
		return false
	}
	authKey := r.FormValue("authKey")
	if authKey != *snapshotAuthKey {
		httpserver.Errorf(w, "invalid authKey %q. It must match the value from -snapshotAuthKey command line flag", authKey)
		return true
	}
	path = path[len("/snapshot"):]

	switch path {
	case "/create":
		w.Header().Set("Content-Type", "application/json")
		snapshotPath, err := strg.CreateSnapshot()
		if err != nil {
			msg := fmt.Sprintf("cannot create snapshot: %s", err)
			logger.Errorf("%s", msg)
			fmt.Fprintf(w, `{"status":"error","msg":%q}`, msg)
			return true
		}
		fmt.Fprintf(w, `{"status":"ok","snapshot":%q}`, snapshotPath)
		return true
	case "/list":
		w.Header().Set("Content-Type", "application/json")
		snapshots, err := strg.ListSnapshots()
		if err != nil {
			msg := fmt.Sprintf("cannot list snapshots: %s", err)
			logger.Errorf("%s", msg)
			fmt.Fprintf(w, `{"status":"error","msg":%q}`, msg)
			return true
		}
		fmt.Fprintf(w, `{"status":"ok","snapshots":[`)
		if len(snapshots) > 0 {
			for _, snapshot := range snapshots[:len(snapshots)-1] {
				fmt.Fprintf(w, "\n%q,", snapshot)
			}
			fmt.Fprintf(w, "\n%q\n", snapshots[len(snapshots)-1])
		}
		fmt.Fprintf(w, `]}`)
		return true
	case "/delete":
		w.Header().Set("Content-Type", "application/json")
		snapshotName := r.FormValue("snapshot")
		if err := strg.DeleteSnapshot(snapshotName); err != nil {
			msg := fmt.Sprintf("cannot delete snapshot %q: %s", snapshotName, err)
			logger.Errorf("%s", msg)
			fmt.Fprintf(w, `{"status":"error","msg":%q}`, msg)
			return true
		}
		fmt.Fprintf(w, `{"status":"ok"}`)
		return true
	case "/delete_all":
		w.Header().Set("Content-Type", "application/json")
		snapshots, err := strg.ListSnapshots()
		if err != nil {
			msg := fmt.Sprintf("cannot list snapshots: %s", err)
			logger.Errorf("%s", msg)
			fmt.Fprintf(w, `{"status":"error","msg":%q}`, msg)
			return true
		}
		for _, snapshotName := range snapshots {
			if err := strg.DeleteSnapshot(snapshotName); err != nil {
				msg := fmt.Sprintf("cannot delete snapshot %q: %s", snapshotName, err)
				logger.Errorf("%s", msg)
				fmt.Fprintf(w, `{"status":"error","msg":%q}`, msg)
				return true
			}
		}
		fmt.Fprintf(w, `{"status":"ok"}`)
		return true
	default:
		return false
	}
}
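
// registerStorageMetrics registers gauges for the given storage with the
// metrics package, so they are exported on the /metrics page. To keep scrapes
// cheap, the gauges read from a storage.Metrics snapshot that is refreshed at
// most once per second.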
func registerStorageMetrics(strg *storage.Storage) {
	mCache := &storage.Metrics{}
	var mCacheLock sync.Mutex
	var lastUpdateTime time.Time

	m := func() *storage.Metrics {
		mCacheLock.Lock()
		defer mCacheLock.Unlock()
		if time.Since(lastUpdateTime) < time.Second {
			return mCache
		}
		var mc storage.Metrics
		strg.UpdateMetrics(&mc)
		mCache = &mc
		lastUpdateTime = time.Now()
		return mCache
	}
	tm := func() *storage.TableMetrics {
		sm := m()
		return &sm.TableMetrics
	}
	idbm := func() *storage.IndexDBMetrics {
		sm := m()
		return &sm.IndexDBMetrics
	}

	metrics.NewGauge(`vm_active_merges{type="storage/big"}`, func() float64 {
		return float64(tm().ActiveBigMerges)
	})
	metrics.NewGauge(`vm_active_merges{type="storage/small"}`, func() float64 {
		return float64(tm().ActiveSmallMerges)
	})
	metrics.NewGauge(`vm_active_merges{type="indexdb"}`, func() float64 {
		return float64(idbm().ActiveMerges)
	})

	metrics.NewGauge(`vm_merges_total{type="storage/big"}`, func() float64 {
		return float64(tm().BigMergesCount)
	})
	metrics.NewGauge(`vm_merges_total{type="storage/small"}`, func() float64 {
		return float64(tm().SmallMergesCount)
	})
	metrics.NewGauge(`vm_merges_total{type="indexdb"}`, func() float64 {
		return float64(idbm().MergesCount)
	})

	metrics.NewGauge(`vm_rows_merged_total{type="storage/big"}`, func() float64 {
		return float64(tm().BigRowsMerged)
	})
	metrics.NewGauge(`vm_rows_merged_total{type="storage/small"}`, func() float64 {
		return float64(tm().SmallRowsMerged)
	})
	metrics.NewGauge(`vm_rows_merged_total{type="indexdb"}`, func() float64 {
		return float64(idbm().ItemsMerged)
	})

	metrics.NewGauge(`vm_rows_deleted_total{type="storage/big"}`, func() float64 {
		return float64(tm().BigRowsDeleted)
	})
	metrics.NewGauge(`vm_rows_deleted_total{type="storage/small"}`, func() float64 {
		return float64(tm().SmallRowsDeleted)
	})

	metrics.NewGauge(`vm_references{type="storage/big", name="parts"}`, func() float64 {
		return float64(tm().BigPartsRefCount)
	})
	metrics.NewGauge(`vm_references{type="storage/small", name="parts"}`, func() float64 {
		return float64(tm().SmallPartsRefCount)
	})
	metrics.NewGauge(`vm_references{type="storage", name="partitions"}`, func() float64 {
		return float64(tm().PartitionsRefCount)
	})
	metrics.NewGauge(`vm_references{type="indexdb", name="objects"}`, func() float64 {
		return float64(idbm().IndexDBRefCount)
	})
	metrics.NewGauge(`vm_references{type="indexdb", name="parts"}`, func() float64 {
		return float64(idbm().PartsRefCount)
	})

	metrics.NewGauge(`vm_new_timeseries_created_total`, func() float64 {
		return float64(idbm().NewTimeseriesCreated)
	})
	metrics.NewGauge(`vm_missing_tsids_for_metric_id_total`, func() float64 {
		return float64(idbm().MissingTSIDsForMetricID)
	})
	metrics.NewGauge(`vm_recent_hour_metric_ids_search_calls_total`, func() float64 {
		return float64(idbm().RecentHourMetricIDsSearchCalls)
	})
	metrics.NewGauge(`vm_recent_hour_metric_ids_search_hits_total`, func() float64 {
		return float64(idbm().RecentHourMetricIDsSearchHits)
	})
	metrics.NewGauge(`vm_date_metric_ids_search_calls_total`, func() float64 {
		return float64(idbm().DateMetricIDsSearchCalls)
	})
	metrics.NewGauge(`vm_date_metric_ids_search_hits_total`, func() float64 {
		return float64(idbm().DateMetricIDsSearchHits)
	})
	metrics.NewGauge(`vm_index_blocks_with_metric_ids_processed_total`, func() float64 {
		return float64(idbm().IndexBlocksWithMetricIDsProcessed)
	})
	metrics.NewGauge(`vm_index_blocks_with_metric_ids_incorrect_order_total`, func() float64 {
		return float64(idbm().IndexBlocksWithMetricIDsIncorrectOrder)
	})

	metrics.NewGauge(`vm_assisted_merges_total{type="storage/small"}`, func() float64 {
		return float64(tm().SmallAssistedMerges)
	})
	metrics.NewGauge(`vm_assisted_merges_total{type="indexdb"}`, func() float64 {
		return float64(idbm().AssistedMerges)
	})

	metrics.NewGauge(`vm_pending_rows{type="storage"}`, func() float64 {
		return float64(tm().PendingRows)
	})
	metrics.NewGauge(`vm_pending_rows{type="indexdb"}`, func() float64 {
		return float64(idbm().PendingItems)
	})

	metrics.NewGauge(`vm_parts{type="storage/big"}`, func() float64 {
		return float64(tm().BigPartsCount)
	})
	metrics.NewGauge(`vm_parts{type="storage/small"}`, func() float64 {
		return float64(tm().SmallPartsCount)
	})
	metrics.NewGauge(`vm_parts{type="indexdb"}`, func() float64 {
		return float64(idbm().PartsCount)
	})

	metrics.NewGauge(`vm_blocks{type="storage/big"}`, func() float64 {
		return float64(tm().BigBlocksCount)
	})
	metrics.NewGauge(`vm_blocks{type="storage/small"}`, func() float64 {
		return float64(tm().SmallBlocksCount)
	})
	metrics.NewGauge(`vm_blocks{type="indexdb"}`, func() float64 {
		return float64(idbm().BlocksCount)
	})

	metrics.NewGauge(`vm_data_size_bytes{type="storage/big"}`, func() float64 {
		return float64(tm().BigSizeBytes)
	})
	metrics.NewGauge(`vm_data_size_bytes{type="storage/small"}`, func() float64 {
		return float64(tm().SmallSizeBytes)
	})
	metrics.NewGauge(`vm_data_size_bytes{type="indexdb"}`, func() float64 {
		return float64(idbm().SizeBytes)
	})

	metrics.NewGauge(`vm_rows_ignored_total{reason="big_timestamp"}`, func() float64 {
		return float64(m().TooBigTimestampRows)
	})
	metrics.NewGauge(`vm_rows_ignored_total{reason="small_timestamp"}`, func() float64 {
		return float64(m().TooSmallTimestampRows)
	})

	metrics.NewGauge(`vm_concurrent_addrows_limit_reached_total`, func() float64 {
		return float64(m().AddRowsConcurrencyLimitReached)
	})
	metrics.NewGauge(`vm_concurrent_addrows_limit_timeout_total`, func() float64 {
		return float64(m().AddRowsConcurrencyLimitTimeout)
	})
	metrics.NewGauge(`vm_concurrent_addrows_dropped_rows_total`, func() float64 {
		return float64(m().AddRowsConcurrencyDroppedRows)
	})
	metrics.NewGauge(`vm_concurrent_addrows_capacity`, func() float64 {
		return float64(m().AddRowsConcurrencyCapacity)
	})
	metrics.NewGauge(`vm_concurrent_addrows_current`, func() float64 {
		return float64(m().AddRowsConcurrencyCurrent)
	})

	metrics.NewGauge(`vm_rows{type="storage/big"}`, func() float64 {
		return float64(tm().BigRowsCount)
	})
	metrics.NewGauge(`vm_rows{type="storage/small"}`, func() float64 {
		return float64(tm().SmallRowsCount)
	})
	metrics.NewGauge(`vm_rows{type="indexdb"}`, func() float64 {
		return float64(idbm().ItemsCount)
	})

	metrics.NewGauge(`vm_recent_hour_inverted_index_entries`, func() float64 {
		return float64(m().RecentHourInvertedIndexSize)
	})
	metrics.NewGauge(`vm_recent_hour_inverted_index_unique_tag_pairs`, func() float64 {
		return float64(m().RecentHourInvertedIndexUniqueTagPairsSize)
	})
	metrics.NewGauge(`vm_recent_hour_inverted_index_pending_metric_ids`, func() float64 {
		return float64(m().RecentHourInvertedIndexPendingMetricIDsSize)
	})
	metrics.NewGauge(`vm_recent_hour_inverted_index_search_calls_total`, func() float64 {
		return float64(idbm().RecentHourInvertedIndexSearchCalls)
	})
	metrics.NewGauge(`vm_recent_hour_inverted_index_search_hits_total`, func() float64 {
		return float64(idbm().RecentHourInvertedIndexSearchHits)
	})

	metrics.NewGauge(`vm_date_range_search_calls_total`, func() float64 {
		return float64(idbm().DateRangeSearchCalls)
	})
	metrics.NewGauge(`vm_date_range_hits_total`, func() float64 {
		return float64(idbm().DateRangeSearchHits)
	})

	metrics.NewGauge(`vm_date_metric_id_cache_syncs_total`, func() float64 {
		return float64(m().DateMetricIDCacheSyncsCount)
	})
	metrics.NewGauge(`vm_date_metric_id_cache_resets_total`, func() float64 {
		return float64(m().DateMetricIDCacheResetsCount)
	})

	metrics.NewGauge(`vm_cache_entries{type="storage/tsid"}`, func() float64 {
		return float64(m().TSIDCacheSize)
	})
	metrics.NewGauge(`vm_cache_entries{type="storage/metricIDs"}`, func() float64 {
		return float64(m().MetricIDCacheSize)
	})
	metrics.NewGauge(`vm_cache_entries{type="storage/metricName"}`, func() float64 {
		return float64(m().MetricNameCacheSize)
	})
	metrics.NewGauge(`vm_cache_entries{type="storage/date_metricID"}`, func() float64 {
		return float64(m().DateMetricIDCacheSize)
	})
	metrics.NewGauge(`vm_cache_entries{type="storage/hour_metric_ids"}`, func() float64 {
		return float64(m().HourMetricIDCacheSize)
	})
	metrics.NewGauge(`vm_cache_entries{type="storage/bigIndexBlocks"}`, func() float64 {
		return float64(tm().BigIndexBlocksCacheSize)
	})
	metrics.NewGauge(`vm_cache_entries{type="storage/smallIndexBlocks"}`, func() float64 {
		return float64(tm().SmallIndexBlocksCacheSize)
	})
	metrics.NewGauge(`vm_cache_entries{type="indexdb/dataBlocks"}`, func() float64 {
		return float64(idbm().DataBlocksCacheSize)
	})
	metrics.NewGauge(`vm_cache_entries{type="indexdb/indexBlocks"}`, func() float64 {
		return float64(idbm().IndexBlocksCacheSize)
	})
	metrics.NewGauge(`vm_cache_entries{type="indexdb/tagFilters"}`, func() float64 {
		return float64(idbm().TagCacheSize)
	})
	metrics.NewGauge(`vm_cache_entries{type="indexdb/uselessTagFilters"}`, func() float64 {
		return float64(idbm().UselessTagFiltersCacheSize)
	})
	metrics.NewGauge(`vm_cache_entries{type="storage/regexps"}`, func() float64 {
		return float64(storage.RegexpCacheSize())
	})

	metrics.NewGauge(`vm_cache_size_bytes{type="storage/tsid"}`, func() float64 {
		return float64(m().TSIDCacheSizeBytes)
	})
	metrics.NewGauge(`vm_cache_size_bytes{type="storage/metricIDs"}`, func() float64 {
		return float64(m().MetricIDCacheSizeBytes)
	})
	metrics.NewGauge(`vm_cache_size_bytes{type="storage/metricName"}`, func() float64 {
		return float64(m().MetricNameCacheSizeBytes)
	})
	metrics.NewGauge(`vm_cache_size_bytes{type="indexdb/tagFilters"}`, func() float64 {
		return float64(idbm().TagCacheSizeBytes)
	})
	metrics.NewGauge(`vm_cache_size_bytes{type="indexdb/uselessTagFilters"}`, func() float64 {
		return float64(idbm().UselessTagFiltersCacheSizeBytes)
	})

	metrics.NewGauge(`vm_cache_requests_total{type="storage/tsid"}`, func() float64 {
		return float64(m().TSIDCacheRequests)
	})
	metrics.NewGauge(`vm_cache_requests_total{type="storage/metricIDs"}`, func() float64 {
		return float64(m().MetricIDCacheRequests)
	})
	metrics.NewGauge(`vm_cache_requests_total{type="storage/metricName"}`, func() float64 {
		return float64(m().MetricNameCacheRequests)
	})
	metrics.NewGauge(`vm_cache_requests_total{type="storage/bigIndexBlocks"}`, func() float64 {
		return float64(tm().BigIndexBlocksCacheRequests)
	})
	metrics.NewGauge(`vm_cache_requests_total{type="storage/smallIndexBlocks"}`, func() float64 {
		return float64(tm().SmallIndexBlocksCacheRequests)
	})
	metrics.NewGauge(`vm_cache_requests_total{type="indexdb/dataBlocks"}`, func() float64 {
		return float64(idbm().DataBlocksCacheRequests)
	})
	metrics.NewGauge(`vm_cache_requests_total{type="indexdb/indexBlocks"}`, func() float64 {
		return float64(idbm().IndexBlocksCacheRequests)
	})
	metrics.NewGauge(`vm_cache_requests_total{type="indexdb/tagFilters"}`, func() float64 {
		return float64(idbm().TagCacheRequests)
	})
	metrics.NewGauge(`vm_cache_requests_total{type="indexdb/uselessTagFilters"}`, func() float64 {
		return float64(idbm().UselessTagFiltersCacheRequests)
	})
	metrics.NewGauge(`vm_cache_requests_total{type="storage/regexps"}`, func() float64 {
		return float64(storage.RegexpCacheRequests())
	})

	metrics.NewGauge(`vm_cache_misses_total{type="storage/tsid"}`, func() float64 {
		return float64(m().TSIDCacheMisses)
	})
	metrics.NewGauge(`vm_cache_misses_total{type="storage/metricIDs"}`, func() float64 {
		return float64(m().MetricIDCacheMisses)
	})
	metrics.NewGauge(`vm_cache_misses_total{type="storage/metricName"}`, func() float64 {
		return float64(m().MetricNameCacheMisses)
	})
	metrics.NewGauge(`vm_cache_misses_total{type="storage/bigIndexBlocks"}`, func() float64 {
		return float64(tm().BigIndexBlocksCacheMisses)
	})
	metrics.NewGauge(`vm_cache_misses_total{type="storage/smallIndexBlocks"}`, func() float64 {
		return float64(tm().SmallIndexBlocksCacheMisses)
	})
	metrics.NewGauge(`vm_cache_misses_total{type="indexdb/dataBlocks"}`, func() float64 {
		return float64(idbm().DataBlocksCacheMisses)
	})
	metrics.NewGauge(`vm_cache_misses_total{type="indexdb/indexBlocks"}`, func() float64 {
		return float64(idbm().IndexBlocksCacheMisses)
	})
	metrics.NewGauge(`vm_cache_misses_total{type="indexdb/tagFilters"}`, func() float64 {
		return float64(idbm().TagCacheMisses)
	})
	metrics.NewGauge(`vm_cache_misses_total{type="indexdb/uselessTagFilters"}`, func() float64 {
		return float64(idbm().UselessTagFiltersCacheMisses)
	})
	metrics.NewGauge(`vm_cache_misses_total{type="storage/regexps"}`, func() float64 {
		return float64(storage.RegexpCacheMisses())
	})

	metrics.NewGauge(`vm_deleted_metrics_total{type="indexdb"}`, func() float64 {
		return float64(idbm().DeletedMetricsCount)
	})

	metrics.NewGauge(`vm_cache_collisions_total{type="storage/tsid"}`, func() float64 {
		return float64(m().TSIDCacheCollisions)
	})
	metrics.NewGauge(`vm_cache_collisions_total{type="storage/metricName"}`, func() float64 {
		return float64(m().MetricNameCacheCollisions)
	})
}