// VictoriaMetrics/app/vmselect/main.go

package main

import (
	"errors"
	"flag"
	"fmt"
	"net/http"
	"os"
	"runtime"
	"strings"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/netstorage"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/prometheus"
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmselect/promql"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/envflag"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/procutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/storage"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/timerpool"
	"github.com/VictoriaMetrics/metrics"
)

var (
	httpListenAddr        = flag.String("httpListenAddr", ":8481", "Address to listen for http connections")
	cacheDataPath         = flag.String("cacheDataPath", "", "Path to directory for cache files. Cache isn't saved if empty")
	maxConcurrentRequests = flag.Int("search.maxConcurrentRequests", getDefaultMaxConcurrentRequests(), "The maximum number of concurrent search requests. "+
		"It shouldn't be high, since a single request can saturate all the CPU cores. See also -search.maxQueueDuration")
	maxQueueDuration  = flag.Duration("search.maxQueueDuration", 10*time.Second, "The maximum time the request waits for execution when -search.maxConcurrentRequests limit is reached")
	minScrapeInterval = flag.Duration("dedup.minScrapeInterval", 0, "Remove superfluous samples from time series if they are located closer to each other than this duration. "+
		"This may be useful for reducing overhead when multiple identically configured Prometheus instances write data to the same VictoriaMetrics. "+
		"Deduplication is disabled if the -dedup.minScrapeInterval is 0")
	resetCacheAuthKey = flag.String("search.resetCacheAuthKey", "", "Optional authKey for resetting rollup cache via /internal/resetRollupResultCache call")
	storageNodes      = flagutil.NewArray("storageNode", "Addresses of vmstorage nodes; usage: -storageNode=vmstorage-host1:8401 -storageNode=vmstorage-host2:8401")
)
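
// getDefaultMaxConcurrentRequests derives the default value of
// -search.maxConcurrentRequests from the number of available CPU cores:
// core counts of 4 or less are doubled, and the result is capped at 16,
// since a single request can already saturate all the cores.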
func getDefaultMaxConcurrentRequests() int {
	n := runtime.GOMAXPROCS(-1)
	if n <= 4 {
		n *= 2
	}
	if n > 16 {
		// A single request can saturate all the CPU cores, so there is no sense
		// in allowing higher number of concurrent requests - they will just contend
		// for unavailable CPU time.
		n = 16
	}
	return n
}
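
// main wires the process together: it parses flags, connects netstorage to the
// configured vmstorage nodes, initializes the rollup result cache, serves HTTP
// in a background goroutine, then blocks until a termination signal arrives
// and shuts everything down in reverse order.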
func main() {
	// Write flags and help message to stdout, since it is easier to grep or pipe.
	flag.CommandLine.SetOutput(os.Stdout)
	envflag.Parse()
	buildinfo.Init()
	logger.Init()
	cgroup.UpdateGOMAXPROCSToCPUQuota()

	logger.Infof("starting netstorage at storageNodes %s", *storageNodes)
	startTime := time.Now()
	storage.SetMinScrapeIntervalForDeduplication(*minScrapeInterval)
	if len(*storageNodes) == 0 {
		logger.Fatalf("missing -storageNode arg")
	}
	netstorage.InitStorageNodes(*storageNodes)
	logger.Infof("started netstorage in %.3f seconds", time.Since(startTime).Seconds())

	if len(*cacheDataPath) > 0 {
		tmpDataPath := *cacheDataPath + "/tmp"
		fs.RemoveDirContents(tmpDataPath)
		netstorage.InitTmpBlocksDir(tmpDataPath)
		promql.InitRollupResultCache(*cacheDataPath + "/rollupResult")
	} else {
		netstorage.InitTmpBlocksDir("")
		promql.InitRollupResultCache("")
	}

	concurrencyCh = make(chan struct{}, *maxConcurrentRequests)

	go func() {
		httpserver.Serve(*httpListenAddr, requestHandler)
	}()

	sig := procutil.WaitForSigterm()
	logger.Infof("service received signal %s", sig)

	logger.Infof("gracefully shutting down http service at %q", *httpListenAddr)
	startTime = time.Now()
	if err := httpserver.Stop(*httpListenAddr); err != nil {
		logger.Fatalf("cannot stop http service: %s", err)
	}
	logger.Infof("successfully shut down http service in %.3f seconds", time.Since(startTime).Seconds())
logger.Infof("shutting down neststorage...")
startTime = time.Now()
netstorage.Stop()
if len(*cacheDataPath) > 0 {
promql.StopRollupResultCache()
}
logger.Infof("successfully stopped netstorage in %.3f seconds", time.Since(startTime).Seconds())
2019-05-22 23:23:23 +02:00
fs.MustStopDirRemover()
2019-05-22 23:23:23 +02:00
logger.Infof("the vmselect has been stopped")
2019-05-22 23:16:55 +02:00
}
2019-05-22 23:23:23 +02:00
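
// concurrencyCh is a semaphore limiting the number of concurrently executed
// search requests. main() sizes it to -search.maxConcurrentRequests;
// requestHandler acquires a slot by sending into the channel and releases it
// via a deferred receive.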
var concurrencyCh chan struct{}

var (
	concurrencyLimitReached = metrics.NewCounter(`vm_concurrent_select_limit_reached_total`)
	concurrencyLimitTimeout = metrics.NewCounter(`vm_concurrent_select_limit_timeout_total`)
	_                       = metrics.NewGauge(`vm_concurrent_select_capacity`, func() float64 {
		return float64(cap(concurrencyCh))
	})
	_ = metrics.NewGauge(`vm_concurrent_select_current`, func() float64 {
		return float64(len(concurrencyCh))
	})
)
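
// requestHandler is the top-level handler passed to httpserver.Serve. It
// returns true when the request was handled and false when the path doesn't
// belong to vmselect. Requests above the concurrency limit wait up to
// -search.maxQueueDuration for a free slot before being rejected with 503.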
func requestHandler(w http.ResponseWriter, r *http.Request) bool {
	startTime := time.Now()

	// Limit the number of concurrent queries.
	select {
	case concurrencyCh <- struct{}{}:
		defer func() { <-concurrencyCh }()
	default:
		// Wait for a while before giving up. This should smooth out short bursts of requests.
		concurrencyLimitReached.Inc()
		t := timerpool.Get(*maxQueueDuration)
		select {
		case concurrencyCh <- struct{}{}:
			timerpool.Put(t)
			defer func() { <-concurrencyCh }()
		case <-t.C:
			timerpool.Put(t)
			concurrencyLimitTimeout.Inc()
			err := &httpserver.ErrorWithStatusCode{
				Err: fmt.Errorf("cannot handle more than %d concurrent search requests during %s; possible solutions: "+
					"increase `-search.maxQueueDuration`, increase `-search.maxConcurrentRequests`, increase server capacity",
					*maxConcurrentRequests, *maxQueueDuration),
				StatusCode: http.StatusServiceUnavailable,
			}
			httpserver.Errorf(w, r, "%s", err)
			return true
		}
	}

	path := strings.Replace(r.URL.Path, "//", "/", -1)
	if path == "/internal/resetRollupResultCache" {
		if len(*resetCacheAuthKey) > 0 && r.FormValue("authKey") != *resetCacheAuthKey {
			sendPrometheusError(w, r, fmt.Errorf("invalid authKey=%q for %q", r.FormValue("authKey"), path))
			return true
		}
		promql.ResetRollupResultCache()
		return true
	}

	p, err := httpserver.ParsePath(path)
	if err != nil {
		httpserver.Errorf(w, r, "cannot parse path %q: %s", path, err)
		return true
	}
	at, err := auth.NewToken(p.AuthToken)
	if err != nil {
		httpserver.Errorf(w, r, "auth error: %s", err)
		return true
	}
	switch p.Prefix {
	case "select":
		return selectHandler(startTime, w, r, p, at)
	case "delete":
		return deleteHandler(startTime, w, r, p, at)
	default:
		// The path prefix doesn't belong to vmselect.
		return false
	}
}
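
// selectHandler routes /select/... requests to the Prometheus-compatible query
// API handlers. The rules, alerts and metadata endpoints return static
// placeholders, presumably so that clients which probe those endpoints keep
// working against vmselect.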
func selectHandler(startTime time.Time, w http.ResponseWriter, r *http.Request, p *httpserver.Path, at *auth.Token) bool {
	if strings.HasPrefix(p.Suffix, "prometheus/api/v1/label/") {
		s := p.Suffix[len("prometheus/api/v1/label/"):]
		if strings.HasSuffix(s, "/values") {
			labelValuesRequests.Inc()
			labelName := s[:len(s)-len("/values")]
			httpserver.EnableCORS(w, r)
			if err := prometheus.LabelValuesHandler(startTime, at, labelName, w, r); err != nil {
				labelValuesErrors.Inc()
				sendPrometheusError(w, r, err)
				return true
			}
			return true
		}
	}

	switch p.Suffix {
	case "prometheus/api/v1/query":
		queryRequests.Inc()
		httpserver.EnableCORS(w, r)
		if err := prometheus.QueryHandler(startTime, at, w, r); err != nil {
			queryErrors.Inc()
			sendPrometheusError(w, r, err)
			return true
		}
		return true
	case "prometheus/api/v1/query_range":
		queryRangeRequests.Inc()
		httpserver.EnableCORS(w, r)
		if err := prometheus.QueryRangeHandler(startTime, at, w, r); err != nil {
			queryRangeErrors.Inc()
			sendPrometheusError(w, r, err)
			return true
		}
		return true
	case "prometheus/api/v1/series":
		seriesRequests.Inc()
		httpserver.EnableCORS(w, r)
		if err := prometheus.SeriesHandler(startTime, at, w, r); err != nil {
			seriesErrors.Inc()
			sendPrometheusError(w, r, err)
			return true
		}
		return true
	case "prometheus/api/v1/series/count":
		seriesCountRequests.Inc()
		httpserver.EnableCORS(w, r)
		if err := prometheus.SeriesCountHandler(startTime, at, w, r); err != nil {
			seriesCountErrors.Inc()
			sendPrometheusError(w, r, err)
			return true
		}
		return true
	case "prometheus/api/v1/labels":
		labelsRequests.Inc()
		httpserver.EnableCORS(w, r)
		if err := prometheus.LabelsHandler(startTime, at, w, r); err != nil {
			labelsErrors.Inc()
			sendPrometheusError(w, r, err)
			return true
		}
		return true
	case "prometheus/api/v1/labels/count":
		labelsCountRequests.Inc()
		httpserver.EnableCORS(w, r)
		if err := prometheus.LabelsCountHandler(startTime, at, w, r); err != nil {
			labelsCountErrors.Inc()
			sendPrometheusError(w, r, err)
			return true
		}
		return true
	case "prometheus/api/v1/status/tsdb":
		statusTSDBRequests.Inc()
		if err := prometheus.TSDBStatusHandler(startTime, at, w, r); err != nil {
			statusTSDBErrors.Inc()
			sendPrometheusError(w, r, err)
			return true
		}
		return true
	case "prometheus/api/v1/status/active_queries":
		statusActiveQueriesRequests.Inc()
		promql.WriteActiveQueries(w)
		return true
	case "prometheus/api/v1/export":
		exportRequests.Inc()
		if err := prometheus.ExportHandler(startTime, at, w, r); err != nil {
			exportErrors.Inc()
			httpserver.Errorf(w, r, "error in %q: %s", r.URL.Path, err)
			return true
		}
		return true
	case "prometheus/federate":
		federateRequests.Inc()
		if err := prometheus.FederateHandler(startTime, at, w, r); err != nil {
			federateErrors.Inc()
			httpserver.Errorf(w, r, "error in %q: %s", r.URL.Path, err)
			return true
		}
		return true
	case "prometheus/api/v1/rules":
		// Return dumb placeholder
		rulesRequests.Inc()
		w.Header().Set("Content-Type", "application/json")
		fmt.Fprintf(w, "%s", `{"status":"success","data":{"groups":[]}}`)
		return true
	case "prometheus/api/v1/alerts":
		// Return dumb placeholder
		alertsRequests.Inc()
		w.Header().Set("Content-Type", "application/json")
		fmt.Fprintf(w, "%s", `{"status":"success","data":{"alerts":[]}}`)
		return true
	case "prometheus/api/v1/metadata":
		// Return dumb placeholder
		metadataRequests.Inc()
		w.Header().Set("Content-Type", "application/json")
		fmt.Fprintf(w, "%s", `{"status":"success","data":{}}`)
		return true
	default:
		return false
	}
}
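
// deleteHandler routes /delete/... requests. A successful delete_series call
// responds with 204 No Content.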
func deleteHandler(startTime time.Time, w http.ResponseWriter, r *http.Request, p *httpserver.Path, at *auth.Token) bool {
	switch p.Suffix {
	case "prometheus/api/v1/admin/tsdb/delete_series":
		deleteRequests.Inc()
		if err := prometheus.DeleteHandler(startTime, at, r); err != nil {
			deleteErrors.Inc()
			httpserver.Errorf(w, r, "error in %q: %s", r.URL.Path, err)
			return true
		}
		w.WriteHeader(http.StatusNoContent)
		return true
	default:
		return false
	}
}
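
// sendPrometheusError logs err and writes it as a Prometheus-style JSON error
// response. The status code defaults to 422 Unprocessable Entity unless err
// wraps an httpserver.ErrorWithStatusCode carrying its own code.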
func sendPrometheusError(w http.ResponseWriter, r *http.Request, err error) {
	logger.Warnf("error in %q: %s", r.RequestURI, err)
	w.Header().Set("Content-Type", "application/json")
	statusCode := http.StatusUnprocessableEntity
	var esc *httpserver.ErrorWithStatusCode
	if errors.As(err, &esc) {
		statusCode = esc.StatusCode
	}
	w.WriteHeader(statusCode)
	prometheus.WriteErrorResponse(w, statusCode, err)
}
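
// Per-endpoint request and error counters. The `{}` placeholders in the path
// labels stand in for the variable path components (the auth token and, for
// label values, the label name), so metric cardinality stays fixed.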
var (
	labelValuesRequests = metrics.NewCounter(`vm_http_requests_total{path="/select/{}/prometheus/api/v1/label/{}/values"}`)
	labelValuesErrors   = metrics.NewCounter(`vm_http_request_errors_total{path="/select/{}/prometheus/api/v1/label/{}/values"}`)

	queryRequests = metrics.NewCounter(`vm_http_requests_total{path="/select/{}/prometheus/api/v1/query"}`)
	queryErrors   = metrics.NewCounter(`vm_http_request_errors_total{path="/select/{}/prometheus/api/v1/query"}`)

	queryRangeRequests = metrics.NewCounter(`vm_http_requests_total{path="/select/{}/prometheus/api/v1/query_range"}`)
	queryRangeErrors   = metrics.NewCounter(`vm_http_request_errors_total{path="/select/{}/prometheus/api/v1/query_range"}`)

	seriesRequests = metrics.NewCounter(`vm_http_requests_total{path="/select/{}/prometheus/api/v1/series"}`)
	seriesErrors   = metrics.NewCounter(`vm_http_request_errors_total{path="/select/{}/prometheus/api/v1/series"}`)

	seriesCountRequests = metrics.NewCounter(`vm_http_requests_total{path="/select/{}/prometheus/api/v1/series/count"}`)
	seriesCountErrors   = metrics.NewCounter(`vm_http_request_errors_total{path="/select/{}/prometheus/api/v1/series/count"}`)

	labelsRequests = metrics.NewCounter(`vm_http_requests_total{path="/select/{}/prometheus/api/v1/labels"}`)
	labelsErrors   = metrics.NewCounter(`vm_http_request_errors_total{path="/select/{}/prometheus/api/v1/labels"}`)

	labelsCountRequests = metrics.NewCounter(`vm_http_requests_total{path="/select/{}/prometheus/api/v1/labels/count"}`)
	labelsCountErrors   = metrics.NewCounter(`vm_http_request_errors_total{path="/select/{}/prometheus/api/v1/labels/count"}`)

	statusTSDBRequests = metrics.NewCounter(`vm_http_requests_total{path="/select/{}/prometheus/api/v1/status/tsdb"}`)
	statusTSDBErrors   = metrics.NewCounter(`vm_http_request_errors_total{path="/select/{}/prometheus/api/v1/status/tsdb"}`)

	statusActiveQueriesRequests = metrics.NewCounter(`vm_http_requests_total{path="/select/{}/prometheus/api/v1/status/active_queries"}`)

	deleteRequests = metrics.NewCounter(`vm_http_requests_total{path="/delete/{}/prometheus/api/v1/admin/tsdb/delete_series"}`)
	deleteErrors   = metrics.NewCounter(`vm_http_request_errors_total{path="/delete/{}/prometheus/api/v1/admin/tsdb/delete_series"}`)

	exportRequests = metrics.NewCounter(`vm_http_requests_total{path="/select/{}/prometheus/api/v1/export"}`)
	exportErrors   = metrics.NewCounter(`vm_http_request_errors_total{path="/select/{}/prometheus/api/v1/export"}`)

	federateRequests = metrics.NewCounter(`vm_http_requests_total{path="/select/{}/prometheus/federate"}`)
	federateErrors   = metrics.NewCounter(`vm_http_request_errors_total{path="/select/{}/prometheus/federate"}`)

	rulesRequests    = metrics.NewCounter(`vm_http_requests_total{path="/select/{}/prometheus/api/v1/rules"}`)
	alertsRequests   = metrics.NewCounter(`vm_http_requests_total{path="/select/{}/prometheus/api/v1/alerts"}`)
	metadataRequests = metrics.NewCounter(`vm_http_requests_total{path="/select/{}/prometheus/api/v1/metadata"}`)
)