app/vmselect: accept focusLabel query arg at /api/v1/status/tsdb

This allows filling the seriesCountByFocusLabelValue list in the /api/v1/status/tsdb response with the label values of the specified focusLabel that have the highest number of time series.

TODO: add this to Cardinality explorer at VMUI - https://docs.victoriametrics.com/#cardinality-explorer
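
For illustration, a minimal sketch of calling the endpoint with the new query arg from Go; the host, port, label name and topN value are hypothetical and not part of this commit:

    package main

    import (
        "fmt"
        "io"
        "log"
        "net/http"
        "net/url"
    )

    func main() {
        // Ask for the top 5 values of the hypothetical "instance" label
        // with the highest number of time series.
        params := url.Values{}
        params.Set("focusLabel", "instance")
        params.Set("topN", "5")
        resp, err := http.Get("http://localhost:8428/api/v1/status/tsdb?" + params.Encode())
        if err != nil {
            log.Fatalf("cannot query /api/v1/status/tsdb: %s", err)
        }
        defer resp.Body.Close()
        body, err := io.ReadAll(resp.Body)
        if err != nil {
            log.Fatalf("cannot read response: %s", err)
        }
        // The seriesCountByFocusLabelValue list in the response contains
        // {"name": <label value>, "value": <series count>} entries.
        fmt.Println(string(body))
    }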
Aliaksandr Valialkin 2022-06-14 17:46:16 +03:00
parent b6c1ca12b7
commit ec7963208d
10 changed files with 161 additions and 140 deletions


@@ -268,7 +268,8 @@ See the [example VMUI at VictoriaMetrics playground](https://play.victoriametric
 VictoriaMetrics provides an ability to explore time series cardinality at `cardinality` tab in [vmui](#vmui) in the following ways:
 - To identify metric names with the highest number of series.
-- To idnetify labels with the highest number of series.
+- To identify labels with the highest number of series.
+- To identify values with the highest number of series for the selected label (aka `focusLabel`).
 - To identify label=name pairs with the highest number of series.
 - To identify labels with the highest number of unique values.
@@ -1441,6 +1442,7 @@ VictoriaMetrics returns TSDB stats at `/api/v1/status/tsdb` page in the way simi
 * `topN=N` where `N` is the number of top entries to return in the response. By default top 10 entries are returned.
 * `date=YYYY-MM-DD` where `YYYY-MM-DD` is the date for collecting the stats. By default the stats is collected for the current day. Pass `date=1970-01-01` in order to collect global stats across all the days.
+* `focusLabel=LABEL_NAME` returns label values with the highest number of time series for the given `LABEL_NAME` in the `seriesCountByFocusLabelValue` list.
 * `match[]=SELECTOR` where `SELECTOR` is an arbitrary [time series selector](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors) for series to take into account during stats calculation. By default all the series are taken into account.
 * `extra_label=LABEL=VALUE`. See [these docs](#prometheus-querying-api-enhancements) for more details.
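
With `focusLabel` set, the response carries a `seriesCountByFocusLabelValue` list next to the existing top lists. A truncated sketch of the shape (label values and counts are made up; the remaining response fields and the outer wrapper are omitted):

    {
      "seriesCountByMetricName": [ ... ],
      "seriesCountByLabelName": [ ... ],
      "seriesCountByFocusLabelValue": [
        {"name": "host-1", "value": 12000},
        {"name": "host-2", "value": 11000}
      ],
      "seriesCountByLabelValuePair": [ ... ],
      "labelValueCountByLabelName": [ ... ]
    }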


@@ -764,25 +764,11 @@ func GetTagValueSuffixes(qt *querytracer.Tracer, tr storage.TimeRange, tagKey, t
 	return suffixes, nil
 }
 
-// GetTSDBStatusForDate returns tsdb status according to https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-stats
-func GetTSDBStatusForDate(qt *querytracer.Tracer, deadline searchutils.Deadline, date uint64, topN, maxMetrics int) (*storage.TSDBStatus, error) {
-	qt = qt.NewChild("get tsdb stats for date=%d, topN=%d", date, topN)
-	defer qt.Done()
-	if deadline.Exceeded() {
-		return nil, fmt.Errorf("timeout exceeded before starting the query processing: %s", deadline.String())
-	}
-	status, err := vmstorage.GetTSDBStatusForDate(qt, date, topN, maxMetrics, deadline.Deadline())
-	if err != nil {
-		return nil, fmt.Errorf("error during tsdb status request: %w", err)
-	}
-	return status, nil
-}
-
-// GetTSDBStatusWithFilters returns tsdb status according to https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-stats
+// GetTSDBStatus returns tsdb status according to https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-stats
 //
 // It accepts aribtrary filters on time series in sq.
-func GetTSDBStatusWithFilters(qt *querytracer.Tracer, deadline searchutils.Deadline, sq *storage.SearchQuery, topN int) (*storage.TSDBStatus, error) {
-	qt = qt.NewChild("get tsdb stats: %s, topN=%d", sq, topN)
+func GetTSDBStatus(qt *querytracer.Tracer, sq *storage.SearchQuery, focusLabel string, topN int, deadline searchutils.Deadline) (*storage.TSDBStatus, error) {
+	qt = qt.NewChild("get tsdb stats: %s, focusLabel=%q, topN=%d", sq, focusLabel, topN)
 	defer qt.Done()
 	if deadline.Exceeded() {
 		return nil, fmt.Errorf("timeout exceeded before starting the query processing: %s", deadline.String())
@@ -796,9 +782,9 @@ func GetTSDBStatusWithFilters(qt *querytracer.Tracer, deadline searchutils.Deadl
 		return nil, err
 	}
 	date := uint64(tr.MinTimestamp) / (3600 * 24 * 1000)
-	status, err := vmstorage.GetTSDBStatusWithFiltersForDate(qt, tfss, date, topN, sq.MaxMetrics, deadline.Deadline())
+	status, err := vmstorage.GetTSDBStatus(qt, tfss, date, focusLabel, topN, sq.MaxMetrics, deadline.Deadline())
 	if err != nil {
-		return nil, fmt.Errorf("error during tsdb status with filters request: %w", err)
+		return nil, fmt.Errorf("error during tsdb status request: %w", err)
 	}
 	return status, nil
 }


@@ -490,12 +490,17 @@ func TSDBStatusHandler(qt *querytracer.Tracer, startTime time.Time, w http.Respo
 	date := fasttime.UnixDate()
 	dateStr := r.FormValue("date")
 	if len(dateStr) > 0 {
-		t, err := time.Parse("2006-01-02", dateStr)
-		if err != nil {
-			return fmt.Errorf("cannot parse `date` arg %q: %w", dateStr, err)
+		if dateStr == "0" {
+			date = 0
+		} else {
+			t, err := time.Parse("2006-01-02", dateStr)
+			if err != nil {
+				return fmt.Errorf("cannot parse `date` arg %q: %w", dateStr, err)
+			}
+			date = uint64(t.Unix()) / secsPerDay
 		}
-		date = uint64(t.Unix()) / secsPerDay
 	}
+	focusLabel := r.FormValue("focusLabel")
 	topN := 10
 	topNStr := r.FormValue("topN")
 	if len(topNStr) > 0 {
@@ -511,18 +516,14 @@ func TSDBStatusHandler(qt *querytracer.Tracer, startTime time.Time, w http.Respo
 		}
 		topN = n
 	}
-	var status *storage.TSDBStatus
-	if len(cp.filterss) == 0 {
-		status, err = netstorage.GetTSDBStatusForDate(qt, cp.deadline, date, topN, *maxTSDBStatusSeries)
-		if err != nil {
-			return fmt.Errorf(`cannot obtain tsdb status for date=%d, topN=%d: %w`, date, topN, err)
-		}
-	} else {
-		status, err = tsdbStatusWithMatches(qt, cp.filterss, date, topN, *maxTSDBStatusSeries, cp.deadline)
-		if err != nil {
-			return fmt.Errorf("cannot obtain tsdb status with matches for date=%d, topN=%d: %w", date, topN, err)
-		}
-	}
+	start := int64(date*secsPerDay) * 1000
+	end := int64((date+1)*secsPerDay)*1000 - 1
+	sq := storage.NewSearchQuery(start, end, cp.filterss, *maxTSDBStatusSeries)
+	status, err := netstorage.GetTSDBStatus(qt, sq, focusLabel, topN, cp.deadline)
+	if err != nil {
+		return fmt.Errorf("cannot obtain tsdb stats: %w", err)
+	}
 	w.Header().Set("Content-Type", "application/json")
 	bw := bufferedwriter.Get(w)
 	defer bufferedwriter.Put(bw)
@@ -533,17 +534,6 @@ func TSDBStatusHandler(qt *querytracer.Tracer, startTime time.Time, w http.Respo
 	return nil
 }
 
-func tsdbStatusWithMatches(qt *querytracer.Tracer, filterss [][]storage.TagFilter, date uint64, topN, maxMetrics int, deadline searchutils.Deadline) (*storage.TSDBStatus, error) {
-	start := int64(date*secsPerDay) * 1000
-	end := int64(date*secsPerDay+secsPerDay) * 1000
-	sq := storage.NewSearchQuery(start, end, filterss, maxMetrics)
-	status, err := netstorage.GetTSDBStatusWithFilters(qt, deadline, sq, topN)
-	if err != nil {
-		return nil, err
-	}
-	return status, nil
-}
-
 var tsdbStatusDuration = metrics.NewSummary(`vm_request_duration_seconds{path="/api/v1/status/tsdb"}`)
 
 // LabelsHandler processes /api/v1/labels request.


@@ -13,6 +13,7 @@ TSDBStatusResponse generates response for /api/v1/status/tsdb .
 "totalLabelValuePairs": {%dul= status.TotalLabelValuePairs %},
 "seriesCountByMetricName":{%= tsdbStatusEntries(status.SeriesCountByMetricName) %},
 "seriesCountByLabelName":{%= tsdbStatusEntries(status.SeriesCountByLabelName) %},
+"seriesCountByFocusLabelValue":{%= tsdbStatusEntries(status.SeriesCountByFocusLabelValue) %},
 "seriesCountByLabelValuePair":{%= tsdbStatusEntries(status.SeriesCountByLabelValuePair) %},
 "labelValueCountByLabelName":{%= tsdbStatusEntries(status.LabelValueCountByLabelName) %}
 }


@@ -44,102 +44,106 @@ func StreamTSDBStatusResponse(qw422016 *qt422016.Writer, status *storage.TSDBSta
 //line app/vmselect/prometheus/tsdb_status_response.qtpl:15
 	streamtsdbStatusEntries(qw422016, status.SeriesCountByLabelName)
 //line app/vmselect/prometheus/tsdb_status_response.qtpl:15
+	qw422016.N().S(`,"seriesCountByFocusLabelValue":`)
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:16
+	streamtsdbStatusEntries(qw422016, status.SeriesCountByFocusLabelValue)
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:16
 	qw422016.N().S(`,"seriesCountByLabelValuePair":`)
-//line app/vmselect/prometheus/tsdb_status_response.qtpl:16
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:17
 	streamtsdbStatusEntries(qw422016, status.SeriesCountByLabelValuePair)
-//line app/vmselect/prometheus/tsdb_status_response.qtpl:16
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:17
 	qw422016.N().S(`,"labelValueCountByLabelName":`)
-//line app/vmselect/prometheus/tsdb_status_response.qtpl:17
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:18
 	streamtsdbStatusEntries(qw422016, status.LabelValueCountByLabelName)
-//line app/vmselect/prometheus/tsdb_status_response.qtpl:17
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:18
 	qw422016.N().S(`}`)
-//line app/vmselect/prometheus/tsdb_status_response.qtpl:19
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:20
 	qt.Done()
-//line app/vmselect/prometheus/tsdb_status_response.qtpl:20
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:21
 	streamdumpQueryTrace(qw422016, qt)
-//line app/vmselect/prometheus/tsdb_status_response.qtpl:20
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:21
 	qw422016.N().S(`}`)
-//line app/vmselect/prometheus/tsdb_status_response.qtpl:22
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:23
 }
-//line app/vmselect/prometheus/tsdb_status_response.qtpl:22
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:23
 func WriteTSDBStatusResponse(qq422016 qtio422016.Writer, status *storage.TSDBStatus, qt *querytracer.Tracer) {
-//line app/vmselect/prometheus/tsdb_status_response.qtpl:22
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:23
 	qw422016 := qt422016.AcquireWriter(qq422016)
-//line app/vmselect/prometheus/tsdb_status_response.qtpl:22
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:23
 	StreamTSDBStatusResponse(qw422016, status, qt)
-//line app/vmselect/prometheus/tsdb_status_response.qtpl:22
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:23
 	qt422016.ReleaseWriter(qw422016)
-//line app/vmselect/prometheus/tsdb_status_response.qtpl:22
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:23
 }
-//line app/vmselect/prometheus/tsdb_status_response.qtpl:22
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:23
 func TSDBStatusResponse(status *storage.TSDBStatus, qt *querytracer.Tracer) string {
-//line app/vmselect/prometheus/tsdb_status_response.qtpl:22
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:23
 	qb422016 := qt422016.AcquireByteBuffer()
-//line app/vmselect/prometheus/tsdb_status_response.qtpl:22
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:23
 	WriteTSDBStatusResponse(qb422016, status, qt)
-//line app/vmselect/prometheus/tsdb_status_response.qtpl:22
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:23
 	qs422016 := string(qb422016.B)
-//line app/vmselect/prometheus/tsdb_status_response.qtpl:22
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:23
 	qt422016.ReleaseByteBuffer(qb422016)
-//line app/vmselect/prometheus/tsdb_status_response.qtpl:22
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:23
 	return qs422016
-//line app/vmselect/prometheus/tsdb_status_response.qtpl:22
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:23
 }
-//line app/vmselect/prometheus/tsdb_status_response.qtpl:24
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:25
 func streamtsdbStatusEntries(qw422016 *qt422016.Writer, a []storage.TopHeapEntry) {
-//line app/vmselect/prometheus/tsdb_status_response.qtpl:24
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:25
 	qw422016.N().S(`[`)
-//line app/vmselect/prometheus/tsdb_status_response.qtpl:26
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:27
 	for i, e := range a {
-//line app/vmselect/prometheus/tsdb_status_response.qtpl:26
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:27
 		qw422016.N().S(`{"name":`)
-//line app/vmselect/prometheus/tsdb_status_response.qtpl:28
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:29
 		qw422016.N().Q(e.Name)
-//line app/vmselect/prometheus/tsdb_status_response.qtpl:28
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:29
 		qw422016.N().S(`,"value":`)
-//line app/vmselect/prometheus/tsdb_status_response.qtpl:29
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:30
 		qw422016.N().D(int(e.Count))
-//line app/vmselect/prometheus/tsdb_status_response.qtpl:29
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:30
 		qw422016.N().S(`}`)
-//line app/vmselect/prometheus/tsdb_status_response.qtpl:31
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:32
 		if i+1 < len(a) {
-//line app/vmselect/prometheus/tsdb_status_response.qtpl:31
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:32
 			qw422016.N().S(`,`)
-//line app/vmselect/prometheus/tsdb_status_response.qtpl:31
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:32
 		}
-//line app/vmselect/prometheus/tsdb_status_response.qtpl:32
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:33
 	}
-//line app/vmselect/prometheus/tsdb_status_response.qtpl:32
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:33
 	qw422016.N().S(`]`)
-//line app/vmselect/prometheus/tsdb_status_response.qtpl:34
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:35
 }
-//line app/vmselect/prometheus/tsdb_status_response.qtpl:34
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:35
 func writetsdbStatusEntries(qq422016 qtio422016.Writer, a []storage.TopHeapEntry) {
-//line app/vmselect/prometheus/tsdb_status_response.qtpl:34
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:35
 	qw422016 := qt422016.AcquireWriter(qq422016)
-//line app/vmselect/prometheus/tsdb_status_response.qtpl:34
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:35
 	streamtsdbStatusEntries(qw422016, a)
-//line app/vmselect/prometheus/tsdb_status_response.qtpl:34
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:35
 	qt422016.ReleaseWriter(qw422016)
-//line app/vmselect/prometheus/tsdb_status_response.qtpl:34
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:35
 }
-//line app/vmselect/prometheus/tsdb_status_response.qtpl:34
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:35
 func tsdbStatusEntries(a []storage.TopHeapEntry) string {
-//line app/vmselect/prometheus/tsdb_status_response.qtpl:34
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:35
 	qb422016 := qt422016.AcquireByteBuffer()
-//line app/vmselect/prometheus/tsdb_status_response.qtpl:34
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:35
 	writetsdbStatusEntries(qb422016, a)
-//line app/vmselect/prometheus/tsdb_status_response.qtpl:34
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:35
 	qs422016 := string(qb422016.B)
-//line app/vmselect/prometheus/tsdb_status_response.qtpl:34
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:35
 	qt422016.ReleaseByteBuffer(qb422016)
-//line app/vmselect/prometheus/tsdb_status_response.qtpl:34
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:35
 	return qs422016
-//line app/vmselect/prometheus/tsdb_status_response.qtpl:34
+//line app/vmselect/prometheus/tsdb_status_response.qtpl:35
 }


@@ -215,18 +215,10 @@ func SearchGraphitePaths(tr storage.TimeRange, query []byte, maxPaths int, deadl
 	return paths, err
 }
 
-// GetTSDBStatusForDate returns TSDB status for the given date.
-func GetTSDBStatusForDate(qt *querytracer.Tracer, date uint64, topN, maxMetrics int, deadline uint64) (*storage.TSDBStatus, error) {
+// GetTSDBStatus returns TSDB status for given filters on the given date.
+func GetTSDBStatus(qt *querytracer.Tracer, tfss []*storage.TagFilters, date uint64, focusLabel string, topN, maxMetrics int, deadline uint64) (*storage.TSDBStatus, error) {
 	WG.Add(1)
-	status, err := Storage.GetTSDBStatusWithFiltersForDate(qt, nil, date, topN, maxMetrics, deadline)
-	WG.Done()
-	return status, err
-}
-
-// GetTSDBStatusWithFiltersForDate returns TSDB status for given filters on the given date.
-func GetTSDBStatusWithFiltersForDate(qt *querytracer.Tracer, tfss []*storage.TagFilters, date uint64, topN, maxMetrics int, deadline uint64) (*storage.TSDBStatus, error) {
-	WG.Add(1)
-	status, err := Storage.GetTSDBStatusWithFiltersForDate(qt, tfss, date, topN, maxMetrics, deadline)
+	status, err := Storage.GetTSDBStatus(qt, tfss, date, focusLabel, topN, maxMetrics, deadline)
 	WG.Done()
 	return status, err
 }


@@ -272,7 +272,8 @@ See the [example VMUI at VictoriaMetrics playground](https://play.victoriametric
 VictoriaMetrics provides an ability to explore time series cardinality at `cardinality` tab in [vmui](#vmui) in the following ways:
 - To identify metric names with the highest number of series.
-- To idnetify labels with the highest number of series.
+- To identify labels with the highest number of series.
+- To identify values with the highest number of series for the selected label (aka `focusLabel`).
 - To identify label=name pairs with the highest number of series.
 - To identify labels with the highest number of unique values.
@@ -1445,6 +1446,7 @@ VictoriaMetrics returns TSDB stats at `/api/v1/status/tsdb` page in the way simi
 * `topN=N` where `N` is the number of top entries to return in the response. By default top 10 entries are returned.
 * `date=YYYY-MM-DD` where `YYYY-MM-DD` is the date for collecting the stats. By default the stats is collected for the current day. Pass `date=1970-01-01` in order to collect global stats across all the days.
+* `focusLabel=LABEL_NAME` returns label values with the highest number of time series for the given `LABEL_NAME` in the `seriesCountByFocusLabelValue` list.
 * `match[]=SELECTOR` where `SELECTOR` is an arbitrary [time series selector](https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors) for series to take into account during stats calculation. By default all the series are taken into account.
 * `extra_label=LABEL=VALUE`. See [these docs](#prometheus-querying-api-enhancements) for more details.
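
As a sketch of how the new arg composes with the existing ones, the query string can be built like this in Go; the date, label name and series selector are hypothetical:

    package main

    import (
        "fmt"
        "net/url"
    )

    func main() {
        params := url.Values{}
        params.Set("date", "2022-06-14")      // stats for a single day
        params.Set("topN", "10")              // top 10 entries per list
        params.Set("focusLabel", "namespace") // fill seriesCountByFocusLabelValue for this label
        params.Add("match[]", `{job="node_exporter"}`)
        fmt.Println("/api/v1/status/tsdb?" + params.Encode())
    }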


@@ -1239,11 +1239,11 @@ func (is *indexSearch) getSeriesCount() (uint64, error) {
 	return metricIDsLen, nil
 }
 
-// GetTSDBStatusWithFiltersForDate returns topN entries for tsdb status for the given tfss and the given date.
-func (db *indexDB) GetTSDBStatusWithFiltersForDate(qt *querytracer.Tracer, tfss []*TagFilters, date uint64, topN, maxMetrics int, deadline uint64) (*TSDBStatus, error) {
+// GetTSDBStatus returns topN entries for tsdb status for the given tfss, date and focusLabel.
+func (db *indexDB) GetTSDBStatus(qt *querytracer.Tracer, tfss []*TagFilters, date uint64, focusLabel string, topN, maxMetrics int, deadline uint64) (*TSDBStatus, error) {
 	qtChild := qt.NewChild("collect tsdb stats in the current indexdb")
 	is := db.getIndexSearch(deadline)
-	status, err := is.getTSDBStatusWithFiltersForDate(qtChild, tfss, date, topN, maxMetrics)
+	status, err := is.getTSDBStatus(qtChild, tfss, date, focusLabel, topN, maxMetrics)
 	qtChild.Done()
 	db.putIndexSearch(is)
 	if err != nil {
@@ -1255,7 +1255,7 @@ func (db *indexDB) GetTSDBStatusWithFiltersForDate(qt *querytracer.Tracer, tfss
 	ok := db.doExtDB(func(extDB *indexDB) {
 		qtChild := qt.NewChild("collect tsdb stats in the previous indexdb")
 		is := extDB.getIndexSearch(deadline)
-		status, err = is.getTSDBStatusWithFiltersForDate(qtChild, tfss, date, topN, maxMetrics)
+		status, err = is.getTSDBStatus(qtChild, tfss, date, focusLabel, topN, maxMetrics)
 		qtChild.Done()
 		extDB.putIndexSearch(is)
 	})
@@ -1265,8 +1265,8 @@ func (db *indexDB) GetTSDBStatusWithFiltersForDate(qt *querytracer.Tracer, tfss
 	return status, nil
 }
 
-// getTSDBStatusWithFiltersForDate returns topN entries for tsdb status for the given tfss and the given date.
-func (is *indexSearch) getTSDBStatusWithFiltersForDate(qt *querytracer.Tracer, tfss []*TagFilters, date uint64, topN, maxMetrics int) (*TSDBStatus, error) {
+// getTSDBStatus returns topN entries for tsdb status for the given tfss, date and focusLabel.
+func (is *indexSearch) getTSDBStatus(qt *querytracer.Tracer, tfss []*TagFilters, date uint64, focusLabel string, topN, maxMetrics int) (*TSDBStatus, error) {
 	filter, err := is.searchMetricIDsWithFiltersOnDate(qt, tfss, date, maxMetrics)
 	if err != nil {
 		return nil, err
@@ -1281,12 +1281,14 @@ func (is *indexSearch) getTSDBStatusWithFiltersForDate(qt *querytracer.Tracer, t
 	dmis := is.db.s.getDeletedMetricIDs()
 	thSeriesCountByMetricName := newTopHeap(topN)
 	thSeriesCountByLabelName := newTopHeap(topN)
+	thSeriesCountByFocusLabelValue := newTopHeap(topN)
 	thSeriesCountByLabelValuePair := newTopHeap(topN)
 	thLabelValueCountByLabelName := newTopHeap(topN)
 	var tmp, prevLabelName, prevLabelValuePair []byte
 	var labelValueCountByLabelName, seriesCountByLabelValuePair uint64
 	var totalSeries, labelSeries, totalLabelValuePairs uint64
 	nameEqualBytes := []byte("__name__=")
+	focusLabelEqualBytes := []byte(focusLabel + "=")
 
 	loopsPaceLimiter := 0
 	nsPrefixExpected := byte(nsPrefixDateTagToMetricIDs)
@@ -1358,6 +1360,9 @@ func (is *indexSearch) getTSDBStatusWithFiltersForDate(qt *querytracer.Tracer, t
 			if bytes.HasPrefix(prevLabelValuePair, nameEqualBytes) {
 				thSeriesCountByMetricName.push(prevLabelValuePair[len(nameEqualBytes):], seriesCountByLabelValuePair)
 			}
+			if bytes.HasPrefix(prevLabelValuePair, focusLabelEqualBytes) {
+				thSeriesCountByFocusLabelValue.push(prevLabelValuePair[len(focusLabelEqualBytes):], seriesCountByLabelValuePair)
+			}
 			seriesCountByLabelValuePair = 0
 			labelValueCountByLabelName++
 			prevLabelValuePair = append(prevLabelValuePair[:0], labelValuePair...)
@@ -1377,13 +1382,17 @@ func (is *indexSearch) getTSDBStatusWithFiltersForDate(qt *querytracer.Tracer, t
 	if bytes.HasPrefix(prevLabelValuePair, nameEqualBytes) {
 		thSeriesCountByMetricName.push(prevLabelValuePair[len(nameEqualBytes):], seriesCountByLabelValuePair)
 	}
+	if bytes.HasPrefix(prevLabelValuePair, focusLabelEqualBytes) {
+		thSeriesCountByFocusLabelValue.push(prevLabelValuePair[len(focusLabelEqualBytes):], seriesCountByLabelValuePair)
+	}
 	status := &TSDBStatus{
 		TotalSeries:                  totalSeries,
 		TotalLabelValuePairs:         totalLabelValuePairs,
 		SeriesCountByMetricName:      thSeriesCountByMetricName.getSortedResult(),
 		SeriesCountByLabelName:       thSeriesCountByLabelName.getSortedResult(),
+		SeriesCountByFocusLabelValue: thSeriesCountByFocusLabelValue.getSortedResult(),
 		SeriesCountByLabelValuePair:  thSeriesCountByLabelValuePair.getSortedResult(),
 		LabelValueCountByLabelName:   thLabelValueCountByLabelName.getSortedResult(),
 	}
 	return status, nil
 }
@@ -1392,12 +1401,13 @@ func (is *indexSearch) getTSDBStatusWithFiltersForDate(qt *querytracer.Tracer, t
 //
 // See https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-stats
 type TSDBStatus struct {
 	TotalSeries                  uint64
 	TotalLabelValuePairs         uint64
 	SeriesCountByMetricName      []TopHeapEntry
 	SeriesCountByLabelName       []TopHeapEntry
+	SeriesCountByFocusLabelValue []TopHeapEntry
 	SeriesCountByLabelValuePair  []TopHeapEntry
 	LabelValueCountByLabelName   []TopHeapEntry
 }
 
 func (status *TSDBStatus) hasEntries() bool {
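
A standalone, simplified sketch of the prefix-matching idea used by getTSDBStatus above: values of the focus label are collected by stripping the "focusLabel=" prefix from each label=value pair and keeping the entries with the highest series counts. It replaces the internal topHeap with a plain sort and is not the actual indexdb code:

    package main

    import (
        "fmt"
        "sort"
        "strings"
    )

    type topEntry struct {
        Name  string
        Count uint64
    }

    // topFocusLabelValues keeps the topN focus-label values with the highest
    // series counts from "label=value" -> series count pairs.
    func topFocusLabelValues(pairs map[string]uint64, focusLabel string, topN int) []topEntry {
        prefix := focusLabel + "="
        var entries []topEntry
        for labelValuePair, count := range pairs {
            if strings.HasPrefix(labelValuePair, prefix) {
                entries = append(entries, topEntry{
                    Name:  labelValuePair[len(prefix):],
                    Count: count,
                })
            }
        }
        sort.Slice(entries, func(i, j int) bool { return entries[i].Count > entries[j].Count })
        if len(entries) > topN {
            entries = entries[:topN]
        }
        return entries
    }

    func main() {
        pairs := map[string]uint64{
            "__name__=http_requests_total": 500,
            "job=node_exporter":            300,
            "job=vmagent":                  200,
            "instance=host-1":              120,
        }
        // Prints the top values of the "job" label by series count.
        fmt.Println(topFocusLabelValues(pairs, "job", 10))
    }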


@@ -1807,10 +1807,10 @@ func TestSearchTSIDWithTimeRange(t *testing.T) {
 		t.Fatalf("expected %d time series for all days, got %d time series", metricsPerDay*days, len(matchedTSIDs))
 	}
 
-	// Check GetTSDBStatusWithFiltersForDate with nil filters.
-	status, err := db.GetTSDBStatusWithFiltersForDate(nil, nil, baseDate, 5, 1e6, noDeadline)
+	// Check GetTSDBStatus with nil filters.
+	status, err := db.GetTSDBStatus(nil, nil, baseDate, "day", 5, 1e6, noDeadline)
 	if err != nil {
-		t.Fatalf("error in GetTSDBStatusWithFiltersForDate with nil filters: %s", err)
+		t.Fatalf("error in GetTSDBStatus with nil filters: %s", err)
 	}
 	if !status.hasEntries() {
 		t.Fatalf("expecting non-empty TSDB status")
@@ -1845,6 +1845,15 @@ func TestSearchTSIDWithTimeRange(t *testing.T) {
 	if !reflect.DeepEqual(status.SeriesCountByLabelName, expectedSeriesCountByLabelName) {
 		t.Fatalf("unexpected SeriesCountByLabelName;\ngot\n%v\nwant\n%v", status.SeriesCountByLabelName, expectedSeriesCountByLabelName)
 	}
+	expectedSeriesCountByFocusLabelValue := []TopHeapEntry{
+		{
+			Name:  "0",
+			Count: 1000,
+		},
+	}
+	if !reflect.DeepEqual(status.SeriesCountByFocusLabelValue, expectedSeriesCountByFocusLabelValue) {
+		t.Fatalf("unexpected SeriesCountByFocusLabelValue;\ngot\n%v\nwant\n%v", status.SeriesCountByFocusLabelValue, expectedSeriesCountByFocusLabelValue)
+	}
 	expectedLabelValueCountByLabelName := []TopHeapEntry{
 		{
 			Name:  "uniqueid",
@@ -1900,14 +1909,14 @@ func TestSearchTSIDWithTimeRange(t *testing.T) {
 		t.Fatalf("unexpected TotalLabelValuePairs; got %d; want %d", status.TotalLabelValuePairs, expectedLabelValuePairs)
 	}
 
-	// Check GetTSDBStatusWithFiltersForDate with non-nil filter, which matches all the series
+	// Check GetTSDBStatus with non-nil filter, which matches all the series
 	tfs = NewTagFilters()
 	if err := tfs.Add([]byte("day"), []byte("0"), false, false); err != nil {
 		t.Fatalf("cannot add filter: %s", err)
 	}
-	status, err = db.GetTSDBStatusWithFiltersForDate(nil, []*TagFilters{tfs}, baseDate, 5, 1e6, noDeadline)
+	status, err = db.GetTSDBStatus(nil, []*TagFilters{tfs}, baseDate, "", 5, 1e6, noDeadline)
 	if err != nil {
-		t.Fatalf("error in GetTSDBStatusWithFiltersForDate: %s", err)
+		t.Fatalf("error in GetTSDBStatus: %s", err)
 	}
 	if !status.hasEntries() {
 		t.Fatalf("expecting non-empty TSDB status")
@@ -1930,10 +1939,10 @@ func TestSearchTSIDWithTimeRange(t *testing.T) {
 		t.Fatalf("unexpected TotalLabelValuePairs; got %d; want %d", status.TotalLabelValuePairs, expectedLabelValuePairs)
 	}
 
-	// Check GetTSDBStatusWithFiltersOnDate, which matches all the series on a global time range
-	status, err = db.GetTSDBStatusWithFiltersForDate(nil, nil, 0, 5, 1e6, noDeadline)
+	// Check GetTSDBStatus, which matches all the series on a global time range
+	status, err = db.GetTSDBStatus(nil, nil, 0, "day", 5, 1e6, noDeadline)
 	if err != nil {
-		t.Fatalf("error in GetTSDBStatusWithFiltersForDate: %s", err)
+		t.Fatalf("error in GetTSDBStatus: %s", err)
 	}
 	if !status.hasEntries() {
 		t.Fatalf("expecting non-empty TSDB status")
@@ -1955,15 +1964,40 @@ func TestSearchTSIDWithTimeRange(t *testing.T) {
 	if status.TotalLabelValuePairs != expectedLabelValuePairs {
 		t.Fatalf("unexpected TotalLabelValuePairs; got %d; want %d", status.TotalLabelValuePairs, expectedLabelValuePairs)
 	}
+	expectedSeriesCountByFocusLabelValue = []TopHeapEntry{
+		{
+			Name:  "0",
+			Count: 1000,
+		},
+		{
+			Name:  "1",
+			Count: 1000,
+		},
+		{
+			Name:  "2",
+			Count: 1000,
+		},
+		{
+			Name:  "3",
+			Count: 1000,
+		},
+		{
+			Name:  "4",
+			Count: 1000,
+		},
+	}
+	if !reflect.DeepEqual(status.SeriesCountByFocusLabelValue, expectedSeriesCountByFocusLabelValue) {
+		t.Fatalf("unexpected SeriesCountByFocusLabelValue;\ngot\n%v\nwant\n%v", status.SeriesCountByFocusLabelValue, expectedSeriesCountByFocusLabelValue)
+	}
 
-	// Check GetTSDBStatusWithFiltersForDate with non-nil filter, which matches only 3 series
+	// Check GetTSDBStatus with non-nil filter, which matches only 3 series
 	tfs = NewTagFilters()
 	if err := tfs.Add([]byte("uniqueid"), []byte("0|1|3"), false, true); err != nil {
 		t.Fatalf("cannot add filter: %s", err)
 	}
-	status, err = db.GetTSDBStatusWithFiltersForDate(nil, []*TagFilters{tfs}, baseDate, 5, 1e6, noDeadline)
+	status, err = db.GetTSDBStatus(nil, []*TagFilters{tfs}, baseDate, "", 5, 1e6, noDeadline)
 	if err != nil {
-		t.Fatalf("error in GetTSDBStatusWithFiltersForDate: %s", err)
+		t.Fatalf("error in GetTSDBStatus: %s", err)
 	}
 	if !status.hasEntries() {
 		t.Fatalf("expecting non-empty TSDB status")
@@ -1986,10 +2020,10 @@ func TestSearchTSIDWithTimeRange(t *testing.T) {
 		t.Fatalf("unexpected TotalLabelValuePairs; got %d; want %d", status.TotalLabelValuePairs, expectedLabelValuePairs)
 	}
 
-	// Check GetTSDBStatusWithFiltersForDate with non-nil filter on global time range, which matches only 15 series
-	status, err = db.GetTSDBStatusWithFiltersForDate(nil, []*TagFilters{tfs}, 0, 5, 1e6, noDeadline)
+	// Check GetTSDBStatus with non-nil filter on global time range, which matches only 15 series
+	status, err = db.GetTSDBStatus(nil, []*TagFilters{tfs}, 0, "", 5, 1e6, noDeadline)
 	if err != nil {
-		t.Fatalf("error in GetTSDBStatusWithFiltersForDate: %s", err)
+		t.Fatalf("error in GetTSDBStatus: %s", err)
 	}
 	if !status.hasEntries() {
 		t.Fatalf("expecting non-empty TSDB status")


@@ -1467,9 +1467,9 @@ func (s *Storage) GetSeriesCount(deadline uint64) (uint64, error) {
 	return s.idb().GetSeriesCount(deadline)
 }
 
-// GetTSDBStatusWithFiltersForDate returns TSDB status data for /api/v1/status/tsdb with match[] filters.
-func (s *Storage) GetTSDBStatusWithFiltersForDate(qt *querytracer.Tracer, tfss []*TagFilters, date uint64, topN, maxMetrics int, deadline uint64) (*TSDBStatus, error) {
-	return s.idb().GetTSDBStatusWithFiltersForDate(qt, tfss, date, topN, maxMetrics, deadline)
+// GetTSDBStatus returns TSDB status data for /api/v1/status/tsdb
+func (s *Storage) GetTSDBStatus(qt *querytracer.Tracer, tfss []*TagFilters, date uint64, focusLabel string, topN, maxMetrics int, deadline uint64) (*TSDBStatus, error) {
+	return s.idb().GetTSDBStatus(qt, tfss, date, focusLabel, topN, maxMetrics, deadline)
 }
 
 // MetricRow is a metric to insert into storage.