mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git
lib/promscrape: expose promscrape_series_limit_max_series and promscrape_series_limit_current_series metrics for each scrape target with the unique series limiter enabled
parent 9761b7f3ef
commit c055bc478c
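Once a target has the series limiter enabled, the two new gauges show up on the vmagent `/metrics` page next to the existing `promscrape_series_limit_rows_dropped_total` counter. A purely illustrative exposition snippet (job, target URL and values are made up; only the metric and label names come from the diff below):

```
promscrape_series_limit_current_series{scrape_job_original="node",scrape_job="node",scrape_target="http://host42:9100/metrics"} 1203
promscrape_series_limit_max_series{scrape_job_original="node",scrape_job="node",scrape_target="http://host42:9100/metrics"} 5000
promscrape_series_limit_rows_dropped_total{scrape_job_original="node",scrape_job="node",scrape_target="http://host42:9100/metrics"} 0
```

The ratio `promscrape_series_limit_current_series / promscrape_series_limit_max_series` makes it easy to alert before a target starts dropping new series.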
@@ -12,6 +12,7 @@ sort: 15

 * FEATURE: vmagent: automatically switch to [stream parsing mode](https://docs.victoriametrics.com/vmagent.html#stream-parsing-mode) if the response from the given target exceeds the command-line flag value `-promscrape.minResponseSizeForStreamParse`. This should reduce memory usage when `vmagent` scrapes targets with non-uniform response sizes (this is the case in Kubernetes monitoring).
 * FEATURE: vmagent: send Prometheus-like staleness marks in [stream parsing mode](https://docs.victoriametrics.com/vmagent.html#stream-parsing-mode). Previously staleness marks weren't sent in stream parsing mode. See [these docs](https://docs.victoriametrics.com/vmagent.html#prometheus-staleness-markers) for details.
 * FEATURE: vmagent: properly calculate `scrape_series_added` metric for targets in [stream parsing mode](https://docs.victoriametrics.com/vmagent.html#stream-parsing-mode). Previously it was set to 0 in stream parsing mode. See [more details about this metric](https://prometheus.io/docs/concepts/jobs_instances/#automatically-generated-labels-and-time-series).
+* FEATURE: vmagent: expose `promscrape_series_limit_max_series` and `promscrape_series_limit_current_series` metrics at `http://vmagent:8429/metrics` for scrape targets with the [enabled series limiter](https://docs.victoriametrics.com/vmagent.html#cardinality-limiter).
 * FEATURE: vmagent: return error if `sample_limit` or `series_limit` options are set when [stream parsing mode](https://docs.victoriametrics.com/vmagent.html#stream-parsing-mode) is enabled, since these limits cannot be applied in stream parsing mode.
 * FEATURE: add trigonometric functions, which are going to be added in [Prometheus 2.31](https://github.com/prometheus/prometheus/pull/9239): [acosh](https://docs.victoriametrics.com/MetricsQL.html#acosh), [asinh](https://docs.victoriametrics.com/MetricsQL.html#asinh), [atan](https://docs.victoriametrics.com/MetricsQL.html#atan), [atanh](https://docs.victoriametrics.com/MetricsQL.html#atanh), [cosh](https://docs.victoriametrics.com/MetricsQL.html#cosh), [deg](https://docs.victoriametrics.com/MetricsQL.html#deg), [rad](https://docs.victoriametrics.com/MetricsQL.html#rad), [sinh](https://docs.victoriametrics.com/MetricsQL.html#sinh), [tan](https://docs.victoriametrics.com/MetricsQL.html#tan), [tanh](https://docs.victoriametrics.com/MetricsQL.html#tanh). Also add `atan2` binary operator. See [this pull request](https://github.com/prometheus/prometheus/pull/9248).
 * FEATURE: consistently return the same set of time series from [limitk](https://docs.victoriametrics.com/MetricsQL.html#limitk) function. This improves the usability of periodically refreshed graphs.
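The new gauges in the diff below use the callback flavor of `metrics.GetOrCreateGauge` from the `github.com/VictoriaMetrics/metrics` package: the value is computed by a function each time the `/metrics` page is scraped, so nothing has to be stored or kept in sync. A minimal self-contained sketch of that pattern (the metric name and the `queue` channel are invented for illustration):

```go
package main

import (
	"log"
	"net/http"

	"github.com/VictoriaMetrics/metrics"
)

func main() {
	queue := make(chan int, 100)

	// The callback runs on every scrape of /metrics, so the reported value
	// is always current without any explicit Set() bookkeeping.
	_ = metrics.GetOrCreateGauge(`example_queue_length{queue="main"}`, func() float64 {
		return float64(len(queue))
	})

	http.HandleFunc("/metrics", func(w http.ResponseWriter, r *http.Request) {
		metrics.WritePrometheus(w, false)
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```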
@@ -199,6 +199,9 @@ type scrapeWork struct {
 	// Optional limiter on the number of unique series per scrape target.
 	seriesLimiter *bloomfilter.Limiter

+	// Optional counter on the number of dropped samples if the limit on the number of unique series is set.
+	seriesLimiterRowsDroppedTotal *metrics.Counter
+
 	// prevBodyLen contains the previous response body length for the given scrape work.
 	// It is used as a hint in order to reduce memory usage for body buffers.
 	prevBodyLen int
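Caching the counter on the struct matters on the hot path: the old code (removed in the last hunk below) rebuilt the metric name with `fmt.Sprintf` and looked it up in the registry for every dropped sample. A hedged micro-sketch of the difference, using a hypothetical `example_rows_dropped_total` metric:

```go
package main

import (
	"fmt"

	"github.com/VictoriaMetrics/metrics"
)

func main() {
	name := fmt.Sprintf(`example_rows_dropped_total{scrape_target=%q}`, "http://host:9100/metrics")

	// Before: format the name and hit the registry on every single increment.
	metrics.GetOrCreateCounter(name).Inc()

	// After: resolve the counter once, keep the pointer, increment cheaply.
	dropped := metrics.GetOrCreateCounter(name)
	for i := 0; i < 3; i++ {
		dropped.Inc()
	}
}
```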
@@ -302,6 +305,13 @@ func (sw *scrapeWork) run(stopCh <-chan struct{}) {
 			t := time.Now().UnixNano() / 1e6
 			sw.sendStaleSeries("", t, true)
 			if sw.seriesLimiter != nil {
+				job := sw.Config.Job()
+				metrics.UnregisterMetric(fmt.Sprintf(`promscrape_series_limit_rows_dropped_total{scrape_job_original=%q,scrape_job=%q,scrape_target=%q}`,
+					sw.Config.jobNameOriginal, job, sw.Config.ScrapeURL))
+				metrics.UnregisterMetric(fmt.Sprintf(`promscrape_series_limit_max_series{scrape_job_original=%q,scrape_job=%q,scrape_target=%q}`,
+					sw.Config.jobNameOriginal, job, sw.Config.ScrapeURL))
+				metrics.UnregisterMetric(fmt.Sprintf(`promscrape_series_limit_current_series{scrape_job_original=%q,scrape_job=%q,scrape_target=%q}`,
+					sw.Config.jobNameOriginal, job, sw.Config.ScrapeURL))
 				sw.seriesLimiter.MustStop()
 			}
 			return
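These gauges are registered per target, so they have to be unregistered when the target is stopped; otherwise vmagent would keep exporting series for targets that no longer exist. Note that `metrics.UnregisterMetric` takes the exact string used at registration time, labels included, which is why the shutdown path above rebuilds the same `fmt.Sprintf` names. A sketch of that register/unregister pairing (the `target` type and metric name are hypothetical stand-ins):

```go
package main

import (
	"fmt"

	"github.com/VictoriaMetrics/metrics"
)

// target is a hypothetical stand-in for scrapeWork.
type target struct {
	job, url string
}

// metricName must produce byte-for-byte identical names in start and stop.
func (t *target) metricName() string {
	return fmt.Sprintf(`example_current_series{scrape_job=%q,scrape_target=%q}`, t.job, t.url)
}

func (t *target) start(current func() float64) {
	_ = metrics.GetOrCreateGauge(t.metricName(), current)
}

func (t *target) stop() {
	// Unregistering removes the per-target series from /metrics immediately.
	metrics.UnregisterMetric(t.metricName())
}

func main() {
	t := &target{job: "demo", url: "http://example.com/metrics"}
	t.start(func() float64 { return 42 })
	t.stop()
}
```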
@@ -615,22 +625,31 @@ func (sw *scrapeWork) applySeriesLimit(wc *writeRequestCtx) bool {
 		seriesLimit = sw.Config.SeriesLimit
 	}
 	if sw.seriesLimiter == nil && seriesLimit > 0 {
+		job := sw.Config.Job()
 		sw.seriesLimiter = bloomfilter.NewLimiter(seriesLimit, 24*time.Hour)
+		sw.seriesLimiterRowsDroppedTotal = metrics.GetOrCreateCounter(fmt.Sprintf(`promscrape_series_limit_rows_dropped_total{scrape_job_original=%q,scrape_job=%q,scrape_target=%q}`,
+			sw.Config.jobNameOriginal, job, sw.Config.ScrapeURL))
+		_ = metrics.GetOrCreateGauge(fmt.Sprintf(`promscrape_series_limit_max_series{scrape_job_original=%q,scrape_job=%q,scrape_target=%q}`,
+			sw.Config.jobNameOriginal, job, sw.Config.ScrapeURL), func() float64 {
+			return float64(sw.seriesLimiter.MaxItems())
+		})
+		_ = metrics.GetOrCreateGauge(fmt.Sprintf(`promscrape_series_limit_current_series{scrape_job_original=%q,scrape_job=%q,scrape_target=%q}`,
+			sw.Config.jobNameOriginal, job, sw.Config.ScrapeURL), func() float64 {
+			return float64(sw.seriesLimiter.CurrentItems())
+		})
 	}
 	hsl := sw.seriesLimiter
 	if hsl == nil {
 		return false
 	}
 	dstSeries := wc.writeRequest.Timeseries[:0]
-	job := sw.Config.Job()
 	limitExceeded := false
 	for _, ts := range wc.writeRequest.Timeseries {
 		h := sw.getLabelsHash(ts.Labels)
 		if !hsl.Add(h) {
 			// The limit on the number of hourly unique series per scrape target has been exceeded.
 			// Drop the metric.
-			metrics.GetOrCreateCounter(fmt.Sprintf(`promscrape_series_limit_rows_dropped_total{scrape_job_original=%q,scrape_job=%q,scrape_target=%q}`,
-				sw.Config.jobNameOriginal, job, sw.Config.ScrapeURL)).Inc()
+			sw.seriesLimiterRowsDroppedTotal.Inc()
 			limitExceeded = true
 			continue
 		}
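`bloomfilter.Limiter` lives in VictoriaMetrics' internal `lib/bloomfilter` package, so as a mental model only: `Add` reports whether a label-set hash fits under the per-target limit, and everything over the limit is dropped and counted. A simplified, exact stand-in (the real limiter is an approximate, time-rotated bloom filter, refreshed per `NewLimiter(seriesLimit, 24*time.Hour)` above, not a map):

```go
package main

import "fmt"

// seriesLimiter is a simplified stand-in for bloomfilter.Limiter: exact and
// map-based here, approximate and memory-bounded in the real implementation.
type seriesLimiter struct {
	maxItems int
	seen     map[uint64]struct{}
}

func newSeriesLimiter(maxItems int) *seriesLimiter {
	return &seriesLimiter{maxItems: maxItems, seen: make(map[uint64]struct{}, maxItems)}
}

// Add returns false for a previously unseen series that would exceed the limit.
func (sl *seriesLimiter) Add(h uint64) bool {
	if _, ok := sl.seen[h]; ok {
		return true // known series are always accepted
	}
	if len(sl.seen) >= sl.maxItems {
		return false // over the limit: the caller drops the sample
	}
	sl.seen[h] = struct{}{}
	return true
}

// MaxItems and CurrentItems mirror the values exported by the two new gauges.
func (sl *seriesLimiter) MaxItems() int     { return sl.maxItems }
func (sl *seriesLimiter) CurrentItems() int { return len(sl.seen) }

func main() {
	sl := newSeriesLimiter(2)
	for _, h := range []uint64{101, 202, 101, 303} {
		fmt.Println(h, sl.Add(h)) // 101 true, 202 true, 101 true, 303 false
	}
}
```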