Mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git
lib/promscrape: apply `sample_limit` after metric relabeling is applied, as Prometheus does
See the description of the `sample_limit` option in the Prometheus docs: "Per-scrape limit on number of scraped samples that will be accepted. If more than this number of samples are present after metric relabeling the entire scrape will be treated as failed. 0 means no limit." https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config
commit 0554430d7e (parent 7b66c8cbf8)
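To make the behavioral change concrete, here is a minimal, self-contained Go sketch of the enforcement order this commit adopts: metric relabeling runs first, the post-relabeling sample count is compared against the limit, and the whole scrape is rejected when the limit is exceeded. The `sample` type and the `applyMetricRelabeling`/`enforceSampleLimit` helpers are hypothetical illustrations, not code from this repository.

package main

import (
	"fmt"
	"strings"
)

type sample struct {
	name  string
	value float64
}

// applyMetricRelabeling stands in for metric_relabel_configs processing;
// here it simply drops samples whose name starts with "go_".
func applyMetricRelabeling(samples []sample) []sample {
	kept := samples[:0]
	for _, s := range samples {
		if !strings.HasPrefix(s.name, "go_") {
			kept = append(kept, s)
		}
	}
	return kept
}

// enforceSampleLimit mirrors the order adopted by this commit: the limit is
// checked only after relabeling, and exceeding it fails the whole scrape.
// sampleLimit == 0 means "no limit".
func enforceSampleLimit(scraped []sample, sampleLimit int) ([]sample, bool) {
	relabeled := applyMetricRelabeling(scraped)
	if sampleLimit > 0 && len(relabeled) > sampleLimit {
		return nil, false // the entire scrape is treated as failed (up == 0)
	}
	return relabeled, true
}

func main() {
	scraped := []sample{{"http_requests_total", 42}, {"go_goroutines", 10}}
	kept, ok := enforceSampleLimit(scraped, 1)
	fmt.Println(len(kept), ok) // 1 true: one sample survives relabeling, so the limit of 1 is not exceeded
}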
@@ -11,9 +11,9 @@
* FEATURE: vmagent: accept `scrape_offset` option at `scrape_config`. This option may be useful when scrapes must start at the specified offset of every scrape interval (see the illustrative timing sketch after this changelog excerpt). See [these docs](https://victoriametrics.github.io/vmagent.html#troubleshooting) for details.
* FEATURE: vmauth: allow using regexp paths in `url_map`. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1112) for details.
* BUGFIX: vmagent: prevent a high CPU usage bug during failing scrapes with a small `scrape_timeout` (less than a few seconds).
* BUGFIX: vmagent: reduce memory usage when Kubernetes service discovery is used in a big number of distinct jobs by sharing the cache. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1113
* BUGFIX: vmagent: reduce memory usage when Kubernetes service discovery is used in a big number of distinct scrape config jobs by sharing the Kubernetes object cache. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1113
* BUGFIX: vmagent: apply `sample_limit` only after `metric_relabel_configs` are applied, as Prometheus does. Previously `sample_limit` was applied before metric relabeling.
* BUGFIX: avoid `duplicate time series` error if `prometheus_buckets()` covers a time range with distinct sets of buckets.
* BUGFIX: prevent exponent overflow when processing extremely small values close to zero such as `2.964393875E-314`. See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1114
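Illustrative timing sketch referenced in the `scrape_offset` entry above: a guess at how scrape start times could be aligned to a fixed offset within every scrape interval. This is not vmagent's actual scheduling code; `nextScrapeTime` and its alignment rule are assumptions made for the example.

package main

import (
	"fmt"
	"time"
)

// nextScrapeTime returns the next scrape timestamp aligned to `offset`
// within every `interval`. The exact alignment rules in vmagent may differ.
func nextScrapeTime(now time.Time, interval, offset time.Duration) time.Time {
	// Start of the current interval window.
	windowStart := now.Truncate(interval)
	next := windowStart.Add(offset)
	if !next.After(now) {
		// The offset for the current window has already passed;
		// schedule the scrape in the next window.
		next = next.Add(interval)
	}
	return next
}

func main() {
	now := time.Date(2021, time.March, 1, 12, 0, 42, 0, time.UTC)
	// With a 1m interval and a 10s offset, the next scrape lands at 12:01:10 UTC.
	fmt.Println(nextScrapeTime(now, time.Minute, 10*time.Second))
}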
@@ -290,27 +290,15 @@ func (sw *scrapeWork) scrapeInternal(scrapeTimestamp, realTimestamp int64) error
 	srcRows := wc.rows.Rows
 	samplesScraped := len(srcRows)
 	scrapedSamples.Update(float64(samplesScraped))
-	if sw.Config.SampleLimit > 0 && samplesScraped > sw.Config.SampleLimit {
-		srcRows = srcRows[:0]
+	for i := range srcRows {
+		sw.addRowToTimeseries(wc, &srcRows[i], scrapeTimestamp, true)
+	}
+	samplesPostRelabeling := len(wc.writeRequest.Timeseries)
+	if sw.Config.SampleLimit > 0 && samplesPostRelabeling > sw.Config.SampleLimit {
+		wc.resetNoRows()
 		up = 0
 		scrapesSkippedBySampleLimit.Inc()
 	}
-	samplesPostRelabeling := 0
-	for i := range srcRows {
-		sw.addRowToTimeseries(wc, &srcRows[i], scrapeTimestamp, true)
-		if len(wc.labels) > 40000 {
-			// Limit the maximum size of wc.writeRequest.
-			// This should reduce memory usage when scraping targets with millions of metrics and/or labels.
-			// For example, when scraping /federate handler from Prometheus - see https://prometheus.io/docs/prometheus/latest/federation/
-			samplesPostRelabeling += len(wc.writeRequest.Timeseries)
-			sw.updateSeriesAdded(wc)
-			startTime := time.Now()
-			sw.PushData(&wc.writeRequest)
-			pushDataDuration.UpdateDuration(startTime)
-			wc.resetNoRows()
-		}
-	}
-	samplesPostRelabeling += len(wc.writeRequest.Timeseries)
 	sw.updateSeriesAdded(wc)
 	seriesAdded := sw.finalizeSeriesAdded(samplesPostRelabeling)
 	sw.addAutoTimeseries(wc, "up", float64(up), scrapeTimestamp)
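The test hunk below checks the auto-generated metrics for a scrape that exceeds `sample_limit` after this change: relabeling still runs (so `scrape_samples_post_metric_relabeling` becomes 2), but the scrape is marked as failed (`up 0`) and no series are added. As a reading aid, here is a hedged Go sketch that assembles those auto-metric lines; the `scrapeOutcome` struct and the `autoMetrics` helper are invented for illustration, and only the metric names and example values come from the diff.

package main

import "fmt"

// scrapeOutcome holds the quantities the auto-metrics are derived from.
type scrapeOutcome struct {
	up                    int     // 0 when the scrape is treated as failed
	samplesScraped        int     // samples returned by the scrape target
	durationSeconds       float64 // scrape duration
	samplesPostRelabeling int     // samples left after metric_relabel_configs
	seriesAdded           int     // new series registered by this scrape
}

// autoMetrics renders the auto-generated series appended to every scrape.
func autoMetrics(o scrapeOutcome, ts int64) []string {
	return []string{
		fmt.Sprintf("up %d %d", o.up, ts),
		fmt.Sprintf("scrape_samples_scraped %d %d", o.samplesScraped, ts),
		fmt.Sprintf("scrape_duration_seconds %g %d", o.durationSeconds, ts),
		fmt.Sprintf("scrape_samples_post_metric_relabeling %d %d", o.samplesPostRelabeling, ts),
		fmt.Sprintf("scrape_series_added %d %d", o.seriesAdded, ts),
	}
}

func main() {
	// A scrape exceeding sample_limit after this commit: relabeling ran
	// (2 samples survive), but the scrape is failed and no series are added.
	o := scrapeOutcome{up: 0, samplesScraped: 2, durationSeconds: 0, samplesPostRelabeling: 2, seriesAdded: 0}
	for _, line := range autoMetrics(o, 123) {
		fmt.Println(line)
	}
}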
@@ -332,7 +332,7 @@ func TestScrapeWorkScrapeInternalSuccess(t *testing.T) {
 		up 0 123
 		scrape_samples_scraped 2 123
 		scrape_duration_seconds 0 123
-		scrape_samples_post_metric_relabeling 0 123
+		scrape_samples_post_metric_relabeling 2 123
 		scrape_series_added 0 123
 	`)
 }