Mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git, synced 2024-12-16 00:41:24 +01:00
lib/promscrape: fix applying sample_limit when scraping targets with a big number of metrics

This has been broken since commit 7785869ccc.
commit a01c56104a
parent deff8d419a
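Why the old check missed the limit: since the chunked flushing introduced in 7785869ccc, wc.writeRequest is pushed and reset whenever more than 10000 labels accumulate, so a post-loop check on len(wc.writeRequest.Timeseries) only ever sees the final, partially filled chunk. A minimal standalone sketch of that accounting (the names, thresholds and counts below are illustrative, not the promscrape API):

```go
package main

import "fmt"

func main() {
	const scrapedRows = 25000    // a target exposing many metrics
	const sampleLimit = 20000    // hypothetical sample_limit for this target
	const flushThreshold = 10000 // mirrors the `len(wc.labels) > 10000` flush in the diff

	buffered := 0 // rows currently sitting in the in-memory write request
	for i := 0; i < scrapedRows; i++ {
		buffered++
		if buffered > flushThreshold {
			// The chunk is pushed downstream and the buffer is reset,
			// so these rows are invisible to any later check on the buffer.
			buffered = 0
		}
	}

	// Old (broken) accounting: only the tail chunk is inspected after the loop.
	fmt.Printf("post-loop buffer: %d rows, limit %d exceeded: %v\n",
		buffered, sampleLimit, buffered > sampleLimit)

	// Fixed accounting: the total number of scraped rows is known upfront
	// and can be compared against the limit before any data is pushed.
	fmt.Printf("samples scraped: %d rows, limit %d exceeded: %v\n",
		scrapedRows, sampleLimit, scrapedRows > sampleLimit)
}
```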
```diff
@@ -228,24 +228,26 @@ func (sw *scrapeWork) scrapeInternal(scrapeTimestamp, realTimestamp int64) error
 	srcRows := wc.rows.Rows
 	samplesScraped := len(srcRows)
 	scrapedSamples.Update(float64(samplesScraped))
+	if sw.Config.SampleLimit > 0 && samplesScraped > sw.Config.SampleLimit {
+		srcRows = srcRows[:0]
+		up = 0
+		scrapesSkippedBySampleLimit.Inc()
+	}
+	samplesPostRelabeling := 0
 	for i := range srcRows {
 		sw.addRowToTimeseries(wc, &srcRows[i], scrapeTimestamp, true)
 		if len(wc.labels) > 10000 {
 			// Limit the maximum size of wc.writeRequest.
 			// This should reduce memory usage when scraping targets with millions of metrics and/or labels.
 			// For example, when scraping /federate handler from Prometheus - see https://prometheus.io/docs/prometheus/latest/federation/
+			samplesPostRelabeling += len(wc.writeRequest.Timeseries)
 			startTime := time.Now()
 			sw.PushData(&wc.writeRequest)
 			pushDataDuration.UpdateDuration(startTime)
 			wc.reset()
 		}
 	}
-	if sw.Config.SampleLimit > 0 && len(wc.writeRequest.Timeseries) > sw.Config.SampleLimit {
-		prompbmarshal.ResetWriteRequest(&wc.writeRequest)
-		up = 0
-		scrapesSkippedBySampleLimit.Inc()
-	}
-	samplesPostRelabeling := len(wc.writeRequest.Timeseries)
+	samplesPostRelabeling += len(wc.writeRequest.Timeseries)
 	seriesAdded := sw.getSeriesAdded(wc)
 	sw.addAutoTimeseries(wc, "up", float64(up), scrapeTimestamp)
 	sw.addAutoTimeseries(wc, "scrape_duration_seconds", duration, scrapeTimestamp)
```
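With the fix, the limit is enforced on samplesScraped before relabeling: when a target exceeds sample_limit, all of its rows are dropped (srcRows = srcRows[:0]), up is forced to 0 and scrapesSkippedBySampleLimit is incremented. The post-relabeling counter is now accumulated across flushes instead of being read from a buffer that has already been reset, so it stays correct for targets that trigger chunked pushes. A sketch of that pattern under assumed names (writeRequest and push are placeholders, not the real scrapeWork API):

```go
package main

import "fmt"

// writeRequest stands in for wc.writeRequest: a buffer that is flushed
// and reset once it grows large enough.
type writeRequest struct{ timeseries []int }

func (wr *writeRequest) reset() { wr.timeseries = wr.timeseries[:0] }

func main() {
	var wr writeRequest
	samplesPostRelabeling := 0

	// push counts the chunk *before* resetting the buffer, which is the
	// pattern the diff switches to for samplesPostRelabeling.
	push := func() {
		samplesPostRelabeling += len(wr.timeseries)
		wr.reset()
	}

	for i := 0; i < 25000; i++ {
		wr.timeseries = append(wr.timeseries, i)
		if len(wr.timeseries) > 10000 {
			push()
		}
	}
	// Account for the final, unflushed chunk as well.
	samplesPostRelabeling += len(wr.timeseries)

	fmt.Println("samples after relabeling:", samplesPostRelabeling) // 25000
}
```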