mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git
lib/promscrape: remove vm_promscrape_scrapes_failed_per_url_total and vm_promscrape_scrapes_skipped_by_sample_limit_per_url_total metrics
These metrics may result in a big number of time series when vmagent scrapes thousands of targets and these targets constantly change.
* It is better to use the `up == 0` query for determining failing targets.
* It is better to use the following query for determining targets that exceeded the limit on the number of scraped samples: `scrape_samples_scraped > 0 if up == 0`
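To illustrate the cardinality concern, here is a minimal sketch (not part of this commit) using the github.com/VictoriaMetrics/metrics package that the scrape code already relies on. The target URLs are hypothetical; the point is that a per-URL counter creates one time series per distinct target, while a single aggregated counter stays at one series no matter how many targets come and go.

package main

import (
	"fmt"

	"github.com/VictoriaMetrics/metrics"
)

func main() {
	// Hypothetical target URLs; in vmagent these come from service discovery.
	targets := []string{
		"http://node-1:9100/metrics",
		"http://node-2:9100/metrics",
		"http://node-3:9100/metrics",
	}
	for _, url := range targets {
		// One new counter (and thus one new time series) per distinct URL.
		// With thousands of constantly changing targets this grows without bound.
		metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_scrapes_failed_per_url_total{url=%q}`, url)).Inc()
	}
	// A single aggregated counter stays at one time series regardless of target count
	// (the aggregated metric name here is an assumption for illustration).
	metrics.GetOrCreateCounter(`vm_promscrape_scrapes_failed_total`).Inc()
}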
This commit is contained in:
parent c769f8321d
commit 18c2075159
@@ -269,7 +269,6 @@ func (sw *scrapeWork) scrapeInternal(scrapeTimestamp, realTimestamp int64) error
 	if err != nil {
 		up = 0
 		scrapesFailed.Inc()
-		metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_scrapes_failed_per_url_total{url=%q}`, sw.Config.ScrapeURL)).Inc()
 	} else {
 		bodyString := bytesutil.ToUnsafeString(body.B)
 		wc.rows.UnmarshalWithErrLogger(bodyString, sw.logError)
@@ -281,7 +280,6 @@ func (sw *scrapeWork) scrapeInternal(scrapeTimestamp, realTimestamp int64) error
 		srcRows = srcRows[:0]
 		up = 0
 		scrapesSkippedBySampleLimit.Inc()
-		metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_scrapes_skipped_by_sample_limit_per_url_total{url=%q}`, sw.Config.ScrapeURL)).Inc()
 	}
 	samplesPostRelabeling := 0
 	for i := range srcRows {
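The scrapesFailed and scrapesSkippedBySampleLimit counters incremented in the remaining context lines are aggregated across all targets, which is why they are kept. Their declarations are not part of this diff; the following is a plausible sketch, assuming package-level counters from github.com/VictoriaMetrics/metrics and metric names following the vm_promscrape_* convention (the exact names are an assumption, not confirmed by this diff).

package promscrape

import "github.com/VictoriaMetrics/metrics"

var (
	// Incremented once per failed scrape, regardless of the target URL,
	// so it always stays at a single time series.
	scrapesFailed = metrics.NewCounter(`vm_promscrape_scrapes_failed_total`)

	// Incremented when a scrape result is dropped for exceeding sample_limit.
	scrapesSkippedBySampleLimit = metrics.NewCounter(`vm_promscrape_scrapes_skipped_by_sample_limit_total`)
)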