lib/promscrape: remove vm_promscrape_scrapes_failed_per_url_total and vm_promscrape_scrapes_skipped_by_sample_limit_per_url_total metrics

These metrics may result in a big number of time series when vmagent scrapes thousands of targets and these targets change constantly.

* It is better to use the `up == 0` query for determining failing targets.
* It is better to use the following query for determining targets that exceed the limit on the number of metrics:

  scrape_samples_scraped > 0 if up == 0
This commit is contained in:
Aliaksandr Valialkin 2021-02-12 05:11:37 +02:00
parent c769f8321d
commit 18c2075159

View File

@ -269,7 +269,6 @@ func (sw *scrapeWork) scrapeInternal(scrapeTimestamp, realTimestamp int64) error
	if err != nil {
		up = 0
		scrapesFailed.Inc()
		metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_scrapes_failed_per_url_total{url=%q}`, sw.Config.ScrapeURL)).Inc()
	} else {
		bodyString := bytesutil.ToUnsafeString(body.B)
		wc.rows.UnmarshalWithErrLogger(bodyString, sw.logError)
@ -281,7 +280,6 @@ func (sw *scrapeWork) scrapeInternal(scrapeTimestamp, realTimestamp int64) error
		srcRows = srcRows[:0]
		up = 0
		scrapesSkippedBySampleLimit.Inc()
		metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_scrapes_skipped_by_sample_limit_per_url_total{url=%q}`, sw.Config.ScrapeURL)).Inc()
	}
	samplesPostRelabeling := 0
	for i := range srcRows {