From 18c2075159fa0c7be18efe82e5fe8f70b18f6e0b Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Fri, 12 Feb 2021 05:11:37 +0200 Subject: [PATCH] lib/promscrape: remove vm_promscrape_scrapes_failed_per_url_total and vm_promscrape_scrapes_skipped_by_sample_limit_per_url_total metrics These metrics may result in a big number of time series when vmagent scrapes thousands of targets and these targets constantly change. * It is better to use the `up == 0` query for determining failing targets. * It is better to use the following query for determining targets with exceeded limit on the number of metrics: scrape_samples_scraped > 0 if up == 0 --- lib/promscrape/scrapework.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/lib/promscrape/scrapework.go b/lib/promscrape/scrapework.go index 2c37469ed5..585529e017 100644 --- a/lib/promscrape/scrapework.go +++ b/lib/promscrape/scrapework.go @@ -269,7 +269,6 @@ func (sw *scrapeWork) scrapeInternal(scrapeTimestamp, realTimestamp int64) error if err != nil { up = 0 scrapesFailed.Inc() - metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_scrapes_failed_per_url_total{url=%q}`, sw.Config.ScrapeURL)).Inc() } else { bodyString := bytesutil.ToUnsafeString(body.B) wc.rows.UnmarshalWithErrLogger(bodyString, sw.logError) @@ -281,7 +280,6 @@ func (sw *scrapeWork) scrapeInternal(scrapeTimestamp, realTimestamp int64) error srcRows = srcRows[:0] up = 0 scrapesSkippedBySampleLimit.Inc() - metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_scrapes_skipped_by_sample_limit_per_url_total{url=%q}`, sw.Config.ScrapeURL)).Inc() } samplesPostRelabeling := 0 for i := range srcRows {