diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index 5a24263e73..3d10ff8c8a 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -17,6 +17,7 @@ in front of VictoriaMetrics. [Contact us](mailto:sales@victoriametrics.com) if y
   - `vm_promscrape_discovery_requests_total`
   - `vm_promscrape_discovery_retries_total`
   - `vm_promscrape_scrape_retries_total`
+  - `vm_promscrape_service_discovery_duration_seconds`
 * BUGFIX: vmagent: reduce HTTP reconnection rate for scrape targets. Previously vmagent could erroneously close HTTP keep-alive connections more frequently than needed.
 * BUGFIX: vmagent: retry scrape and service discovery requests when the remote server closes the HTTP keep-alive connection. Previously the `disable_keepalive: true` option could be used under the `scrape_configs` section when working with such servers.
diff --git a/lib/promscrape/scraper.go b/lib/promscrape/scraper.go
index 43b5d670a2..5e3aef789f 100644
--- a/lib/promscrape/scraper.go
+++ b/lib/promscrape/scraper.go
@@ -180,6 +180,8 @@ func (scs *scrapeConfigs) add(name string, checkInterval time.Duration, getScrap
 		checkInterval: checkInterval,
 		cfgCh:         make(chan *Config, 1),
 		stopCh:        scs.stopCh,
+
+		discoveryDuration: metrics.GetOrCreateHistogram(fmt.Sprintf("vm_promscrape_service_discovery_duration_seconds{type=%q}", name)),
 	}
 	scs.wg.Add(1)
 	go func() {
@@ -208,6 +210,8 @@ type scrapeConfig struct {
 	checkInterval time.Duration
 	cfgCh         chan *Config
 	stopCh        <-chan struct{}
+
+	discoveryDuration *metrics.Histogram
 }
 
 func (scfg *scrapeConfig) run() {
@@ -224,9 +228,11 @@ func (scfg *scrapeConfig) run() {
 	cfg := <-scfg.cfgCh
 	var swsPrev []*ScrapeWork
 	updateScrapeWork := func(cfg *Config) {
+		startTime := time.Now()
 		sws := scfg.getScrapeWork(cfg, swsPrev)
 		sg.update(sws)
 		swsPrev = sws
+		scfg.discoveryDuration.UpdateDuration(startTime)
 	}
 	updateScrapeWork(cfg)
 	atomic.AddInt32(&PendingScrapeConfigs, -1)
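
The diff measures each service discovery pass by taking a `time.Now()` before the work and calling `Histogram.UpdateDuration` afterwards, with one histogram per discovery type via the `{type=%q}` label. Below is a minimal standalone sketch of that pattern using the `github.com/VictoriaMetrics/metrics` package; it is not the vmagent code itself, and `discoverTargets` plus the `"my_sd"` label value are hypothetical placeholders.

```go
package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/VictoriaMetrics/metrics"
)

// One histogram per discovery type, mirroring the {type=%q} label in the diff.
// "my_sd" is a placeholder for the scrape config name.
var discoveryDuration = metrics.GetOrCreateHistogram(
	fmt.Sprintf("vm_promscrape_service_discovery_duration_seconds{type=%q}", "my_sd"))

// discoverTargets stands in for the getScrapeWork + sg.update steps timed in the diff.
func discoverTargets() {
	time.Sleep(50 * time.Millisecond) // pretend to query a service discovery API
}

func main() {
	startTime := time.Now()
	discoverTargets()
	// UpdateDuration records time.Since(startTime) into the histogram.
	discoveryDuration.UpdateDuration(startTime)

	// Expose the metric in Prometheus text format on /metrics.
	http.HandleFunc("/metrics", func(w http.ResponseWriter, r *http.Request) {
		metrics.WritePrometheus(w, false)
	})
	_ = http.ListenAndServe(":8080", nil)
}
```

Because the histogram is created with `GetOrCreateHistogram`, repeated calls with the same name and label set return the same histogram, so the pattern is safe to use from the per-config goroutine shown in `scrapeConfig.run`.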