diff --git a/lib/promscrape/scrapework.go b/lib/promscrape/scrapework.go
index a97b2c01d0..d21840d6b8 100644
--- a/lib/promscrape/scrapework.go
+++ b/lib/promscrape/scrapework.go
@@ -3,7 +3,6 @@ package promscrape
 import (
 	"flag"
 	"fmt"
-	"math/rand"
 	"strings"
 	"time"
 
@@ -14,6 +13,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
 	parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/prometheus"
 	"github.com/VictoriaMetrics/metrics"
+	xxhash "github.com/cespare/xxhash/v2"
 )
 
 var (
@@ -121,11 +121,22 @@ type scrapeWork struct {
 }
 
 func (sw *scrapeWork) run(stopCh <-chan struct{}) {
-	// Randomize start time for the first scrape in order to spread load
-	// when scraping many targets.
+	// Calculate start time for the first scrape from ScrapeURL and labels.
+	// This should spread load when scraping many targets with different
+	// scrape urls and labels.
+	// This also makes consistent scrape times across restarts
+	// for a target with the same ScrapeURL and labels.
 	scrapeInterval := sw.Config.ScrapeInterval
-	randSleep := time.Duration(float64(scrapeInterval) * rand.Float64())
-	timer := time.NewTimer(randSleep)
+	key := fmt.Sprintf("ScrapeURL=%s, Labels=%s", sw.Config.ScrapeURL, sw.Config.LabelsString())
+	h := uint32(xxhash.Sum64([]byte(key)))
+	randSleep := uint64(float64(scrapeInterval) * (float64(h) / (1 << 32)))
+	sleepOffset := uint64(time.Now().UnixNano()) % uint64(scrapeInterval)
+	logger.Infof("randsleep=%d, sleepOffset=%d", randSleep, sleepOffset)
+	if randSleep < sleepOffset {
+		randSleep += uint64(scrapeInterval)
+	}
+	randSleep -= sleepOffset
+	timer := time.NewTimer(time.Duration(randSleep))
 	var timestamp int64
 	var ticker *time.Ticker
 	select {
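
Reviewer note (not part of the patch): below is a minimal, self-contained sketch of the scheduling logic introduced in the hunk above. The function name firstScrapeDelay and its parameters scrapeURL, labels, and scrapeInterval are illustrative stand-ins for the sw.Config fields; the debug logger.Infof call is omitted. The sketch assumes the github.com/cespare/xxhash/v2 module is available.

// firstscrapedelay_sketch.go
package main

import (
	"fmt"
	"time"

	xxhash "github.com/cespare/xxhash/v2"
)

// firstScrapeDelay maps (scrapeURL, labels) to a stable offset in
// [0, scrapeInterval), then subtracts the current wall-clock phase within the
// interval, so a restarted scraper fires its first scrape at the same
// absolute instants as before.
func firstScrapeDelay(scrapeURL, labels string, scrapeInterval time.Duration) time.Duration {
	key := fmt.Sprintf("ScrapeURL=%s, Labels=%s", scrapeURL, labels)
	// Take the low 32 bits of the xxhash and scale them into the interval.
	h := uint32(xxhash.Sum64([]byte(key)))
	randSleep := uint64(float64(scrapeInterval) * (float64(h) / (1 << 32)))
	// Current position of wall-clock time inside the interval.
	sleepOffset := uint64(time.Now().UnixNano()) % uint64(scrapeInterval)
	if randSleep < sleepOffset {
		randSleep += uint64(scrapeInterval)
	}
	randSleep -= sleepOffset
	return time.Duration(randSleep)
}

func main() {
	d := firstScrapeDelay("http://host:9100/metrics", `{job="node"}`, 30*time.Second)
	fmt.Printf("first scrape in %s\n", d)
}

Because the hash-derived offset is anchored to wall-clock phase rather than to process start time, targets with distinct scrape URLs and labels remain spread across the interval, while any single target keeps a consistent scrape schedule across restarts.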