From c6f6f094c5b5da9b8e209e72793f7d07fdce8114 Mon Sep 17 00:00:00 2001
From: Aliaksandr Valialkin
Date: Mon, 22 Jan 2024 00:38:21 +0200
Subject: [PATCH] Revert "lib/promscrape: do not store last scrape response
 when stale markers … (#5577)"
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This reverts commit cfec258803daacd777a384ef1460ce01ddb1dc18.

Reason for revert: the original code already doesn't store the last scrape
response when stale markers are disabled. The scrapeWork.areIdenticalSeries()
function always returns true if stale markers are disabled, which prevents
scrapeWork.processScrapedData() from storing the last response.

It looks like the reverted commit could also re-introduce the issue
https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3660

Updates https://github.com/VictoriaMetrics/VictoriaMetrics/pull/5577
---
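Illustrative sketch (not part of the diff below): the gating described in the
commit message, reduced to a minimal runnable Go program. The types and
helpers here are simplified stand-ins for scrapeWork, Config.NoStaleMarkers,
areIdenticalSeries() and storeLastScrape() in lib/promscrape/scrapework.go;
only the control flow is meant to match the reasoning above:

	package main

	import "fmt"

	type scrapeWorkConfig struct {
		NoStaleMarkers bool // mirrors the -promscrape.noStaleMarkers setting
	}

	type scrapeWork struct {
		Config     scrapeWorkConfig
		lastScrape string // last stored scrape response
	}

	// areIdenticalSeries always reports true when stale markers are
	// disabled, so callers see every scrape as "identical".
	func (sw *scrapeWork) areIdenticalSeries(prev, curr string) bool {
		if sw.Config.NoStaleMarkers {
			return true
		}
		return prev == curr // stand-in for parser.AreIdenticalSeriesFast
	}

	// processScrapedData stores the last response only when the series
	// changed - hence never when stale markers are disabled.
	func (sw *scrapeWork) processScrapedData(body string) {
		if !sw.areIdenticalSeries(sw.lastScrape, body) {
			sw.lastScrape = body // stand-in for sw.storeLastScrape
		}
	}

	func main() {
		sw := &scrapeWork{Config: scrapeWorkConfig{NoStaleMarkers: true}}
		sw.processScrapedData("metric_a 1\n")
		fmt.Printf("stored last scrape: %q\n", sw.lastScrape) // "" - nothing stored
	}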
 docs/CHANGELOG.md            |  1 -
 lib/promscrape/scrapework.go | 25 ++++++++++++-------------
 2 files changed, 12 insertions(+), 14 deletions(-)

diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index 88eac31915..60f50c545a 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -68,7 +68,6 @@ The sandbox cluster installation is running under the constant load generated by
 * BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): properly assume role with [AWS IRSA authorization](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html). Previously role chaining was not supported. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3822) for details.
 * BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): exit if there is config syntax error in [`scrape_config_files`](https://docs.victoriametrics.com/vmagent.html#loading-scrape-configs-from-multiple-files) when `-promscrape.config.strictParse=true`. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5508).
 * BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): properly discover targets for `role: endpoints` and `role: endpointslice` in [kubernetes_sd_configs](https://docs.victoriametrics.com/sd_configs.html#kubernetes_sd_configs). Previously some `endpoints` and `endpointslice` targets could be left undiscovered or some targets could have missing `__meta_*` labels when performing service discovery in busy Kubernetes clusters with large number of pods. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/5557).
-* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): do not store scrape response for target in memory when staleness markers are disabled. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/5577) for details.
 * BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): fix a link for the statistic inaccuracy explanation in the cardinality explorer tool. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5460).
 * BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): send `step` param for instant queries. The change reverts [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3896) due to reasons explained in [this comment](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3896#issuecomment-1896704401).
 * BUGFIX: all: fix potential panic during components shutdown when [metrics push](https://docs.victoriametrics.com/#push-metrics) is configured. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5548). Thanks to @zhdd99 for the [pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/5549).
diff --git a/lib/promscrape/scrapework.go b/lib/promscrape/scrapework.go
index 3fe5c4d437..2bb3846c17 100644
--- a/lib/promscrape/scrapework.go
+++ b/lib/promscrape/scrapework.go
@@ -337,13 +337,11 @@ func (sw *scrapeWork) run(stopCh <-chan struct{}, globalStopCh <-chan struct{})
 		// Do not send staleness markers on graceful shutdown as Prometheus does.
 		// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2013#issuecomment-1006994079
 	default:
-		if !sw.Config.NoStaleMarkers {
-			// Send staleness markers to all the metrics scraped last time from the target
-			// when the given target disappears as Prometheus does.
-			// Use the current real timestamp for staleness markers, so queries
-			// stop returning data just after the time the target disappears.
-			sw.sendStaleSeries(lastScrape, "", t, true)
-		}
+		// Send staleness markers to all the metrics scraped last time from the target
+		// when the given target disappears as Prometheus does.
+		// Use the current real timestamp for staleness markers, so queries
+		// stop returning data just after the time the target disappears.
+		sw.sendStaleSeries(lastScrape, "", t, true)
 	}
 	if sw.seriesLimiter != nil {
 		sw.seriesLimiter.MustStop()
@@ -489,8 +487,7 @@ func (sw *scrapeWork) processScrapedData(scrapeTimestamp, realTimestamp int64, b
 		bodyString = ""
 	}
 	seriesAdded := 0
-	// cannot track the number of new time series when stale markers are disabled.
-	if !sw.Config.NoStaleMarkers && !areIdenticalSeries {
+	if !areIdenticalSeries {
 		// The returned value for seriesAdded may be bigger than the real number of added series
 		// if some series were removed during relabeling.
 		// This is a trade-off between performance and accuracy.
@@ -520,7 +517,7 @@ func (sw *scrapeWork) processScrapedData(scrapeTimestamp, realTimestamp int64, b
 		writeRequestCtxPool.Put(wc)
 	}
 	// body must be released only after wc is released, since wc refers to body.
-	if !sw.Config.NoStaleMarkers && !areIdenticalSeries {
+	if !areIdenticalSeries {
 		// Send stale markers for disappeared metrics with the real scrape timestamp
 		// in order to guarantee that query doesn't return data after this time for the disappeared metrics.
 		sw.sendStaleSeries(lastScrape, bodyString, realTimestamp, false)
@@ -630,8 +627,7 @@ func (sw *scrapeWork) scrapeStream(scrapeTimestamp, realTimestamp int64) error {
 		scrapesFailed.Inc()
 	}
 	seriesAdded := 0
-	// cannot track the number of new time series when stale markers are disabled.
-	if !sw.Config.NoStaleMarkers && !areIdenticalSeries {
+	if !areIdenticalSeries {
 		// The returned value for seriesAdded may be bigger than the real number of added series
 		// if some series were removed during relabeling.
 		// This is a trade-off between performance and accuracy.
@@ -651,7 +647,7 @@ func (sw *scrapeWork) scrapeStream(scrapeTimestamp, realTimestamp int64) error {
 	sw.prevBodyLen = sbr.bodyLen
 	wc.reset()
 	writeRequestCtxPool.Put(wc)
-	if !sw.Config.NoStaleMarkers && !areIdenticalSeries {
+	if !areIdenticalSeries {
 		// Send stale markers for disappeared metrics with the real scrape timestamp
 		// in order to guarantee that query doesn't return data after this time for the disappeared metrics.
 		sw.sendStaleSeries(lastScrape, bodyString, realTimestamp, false)
@@ -790,6 +786,9 @@ func (sw *scrapeWork) sendStaleSeries(lastScrape, currScrape string, timestamp i
 	defer func() {
 		<-sendStaleSeriesConcurrencyLimitCh
 	}()
+	if sw.Config.NoStaleMarkers {
+		return
+	}
 	bodyString := lastScrape
 	if currScrape != "" {
 		bodyString = parser.GetRowsDiff(lastScrape, currScrape)
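
Note: after this revert the NoStaleMarkers check lives in a single place. As
the final hunk above shows, sendStaleSeries() returns early when stale markers
are disabled, so every call site can invoke it unconditionally. A simplified
sketch of the resulting shape (the concurrency limiter, row diffing and the
push path are elided, and the trailing parameter names are inferred from the
call sites, not copied from the source):

	// sendStaleSeries, post-revert (simplified): the centralized guard
	// makes per-call-site NoStaleMarkers checks unnecessary.
	func (sw *scrapeWork) sendStaleSeries(lastScrape, currScrape string, timestamp int64, addAutoSeries bool) {
		if sw.Config.NoStaleMarkers {
			return // stale markers disabled: nothing to send
		}
		// ... compute the rows that disappeared between lastScrape and
		// currScrape and push them as Prometheus staleness markers at
		// the given timestamp ...
	}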