lib/promscrape: do not store last scrape response when stale markers are disabled (#5577)

* lib/promscrape: do not store last scrape response when stale markers are disabled

* update changelog
Hui Wang 2024-01-20 00:53:41 +08:00 committed by Aliaksandr Valialkin
parent 75920ab3b1
commit 66eb013b54
2 changed files with 14 additions and 12 deletions
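
In short: the commit moves the `NoStaleMarkers` check from inside `sendStaleSeries` up to its call sites, so a scraper with staleness markers disabled never needs the previous scrape response for diffing and can avoid keeping it in memory. A minimal sketch of the resulting behavior, assuming simplified stand-ins for the `scrapeWork` internals (the real types live in `lib/promscrape` and are more involved):

```go
package main

import "fmt"

// Hypothetical, trimmed-down stand-ins for the scrapeWork internals this
// commit touches; the real types live in lib/promscrape.
type scrapeConfig struct{ NoStaleMarkers bool }

type scrapeWork struct {
	Config     scrapeConfig
	lastScrape string // previous response, kept only for stale-marker diffing
}

// onScrapeDone sketches the gating introduced by this commit: when staleness
// markers are disabled, the previous response is never needed for diffing,
// so it is not retained in memory.
func (sw *scrapeWork) onScrapeDone(body string) {
	if sw.Config.NoStaleMarkers {
		sw.lastScrape = "" // drop the response instead of storing it
		return
	}
	sw.lastScrape = body // keep it so disappeared series can be marked stale
}

func main() {
	sw := &scrapeWork{Config: scrapeConfig{NoStaleMarkers: true}}
	sw.onScrapeDone(`metric_a 1`)
	// With NoStaleMarkers set, nothing is retained between scrapes.
	fmt.Println(len(sw.lastScrape) == 0) // true
}
```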

docs/CHANGELOG.md

@@ -61,6 +61,7 @@ The sandbox cluster installation is running under the constant load generated by
 * BUGFIX: [vmctl](https://docs.victoriametrics.com/vmctl.html): retry on import errors in `vm-native` mode. Before, retries happened only on writes into a network connection between source and destination. But errors returned by server after all the data was transmitted were logged, but not retried.
 * BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): properly assume role with [AWS IRSA authorization](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html). Previously role chaining was not supported. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3822) for details.
 * BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): exit if there is config syntax error in [`scrape_config_files`](https://docs.victoriametrics.com/vmagent.html#loading-scrape-configs-from-multiple-files) when `-promscrape.config.strictParse=true`. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5508).
+* BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): do not store scrape response for target in memory when staleness markers are disabled. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/5577) for details.
 * BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): fix a link for the statistic inaccuracy explanation in the cardinality explorer tool. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5460).
 * BUGFIX: all: fix potential panic during components shutdown when [metrics push](https://docs.victoriametrics.com/#push-metrics) is configured. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5548). Thanks to @zhdd99 for the [pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/5549).
 * BUGFIX: [vmselect](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html): properly determine time range search for instant queries with too big look-behind window like `foo[100y]`. Previously, such queries could return empty responses even if `foo` is present in database.

lib/promscrape/scrapework.go

@@ -337,12 +337,14 @@ func (sw *scrapeWork) run(stopCh <-chan struct{}, globalStopCh <-chan struct{})
 		// Do not send staleness markers on graceful shutdown as Prometheus does.
 		// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2013#issuecomment-1006994079
 	default:
-		// Send staleness markers to all the metrics scraped last time from the target
-		// when the given target disappears as Prometheus does.
-		// Use the current real timestamp for staleness markers, so queries
-		// stop returning data just after the time the target disappears.
-		sw.sendStaleSeries(lastScrape, "", t, true)
+		if !sw.Config.NoStaleMarkers {
+			// Send staleness markers to all the metrics scraped last time from the target
+			// when the given target disappears as Prometheus does.
+			// Use the current real timestamp for staleness markers, so queries
+			// stop returning data just after the time the target disappears.
+			sw.sendStaleSeries(lastScrape, "", t, true)
+		}
 	}
 	if sw.seriesLimiter != nil {
 		sw.seriesLimiter.MustStop()
 		sw.seriesLimiter = nil
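
For context on what `sendStaleSeries` emits: a staleness marker is a sample carrying the special NaN bit pattern `0x7ff0000000000002` that Prometheus uses, so the query engine can tell "this series disappeared" apart from an ordinary NaN value. A self-contained illustration (`isStaleNaN` here is a hypothetical helper written for this sketch, not the library's API):

```go
package main

import (
	"fmt"
	"math"
)

// staleNaNBits is the special NaN bit pattern Prometheus (and VictoriaMetrics)
// use for staleness markers, distinct from the NaN produced by math.NaN().
const staleNaNBits = 0x7ff0000000000002

var staleNaN = math.Float64frombits(staleNaNBits)

// isStaleNaN reports whether f is a staleness marker rather than a plain NaN.
func isStaleNaN(f float64) bool {
	return math.Float64bits(f) == staleNaNBits
}

func main() {
	fmt.Println(isStaleNaN(staleNaN))   // true
	fmt.Println(isStaleNaN(math.NaN())) // false: ordinary NaN is not a stale marker
}
```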
@@ -487,7 +489,8 @@ func (sw *scrapeWork) processScrapedData(scrapeTimestamp, realTimestamp int64, b
 		bodyString = ""
 	}
 	seriesAdded := 0
-	if !areIdenticalSeries {
+	// cannot track the number of new time series when stale markers are disabled.
+	if !sw.Config.NoStaleMarkers && !areIdenticalSeries {
 		// The returned value for seriesAdded may be bigger than the real number of added series
 		// if some series were removed during relabeling.
 		// This is a trade-off between performance and accuracy.
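
The added comment notes that `seriesAdded` cannot be tracked when stale markers are disabled: counting newly appeared series requires diffing against the previous scrape body, which is exactly what this commit stops retaining. A rough sketch of that dependency (plain string keys stand in for parsed Prometheus rows; this is not the actual parser-based implementation):

```go
package main

import "fmt"

// countNewSeries sketches why seriesAdded needs the previous scrape body:
// a series counts as "added" if its key is present in the current response
// but absent from the previous one. Without the previous body there is
// nothing to compare against.
func countNewSeries(prev, curr []string) int {
	seen := make(map[string]struct{}, len(prev))
	for _, key := range prev {
		seen[key] = struct{}{}
	}
	n := 0
	for _, key := range curr {
		if _, ok := seen[key]; !ok {
			n++ // appeared since the previous scrape
		}
	}
	return n
}

func main() {
	prev := []string{`up{job="a"}`, `http_requests_total{job="a"}`}
	curr := []string{`up{job="a"}`, `http_requests_total{job="a"}`, `process_cpu_seconds_total{job="a"}`}
	fmt.Println(countNewSeries(prev, curr)) // 1
}
```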
@@ -517,7 +520,7 @@ func (sw *scrapeWork) processScrapedData(scrapeTimestamp, realTimestamp int64, b
 		writeRequestCtxPool.Put(wc)
 	}
 	// body must be released only after wc is released, since wc refers to body.
-	if !areIdenticalSeries {
+	if !sw.Config.NoStaleMarkers && !areIdenticalSeries {
 		// Send stale markers for disappeared metrics with the real scrape timestamp
 		// in order to guarantee that query doesn't return data after this time for the disappeared metrics.
 		sw.sendStaleSeries(lastScrape, bodyString, realTimestamp, false)
@@ -627,7 +630,8 @@ func (sw *scrapeWork) scrapeStream(scrapeTimestamp, realTimestamp int64) error {
 		scrapesFailed.Inc()
 	}
 	seriesAdded := 0
-	if !areIdenticalSeries {
+	// cannot track the number of new time series when stale markers are disabled.
+	if !sw.Config.NoStaleMarkers && !areIdenticalSeries {
 		// The returned value for seriesAdded may be bigger than the real number of added series
 		// if some series were removed during relabeling.
 		// This is a trade-off between performance and accuracy.
@@ -647,7 +651,7 @@ func (sw *scrapeWork) scrapeStream(scrapeTimestamp, realTimestamp int64) error {
 	sw.prevBodyLen = sbr.bodyLen
 	wc.reset()
 	writeRequestCtxPool.Put(wc)
-	if !areIdenticalSeries {
+	if !sw.Config.NoStaleMarkers && !areIdenticalSeries {
 		// Send stale markers for disappeared metrics with the real scrape timestamp
 		// in order to guarantee that query doesn't return data after this time for the disappeared metrics.
 		sw.sendStaleSeries(lastScrape, bodyString, realTimestamp, false)
@@ -788,9 +792,6 @@ func (sw *scrapeWork) sendStaleSeries(lastScrape, currScrape string, timestamp i
 	defer func() {
 		<-sendStaleSeriesConcurrencyLimitCh
 	}()
-	if sw.Config.NoStaleMarkers {
-		return
-	}
 	bodyString := lastScrape
 	if currScrape != "" {
 		bodyString = parser.GetRowsDiff(lastScrape, currScrape)
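
With the early return gone, `sendStaleSeries` now assumes its callers have already checked `NoStaleMarkers`, keeping that decision next to the code that retains `lastScrape`. The diffing itself goes through `parser.GetRowsDiff`, which returns the rows from the first argument that are missing from the second, i.e. the disappeared series. A simplified sketch of that contract (raw line comparison stands in for real Prometheus text-format row parsing):

```go
package main

import (
	"fmt"
	"strings"
)

// getRowsDiff sketches the contract of parser.GetRowsDiff as used above:
// it returns the rows from lastScrape that are missing from currScrape,
// i.e. the series that disappeared and need staleness markers. The real
// implementation parses Prometheus text-format rows instead of comparing
// raw lines.
func getRowsDiff(lastScrape, currScrape string) string {
	curr := make(map[string]struct{})
	for _, row := range strings.Split(currScrape, "\n") {
		curr[row] = struct{}{}
	}
	var missing []string
	for _, row := range strings.Split(lastScrape, "\n") {
		if _, ok := curr[row]; !ok && row != "" {
			missing = append(missing, row)
		}
	}
	return strings.Join(missing, "\n")
}

func main() {
	last := "up{job=\"a\"} 1\nhttp_requests_total{job=\"a\"} 5"
	curr := "up{job=\"a\"} 1"
	fmt.Println(getRowsDiff(last, curr)) // http_requests_total{job="a"} 5
}
```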