diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md
index 3ad68b8d34..3fc22ec55c 100644
--- a/docs/CHANGELOG.md
+++ b/docs/CHANGELOG.md
@@ -63,7 +63,7 @@
 * FEATURE: optimize requests to `/api/v1/labels` and `/api/v1/label//values` when `start` and `end` args are set.
 * FEATURE: reduce memory usage when query touches big number of time series.
-* FEATURE: vmagent: reduce memory usage when `kubernetes_sd_config` discovers big number of scrape targets (e.g. hundreds of thouthands) and the majority of these targets (99%)
+* FEATURE: vmagent: reduce memory usage when `kubernetes_sd_config` discovers big number of scrape targets (e.g. hundreds of thousands) and the majority of these targets (99%)
   are dropped during relabeling. Previously labels for all the dropped targets were displayed at `/api/v1/targets` page. Now only up to `-promscrape.maxDroppedTargets` such targets are displayed.
   See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/878 for details.
 * FEATURE: vmagent: reduce memory usage when scraping big number of targets with big number of temporary labels starting with `__`.
diff --git a/lib/promscrape/scrapework.go b/lib/promscrape/scrapework.go
index 30b0dea597..864b2cbe06 100644
--- a/lib/promscrape/scrapework.go
+++ b/lib/promscrape/scrapework.go
@@ -249,7 +249,7 @@ func (sw *scrapeWork) scrapeInternal(scrapeTimestamp, realTimestamp int64) error
 	// Common case: read all the data from scrape target to memory (body) and then process it.
 	// This case should work more optimally for than stream parse code above for common case when scrape target exposes
-	// up to a few thouthand metrics.
+	// up to a few thousand metrics.
 	body := leveledbytebufferpool.Get(sw.prevBodyLen)
 	var err error
 	body.B, err = sw.ReadData(body.B[:0])
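
The changelog hunk above describes bounding how many relabel-dropped targets are remembered for the `/api/v1/targets` page via `-promscrape.maxDroppedTargets`. The sketch below is a minimal illustration of that bounding idea only; it is not vmagent's actual implementation, and the `droppedTargets` type, its fields, and the `add` method are hypothetical names.

```go
package main

import "fmt"

// droppedTargets keeps at most `limit` entries so that hundreds of thousands of
// relabel-dropped targets cannot inflate memory. All names here are illustrative.
type droppedTargets struct {
	entries []string
	limit   int // analogous in spirit to the -promscrape.maxDroppedTargets cap
}

func (dt *droppedTargets) add(targetDescription string) {
	// Remember only the first `limit` dropped targets; later ones are skipped.
	if len(dt.entries) < dt.limit {
		dt.entries = append(dt.entries, targetDescription)
	}
}

func main() {
	dt := &droppedTargets{limit: 3}
	for i := 0; i < 100000; i++ {
		dt.add(fmt.Sprintf("__address__=host-%d:9100", i))
	}
	fmt.Println(len(dt.entries)) // 3: memory stays bounded regardless of how many targets are dropped
}
```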
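The Go hunk sits in the common scrape path that reads the whole response body into a pooled buffer sized from the previous scrape. The sketch below illustrates that reuse-and-presize pattern with a plain `sync.Pool` standing in for `lib/leveledbytebufferpool`; it is an approximation under those assumptions, not the actual `scrapeWork.ReadData` code, and `scraper`/`readFullBody` are hypothetical names.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
	"sync"
)

// bufPool is a simplified stand-in for lib/leveledbytebufferpool: buffers are
// reused across scrapes instead of being allocated anew for every scrape.
var bufPool = sync.Pool{New: func() interface{} { return new(bytes.Buffer) }}

type scraper struct {
	prevBodyLen int // size of the previous scrape body, used as a sizing hint
}

// readFullBody reads an entire scrape response into a pooled buffer, mirroring
// the "read all the data to memory, then process it" path in scrapeInternal.
func (s *scraper) readFullBody(r io.Reader) ([]byte, *bytes.Buffer, error) {
	buf := bufPool.Get().(*bytes.Buffer)
	buf.Reset()
	buf.Grow(s.prevBodyLen) // pre-size from the previous scrape to avoid re-allocations
	if _, err := io.Copy(buf, r); err != nil {
		bufPool.Put(buf)
		return nil, nil, err
	}
	s.prevBodyLen = buf.Len()
	return buf.Bytes(), buf, nil
}

func main() {
	s := &scraper{}
	body, buf, err := s.readFullBody(strings.NewReader("metric_a 1\nmetric_b 2\n"))
	if err != nil {
		panic(err)
	}
	fmt.Printf("read %d bytes\n", len(body))
	bufPool.Put(buf) // return the buffer once the body has been parsed
}
```

As the patched comment notes, this whole-body path is intended for the common case of targets exposing up to a few thousand metrics; larger responses go through the stream parse code mentioned earlier in the same function.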