From 0a14b7bb820167c7b4af36a9efdbe17ff297cc0a Mon Sep 17 00:00:00 2001
From: Aliaksandr Valialkin
Date: Sat, 7 Jan 2023 00:14:28 -0800
Subject: [PATCH] lib/promscrape: reduce the number of concurrently executed
 processScrapedData calls from 2x of the number of CPUs to the number of CPUs

This should reduce the maximum memory usage of the processScrapedData() function by 2x.

The only part of processScrapedData() that can be IO-bound is the pushData() call,
which buffers data to the persistent queue when the remote storage cannot keep up
with the data ingestion speed. In that case it is OK for the scrape pace to be limited.
---
 lib/promscrape/scrapework.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lib/promscrape/scrapework.go b/lib/promscrape/scrapework.go
index 84d378f379..f88cb48c60 100644
--- a/lib/promscrape/scrapework.go
+++ b/lib/promscrape/scrapework.go
@@ -424,7 +424,7 @@ func (sw *scrapeWork) scrapeInternal(scrapeTimestamp, realTimestamp int64) error
 	return err
 }
 
-var concurrencyLimitCh = make(chan struct{}, 2*cgroup.AvailableCPUs())
+var concurrencyLimitCh = make(chan struct{}, cgroup.AvailableCPUs())
 
 func (sw *scrapeWork) processScrapedData(scrapeTimestamp, realTimestamp int64, body *bytesutil.ByteBuffer, err error) (bool, error) {
	// This function is CPU-bound, while it may allocate big amounts of memory.
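
For context on the mechanism being tuned: concurrencyLimitCh is a buffered channel used as a counting
semaphore, so its capacity caps how many processScrapedData() calls run at once, and halving the capacity
roughly halves the peak memory held by in-flight scrapes. Below is a minimal, self-contained sketch of that
pattern, not the actual VictoriaMetrics code: runtime.NumCPU() stands in for cgroup.AvailableCPUs(), and the
processScrapedData body is a hypothetical placeholder.

package main

import (
	"fmt"
	"runtime"
	"sync"
)

// concurrencyLimitCh acts as a counting semaphore: the channel capacity bounds
// how many goroutines may execute processScrapedData concurrently.
// runtime.NumCPU() is an illustrative stand-in for cgroup.AvailableCPUs().
var concurrencyLimitCh = make(chan struct{}, runtime.NumCPU())

// processScrapedData is a hypothetical CPU-bound stage; in the real code it
// parses the scraped body and pushes the resulting rows downstream.
func processScrapedData(body []byte) {
	concurrencyLimitCh <- struct{}{}        // acquire a slot; blocks when the limit is reached
	defer func() { <-concurrencyLimitCh }() // release the slot when done

	// ... CPU-bound work on body ...
	_ = body
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			processScrapedData([]byte(fmt.Sprintf("scrape-%d", i)))
		}(i)
	}
	wg.Wait()
}

Blocking on the channel send applies back-pressure to the callers, which matches the commit message's point:
if pushData() is slow because the remote storage cannot keep up, it is acceptable for the scrape pace to be
limited rather than letting more in-flight scrapes accumulate memory.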