lib/promscrape: fixed reload on max_scrape_size change (#7282)
### Describe Your Changes

Fixed reload of scrape targets when `max_scrape_size` changes in the scrape config.
See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7260.
### Checklist
The following checks are **mandatory**:
- [x] My change adheres to the [VictoriaMetrics contributing guidelines](https://docs.victoriametrics.com/contributing/).
---------
Signed-off-by: hagen1778 <roman@victoriametrics.com>
Co-authored-by: hagen1778 <roman@victoriametrics.com>
(cherry picked from commit 965a33c893)
Parent: abd2f34833
Commit: 1d352b92c7
docs/CHANGELOG.md

```diff
@@ -35,6 +35,7 @@ See also [LTS releases](https://docs.victoriametrics.com/lts-releases/).
 * BUGFIX: [VictoriaMetrics cluster](https://docs.victoriametrics.com/cluster-victoriametrics/): properly apply the replication factor when storage node groups are used and the replication factor is configured via a global value such as `-replicationFactor=2`. Previously, the global replication factor was ignored for storage node groups. See [these docs](https://docs.victoriametrics.com/cluster-victoriametrics/#vmstorage-groups-at-vmselect) for more information about storage groups configuration.
 * BUGFIX: `vmselect` in [VictoriaMetrics cluster](https://docs.victoriametrics.com/cluster-victoriametrics/): properly process responses in [multi-level cluster setup](https://docs.victoriametrics.com/cluster-victoriametrics/#multi-level-cluster-setup). Previously, vmselect could return no data in a multi-level setup. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7270) for details. The issue was introduced in [v1.104.0](https://docs.victoriametrics.com/changelog/#v11040).
 * BUGFIX: [vmalert](https://docs.victoriametrics.com/vmalert): properly apply configuration changes during hot-reload to rule groups that haven't started yet. Previously, configuration updates to such groups could block all evaluations within the group until vmalert was restarted.
+* BUGFIX: [Single-node VictoriaMetrics](https://docs.victoriametrics.com/) and [vmagent](https://docs.victoriametrics.com/vmagent/): properly apply `max_scrape_size` param changes during [hot-reload](https://docs.victoriametrics.com/vmagent/#configuration-update). See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/7260).
 
 ## [v1.104.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.104.0)
 
```
lib/promscrape/client.go

```diff
@@ -171,7 +171,7 @@ func (c *client) ReadData(dst *bytesutil.ByteBuffer) error {
 		maxScrapeSizeExceeded.Inc()
 		return fmt.Errorf("the response from %q exceeds -promscrape.maxScrapeSize or max_scrape_size in the scrape config (%d bytes). "+
 			"Possible solutions are: reduce the response size for the target, increase -promscrape.maxScrapeSize command-line flag, "+
-			"increase max_scrape_size value in scrape config for the given target", c.scrapeURL, maxScrapeSize.N)
+			"increase max_scrape_size value in scrape config for the given target", c.scrapeURL, c.maxScrapeSize)
 	}
 	return nil
 }
```
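The change makes the error message report the per-target limit (`c.maxScrapeSize`) instead of the global `-promscrape.maxScrapeSize` flag value (`maxScrapeSize.N`), so the reported number matches the limit that was actually enforced for that target. As a rough sketch of the general technique rather than the actual VictoriaMetrics implementation (the helper name and signature below are invented for illustration), a per-target response size limit can be enforced with `io.LimitReader`:

```go
package example

import (
	"fmt"
	"io"
	"net/http"
)

// readLimited is a hypothetical helper: it reads at most maxScrapeSize bytes
// from the response body and fails with the per-target limit in the error
// message if the body is larger.
func readLimited(resp *http.Response, scrapeURL string, maxScrapeSize int64) ([]byte, error) {
	// Read one extra byte so a body that exactly fits the limit can be
	// distinguished from one that overflows it.
	lr := io.LimitReader(resp.Body, maxScrapeSize+1)
	data, err := io.ReadAll(lr)
	if err != nil {
		return nil, fmt.Errorf("cannot read response from %q: %w", scrapeURL, err)
	}
	if int64(len(data)) > maxScrapeSize {
		// Report the per-target limit, not the global flag value.
		return nil, fmt.Errorf("the response from %q exceeds max_scrape_size (%d bytes)", scrapeURL, maxScrapeSize)
	}
	return data, nil
}
```

Capping the reader at `maxScrapeSize+1` bytes also avoids buffering an arbitrarily large response just to discover it is oversized.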
lib/promscrape/scraper_test.go (new file, 81 lines)
```go
package promscrape

import (
	"fmt"
	"math/rand"
	"testing"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
)

func TestScraperReload(t *testing.T) {
	f := func(oldCfgData, newCfgData string, reloadExpected bool) {
		pushData := func(_ *auth.Token, _ *prompbmarshal.WriteRequest) {}
		globalStopChan = make(chan struct{})
		defer close(globalStopChan)

		randName := rand.Int()
		sg := newScraperGroup(fmt.Sprintf("static_configs_%d", randName), pushData, globalStopChan)
		defer sg.stop()

		scrapeConfigPath := "test-scrape.yaml"
		var oldCfg, newCfg Config
		if err := oldCfg.parseData([]byte(oldCfgData), scrapeConfigPath); err != nil {
			t.Fatalf("cannot create old config: %s", err)
		}
		oldSws := oldCfg.getStaticScrapeWork()
		sg.update(oldSws)
		oldChangesCount := sg.changesCount.Get()

		if err := newCfg.parseData([]byte(newCfgData), scrapeConfigPath); err != nil {
			t.Fatalf("cannot create new config: %s", err)
		}
		doReload := (&newCfg).mustRestart(&oldCfg)
		if doReload != reloadExpected {
			t.Errorf("unexpected reload behaviour:\nexpected: %t\nactual: %t\n", reloadExpected, doReload)
		}
		newSws := newCfg.getStaticScrapeWork()
		sg.update(newSws)
		newChangesCount := sg.changesCount.Get()
		if (newChangesCount != oldChangesCount) != reloadExpected {
			t.Errorf("unexpected reload behaviour:\nexpected reload happen: %t\nactual reload happen: %t", reloadExpected, newChangesCount != oldChangesCount)
		}
	}

	// identical configs - no reload expected
	f(`
scrape_configs:
- job_name: node-exporter
  static_configs:
  - targets:
    - localhost:8429`, `
scrape_configs:
- job_name: node-exporter
  static_configs:
  - targets:
    - localhost:8429`, false)

	// a scrape target is added - reload expected
	f(`
scrape_configs:
- job_name: node-exporter
  static_configs:
  - targets:
    - localhost:8429`, `
scrape_configs:
- job_name: node-exporter
  static_configs:
  - targets:
    - localhost:8429
    - localhost:8428`, true)

	// max_scrape_size is changed - reload expected
	f(`
scrape_configs:
- job_name: node-exporter
  max_scrape_size: 1KiB
  static_configs:
  - targets:
    - localhost:8429`, `
scrape_configs:
- job_name: node-exporter
  max_scrape_size: 2KiB
  static_configs:
  - targets:
    - localhost:8429`, true)
}
```
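The three cases exercise, in order: an unchanged config (no reload expected), an added scrape target (reload expected), and a changed `max_scrape_size` value (reload expected), the last one being the regression case from issue #7260. The test runs with the standard Go tooling, e.g. `go test -run TestScraperReload ./lib/promscrape/`.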
lib/promscrape/scrapework.go

```diff
@@ -163,13 +163,13 @@ func (sw *ScrapeWork) key() string {
 	// Do not take into account OriginalLabels, since they can be changed with relabeling.
 	// Do not take into account RelabelConfigs, since it is already applied to Labels.
 	// Take into account JobNameOriginal in order to capture the case when the original job_name is changed via relabeling.
-	key := fmt.Sprintf("JobNameOriginal=%s, ScrapeURL=%s, ScrapeInterval=%s, ScrapeTimeout=%s, HonorLabels=%v, HonorTimestamps=%v, DenyRedirects=%v, Labels=%s, "+
-		"ExternalLabels=%s, "+
+	key := fmt.Sprintf("JobNameOriginal=%s, ScrapeURL=%s, ScrapeInterval=%s, ScrapeTimeout=%s, HonorLabels=%v, "+
+		"HonorTimestamps=%v, DenyRedirects=%v, Labels=%s, ExternalLabels=%s, MaxScrapeSize=%d, "+
 		"ProxyURL=%s, ProxyAuthConfig=%s, AuthConfig=%s, MetricRelabelConfigs=%q, "+
 		"SampleLimit=%d, DisableCompression=%v, DisableKeepAlive=%v, StreamParse=%v, "+
 		"ScrapeAlignInterval=%s, ScrapeOffset=%s, SeriesLimit=%d, NoStaleMarkers=%v",
-		sw.jobNameOriginal, sw.ScrapeURL, sw.ScrapeInterval, sw.ScrapeTimeout, sw.HonorLabels, sw.HonorTimestamps, sw.DenyRedirects, sw.Labels.String(),
-		sw.ExternalLabels.String(),
+		sw.jobNameOriginal, sw.ScrapeURL, sw.ScrapeInterval, sw.ScrapeTimeout, sw.HonorLabels,
+		sw.HonorTimestamps, sw.DenyRedirects, sw.Labels.String(), sw.ExternalLabels.String(), sw.MaxScrapeSize,
 		sw.ProxyURL.String(), sw.ProxyAuthConfig.String(), sw.AuthConfig.String(), sw.MetricRelabelConfigs.String(),
 		sw.SampleLimit, sw.DisableCompression, sw.DisableKeepAlive, sw.StreamParse,
 		sw.ScrapeAlignInterval, sw.ScrapeOffset, sw.SeriesLimit, sw.NoStaleMarkers)
```
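This hunk is the heart of the fix: `key()` produces the identity string that is compared during hot-reload to decide whether a scrape target must be restarted, so any field left out of the key is invisible to the reload logic. Adding `MaxScrapeSize=%d` makes a `max_scrape_size` change yield a different key. A minimal self-contained sketch of that mechanism, using a simplified stand-in type invented for illustration rather than the real `ScrapeWork`:

```go
package main

import "fmt"

// target is a simplified stand-in for promscrape.ScrapeWork.
type target struct {
	ScrapeURL     string
	MaxScrapeSize int64
}

// key builds the identity string used to detect config changes.
// Before the fix, MaxScrapeSize was missing from the real key, so
// changing it produced an identical key and no restart happened.
func (t *target) key() string {
	return fmt.Sprintf("ScrapeURL=%s, MaxScrapeSize=%d", t.ScrapeURL, t.MaxScrapeSize)
}

// needsRestart reports whether the target's effective config changed.
func needsRestart(oldT, newT *target) bool {
	return oldT.key() != newT.key()
}

func main() {
	oldT := &target{ScrapeURL: "http://localhost:8429/metrics", MaxScrapeSize: 1024}
	newT := &target{ScrapeURL: "http://localhost:8429/metrics", MaxScrapeSize: 2048}
	fmt.Println(needsRestart(oldT, newT)) // true: the limit change is now detected
}
```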