app/vmagent: add -remoteWrite.urlRelabelConfig for applying individual relabeling for each -remoteWrite.url

Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/320
Updates https://github.com/VictoriaMetrics/VictoriaMetrics/issues/308

Parent: c3b239eb1a
Commit: af19ca2483
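As a usage illustration for the new flag, the following sketch (hypothetical, not part of this commit) mimics how vmagent pairs the i-th -remoteWrite.urlRelabelConfig value with the i-th -remoteWrite.url value; vmagent itself uses its flagutil.NewArray helper, while the arrayFlag type below is a stand-in built on the standard flag package.

package main

import (
	"flag"
	"fmt"
	"strings"
)

// arrayFlag is a hypothetical stand-in for vmagent's flagutil.NewArray:
// a repeatable string flag collected into a slice.
type arrayFlag []string

func (a *arrayFlag) String() string     { return strings.Join(*a, ",") }
func (a *arrayFlag) Set(v string) error { *a = append(*a, v); return nil }

func main() {
	var urls, relabelConfigs arrayFlag
	flag.Var(&urls, "remoteWrite.url", "remote storage URL (may be repeated)")
	flag.Var(&relabelConfigs, "remoteWrite.urlRelabelConfig", "relabel config for the corresponding -remoteWrite.url (may be repeated)")
	flag.Parse()

	for i, u := range urls {
		// URLs without a corresponding -remoteWrite.urlRelabelConfig entry
		// simply get no per-URL relabeling.
		cfg := ""
		if i < len(relabelConfigs) {
			cfg = relabelConfigs[i]
		}
		fmt.Printf("url #%d: %s relabelConfig=%q\n", i+1, u, cfg)
	}
}

Running it with two -remoteWrite.url flags and a single -remoteWrite.urlRelabelConfig flag shows the second URL falling back to an empty per-URL relabel config, which matches the index-based lookup added to Init() below.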
@@ -69,11 +69,7 @@ sections from [Prometheus config file](https://prometheus.io/docs/prometheus/lat
 * `scrape_configs`
 
 All the other sections are ignored, including [remote_write](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write) section.
-Use `-remoteWrite.*` command-line flags instead for configuring remote write settings:
-
-* `-remoteWrite.url` for pointing to remote storage. Data to remote storage can be sent either via HTTP or HTTPS. See `-remoteWrite.tls*` flags for details.
-* `-remoteWrite.label` for adding labels to metrics before sending them to remote storage.
-* `-remoteWrite.relabelConfig` for applying relabeling to metrics before sending them to remote storage.
+Use `-remoteWrite.*` command-line flags instead for configuring remote write settings.
 
 The following scrape types in [scrape_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config) section are supported:
 
@@ -114,7 +110,8 @@ The relabeling can be defined in the following places:
 * At `scrape_config -> relabel_configs` section in `-promscrape.config` file. This relabeling is applied to targets when parsing the file during `vmagent` startup
   or during config reload after sending `SIGHUP` signal to `vmagent` via `kill -HUP`.
 * At `scrape_config -> metric_relabel_configs` section in `-promscrape.config` file. This relabeling is applied to metrics after each scrape for the configured targets.
-* At `-remoteWrite.relabelConfig` file. This relabeling is applied to all the collected metrics before sending them to `-remoteWrite.url`.
+* At `-remoteWrite.relabelConfig` file. This relabeling is applied to all the collected metrics before sending them to remote storage.
+* At `-remoteWrite.urlRelabelConfig` files. This relabeling is applied to metrics before sending them to the corresponding `-remoteWrite.url`.
 
 Read more about relabeling in the following articles:
 
@@ -50,7 +50,7 @@ type client struct {
 	stopCh chan struct{}
 }
 
-func newClient(remoteWriteURL, urlLabelValue string, fq *persistentqueue.FastQueue) *client {
+func newClient(remoteWriteURL, urlLabelValue string, fq *persistentqueue.FastQueue, concurrency int) *client {
 	authHeader := ""
 	if len(*basicAuthUsername) > 0 || len(*basicAuthPassword) > 0 {
 		// See https://en.wikipedia.org/wiki/Basic_access_authentication
@@ -98,7 +98,7 @@ func newClient(remoteWriteURL, urlLabelValue string, fq *persistentqueue.FastQue
 			host += ":80"
 		}
 	}
-	maxConns := 2 * *queues
+	maxConns := 2 * concurrency
 	hc := &fasthttp.HostClient{
 		Addr: host,
 		Name: "vmagent",
@@ -126,7 +126,7 @@ func newClient(remoteWriteURL, urlLabelValue string, fq *persistentqueue.FastQue
 	c.requestsOKCount = metrics.NewCounter(fmt.Sprintf(`vmagent_remotewrite_requests_total{url=%q, status_code="2XX"}`, c.urlLabelValue))
 	c.errorsCount = metrics.NewCounter(fmt.Sprintf(`vmagent_remotewrite_errors_total{url=%q}`, c.urlLabelValue))
 	c.retriesCount = metrics.NewCounter(fmt.Sprintf(`vmagent_remotewrite_retries_count_total{url=%q}`, c.urlLabelValue))
-	for i := 0; i < *queues; i++ {
+	for i := 0; i < concurrency; i++ {
 		c.wg.Add(1)
 		go func() {
 			defer c.wg.Done()
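The loop above starts `concurrency` worker goroutines per remote-write client, registering each on a sync.WaitGroup. The following self-contained sketch (hypothetical, not vmagent code) shows the same start-N-workers/close/wait pattern; presumably the client's MustStop relies on the WaitGroup in the same way.

package main

import (
	"fmt"
	"sync"
)

func main() {
	const concurrency = 4 // analogue of the value passed into newClient
	work := make(chan int)
	var wg sync.WaitGroup

	// Start the workers, mirroring the `for i := 0; i < concurrency; i++` loop above.
	for i := 0; i < concurrency; i++ {
		wg.Add(1)
		go func(worker int) {
			defer wg.Done()
			for block := range work {
				fmt.Printf("worker %d sent block %d\n", worker, block)
			}
		}(i)
	}

	// Feed some work, then shut down and wait for the workers to drain.
	for block := 0; block < 8; block++ {
		work <- block
	}
	close(work)
	wg.Wait()
}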
@@ -12,45 +12,42 @@ import (
 )
 
 var (
-	extraLabelsUnparsed = flagutil.NewArray("remoteWrite.label", "Optional label in the form 'name=value' to add to all the metrics before sending them to -remoteWrite.url. "+
+	unparsedLabelsGlobal = flagutil.NewArray("remoteWrite.label", "Optional label in the form 'name=value' to add to all the metrics before sending them to -remoteWrite.url. "+
 		"Pass multiple -remoteWrite.label flags in order to add multiple flags to metrics before sending them to remote storage")
-	relabelConfigPath = flag.String("remoteWrite.relabelConfig", "", "Optional path to file with relabel_config entries. These entries are applied to all the metrics "+
+	relabelConfigPathGlobal = flag.String("remoteWrite.relabelConfig", "", "Optional path to file with relabel_config entries. These entries are applied to all the metrics "+
 		"before sending them to -remoteWrite.url. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config for details")
 )
 
-var extraLabels []prompbmarshal.Label
-var prcs []promrelabel.ParsedRelabelConfig
+var labelsGlobal []prompbmarshal.Label
+var prcsGlobal []promrelabel.ParsedRelabelConfig
 
-// initRelabel must be called after parsing command-line flags.
-func initRelabel() {
-	// Init extraLabels
-	for _, s := range *extraLabelsUnparsed {
+// initRelabelGlobal must be called after parsing command-line flags.
+func initRelabelGlobal() {
+	// Init labelsGlobal
+	labelsGlobal = nil
+	for _, s := range *unparsedLabelsGlobal {
 		n := strings.IndexByte(s, '=')
 		if n < 0 {
 			logger.Panicf("FATAL: missing '=' in `-remoteWrite.label`. It must contain label in the form `name=value`; got %q", s)
 		}
-		extraLabels = append(extraLabels, prompbmarshal.Label{
+		labelsGlobal = append(labelsGlobal, prompbmarshal.Label{
 			Name:  s[:n],
 			Value: s[n+1:],
 		})
 	}
 
-	// Init prcs
-	if len(*relabelConfigPath) > 0 {
+	// Init prcsGlobal
+	prcsGlobal = nil
+	if len(*relabelConfigPathGlobal) > 0 {
 		var err error
-		prcs, err = promrelabel.LoadRelabelConfigs(*relabelConfigPath)
+		prcsGlobal, err = promrelabel.LoadRelabelConfigs(*relabelConfigPathGlobal)
 		if err != nil {
-			logger.Panicf("FATAL: cannot load relabel configs from -remoteWrite.relabelConfig=%q: %s", *relabelConfigPath, err)
+			logger.Panicf("FATAL: cannot load relabel configs from -remoteWrite.relabelConfig=%q: %s", *relabelConfigPathGlobal, err)
 		}
 	}
 }
 
-func resetRelabel() {
-	extraLabels = nil
-	prcs = nil
-}
-
-func (rctx *relabelCtx) applyRelabeling(tss []prompbmarshal.TimeSeries) []prompbmarshal.TimeSeries {
+func (rctx *relabelCtx) applyRelabeling(tss []prompbmarshal.TimeSeries, extraLabels []prompbmarshal.Label, prcs []promrelabel.ParsedRelabelConfig) []prompbmarshal.TimeSeries {
 	if len(extraLabels) == 0 && len(prcs) == 0 {
 		// Nothing to change.
 		return tss
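For reference, here is a minimal, self-contained sketch (hypothetical, not vmagent code) of the `name=value` parsing that initRelabelGlobal performs for every -remoteWrite.label value, using the same strings.IndexByte approach; it returns an error where vmagent calls logger.Panicf.

package main

import (
	"fmt"
	"strings"
)

type label struct{ Name, Value string }

// parseLabel splits "name=value" at the first '=' byte.
func parseLabel(s string) (label, error) {
	n := strings.IndexByte(s, '=')
	if n < 0 {
		return label{}, fmt.Errorf("missing '=' in %q; expected the form name=value", s)
	}
	return label{Name: s[:n], Value: s[n+1:]}, nil
}

func main() {
	for _, s := range []string{"datacenter=eu-west-1", "broken"} {
		l, err := parseLabel(s)
		if err != nil {
			fmt.Println("error:", err)
			continue
		}
		fmt.Printf("parsed label %s=%q\n", l.Name, l.Value)
	}
}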
@@ -105,3 +102,12 @@ var relabelCtxPool = &sync.Pool{
 		return &relabelCtx{}
 	},
 }
+
+func getRelabelCtx() *relabelCtx {
+	return relabelCtxPool.Get().(*relabelCtx)
+}
+
+func putRelabelCtx(rctx *relabelCtx) {
+	rctx.labels = rctx.labels[:0]
+	relabelCtxPool.Put(rctx)
+}
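The getRelabelCtx/putRelabelCtx pair added above is a standard sync.Pool recycle pattern: truncating rctx.labels to zero length on return keeps the underlying capacity, so reused contexts avoid reallocating the labels buffer. A generic, self-contained illustration (hypothetical, not vmagent code):

package main

import (
	"fmt"
	"sync"
)

type ctx struct {
	labels []string // stand-in for relabelCtx.labels
}

var ctxPool = &sync.Pool{
	New: func() interface{} { return &ctx{} },
}

func getCtx() *ctx { return ctxPool.Get().(*ctx) }

func putCtx(c *ctx) {
	c.labels = c.labels[:0] // drop contents, keep capacity for the next user
	ctxPool.Put(c)
}

func main() {
	c := getCtx()
	c.labels = append(c.labels, "job", "instance")
	fmt.Println("len:", len(c.labels), "cap kept:", cap(c.labels) >= 2)
	putCtx(c)
}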
@@ -11,6 +11,7 @@ import (
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/memory"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/persistentqueue"
 	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
 	"github.com/VictoriaMetrics/metrics"
 	xxhash "github.com/cespare/xxhash/v2"
 )
@@ -19,6 +20,7 @@ var (
 	remoteWriteURLs = flagutil.NewArray("remoteWrite.url", "Remote storage URL to write data to. It must support Prometheus remote_write API. "+
 		"It is recommended using VictoriaMetrics as remote storage. Example url: http://<victoriametrics-host>:8428/api/v1/write . "+
 		"Pass multiple -remoteWrite.url flags in order to write data concurrently to multiple remote storage systems")
+	relabelConfigPaths = flagutil.NewArray("remoteWrite.urlRelabelConfig", "Optional path to relabel config for the corresponding -remoteWrite.url")
 	tmpDataPath = flag.String("remoteWrite.tmpDataPath", "vmagent-remotewrite-data", "Path to directory where temporary data for remote write component is stored")
 	queues = flag.Int("remoteWrite.queues", 1, "The number of concurrent queues to each -remoteWrite.url. Set more queues if a single queue "+
 		"isn't enough for sending high volume of collected data to remote storage")
@@ -26,6 +28,8 @@ var (
 		"It is hidden by default, since it can contain sensistive auth info")
 )
 
+var rwctxs []*remoteWriteCtx
+
 // Init initializes remotewrite.
 //
 // It must be called after flag.Parse().
@@ -40,7 +44,7 @@ func Init() {
 		// remoteWrite.url can contain authentication codes, so hide it at `/metrics` output.
 		httpserver.RegisterSecretFlag("remoteWrite.url")
 	}
-	initRelabel()
+	initRelabelGlobal()
 
 	maxInmemoryBlocks := memory.Allowed() / len(*remoteWriteURLs) / maxRowsPerBlock / 100
 	if maxInmemoryBlocks > 200 {
@@ -53,27 +57,16 @@ func Init() {
 		maxInmemoryBlocks = 2
 	}
 	for i, remoteWriteURL := range *remoteWriteURLs {
-		h := xxhash.Sum64([]byte(remoteWriteURL))
-		path := fmt.Sprintf("%s/persistent-queue/%016X", *tmpDataPath, h)
-		fq := persistentqueue.MustOpenFastQueue(path, remoteWriteURL, maxInmemoryBlocks)
+		relabelConfigPath := ""
+		if i < len(*relabelConfigPaths) {
+			relabelConfigPath = (*relabelConfigPaths)[i]
+		}
 		urlLabelValue := fmt.Sprintf("secret-url-%d", i+1)
 		if *showRemoteWriteURL {
 			urlLabelValue = remoteWriteURL
 		}
-		_ = metrics.NewGauge(fmt.Sprintf(`vmagent_remotewrite_pending_data_bytes{url=%q, hash="%016X"}`, urlLabelValue, h), func() float64 {
-			return float64(fq.GetPendingBytes())
-		})
-		_ = metrics.NewGauge(fmt.Sprintf(`vmagent_remotewrite_pending_inmemory_blocks{url=%q}`, urlLabelValue), func() float64 {
-			return float64(fq.GetInmemoryQueueLen())
-		})
-		c := newClient(remoteWriteURL, urlLabelValue, fq)
-		fqs = append(fqs, fq)
-		cs = append(cs, c)
-	}
-
-	pss = make([]*pendingSeries, *queues)
-	for i := range pss {
-		pss[i] = newPendingSeries(pushBlockToPersistentQueues)
+		rwctx := newRemoteWriteCtx(remoteWriteURL, relabelConfigPath, maxInmemoryBlocks, urlLabelValue)
+		rwctxs = append(rwctxs, rwctx)
 	}
 }
 
@@ -81,30 +74,20 @@ func Init() {
 //
 // It is expected that nobody calls Push during and after the call to this func.
 func Stop() {
-	for _, ps := range pss {
-		ps.MustStop()
+	for _, rwctx := range rwctxs {
+		rwctx.MustStop()
 	}
-
-	// Close all the persistent queues. This should unblock clients waiting in MustReadBlock.
-	for _, fq := range fqs {
-		fq.MustClose()
-	}
-	fqs = nil
-
-	// Stop all the clients
-	for _, c := range cs {
-		c.MustStop()
-	}
-	cs = nil
-
-	resetRelabel()
+	rwctxs = nil
 }
 
 // Push sends wr to remote storage systems set via `-remoteWrite.url`.
 //
 // Each timeseries in wr.Timeseries must contain one sample.
 func Push(wr *prompbmarshal.WriteRequest) {
-	rctx := relabelCtxPool.Get().(*relabelCtx)
+	var rctx *relabelCtx
+	if len(prcsGlobal) > 0 {
+		rctx = getRelabelCtx()
+	}
 	tss := wr.Timeseries
 	for len(tss) > 0 {
 		// Process big tss in smaller blocks in order to reduce maxmimum memory usage
@@ -115,22 +98,92 @@ func Push(wr *prompbmarshal.WriteRequest) {
 		} else {
 			tss = nil
 		}
-		tssBlock = rctx.applyRelabeling(tssBlock)
-		idx := atomic.AddUint64(&pssNextIdx, 1) % uint64(len(pss))
-		pss[idx].Push(tssBlock)
+		if rctx != nil {
+			tssBlockLen := len(tssBlock)
+			tssBlock = rctx.applyRelabeling(tssBlock, labelsGlobal, prcsGlobal)
+			globalRelabelMetricsDropped.Add(tssBlockLen - len(tssBlock))
+		}
+		for _, rwctx := range rwctxs {
+			rwctx.Push(tssBlock)
+		}
 		rctx.reset()
 	}
-	relabelCtxPool.Put(rctx)
-}
-
-func pushBlockToPersistentQueues(block []byte) {
-	for _, fq := range fqs {
-		fq.MustWriteBlock(block)
+	if rctx != nil {
+		putRelabelCtx(rctx)
 	}
 }
 
-var fqs []*persistentqueue.FastQueue
-var cs []*client
+var globalRelabelMetricsDropped = metrics.NewCounter("vmagent_remotewrite_global_relabel_metrics_dropped_total")
 
-var pssNextIdx uint64
-var pss []*pendingSeries
+type remoteWriteCtx struct {
+	fq         *persistentqueue.FastQueue
+	c          *client
+	prcs       []promrelabel.ParsedRelabelConfig
+	pss        []*pendingSeries
+	pssNextIdx uint64
+
+	relabelMetricsDropped *metrics.Counter
+}
+
+func newRemoteWriteCtx(remoteWriteURL, relabelConfigPath string, maxInmemoryBlocks int, urlLabelValue string) *remoteWriteCtx {
+	h := xxhash.Sum64([]byte(remoteWriteURL))
+	path := fmt.Sprintf("%s/persistent-queue/%016X", *tmpDataPath, h)
+	fq := persistentqueue.MustOpenFastQueue(path, remoteWriteURL, maxInmemoryBlocks)
+	_ = metrics.NewGauge(fmt.Sprintf(`vmagent_remotewrite_pending_data_bytes{url=%q, hash="%016X"}`, urlLabelValue, h), func() float64 {
+		return float64(fq.GetPendingBytes())
+	})
+	_ = metrics.NewGauge(fmt.Sprintf(`vmagent_remotewrite_pending_inmemory_blocks{url=%q}`, urlLabelValue), func() float64 {
+		return float64(fq.GetInmemoryQueueLen())
+	})
+	c := newClient(remoteWriteURL, urlLabelValue, fq, *queues)
+	var prcs []promrelabel.ParsedRelabelConfig
+	if len(relabelConfigPath) > 0 {
+		var err error
+		prcs, err = promrelabel.LoadRelabelConfigs(relabelConfigPath)
+		if err != nil {
+			logger.Panicf("FATAL: cannot load relabel configs from -remoteWrite.urlRelabelConfig=%q: %s", relabelConfigPath, err)
+		}
+	}
+	pss := make([]*pendingSeries, *queues)
+	for i := range pss {
+		pss[i] = newPendingSeries(fq.MustWriteBlock)
+	}
+	return &remoteWriteCtx{
+		fq:   fq,
+		c:    c,
+		prcs: prcs,
+		pss:  pss,
+
+		relabelMetricsDropped: metrics.NewCounter(fmt.Sprintf(`vmagent_remotewrite_relabel_metrics_dropped_total{url=%q}`, urlLabelValue)),
+	}
+}
+
+func (rwctx *remoteWriteCtx) MustStop() {
+	for _, ps := range rwctx.pss {
+		ps.MustStop()
+	}
+	rwctx.pss = nil
+	rwctx.fq.MustClose()
+	rwctx.fq = nil
+	rwctx.prcs = nil
+	rwctx.c.MustStop()
+	rwctx.c = nil
+
+	rwctx.relabelMetricsDropped = nil
+}
+
+func (rwctx *remoteWriteCtx) Push(tss []prompbmarshal.TimeSeries) {
+	var rctx *relabelCtx
+	if len(rwctx.prcs) > 0 {
+		rctx = getRelabelCtx()
+		tssLen := len(tss)
+		tss = rctx.applyRelabeling(tss, nil, rwctx.prcs)
+		rwctx.relabelMetricsDropped.Add(tssLen - len(tss))
+	}
+	pss := rwctx.pss
+	idx := atomic.AddUint64(&rwctx.pssNextIdx, 1) % uint64(len(pss))
+	pss[idx].Push(tss)
+	if rctx != nil {
+		putRelabelCtx(rctx)
+	}
+}
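Taken together, the Push changes above introduce a two-stage relabeling pipeline: one global pass (labelsGlobal/prcsGlobal from -remoteWrite.relabelConfig) runs once per block, and each remoteWriteCtx then applies its own per-URL config from -remoteWrite.urlRelabelConfig, counting dropped series separately. The following self-contained model (hypothetical: plain strings and filter funcs stand in for prompbmarshal.TimeSeries and promrelabel.ParsedRelabelConfig) shows the flow:

package main

import (
	"fmt"
	"strings"
)

type series = string
type relabelFunc func([]series) []series

// writeCtx loosely models remoteWriteCtx: an optional per-URL relabeling
// stage plus a counter of series it dropped.
type writeCtx struct {
	name    string
	relabel relabelFunc // may be nil: no -remoteWrite.urlRelabelConfig for this URL
	dropped int
}

func (w *writeCtx) push(tss []series) {
	if w.relabel != nil {
		before := len(tss)
		tss = w.relabel(tss)
		w.dropped += before - len(tss)
	}
	fmt.Printf("%s received %d series (dropped %d so far)\n", w.name, len(tss), w.dropped)
}

// dropPrefix builds a toy "relabeling" stage that drops series by name prefix.
func dropPrefix(prefix string) relabelFunc {
	return func(tss []series) []series {
		kept := tss[:0:0]
		for _, ts := range tss {
			if !strings.HasPrefix(ts, prefix) {
				kept = append(kept, ts)
			}
		}
		return kept
	}
}

func main() {
	global := dropPrefix("go_") // analogue of -remoteWrite.relabelConfig
	ctxs := []*writeCtx{
		{name: "url-1", relabel: dropPrefix("vm_")}, // analogue of -remoteWrite.urlRelabelConfig
		{name: "url-2"},                             // no per-URL relabeling
	}

	block := []series{"go_goroutines", "vm_rows_inserted", "up"}
	block = global(block) // global stage runs once per block
	for _, c := range ctxs {
		c.push(block) // each URL then applies its own stage independently
	}
}

Series dropped by the global stage never reach any URL, while series dropped by a per-URL stage still reach the other URLs, which is the behaviour the two dropped-metrics counters added in this commit make observable.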