2020-02-23 12:35:47 +01:00
package promscrape
import (
2022-05-06 23:02:54 +02:00
"encoding/json"
2020-04-13 12:15:30 +02:00
"flag"
2020-02-23 12:35:47 +01:00
"fmt"
"net/url"
"path/filepath"
2021-02-26 20:41:54 +01:00
"sort"
2021-09-09 17:49:37 +02:00
"strconv"
2020-02-23 12:35:47 +01:00
"strings"
"time"
2022-08-27 01:22:37 +02:00
"github.com/VictoriaMetrics/VictoriaMetrics/lib/auth"
2021-02-28 17:39:57 +01:00
"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
2021-02-26 11:46:28 +01:00
"github.com/VictoriaMetrics/VictoriaMetrics/lib/cgroup"
2020-08-13 15:43:55 +02:00
"github.com/VictoriaMetrics/VictoriaMetrics/lib/envtemplate"
2021-12-02 23:08:42 +01:00
"github.com/VictoriaMetrics/VictoriaMetrics/lib/fs"
2020-02-23 12:35:47 +01:00
"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
2020-04-13 11:59:05 +02:00
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promauth"
2020-02-23 12:35:47 +01:00
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
2022-07-13 22:43:18 +02:00
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/azure"
2020-05-04 19:48:02 +02:00
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/consul"
2023-05-04 11:36:21 +02:00
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/consulagent"
2021-06-14 12:15:04 +02:00
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/digitalocean"
2020-05-05 23:01:49 +02:00
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/dns"
2021-06-25 10:42:47 +02:00
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/docker"
2021-06-25 12:20:18 +02:00
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/dockerswarm"
2020-04-27 18:25:45 +02:00
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/ec2"
2020-11-20 12:38:12 +01:00
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/eureka"
2020-04-24 16:50:21 +02:00
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/gce"
2021-06-22 12:33:37 +02:00
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/http"
2020-04-13 20:02:27 +02:00
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/kubernetes"
2023-02-22 13:59:56 +01:00
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/kuma"
2023-01-05 23:03:58 +01:00
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/nomad"
2020-10-05 15:45:33 +02:00
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/openstack"
2022-08-04 19:44:16 +02:00
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discovery/yandexcloud"
2022-02-11 15:17:00 +01:00
"github.com/VictoriaMetrics/VictoriaMetrics/lib/promutils"
2020-12-24 09:56:10 +01:00
"github.com/VictoriaMetrics/VictoriaMetrics/lib/proxy"
2021-02-21 22:21:17 +01:00
"github.com/VictoriaMetrics/metrics"
2022-06-21 19:23:30 +02:00
"github.com/cespare/xxhash/v2"
2020-02-23 12:35:47 +01:00
"gopkg.in/yaml.v2"
)
2020-04-13 12:15:30 +02:00
var (
	// Command-line flags controlling Prometheus-compatible scraping behavior.
	noStaleMarkers       = flag.Bool("promscrape.noStaleMarkers", false, "Whether to disable sending Prometheus stale markers for metrics when scrape target disappears. This option may reduce memory usage if stale markers aren't needed for your setup. This option also disables populating the scrape_series_added metric. See https://prometheus.io/docs/concepts/jobs_instances/#automatically-generated-labels-and-time-series")
	seriesLimitPerTarget = flag.Int("promscrape.seriesLimitPerTarget", 0, "Optional limit on the number of unique time series a single scrape target can expose. See https://docs.victoriametrics.com/vmagent.html#cardinality-limiter for more info")
	strictParse          = flag.Bool("promscrape.config.strictParse", true, "Whether to deny unsupported fields in -promscrape.config . Set to false in order to silently skip unsupported fields")
	dryRun               = flag.Bool("promscrape.config.dryRun", false, "Checks -promscrape.config file for errors and unsupported fields and then exits. "+
		"Returns non-zero exit code on parsing errors and emits these errors to stderr. "+
		"See also -promscrape.config.strictParse command-line flag. "+
		"Pass -loggerLevel=ERROR if you don't need to see info messages in the output.")
	dropOriginalLabels = flag.Bool("promscrape.dropOriginalLabels", false, "Whether to drop original labels for scrape targets at /targets and /api/v1/targets pages. "+
		"This may be needed for reducing memory usage when original labels for big number of scrape targets occupy big amounts of memory. "+
		"Note that this reduces debuggability for improper per-target relabeling configs")
	clusterMembersCount = flag.Int("promscrape.cluster.membersCount", 0, "The number of members in a cluster of scrapers. "+
		"Each member must have a unique -promscrape.cluster.memberNum in the range 0 ... promscrape.cluster.membersCount-1 . "+
		"Each member then scrapes roughly 1/N of all the targets. By default, cluster scraping is disabled, i.e. a single scraper scrapes all the targets")
	// NOTE: the help text previously read "The number of number in the cluster of scrapers" - fixed typo.
	clusterMemberNum = flag.String("promscrape.cluster.memberNum", "0", "The number of the member in the cluster of scrapers. "+
		"It must be a unique value in the range 0 ... promscrape.cluster.membersCount-1 across scrapers in the cluster. "+
		"Can be specified as pod name of Kubernetes StatefulSet - pod-name-Num, where Num is a numeric part of pod name")
	clusterReplicationFactor = flag.Int("promscrape.cluster.replicationFactor", 1, "The number of members in the cluster, which scrape the same targets. "+
		"If the replication factor is greater than 1, then the deduplication must be enabled at remote storage side. See https://docs.victoriametrics.com/#deduplication")
	clusterName = flag.String("promscrape.cluster.name", "", "Optional name of the cluster. If multiple vmagent clusters scrape the same targets, "+
		"then each cluster must have unique name in order to properly de-duplicate samples received from these clusters. "+
		"See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2679")
)
2022-04-12 11:24:11 +02:00
var clusterMemberID int
2022-04-12 11:36:17 +02:00
func mustInitClusterMemberID ( ) {
2022-04-12 11:24:11 +02:00
s := * clusterMemberNum
// special case for kubernetes deployment, where pod-name formatted at some-pod-name-1
// obtain memberNum from last segment
// https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2359
if idx := strings . LastIndexByte ( s , '-' ) ; idx >= 0 {
s = s [ idx + 1 : ]
}
2022-06-01 00:42:30 +02:00
n , err := strconv . Atoi ( s )
2022-04-12 11:24:11 +02:00
if err != nil {
2022-04-12 11:36:17 +02:00
logger . Fatalf ( "cannot parse -promscrape.cluster.memberNum=%q: %s" , * clusterMemberNum , err )
2022-04-12 11:24:11 +02:00
}
2022-06-01 00:42:30 +02:00
clusterMemberID = n
2022-04-12 11:24:11 +02:00
}
2020-02-23 12:35:47 +01:00
// Config represents essential parts from Prometheus config defined at https://prometheus.io/docs/prometheus/latest/configuration/configuration/
type Config struct {
2022-04-16 19:28:46 +02:00
Global GlobalConfig ` yaml:"global,omitempty" `
ScrapeConfigs [ ] * ScrapeConfig ` yaml:"scrape_configs,omitempty" `
ScrapeConfigFiles [ ] string ` yaml:"scrape_config_files,omitempty" `
2020-02-23 12:35:47 +01:00
// This is set to the directory from where the config has been loaded.
baseDir string
}
2022-04-16 13:25:54 +02:00
func ( cfg * Config ) unmarshal ( data [ ] byte , isStrict bool ) error {
var err error
2022-10-26 13:49:20 +02:00
data , err = envtemplate . ReplaceBytes ( data )
2022-10-18 09:28:39 +02:00
if err != nil {
return fmt . Errorf ( "cannot expand environment variables: %w" , err )
}
2022-04-16 13:25:54 +02:00
if isStrict {
if err = yaml . UnmarshalStrict ( data , cfg ) ; err != nil {
err = fmt . Errorf ( "%w; pass -promscrape.config.strictParse=false command-line flag for ignoring unknown fields in yaml config" , err )
}
} else {
err = yaml . Unmarshal ( data , cfg )
}
return err
}
2021-08-26 07:51:14 +02:00
// marshal returns the YAML representation of cfg.
// It panics on marshaling failure, since that indicates a program bug.
func (cfg *Config) marshal() []byte {
	out, err := yaml.Marshal(cfg)
	if err != nil {
		logger.Panicf("BUG: cannot marshal Config: %s", err)
	}
	return out
}
2021-04-05 21:02:09 +02:00
func ( cfg * Config ) mustStart ( ) {
startTime := time . Now ( )
logger . Infof ( "starting service discovery routines..." )
2022-04-16 19:28:46 +02:00
for _ , sc := range cfg . ScrapeConfigs {
sc . mustStart ( cfg . baseDir )
2021-04-05 21:02:09 +02:00
}
2021-06-18 09:53:10 +02:00
jobNames := cfg . getJobNames ( )
tsmGlobal . registerJobNames ( jobNames )
2021-04-05 21:02:09 +02:00
logger . Infof ( "started service discovery routines in %.3f seconds" , time . Since ( startTime ) . Seconds ( ) )
}
2022-04-16 19:28:46 +02:00
func ( cfg * Config ) mustRestart ( prevCfg * Config ) {
startTime := time . Now ( )
logger . Infof ( "restarting service discovery routines..." )
prevScrapeCfgByName := make ( map [ string ] * ScrapeConfig , len ( prevCfg . ScrapeConfigs ) )
for _ , scPrev := range prevCfg . ScrapeConfigs {
prevScrapeCfgByName [ scPrev . JobName ] = scPrev
}
2022-07-18 16:15:02 +02:00
// Restart all the scrape jobs on Global config change.
// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2884
needGlobalRestart := ! areEqualGlobalConfigs ( & cfg . Global , & prevCfg . Global )
2022-12-04 06:53:01 +01:00
// Loop over the new jobs, start new ones and restart updated ones.
2022-04-16 19:28:46 +02:00
var started , stopped , restarted int
currentJobNames := make ( map [ string ] struct { } , len ( cfg . ScrapeConfigs ) )
for i , sc := range cfg . ScrapeConfigs {
currentJobNames [ sc . JobName ] = struct { } { }
scPrev := prevScrapeCfgByName [ sc . JobName ]
if scPrev == nil {
// New scrape config has been appeared. Start it.
sc . mustStart ( cfg . baseDir )
started ++
continue
}
2022-07-18 16:15:02 +02:00
if ! needGlobalRestart && areEqualScrapeConfigs ( scPrev , sc ) {
2022-04-16 19:28:46 +02:00
// The scrape config didn't change, so no need to restart it.
// Use the reference to the previous job, so it could be stopped properly later.
cfg . ScrapeConfigs [ i ] = scPrev
} else {
// The scrape config has been changed. Stop the previous scrape config and start new one.
scPrev . mustStop ( )
sc . mustStart ( cfg . baseDir )
restarted ++
}
}
// Stop preious jobs which weren't found in the current configuration.
for _ , scPrev := range prevCfg . ScrapeConfigs {
if _ , ok := currentJobNames [ scPrev . JobName ] ; ! ok {
scPrev . mustStop ( )
stopped ++
}
}
jobNames := cfg . getJobNames ( )
tsmGlobal . registerJobNames ( jobNames )
logger . Infof ( "restarted service discovery routines in %.3f seconds, stopped=%d, started=%d, restarted=%d" , time . Since ( startTime ) . Seconds ( ) , stopped , started , restarted )
}
2022-07-18 16:15:02 +02:00
// areEqualGlobalConfigs reports whether a and b have identical JSON representations.
func areEqualGlobalConfigs(a, b *GlobalConfig) bool {
	return string(a.marshalJSON()) == string(b.marshalJSON())
}
2022-04-16 19:28:46 +02:00
func areEqualScrapeConfigs ( a , b * ScrapeConfig ) bool {
2022-05-06 23:02:54 +02:00
sa := a . marshalJSON ( )
sb := b . marshalJSON ( )
2022-04-16 19:28:46 +02:00
return string ( sa ) == string ( sb )
}
2022-05-06 23:02:54 +02:00
func ( sc * ScrapeConfig ) unmarshalJSON ( data [ ] byte ) error {
return json . Unmarshal ( data , sc )
2022-04-22 12:19:20 +02:00
}
2022-05-06 23:02:54 +02:00
func ( sc * ScrapeConfig ) marshalJSON ( ) [ ] byte {
data , err := json . Marshal ( sc )
2022-04-16 19:28:46 +02:00
if err != nil {
logger . Panicf ( "BUG: cannot marshal ScrapeConfig: %s" , err )
}
return data
}
2022-07-18 16:15:02 +02:00
// marshalJSON returns the JSON representation of gc.
// It panics on failure, since GlobalConfig must always be marshalable.
func (gc *GlobalConfig) marshalJSON() []byte {
	out, err := json.Marshal(gc)
	if err != nil {
		logger.Panicf("BUG: cannot marshal GlobalConfig: %s", err)
	}
	return out
}
2021-03-01 13:13:56 +01:00
func ( cfg * Config ) mustStop ( ) {
startTime := time . Now ( )
logger . Infof ( "stopping service discovery routines..." )
2022-04-16 19:28:46 +02:00
for _ , sc := range cfg . ScrapeConfigs {
sc . mustStop ( )
2021-03-01 13:13:56 +01:00
}
logger . Infof ( "stopped service discovery routines in %.3f seconds" , time . Since ( startTime ) . Seconds ( ) )
}
2021-06-18 09:53:10 +02:00
// getJobNames returns all the scrape job names from the cfg.
func ( cfg * Config ) getJobNames ( ) [ ] string {
a := make ( [ ] string , 0 , len ( cfg . ScrapeConfigs ) )
2022-04-16 19:28:46 +02:00
for _ , sc := range cfg . ScrapeConfigs {
a = append ( a , sc . JobName )
2021-06-18 09:53:10 +02:00
}
return a
}
2020-02-23 12:35:47 +01:00
// GlobalConfig represents essential parts for `global` section of Prometheus config.
//
// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/
type GlobalConfig struct {
2022-04-16 13:25:54 +02:00
ScrapeInterval * promutils . Duration ` yaml:"scrape_interval,omitempty" `
ScrapeTimeout * promutils . Duration ` yaml:"scrape_timeout,omitempty" `
2022-11-30 06:22:12 +01:00
ExternalLabels * promutils . Labels ` yaml:"external_labels,omitempty" `
2022-10-01 15:13:17 +02:00
}
2020-02-23 12:35:47 +01:00
// ScrapeConfig represents essential parts for `scrape_config` section of Prometheus config.
//
// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config
type ScrapeConfig struct {
JobName string ` yaml:"job_name" `
2022-04-16 13:25:54 +02:00
ScrapeInterval * promutils . Duration ` yaml:"scrape_interval,omitempty" `
ScrapeTimeout * promutils . Duration ` yaml:"scrape_timeout,omitempty" `
2020-11-13 15:17:03 +01:00
MetricsPath string ` yaml:"metrics_path,omitempty" `
HonorLabels bool ` yaml:"honor_labels,omitempty" `
2021-10-16 19:48:15 +02:00
HonorTimestamps * bool ` yaml:"honor_timestamps,omitempty" `
2020-11-13 15:17:03 +01:00
Scheme string ` yaml:"scheme,omitempty" `
Params map [ string ] [ ] string ` yaml:"params,omitempty" `
2021-04-02 20:17:43 +02:00
HTTPClientConfig promauth . HTTPClientConfig ` yaml:",inline" `
2021-10-26 20:21:08 +02:00
ProxyURL * proxy . URL ` yaml:"proxy_url,omitempty" `
2020-11-13 15:17:03 +01:00
RelabelConfigs [ ] promrelabel . RelabelConfig ` yaml:"relabel_configs,omitempty" `
MetricRelabelConfigs [ ] promrelabel . RelabelConfig ` yaml:"metric_relabel_configs,omitempty" `
SampleLimit int ` yaml:"sample_limit,omitempty" `
2020-02-23 12:35:47 +01:00
2022-07-13 22:43:18 +02:00
AzureSDConfigs [ ] azure . SDConfig ` yaml:"azure_sd_configs,omitempty" `
2021-06-25 12:20:18 +02:00
ConsulSDConfigs [ ] consul . SDConfig ` yaml:"consul_sd_configs,omitempty" `
2023-05-04 11:36:21 +02:00
ConsulAgentSDConfigs [ ] consulagent . SDConfig ` yaml:"consulagent_sd_configs,omitempty" `
2021-06-25 12:20:18 +02:00
DigitaloceanSDConfigs [ ] digitalocean . SDConfig ` yaml:"digitalocean_sd_configs,omitempty" `
DNSSDConfigs [ ] dns . SDConfig ` yaml:"dns_sd_configs,omitempty" `
DockerSDConfigs [ ] docker . SDConfig ` yaml:"docker_sd_configs,omitempty" `
DockerSwarmSDConfigs [ ] dockerswarm . SDConfig ` yaml:"dockerswarm_sd_configs,omitempty" `
EC2SDConfigs [ ] ec2 . SDConfig ` yaml:"ec2_sd_configs,omitempty" `
EurekaSDConfigs [ ] eureka . SDConfig ` yaml:"eureka_sd_configs,omitempty" `
FileSDConfigs [ ] FileSDConfig ` yaml:"file_sd_configs,omitempty" `
GCESDConfigs [ ] gce . SDConfig ` yaml:"gce_sd_configs,omitempty" `
HTTPSDConfigs [ ] http . SDConfig ` yaml:"http_sd_configs,omitempty" `
KubernetesSDConfigs [ ] kubernetes . SDConfig ` yaml:"kubernetes_sd_configs,omitempty" `
2023-02-22 13:59:56 +01:00
KumaSDConfigs [ ] kuma . SDConfig ` yaml:"kuma_sd_configs,omitempty" `
2023-01-05 23:03:58 +01:00
NomadSDConfigs [ ] nomad . SDConfig ` yaml:"nomad_sd_configs,omitempty" `
2021-06-25 12:20:18 +02:00
OpenStackSDConfigs [ ] openstack . SDConfig ` yaml:"openstack_sd_configs,omitempty" `
StaticConfigs [ ] StaticConfig ` yaml:"static_configs,omitempty" `
2022-08-04 19:44:16 +02:00
YandexCloudSDConfigs [ ] yandexcloud . SDConfig ` yaml:"yandexcloud_sd_configs,omitempty" `
2021-04-02 20:17:43 +02:00
2020-07-02 13:19:11 +02:00
// These options are supported only by lib/promscrape.
2021-04-03 23:40:08 +02:00
DisableCompression bool ` yaml:"disable_compression,omitempty" `
DisableKeepAlive bool ` yaml:"disable_keepalive,omitempty" `
StreamParse bool ` yaml:"stream_parse,omitempty" `
2022-04-16 13:25:54 +02:00
ScrapeAlignInterval * promutils . Duration ` yaml:"scrape_align_interval,omitempty" `
ScrapeOffset * promutils . Duration ` yaml:"scrape_offset,omitempty" `
2021-09-01 13:14:37 +02:00
SeriesLimit int ` yaml:"series_limit,omitempty" `
2022-10-07 22:36:11 +02:00
NoStaleMarkers * bool ` yaml:"no_stale_markers,omitempty" `
2021-04-03 23:40:08 +02:00
ProxyClientConfig promauth . ProxyClientConfig ` yaml:",inline" `
2020-07-02 13:19:11 +02:00
2020-02-23 12:35:47 +01:00
// This is set in loadConfig
swc * scrapeWorkConfig
}
2021-04-05 21:02:09 +02:00
func ( sc * ScrapeConfig ) mustStart ( baseDir string ) {
2022-11-30 06:22:12 +01:00
swosFunc := func ( metaLabels * promutils . Labels ) interface { } {
target := metaLabels . Get ( "__address__" )
2021-04-08 08:31:05 +02:00
sw , err := sc . swc . getScrapeWork ( target , nil , metaLabels )
if err != nil {
logger . Errorf ( "cannot create kubernetes_sd_config target %q for job_name %q: %s" , target , sc . swc . jobName , err )
return nil
2021-04-05 21:02:09 +02:00
}
2021-04-08 08:31:05 +02:00
return sw
}
for i := range sc . KubernetesSDConfigs {
2021-04-05 21:02:09 +02:00
sc . KubernetesSDConfigs [ i ] . MustStart ( baseDir , swosFunc )
}
}
2021-03-01 13:13:56 +01:00
func ( sc * ScrapeConfig ) mustStop ( ) {
2022-07-13 22:43:18 +02:00
for i := range sc . AzureSDConfigs {
sc . AzureSDConfigs [ i ] . MustStop ( )
}
2021-03-01 13:13:56 +01:00
for i := range sc . ConsulSDConfigs {
sc . ConsulSDConfigs [ i ] . MustStop ( )
}
2023-05-04 11:36:21 +02:00
for i := range sc . ConsulAgentSDConfigs {
sc . ConsulAgentSDConfigs [ i ] . MustStop ( )
}
2021-06-25 11:10:20 +02:00
for i := range sc . DigitaloceanSDConfigs {
sc . DigitaloceanSDConfigs [ i ] . MustStop ( )
}
for i := range sc . DNSSDConfigs {
sc . DNSSDConfigs [ i ] . MustStop ( )
2021-03-01 13:13:56 +01:00
}
2021-06-25 10:42:47 +02:00
for i := range sc . DockerSDConfigs {
sc . DockerSDConfigs [ i ] . MustStop ( )
}
2021-03-01 13:13:56 +01:00
for i := range sc . DockerSwarmSDConfigs {
sc . DockerSwarmSDConfigs [ i ] . MustStop ( )
}
for i := range sc . EC2SDConfigs {
sc . EC2SDConfigs [ i ] . MustStop ( )
}
2021-06-25 11:10:20 +02:00
for i := range sc . EurekaSDConfigs {
sc . EurekaSDConfigs [ i ] . MustStop ( )
}
2021-03-01 13:13:56 +01:00
for i := range sc . GCESDConfigs {
sc . GCESDConfigs [ i ] . MustStop ( )
}
2021-06-25 10:39:18 +02:00
for i := range sc . HTTPSDConfigs {
sc . HTTPSDConfigs [ i ] . MustStop ( )
}
2021-06-25 11:10:20 +02:00
for i := range sc . KubernetesSDConfigs {
sc . KubernetesSDConfigs [ i ] . MustStop ( )
}
2023-02-22 13:59:56 +01:00
for i := range sc . KumaSDConfigs {
sc . KumaSDConfigs [ i ] . MustStop ( )
}
2023-01-05 23:03:58 +01:00
for i := range sc . NomadSDConfigs {
sc . NomadSDConfigs [ i ] . MustStop ( )
}
2021-06-25 11:10:20 +02:00
for i := range sc . OpenStackSDConfigs {
sc . OpenStackSDConfigs [ i ] . MustStop ( )
}
2021-03-01 13:13:56 +01:00
}
2020-02-23 12:35:47 +01:00
// FileSDConfig represents file-based service discovery config.
//
// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#file_sd_config
type FileSDConfig struct {
	Files []string `yaml:"files"`
	// `refresh_interval` is ignored. See `-promscrape.fileSDCheckInterval`
}
// StaticConfig represents essential parts for `static_config` section of Prometheus config.
//
// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#static_config
type StaticConfig struct {
Targets [ ] string ` yaml:"targets" `
2022-11-30 06:22:12 +01:00
Labels * promutils . Labels ` yaml:"labels,omitempty" `
2020-02-23 12:35:47 +01:00
}
func loadStaticConfigs ( path string ) ( [ ] StaticConfig , error ) {
2021-12-02 23:08:42 +01:00
data , err := fs . ReadFileOrHTTP ( path )
2020-02-23 12:35:47 +01:00
if err != nil {
2020-06-30 21:58:18 +02:00
return nil , fmt . Errorf ( "cannot read `static_configs` from %q: %w" , path , err )
2020-02-23 12:35:47 +01:00
}
2022-10-26 13:49:20 +02:00
data , err = envtemplate . ReplaceBytes ( data )
2022-10-18 09:28:39 +02:00
if err != nil {
return nil , fmt . Errorf ( "cannot expand environment vars in %q: %w" , path , err )
}
2020-02-23 12:35:47 +01:00
var stcs [ ] StaticConfig
2020-03-06 19:18:28 +01:00
if err := yaml . UnmarshalStrict ( data , & stcs ) ; err != nil {
2020-06-30 21:58:18 +02:00
return nil , fmt . Errorf ( "cannot unmarshal `static_configs` from %q: %w" , path , err )
2020-02-23 12:35:47 +01:00
}
return stcs , nil
}
// loadConfig loads Prometheus config from the given path.
2021-11-05 13:41:14 +01:00
func loadConfig ( path string ) ( * Config , [ ] byte , error ) {
2021-12-02 23:08:42 +01:00
data , err := fs . ReadFileOrHTTP ( path )
2020-02-23 12:35:47 +01:00
if err != nil {
2021-11-05 13:41:14 +01:00
return nil , nil , fmt . Errorf ( "cannot read Prometheus config from %q: %w" , path , err )
2020-02-23 12:35:47 +01:00
}
2021-08-26 07:51:14 +02:00
var c Config
2021-11-05 13:41:14 +01:00
dataNew , err := c . parseData ( data , path )
if err != nil {
return nil , nil , fmt . Errorf ( "cannot parse Prometheus config from %q: %w" , path , err )
2021-08-26 07:51:14 +02:00
}
2021-11-05 13:41:14 +01:00
return & c , dataNew , nil
2021-08-26 07:51:14 +02:00
}
2022-04-16 19:28:46 +02:00
func loadScrapeConfigFiles ( baseDir string , scrapeConfigFiles [ ] string ) ( [ ] * ScrapeConfig , [ ] byte , error ) {
var scrapeConfigs [ ] * ScrapeConfig
2021-11-05 13:41:14 +01:00
var scsData [ ] byte
2021-08-26 07:51:14 +02:00
for _ , filePath := range scrapeConfigFiles {
2021-12-02 23:08:42 +01:00
filePath := fs . GetFilepath ( baseDir , filePath )
2021-08-26 07:51:14 +02:00
paths := [ ] string { filePath }
if strings . Contains ( filePath , "*" ) {
ps , err := filepath . Glob ( filePath )
if err != nil {
2021-11-08 12:33:29 +01:00
return nil , nil , fmt . Errorf ( "invalid pattern %q: %w" , filePath , err )
2021-08-26 07:51:14 +02:00
}
sort . Strings ( ps )
paths = ps
}
for _ , path := range paths {
2021-12-02 23:08:42 +01:00
data , err := fs . ReadFileOrHTTP ( path )
2021-08-26 07:51:14 +02:00
if err != nil {
2021-11-08 12:33:29 +01:00
return nil , nil , fmt . Errorf ( "cannot load %q: %w" , path , err )
2021-08-26 07:51:14 +02:00
}
2022-10-26 13:49:20 +02:00
data , err = envtemplate . ReplaceBytes ( data )
2022-10-18 09:28:39 +02:00
if err != nil {
return nil , nil , fmt . Errorf ( "cannot expand environment vars in %q: %w" , path , err )
}
2022-04-16 19:28:46 +02:00
var scs [ ] * ScrapeConfig
2021-08-26 07:51:14 +02:00
if err = yaml . UnmarshalStrict ( data , & scs ) ; err != nil {
2021-11-08 12:33:29 +01:00
return nil , nil , fmt . Errorf ( "cannot parse %q: %w" , path , err )
2021-08-26 07:51:14 +02:00
}
scrapeConfigs = append ( scrapeConfigs , scs ... )
2021-11-05 13:41:14 +01:00
scsData = append ( scsData , '\n' )
scsData = append ( scsData , data ... )
2021-08-26 07:51:14 +02:00
}
2020-02-23 12:35:47 +01:00
}
2021-11-05 13:41:14 +01:00
return scrapeConfigs , scsData , nil
2020-02-23 12:35:47 +01:00
}
2020-11-25 21:59:13 +01:00
// IsDryRun returns true if -promscrape.config.dryRun command-line flag is set
func IsDryRun() bool {
	return *dryRun
}
2021-11-05 13:41:14 +01:00
func ( cfg * Config ) parseData ( data [ ] byte , path string ) ( [ ] byte , error ) {
2022-04-16 13:25:54 +02:00
if err := cfg . unmarshal ( data , * strictParse ) ; err != nil {
2021-11-05 13:41:14 +01:00
return nil , fmt . Errorf ( "cannot unmarshal data: %w" , err )
2020-02-23 12:35:47 +01:00
}
absPath , err := filepath . Abs ( path )
if err != nil {
2021-11-05 13:41:14 +01:00
return nil , fmt . Errorf ( "cannot obtain abs path for %q: %w" , path , err )
2020-02-23 12:35:47 +01:00
}
cfg . baseDir = filepath . Dir ( absPath )
2021-08-26 07:51:14 +02:00
// Load cfg.ScrapeConfigFiles into c.ScrapeConfigs
2021-11-05 13:41:14 +01:00
scs , scsData , err := loadScrapeConfigFiles ( cfg . baseDir , cfg . ScrapeConfigFiles )
2021-08-26 07:51:14 +02:00
if err != nil {
2021-11-05 13:41:14 +01:00
return nil , fmt . Errorf ( "cannot load `scrape_config_files` from %q: %w" , path , err )
2021-08-26 07:51:14 +02:00
}
cfg . ScrapeConfigFiles = nil
cfg . ScrapeConfigs = append ( cfg . ScrapeConfigs , scs ... )
2021-11-05 13:41:14 +01:00
dataNew := append ( data , scsData ... )
2021-08-26 07:51:14 +02:00
// Check that all the scrape configs have unique JobName
m := make ( map [ string ] struct { } , len ( cfg . ScrapeConfigs ) )
2022-04-16 19:28:46 +02:00
for _ , sc := range cfg . ScrapeConfigs {
jobName := sc . JobName
2021-08-26 07:51:14 +02:00
if _ , ok := m [ jobName ] ; ok {
2021-11-05 13:41:14 +01:00
return nil , fmt . Errorf ( "duplicate `job_name` in `scrape_configs` loaded from %q: %q" , path , jobName )
2021-08-26 07:51:14 +02:00
}
m [ jobName ] = struct { } { }
}
// Initialize cfg.ScrapeConfigs
2022-04-16 19:28:46 +02:00
for i , sc := range cfg . ScrapeConfigs {
2022-04-22 12:19:20 +02:00
// Make a copy of sc in order to remove references to `data` memory.
// This should prevent from memory leaks on config reload.
sc = sc . clone ( )
cfg . ScrapeConfigs [ i ] = sc
2020-02-23 12:35:47 +01:00
swc , err := getScrapeWorkConfig ( sc , cfg . baseDir , & cfg . Global )
if err != nil {
2022-04-22 12:19:20 +02:00
return nil , fmt . Errorf ( "cannot parse `scrape_config`: %w" , err )
2020-02-23 12:35:47 +01:00
}
sc . swc = swc
}
2021-11-05 13:41:14 +01:00
return dataNew , nil
2020-02-23 12:35:47 +01:00
}
2022-04-22 12:19:20 +02:00
func ( sc * ScrapeConfig ) clone ( ) * ScrapeConfig {
2022-05-06 23:02:54 +02:00
data := sc . marshalJSON ( )
2022-04-22 12:19:20 +02:00
var scCopy ScrapeConfig
2022-05-06 23:02:54 +02:00
if err := scCopy . unmarshalJSON ( data ) ; err != nil {
2022-04-22 12:19:20 +02:00
logger . Panicf ( "BUG: cannot unmarshal scrape config: %s" , err )
}
return & scCopy
}
2020-12-08 16:50:03 +01:00
func getSWSByJob ( sws [ ] * ScrapeWork ) map [ string ] [ ] * ScrapeWork {
m := make ( map [ string ] [ ] * ScrapeWork )
2020-06-23 14:35:19 +02:00
for _ , sw := range sws {
m [ sw . jobNameOriginal ] = append ( m [ sw . jobNameOriginal ] , sw )
}
return m
}
2022-07-13 22:43:18 +02:00
// getAzureSDScrapeWork returns `azure_sd_configs` ScrapeWork from cfg.
// If discovery fails for a job, its previously discovered targets are preserved.
func (cfg *Config) getAzureSDScrapeWork(prev []*ScrapeWork) []*ScrapeWork {
	swsPrevByJob := getSWSByJob(prev)
	dst := make([]*ScrapeWork, 0, len(prev))
	for _, sc := range cfg.ScrapeConfigs {
		dstLen := len(dst)
		allOK := true
		for j := range sc.AzureSDConfigs {
			var ok bool
			dst, ok = appendSDScrapeWork(dst, &sc.AzureSDConfigs[j], cfg.baseDir, sc.swc, "azure_sd_config")
			allOK = allOK && ok
		}
		if allOK {
			continue
		}
		// Discovery failed for at least one SD config in this job;
		// roll back to the previously discovered targets if any.
		if swsPrev := swsPrevByJob[sc.swc.jobName]; len(swsPrev) > 0 {
			logger.Errorf("there were errors when discovering azure targets for job %q, so preserving the previous targets", sc.swc.jobName)
			dst = append(dst[:dstLen], swsPrev...)
		}
	}
	return dst
}
2021-06-25 11:10:20 +02:00
// getConsulSDScrapeWork returns `consul_sd_configs` ScrapeWork from cfg.
func ( cfg * Config ) getConsulSDScrapeWork ( prev [ ] * ScrapeWork ) [ ] * ScrapeWork {
2021-02-26 15:54:03 +01:00
swsPrevByJob := getSWSByJob ( prev )
2020-12-08 16:50:03 +01:00
dst := make ( [ ] * ScrapeWork , 0 , len ( prev ) )
2022-04-16 19:28:46 +02:00
for _ , sc := range cfg . ScrapeConfigs {
2021-02-26 15:54:03 +01:00
dstLen := len ( dst )
ok := true
2021-06-25 11:10:20 +02:00
for j := range sc . ConsulSDConfigs {
sdc := & sc . ConsulSDConfigs [ j ]
var okLocal bool
dst , okLocal = appendSDScrapeWork ( dst , sdc , cfg . baseDir , sc . swc , "consul_sd_config" )
if ok {
ok = okLocal
2021-03-02 15:42:48 +01:00
}
2021-06-25 11:10:20 +02:00
}
if ok {
continue
}
swsPrev := swsPrevByJob [ sc . swc . jobName ]
if len ( swsPrev ) > 0 {
logger . Errorf ( "there were errors when discovering consul targets for job %q, so preserving the previous targets" , sc . swc . jobName )
dst = append ( dst [ : dstLen ] , swsPrev ... )
}
}
return dst
}
2023-05-04 11:36:21 +02:00
// getConsulAgentSDScrapeWork returns `consulagent_sd_configs` ScrapeWork from cfg.
// If discovery fails for a job, its previously discovered targets are preserved.
func (cfg *Config) getConsulAgentSDScrapeWork(prev []*ScrapeWork) []*ScrapeWork {
	swsPrevByJob := getSWSByJob(prev)
	dst := make([]*ScrapeWork, 0, len(prev))
	for _, sc := range cfg.ScrapeConfigs {
		dstLen := len(dst)
		allOK := true
		for j := range sc.ConsulAgentSDConfigs {
			var ok bool
			dst, ok = appendSDScrapeWork(dst, &sc.ConsulAgentSDConfigs[j], cfg.baseDir, sc.swc, "consulagent_sd_config")
			allOK = allOK && ok
		}
		if allOK {
			continue
		}
		// Discovery failed for at least one SD config in this job;
		// roll back to the previously discovered targets if any.
		if swsPrev := swsPrevByJob[sc.swc.jobName]; len(swsPrev) > 0 {
			logger.Errorf("there were errors when discovering consulagent targets for job %q, so preserving the previous targets", sc.swc.jobName)
			dst = append(dst[:dstLen], swsPrev...)
		}
	}
	return dst
}
2021-06-25 11:10:20 +02:00
// getDigitalOceanDScrapeWork returns `digitalocean_sd_configs` ScrapeWork from cfg.
func ( cfg * Config ) getDigitalOceanDScrapeWork ( prev [ ] * ScrapeWork ) [ ] * ScrapeWork {
swsPrevByJob := getSWSByJob ( prev )
dst := make ( [ ] * ScrapeWork , 0 , len ( prev ) )
2022-04-16 19:28:46 +02:00
for _ , sc := range cfg . ScrapeConfigs {
2021-06-25 11:10:20 +02:00
dstLen := len ( dst )
ok := true
for j := range sc . DigitaloceanSDConfigs {
sdc := & sc . DigitaloceanSDConfigs [ j ]
var okLocal bool
dst , okLocal = appendSDScrapeWork ( dst , sdc , cfg . baseDir , sc . swc , "digitalocean_sd_config" )
if ok {
ok = okLocal
2020-06-23 14:35:19 +02:00
}
2021-02-26 15:54:03 +01:00
}
if ok {
continue
}
swsPrev := swsPrevByJob [ sc . swc . jobName ]
if len ( swsPrev ) > 0 {
2021-06-25 11:10:20 +02:00
logger . Errorf ( "there were errors when discovering digitalocean targets for job %q, so preserving the previous targets" , sc . swc . jobName )
2021-02-26 15:54:03 +01:00
dst = append ( dst [ : dstLen ] , swsPrev ... )
2020-06-23 14:35:19 +02:00
}
2020-04-13 20:02:27 +02:00
}
return dst
}
2021-06-25 11:10:20 +02:00
// getDNSSDScrapeWork returns `dns_sd_configs` ScrapeWork from cfg.
func ( cfg * Config ) getDNSSDScrapeWork ( prev [ ] * ScrapeWork ) [ ] * ScrapeWork {
2020-10-05 15:45:33 +02:00
swsPrevByJob := getSWSByJob ( prev )
2020-12-08 16:50:03 +01:00
dst := make ( [ ] * ScrapeWork , 0 , len ( prev ) )
2022-04-16 19:28:46 +02:00
for _ , sc := range cfg . ScrapeConfigs {
2020-10-05 15:45:33 +02:00
dstLen := len ( dst )
ok := true
2021-06-25 11:10:20 +02:00
for j := range sc . DNSSDConfigs {
sdc := & sc . DNSSDConfigs [ j ]
2020-10-05 15:45:33 +02:00
var okLocal bool
2021-06-25 11:10:20 +02:00
dst , okLocal = appendSDScrapeWork ( dst , sdc , cfg . baseDir , sc . swc , "dns_sd_config" )
2020-10-05 15:45:33 +02:00
if ok {
ok = okLocal
}
}
if ok {
continue
}
swsPrev := swsPrevByJob [ sc . swc . jobName ]
if len ( swsPrev ) > 0 {
2021-06-25 11:10:20 +02:00
logger . Errorf ( "there were errors when discovering dns targets for job %q, so preserving the previous targets" , sc . swc . jobName )
2020-10-05 15:45:33 +02:00
dst = append ( dst [ : dstLen ] , swsPrev ... )
}
}
return dst
}
2021-06-25 10:42:47 +02:00
// getDockerSDScrapeWork returns `docker_sd_configs` ScrapeWork from cfg.
func ( cfg * Config ) getDockerSDScrapeWork ( prev [ ] * ScrapeWork ) [ ] * ScrapeWork {
swsPrevByJob := getSWSByJob ( prev )
dst := make ( [ ] * ScrapeWork , 0 , len ( prev ) )
2022-04-16 19:28:46 +02:00
for _ , sc := range cfg . ScrapeConfigs {
2021-06-25 10:42:47 +02:00
dstLen := len ( dst )
ok := true
for j := range sc . DockerSDConfigs {
sdc := & sc . DockerSDConfigs [ j ]
var okLocal bool
dst , okLocal = appendSDScrapeWork ( dst , sdc , cfg . baseDir , sc . swc , "docker_sd_config" )
if ok {
ok = okLocal
}
}
if ok {
continue
}
swsPrev := swsPrevByJob [ sc . swc . jobName ]
if len ( swsPrev ) > 0 {
logger . Errorf ( "there were errors when discovering docker targets for job %q, so preserving the previous targets" , sc . swc . jobName )
dst = append ( dst [ : dstLen ] , swsPrev ... )
}
}
return dst
}
2020-10-12 12:38:21 +02:00
// getDockerSwarmSDScrapeWork returns `dockerswarm_sd_configs` ScrapeWork from cfg.
2020-12-08 16:50:03 +01:00
func ( cfg * Config ) getDockerSwarmSDScrapeWork ( prev [ ] * ScrapeWork ) [ ] * ScrapeWork {
2020-10-12 12:38:21 +02:00
swsPrevByJob := getSWSByJob ( prev )
2020-12-08 16:50:03 +01:00
dst := make ( [ ] * ScrapeWork , 0 , len ( prev ) )
2022-04-16 19:28:46 +02:00
for _ , sc := range cfg . ScrapeConfigs {
2020-10-12 12:38:21 +02:00
dstLen := len ( dst )
ok := true
2021-03-01 13:13:56 +01:00
for j := range sc . DockerSwarmSDConfigs {
sdc := & sc . DockerSwarmSDConfigs [ j ]
2020-10-12 12:38:21 +02:00
var okLocal bool
2021-02-26 14:53:42 +01:00
dst , okLocal = appendSDScrapeWork ( dst , sdc , cfg . baseDir , sc . swc , "dockerswarm_sd_config" )
2020-10-12 12:38:21 +02:00
if ok {
ok = okLocal
}
}
if ok {
continue
}
swsPrev := swsPrevByJob [ sc . swc . jobName ]
if len ( swsPrev ) > 0 {
logger . Errorf ( "there were errors when discovering dockerswarm targets for job %q, so preserving the previous targets" , sc . swc . jobName )
dst = append ( dst [ : dstLen ] , swsPrev ... )
}
}
return dst
}
2021-06-25 11:10:20 +02:00
// getEC2SDScrapeWork returns `ec2_sd_configs` ScrapeWork from cfg.
func ( cfg * Config ) getEC2SDScrapeWork ( prev [ ] * ScrapeWork ) [ ] * ScrapeWork {
2020-06-23 14:35:19 +02:00
swsPrevByJob := getSWSByJob ( prev )
2020-12-08 16:50:03 +01:00
dst := make ( [ ] * ScrapeWork , 0 , len ( prev ) )
2022-04-16 19:28:46 +02:00
for _ , sc := range cfg . ScrapeConfigs {
2020-06-23 14:35:19 +02:00
dstLen := len ( dst )
ok := true
2021-06-25 11:10:20 +02:00
for j := range sc . EC2SDConfigs {
sdc := & sc . EC2SDConfigs [ j ]
2020-06-23 14:35:19 +02:00
var okLocal bool
2021-06-25 11:10:20 +02:00
dst , okLocal = appendSDScrapeWork ( dst , sdc , cfg . baseDir , sc . swc , "ec2_sd_config" )
2020-06-23 14:35:19 +02:00
if ok {
ok = okLocal
}
}
if ok {
continue
}
swsPrev := swsPrevByJob [ sc . swc . jobName ]
if len ( swsPrev ) > 0 {
2021-06-25 11:10:20 +02:00
logger . Errorf ( "there were errors when discovering ec2 targets for job %q, so preserving the previous targets" , sc . swc . jobName )
2020-06-23 14:35:19 +02:00
dst = append ( dst [ : dstLen ] , swsPrev ... )
2020-05-04 19:48:02 +02:00
}
}
return dst
}
2020-11-20 12:38:12 +01:00
// getEurekaSDScrapeWork returns `eureka_sd_configs` ScrapeWork from cfg.
2020-12-08 16:50:03 +01:00
func ( cfg * Config ) getEurekaSDScrapeWork ( prev [ ] * ScrapeWork ) [ ] * ScrapeWork {
2020-11-20 12:38:12 +01:00
swsPrevByJob := getSWSByJob ( prev )
2020-12-08 16:50:03 +01:00
dst := make ( [ ] * ScrapeWork , 0 , len ( prev ) )
2022-04-16 19:28:46 +02:00
for _ , sc := range cfg . ScrapeConfigs {
2020-11-20 12:38:12 +01:00
dstLen := len ( dst )
ok := true
for j := range sc . EurekaSDConfigs {
sdc := & sc . EurekaSDConfigs [ j ]
var okLocal bool
2021-02-26 14:53:42 +01:00
dst , okLocal = appendSDScrapeWork ( dst , sdc , cfg . baseDir , sc . swc , "eureka_sd_config" )
2020-11-20 12:38:12 +01:00
if ok {
ok = okLocal
}
}
if ok {
continue
}
swsPrev := swsPrevByJob [ sc . swc . jobName ]
if len ( swsPrev ) > 0 {
logger . Errorf ( "there were errors when discovering eureka targets for job %q, so preserving the previous targets" , sc . swc . jobName )
dst = append ( dst [ : dstLen ] , swsPrev ... )
}
}
return dst
}
2021-06-25 11:10:20 +02:00
// getFileSDScrapeWork returns `file_sd_configs` ScrapeWork from cfg.
func ( cfg * Config ) getFileSDScrapeWork ( prev [ ] * ScrapeWork ) [ ] * ScrapeWork {
2020-12-08 16:50:03 +01:00
dst := make ( [ ] * ScrapeWork , 0 , len ( prev ) )
2022-04-16 19:28:46 +02:00
for _ , sc := range cfg . ScrapeConfigs {
2021-06-25 11:10:20 +02:00
for j := range sc . FileSDConfigs {
sdc := & sc . FileSDConfigs [ j ]
2023-04-03 06:05:01 +02:00
dst = sdc . appendScrapeWork ( dst , cfg . baseDir , sc . swc )
2020-05-05 23:01:49 +02:00
}
}
return dst
}
2021-06-25 11:10:20 +02:00
// getGCESDScrapeWork returns `gce_sd_configs` ScrapeWork from cfg.
func ( cfg * Config ) getGCESDScrapeWork ( prev [ ] * ScrapeWork ) [ ] * ScrapeWork {
2020-06-23 14:35:19 +02:00
swsPrevByJob := getSWSByJob ( prev )
2020-12-08 16:50:03 +01:00
dst := make ( [ ] * ScrapeWork , 0 , len ( prev ) )
2022-04-16 19:28:46 +02:00
for _ , sc := range cfg . ScrapeConfigs {
2020-06-23 14:35:19 +02:00
dstLen := len ( dst )
ok := true
2021-06-25 11:10:20 +02:00
for j := range sc . GCESDConfigs {
sdc := & sc . GCESDConfigs [ j ]
2020-06-23 14:35:19 +02:00
var okLocal bool
2021-06-25 11:10:20 +02:00
dst , okLocal = appendSDScrapeWork ( dst , sdc , cfg . baseDir , sc . swc , "gce_sd_config" )
2020-06-23 14:35:19 +02:00
if ok {
ok = okLocal
}
}
if ok {
continue
}
swsPrev := swsPrevByJob [ sc . swc . jobName ]
if len ( swsPrev ) > 0 {
2021-06-25 11:10:20 +02:00
logger . Errorf ( "there were errors when discovering gce targets for job %q, so preserving the previous targets" , sc . swc . jobName )
2020-06-23 14:35:19 +02:00
dst = append ( dst [ : dstLen ] , swsPrev ... )
2020-04-27 18:25:45 +02:00
}
}
return dst
}
2021-06-25 11:10:20 +02:00
// getHTTPDScrapeWork returns `http_sd_configs` ScrapeWork from cfg.
func ( cfg * Config ) getHTTPDScrapeWork ( prev [ ] * ScrapeWork ) [ ] * ScrapeWork {
2020-06-23 14:35:19 +02:00
swsPrevByJob := getSWSByJob ( prev )
2020-12-08 16:50:03 +01:00
dst := make ( [ ] * ScrapeWork , 0 , len ( prev ) )
2022-04-16 19:28:46 +02:00
for _ , sc := range cfg . ScrapeConfigs {
2020-06-23 14:35:19 +02:00
dstLen := len ( dst )
ok := true
2021-06-25 11:10:20 +02:00
for j := range sc . HTTPSDConfigs {
sdc := & sc . HTTPSDConfigs [ j ]
2020-06-23 14:35:19 +02:00
var okLocal bool
2021-06-25 11:10:20 +02:00
dst , okLocal = appendSDScrapeWork ( dst , sdc , cfg . baseDir , sc . swc , "http_sd_config" )
2020-06-23 14:35:19 +02:00
if ok {
ok = okLocal
}
}
if ok {
continue
}
swsPrev := swsPrevByJob [ sc . swc . jobName ]
if len ( swsPrev ) > 0 {
2021-06-25 11:10:20 +02:00
logger . Errorf ( "there were errors when discovering http targets for job %q, so preserving the previous targets" , sc . swc . jobName )
2020-06-23 14:35:19 +02:00
dst = append ( dst [ : dstLen ] , swsPrev ... )
2020-04-24 16:50:21 +02:00
}
}
return dst
}
2021-06-25 11:10:20 +02:00
// getKubernetesSDScrapeWork returns `kubernetes_sd_configs` ScrapeWork from cfg.
func ( cfg * Config ) getKubernetesSDScrapeWork ( prev [ ] * ScrapeWork ) [ ] * ScrapeWork {
2021-06-14 12:15:04 +02:00
swsPrevByJob := getSWSByJob ( prev )
dst := make ( [ ] * ScrapeWork , 0 , len ( prev ) )
2022-04-16 19:28:46 +02:00
for _ , sc := range cfg . ScrapeConfigs {
2021-06-14 12:15:04 +02:00
dstLen := len ( dst )
ok := true
2021-06-25 11:10:20 +02:00
for j := range sc . KubernetesSDConfigs {
sdc := & sc . KubernetesSDConfigs [ j ]
swos , err := sdc . GetScrapeWorkObjects ( )
if err != nil {
logger . Errorf ( "skipping kubernetes_sd_config targets for job_name %q because of error: %s" , sc . swc . jobName , err )
ok = false
break
}
for _ , swo := range swos {
sw := swo . ( * ScrapeWork )
dst = append ( dst , sw )
2021-06-14 12:15:04 +02:00
}
}
if ok {
continue
}
swsPrev := swsPrevByJob [ sc . swc . jobName ]
if len ( swsPrev ) > 0 {
2021-06-25 11:10:20 +02:00
logger . Errorf ( "there were errors when discovering kubernetes_sd_config targets for job %q, so preserving the previous targets" , sc . swc . jobName )
2021-06-14 12:15:04 +02:00
dst = append ( dst [ : dstLen ] , swsPrev ... )
}
}
return dst
}
2023-02-22 13:59:56 +01:00
// getKumaSDScrapeWork returns `kuma_sd_configs` ScrapeWork from cfg.
//
// On discovery errors the previous targets for the affected job are preserved.
func (cfg *Config) getKumaSDScrapeWork(prev []*ScrapeWork) []*ScrapeWork {
	prevByJob := getSWSByJob(prev)
	sws := make([]*ScrapeWork, 0, len(prev))
	for _, sc := range cfg.ScrapeConfigs {
		jobStart := len(sws)
		hadErrors := false
		for j := range sc.KumaSDConfigs {
			var succeeded bool
			sws, succeeded = appendSDScrapeWork(sws, &sc.KumaSDConfigs[j], cfg.baseDir, sc.swc, "kuma_sd_config")
			if !succeeded {
				hadErrors = true
			}
		}
		if !hadErrors {
			continue
		}
		if swsPrev := prevByJob[sc.swc.jobName]; len(swsPrev) > 0 {
			logger.Errorf("there were errors when discovering kuma targets for job %q, so preserving the previous targets", sc.swc.jobName)
			sws = append(sws[:jobStart], swsPrev...)
		}
	}
	return sws
}
2023-01-05 23:03:58 +01:00
// getNomadSDScrapeWork returns `nomad_sd_configs` ScrapeWork from cfg.
func ( cfg * Config ) getNomadSDScrapeWork ( prev [ ] * ScrapeWork ) [ ] * ScrapeWork {
swsPrevByJob := getSWSByJob ( prev )
dst := make ( [ ] * ScrapeWork , 0 , len ( prev ) )
for _ , sc := range cfg . ScrapeConfigs {
dstLen := len ( dst )
ok := true
for j := range sc . NomadSDConfigs {
sdc := & sc . NomadSDConfigs [ j ]
var okLocal bool
dst , okLocal = appendSDScrapeWork ( dst , sdc , cfg . baseDir , sc . swc , "nomad_sd_config" )
if ok {
ok = okLocal
}
}
if ok {
continue
}
swsPrev := swsPrevByJob [ sc . swc . jobName ]
if len ( swsPrev ) > 0 {
2023-01-06 03:01:11 +01:00
logger . Errorf ( "there were errors when discovering nomad_sd_config targets for job %q, so preserving the previous targets" , sc . swc . jobName )
2023-01-05 23:03:58 +01:00
dst = append ( dst [ : dstLen ] , swsPrev ... )
}
}
return dst
}
2021-06-25 11:10:20 +02:00
// getOpenStackSDScrapeWork returns `openstack_sd_configs` ScrapeWork from cfg.
func ( cfg * Config ) getOpenStackSDScrapeWork ( prev [ ] * ScrapeWork ) [ ] * ScrapeWork {
2021-06-22 12:33:37 +02:00
swsPrevByJob := getSWSByJob ( prev )
dst := make ( [ ] * ScrapeWork , 0 , len ( prev ) )
2022-04-16 19:28:46 +02:00
for _ , sc := range cfg . ScrapeConfigs {
2021-06-22 12:33:37 +02:00
dstLen := len ( dst )
ok := true
2021-06-25 11:10:20 +02:00
for j := range sc . OpenStackSDConfigs {
sdc := & sc . OpenStackSDConfigs [ j ]
2021-06-22 12:33:37 +02:00
var okLocal bool
2021-06-25 11:10:20 +02:00
dst , okLocal = appendSDScrapeWork ( dst , sdc , cfg . baseDir , sc . swc , "openstack_sd_config" )
2021-06-22 12:33:37 +02:00
if ok {
ok = okLocal
}
}
if ok {
continue
}
swsPrev := swsPrevByJob [ sc . swc . jobName ]
if len ( swsPrev ) > 0 {
2021-06-25 11:10:20 +02:00
logger . Errorf ( "there were errors when discovering openstack targets for job %q, so preserving the previous targets" , sc . swc . jobName )
2021-06-22 12:33:37 +02:00
dst = append ( dst [ : dstLen ] , swsPrev ... )
}
}
return dst
}
2022-08-04 19:44:16 +02:00
// getYandexCloudSDScrapeWork returns `yandexcloud_sd_configs` ScrapeWork from cfg.
//
// On discovery errors the previous targets for the affected job are preserved.
func (cfg *Config) getYandexCloudSDScrapeWork(prev []*ScrapeWork) []*ScrapeWork {
	prevByJob := getSWSByJob(prev)
	sws := make([]*ScrapeWork, 0, len(prev))
	for _, sc := range cfg.ScrapeConfigs {
		jobStart := len(sws)
		hadErrors := false
		for j := range sc.YandexCloudSDConfigs {
			var succeeded bool
			sws, succeeded = appendSDScrapeWork(sws, &sc.YandexCloudSDConfigs[j], cfg.baseDir, sc.swc, "yandexcloud_sd_config")
			if !succeeded {
				hadErrors = true
			}
		}
		if !hadErrors {
			continue
		}
		if swsPrev := prevByJob[sc.swc.jobName]; len(swsPrev) > 0 {
			logger.Errorf("there were errors when discovering yandexcloud targets for job %q, so preserving the previous targets", sc.swc.jobName)
			sws = append(sws[:jobStart], swsPrev...)
		}
	}
	return sws
}
2023-02-22 13:59:56 +01:00
// getStaticScrapeWork returns `static_configs` ScrapeWork from cfg.
2020-12-08 16:50:03 +01:00
func ( cfg * Config ) getStaticScrapeWork ( ) [ ] * ScrapeWork {
var dst [ ] * ScrapeWork
2022-04-16 19:28:46 +02:00
for _ , sc := range cfg . ScrapeConfigs {
2020-04-23 13:38:12 +02:00
for j := range sc . StaticConfigs {
stc := & sc . StaticConfigs [ j ]
2020-04-13 11:59:05 +02:00
dst = stc . appendScrapeWork ( dst , sc . swc , nil )
2020-02-23 12:35:47 +01:00
}
}
2020-04-13 11:59:05 +02:00
return dst
2020-02-23 12:35:47 +01:00
}
// getScrapeWorkConfig converts sc into a scrapeWorkConfig, resolving per-job defaults
// from globalCfg and from command-line flags (noStaleMarkers, seriesLimitPerTarget),
// and validating job-level settings (job_name, scheme, auth and relabel configs).
//
// The returned scrapeWorkConfig is shared by all the targets of this scrape_config.
func getScrapeWorkConfig(sc *ScrapeConfig, baseDir string, globalCfg *GlobalConfig) (*scrapeWorkConfig, error) {
	jobName := sc.JobName
	if jobName == "" {
		return nil, fmt.Errorf("missing `job_name` field in `scrape_config`")
	}
	// scrape_interval: per-job value, falling back to the global value and then to the hard default.
	scrapeInterval := sc.ScrapeInterval.Duration()
	if scrapeInterval <= 0 {
		scrapeInterval = globalCfg.ScrapeInterval.Duration()
		if scrapeInterval <= 0 {
			scrapeInterval = defaultScrapeInterval
		}
	}
	// scrape_timeout: same fallback chain as scrape_interval.
	scrapeTimeout := sc.ScrapeTimeout.Duration()
	if scrapeTimeout <= 0 {
		scrapeTimeout = globalCfg.ScrapeTimeout.Duration()
		if scrapeTimeout <= 0 {
			scrapeTimeout = defaultScrapeTimeout
		}
	}
	if scrapeTimeout > scrapeInterval {
		// Limit the `scrape_timeout` with `scrape_interval` like Prometheus does.
		// This guarantees that the scraper can miss only a single scrape if the target sometimes responds slowly.
		// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1281#issuecomment-840538907
		scrapeTimeout = scrapeInterval
	}
	honorLabels := sc.HonorLabels
	// honor_timestamps defaults to true when not set explicitly.
	honorTimestamps := true
	if sc.HonorTimestamps != nil {
		honorTimestamps = *sc.HonorTimestamps
	}
	// follow_redirects defaults to true (i.e. redirects are allowed) when not set explicitly.
	denyRedirects := false
	if sc.HTTPClientConfig.FollowRedirects != nil {
		denyRedirects = !*sc.HTTPClientConfig.FollowRedirects
	}
	metricsPath := sc.MetricsPath
	if metricsPath == "" {
		metricsPath = "/metrics"
	}
	scheme := strings.ToLower(sc.Scheme)
	if scheme == "" {
		scheme = "http"
	}
	if scheme != "http" && scheme != "https" {
		return nil, fmt.Errorf("unexpected `scheme` for `job_name` %q: %q; supported values: http or https", jobName, scheme)
	}
	params := sc.Params
	// Auth config for connections to the scrape targets.
	ac, err := sc.HTTPClientConfig.NewConfig(baseDir)
	if err != nil {
		return nil, fmt.Errorf("cannot parse auth config for `job_name` %q: %w", jobName, err)
	}
	// Auth config for connections to the proxy specified via proxy_url.
	proxyAC, err := sc.ProxyClientConfig.NewConfig(baseDir)
	if err != nil {
		return nil, fmt.Errorf("cannot parse proxy auth config for `job_name` %q: %w", jobName, err)
	}
	relabelConfigs, err := promrelabel.ParseRelabelConfigs(sc.RelabelConfigs)
	if err != nil {
		return nil, fmt.Errorf("cannot parse `relabel_configs` for `job_name` %q: %w", jobName, err)
	}
	metricRelabelConfigs, err := promrelabel.ParseRelabelConfigs(sc.MetricRelabelConfigs)
	if err != nil {
		return nil, fmt.Errorf("cannot parse `metric_relabel_configs` for `job_name` %q: %w", jobName, err)
	}
	externalLabels := globalCfg.ExternalLabels
	// Per-job no_stale_markers overrides the -promscrape.noStaleMarkers flag.
	noStaleTracking := *noStaleMarkers
	if sc.NoStaleMarkers != nil {
		noStaleTracking = *sc.NoStaleMarkers
	}
	// Per-job series_limit overrides the -promscrape.seriesLimitPerTarget flag.
	seriesLimit := *seriesLimitPerTarget
	if sc.SeriesLimit > 0 {
		seriesLimit = sc.SeriesLimit
	}
	swc := &scrapeWorkConfig{
		scrapeInterval:       scrapeInterval,
		scrapeIntervalString: scrapeInterval.String(),
		scrapeTimeout:        scrapeTimeout,
		scrapeTimeoutString:  scrapeTimeout.String(),
		jobName:              jobName,
		metricsPath:          metricsPath,
		scheme:               scheme,
		params:               params,
		proxyURL:             sc.ProxyURL,
		proxyAuthConfig:      proxyAC,
		authConfig:           ac,
		honorLabels:          honorLabels,
		honorTimestamps:      honorTimestamps,
		denyRedirects:        denyRedirects,
		externalLabels:       externalLabels,
		relabelConfigs:       relabelConfigs,
		metricRelabelConfigs: metricRelabelConfigs,
		sampleLimit:          sc.SampleLimit,
		disableCompression:   sc.DisableCompression,
		disableKeepAlive:     sc.DisableKeepAlive,
		streamParse:          sc.StreamParse,
		scrapeAlignInterval:  sc.ScrapeAlignInterval.Duration(),
		scrapeOffset:         sc.ScrapeOffset.Duration(),
		seriesLimit:          seriesLimit,
		noStaleMarkers:       noStaleTracking,
	}
	return swc, nil
}
// scrapeWorkConfig holds the settings shared by all the targets of a single
// `scrape_config` section. It is built once per job by getScrapeWorkConfig
// and then used by getScrapeWork for every discovered target.
type scrapeWorkConfig struct {
	scrapeInterval       time.Duration
	scrapeIntervalString string // scrapeInterval.String(), precomputed in getScrapeWorkConfig
	scrapeTimeout        time.Duration
	scrapeTimeoutString  string // scrapeTimeout.String(), precomputed in getScrapeWorkConfig
	jobName              string
	metricsPath          string
	scheme               string // "http" or "https"; validated in getScrapeWorkConfig
	params               map[string][]string
	proxyURL             *proxy.URL
	proxyAuthConfig      *promauth.Config // auth for the proxy connection
	authConfig           *promauth.Config // auth for the target connection
	honorLabels          bool
	honorTimestamps      bool
	denyRedirects        bool // inverse of follow_redirects
	externalLabels       *promutils.Labels
	relabelConfigs       *promrelabel.ParsedConfigs // target-level relabeling
	metricRelabelConfigs *promrelabel.ParsedConfigs // per-sample relabeling
	sampleLimit          int
	disableCompression   bool
	disableKeepAlive     bool
	streamParse          bool
	scrapeAlignInterval  time.Duration
	scrapeOffset         time.Duration
	seriesLimit          int
	noStaleMarkers       bool
}
2021-02-26 14:53:42 +01:00
// targetLabelsGetter abstracts a service discovery config, which can return
// the set of meta labels for every discovered target.
type targetLabelsGetter interface {
	GetLabels(baseDir string) ([]*promutils.Labels, error)
}
2021-02-26 14:53:42 +01:00
func appendSDScrapeWork ( dst [ ] * ScrapeWork , sdc targetLabelsGetter , baseDir string , swc * scrapeWorkConfig , discoveryType string ) ( [ ] * ScrapeWork , bool ) {
targetLabels , err := sdc . GetLabels ( baseDir )
2020-04-24 16:50:21 +02:00
if err != nil {
2021-02-26 14:53:42 +01:00
logger . Errorf ( "skipping %s targets for job_name %q because of error: %s" , discoveryType , swc . jobName , err )
2020-06-23 14:35:19 +02:00
return dst , false
2020-04-24 16:50:21 +02:00
}
2021-02-26 14:53:42 +01:00
return appendScrapeWorkForTargetLabels ( dst , swc , targetLabels , discoveryType ) , true
2020-04-24 16:50:21 +02:00
}
2022-11-30 06:22:12 +01:00
// appendScrapeWorkForTargetLabels converts the discovered targetLabels into ScrapeWork
// entries and appends them to dst. Per-target errors are logged and the corresponding
// targets are skipped. The relabel duration is tracked per discoveryType in the
// vm_promscrape_target_relabel_duration_seconds histogram.
func appendScrapeWorkForTargetLabels(dst []*ScrapeWork, swc *scrapeWorkConfig, targetLabels []*promutils.Labels, discoveryType string) []*ScrapeWork {
	startTime := time.Now()
	// Process targetLabels in parallel in order to reduce processing time for big number of targetLabels.
	type result struct {
		sw  *ScrapeWork
		err error
	}
	goroutines := cgroup.AvailableCPUs()
	// resultCh is sized to hold one result per target, so workers never block on send.
	resultCh := make(chan result, len(targetLabels))
	workCh := make(chan *promutils.Labels, goroutines)
	for i := 0; i < goroutines; i++ {
		go func() {
			// Workers exit when workCh is closed below.
			for metaLabels := range workCh {
				target := metaLabels.Get("__address__")
				sw, err := swc.getScrapeWork(target, nil, metaLabels)
				if err != nil {
					err = fmt.Errorf("skipping %s target %q for job_name %q because of error: %w", discoveryType, target, swc.jobName, err)
				}
				resultCh <- result{
					sw:  sw,
					err: err,
				}
			}
		}()
	}
	for _, metaLabels := range targetLabels {
		workCh <- metaLabels
	}
	close(workCh)
	// Collect exactly len(targetLabels) results; order doesn't matter here.
	for range targetLabels {
		r := <-resultCh
		if r.err != nil {
			logger.Errorf("%s", r.err)
			continue
		}
		// getScrapeWork may return (nil, nil) for dropped targets; skip those.
		if r.sw != nil {
			dst = append(dst, r.sw)
		}
	}
	metrics.GetOrCreateHistogram(fmt.Sprintf("vm_promscrape_target_relabel_duration_seconds{type=%q}", discoveryType)).UpdateDuration(startTime)
	return dst
}
2023-04-03 06:05:01 +02:00
func ( sdc * FileSDConfig ) appendScrapeWork ( dst [ ] * ScrapeWork , baseDir string , swc * scrapeWorkConfig ) [ ] * ScrapeWork {
2022-11-30 06:22:12 +01:00
metaLabels := promutils . GetLabels ( )
defer promutils . PutLabels ( metaLabels )
2020-02-23 12:35:47 +01:00
for _ , file := range sdc . Files {
2021-12-02 23:08:42 +01:00
pathPattern := fs . GetFilepath ( baseDir , file )
2020-02-23 12:35:47 +01:00
paths := [ ] string { pathPattern }
if strings . Contains ( pathPattern , "*" ) {
var err error
paths , err = filepath . Glob ( pathPattern )
if err != nil {
2020-04-13 11:59:05 +02:00
// Do not return this error, since other files may contain valid scrape configs.
2023-04-03 06:05:01 +02:00
logger . Errorf ( "invalid pattern %q in `file_sd_config->files` section of job_name=%q: %s; skipping it" , file , swc . jobName , err )
2020-04-13 11:59:05 +02:00
continue
2020-02-23 12:35:47 +01:00
}
}
for _ , path := range paths {
stcs , err := loadStaticConfigs ( path )
if err != nil {
// Do not return this error, since other paths may contain valid scrape configs.
2023-04-03 06:05:01 +02:00
logger . Errorf ( "cannot load file %q for job_name=%q at `file_sd_configs`: %s; skipping this file" , path , swc . jobName , err )
2020-02-23 12:35:47 +01:00
continue
}
pathShort := path
if strings . HasPrefix ( pathShort , baseDir ) {
pathShort = path [ len ( baseDir ) : ]
if len ( pathShort ) > 0 && pathShort [ 0 ] == filepath . Separator {
pathShort = pathShort [ 1 : ]
}
}
2022-11-30 06:22:12 +01:00
metaLabels . Reset ( )
metaLabels . Add ( "__meta_filepath" , pathShort )
2020-02-23 12:35:47 +01:00
for i := range stcs {
2020-04-13 11:59:05 +02:00
dst = stcs [ i ] . appendScrapeWork ( dst , swc , metaLabels )
2020-02-23 12:35:47 +01:00
}
}
}
2020-04-13 11:59:05 +02:00
return dst
2020-02-23 12:35:47 +01:00
}
2022-11-30 06:22:12 +01:00
func ( stc * StaticConfig ) appendScrapeWork ( dst [ ] * ScrapeWork , swc * scrapeWorkConfig , metaLabels * promutils . Labels ) [ ] * ScrapeWork {
2020-02-23 12:35:47 +01:00
for _ , target := range stc . Targets {
if target == "" {
2020-04-13 11:59:05 +02:00
// Do not return this error, since other targets may be valid
logger . Errorf ( "`static_configs` target for `job_name` %q cannot be empty; skipping it" , swc . jobName )
2020-02-23 12:35:47 +01:00
continue
}
2021-02-26 20:41:54 +01:00
sw , err := swc . getScrapeWork ( target , stc . Labels , metaLabels )
2020-04-13 11:59:05 +02:00
if err != nil {
// Do not return this error, since other targets may be valid
logger . Errorf ( "error when parsing `static_configs` target %q for `job_name` %q: %s; skipping it" , target , swc . jobName , err )
2020-02-23 12:35:47 +01:00
continue
}
2021-02-26 11:46:28 +01:00
if sw != nil {
dst = append ( dst , sw )
}
2020-02-23 12:35:47 +01:00
}
2020-04-13 11:59:05 +02:00
return dst
}
2022-11-30 06:22:12 +01:00
func appendScrapeWorkKey ( dst [ ] byte , labels * promutils . Labels ) [ ] byte {
for _ , label := range labels . GetLabels ( ) {
2021-10-12 16:03:09 +02:00
// Do not use strconv.AppendQuote, since it is slow according to CPU profile.
dst = append ( dst , label . Name ... )
dst = append ( dst , '=' )
dst = append ( dst , label . Value ... )
dst = append ( dst , ',' )
}
2021-02-28 21:29:34 +01:00
return dst
2021-02-26 20:41:54 +01:00
}
2021-03-04 09:20:15 +01:00
func needSkipScrapeWork ( key string , membersCount , replicasCount , memberNum int ) bool {
if membersCount <= 1 {
2021-02-28 17:39:57 +01:00
return false
}
2021-03-05 08:05:52 +01:00
h := xxhash . Sum64 ( bytesutil . ToUnsafeBytes ( key ) )
idx := int ( h % uint64 ( membersCount ) )
2021-03-04 09:20:15 +01:00
if replicasCount < 1 {
replicasCount = 1
}
for i := 0 ; i < replicasCount ; i ++ {
if idx == memberNum {
return false
}
idx ++
2021-05-13 10:14:51 +02:00
if idx >= membersCount {
2021-03-04 09:20:15 +01:00
idx = 0
}
}
return true
2021-02-28 17:39:57 +01:00
}
2021-03-02 15:42:48 +01:00
// scrapeWorkKeyBufPool reduces allocations when building per-target sharding keys in getScrapeWork.
var scrapeWorkKeyBufPool bytesutil.ByteBufferPool
2022-11-30 06:22:12 +01:00
// getScrapeWork builds a ScrapeWork for a single target.
//
// extraLabels come from a `static_configs` entry (nil for discovered targets),
// while metaLabels carry the `__meta_*` labels produced by service discovery.
//
// It returns (nil, nil) when the target must be dropped: relabeling removed all
// labels, no scrape URL could be built, or the target belongs to another
// `-promscrape.cluster.*` member.
func (swc *scrapeWorkConfig) getScrapeWork(target string, extraLabels, metaLabels *promutils.Labels) (*ScrapeWork, error) {
	labels := promutils.GetLabels()
	defer promutils.PutLabels(labels)

	mergeLabels(labels, swc, target, extraLabels, metaLabels)
	// originalLabels (pre-relabeling) are kept only for UI/debugging purposes
	// unless -promscrape.dropOriginalLabels is set.
	var originalLabels *promutils.Labels
	if !*dropOriginalLabels {
		originalLabels = labels.Clone()
	}
	labels.Labels = swc.relabelConfigs.Apply(labels.Labels, 0)
	// Remove labels starting from "__meta_" prefix according to https://www.robustperception.io/life-of-a-label/
	labels.RemoveMetaLabels()

	// Verify whether the scrape work must be skipped because of `-promscrape.cluster.*` configs.
	// Perform the verification on labels after the relabeling in order to guarantee that targets with the same set of labels
	// go to the same vmagent shard.
	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1687#issuecomment-940629495
	if *clusterMembersCount > 1 {
		bb := scrapeWorkKeyBufPool.Get()
		bb.B = appendScrapeWorkKey(bb.B[:0], labels)
		needSkip := needSkipScrapeWork(bytesutil.ToUnsafeString(bb.B), *clusterMembersCount, *clusterReplicationFactor, clusterMemberID)
		scrapeWorkKeyBufPool.Put(bb)
		if needSkip {
			return nil, nil
		}
	}
	if !*dropOriginalLabels {
		originalLabels.Sort()
		// Reduce memory usage by interning all the strings in originalLabels.
		originalLabels.InternStrings()
	}
	if labels.Len() == 0 {
		// Drop target without labels.
		droppedTargetsMap.Register(originalLabels, swc.relabelConfigs)
		return nil, nil
	}
	scrapeURL, address := promrelabel.GetScrapeURL(labels, swc.params)
	if scrapeURL == "" {
		// Drop target without URL.
		droppedTargetsMap.Register(originalLabels, swc.relabelConfigs)
		return nil, nil
	}
	if _, err := url.Parse(scrapeURL); err != nil {
		return nil, fmt.Errorf("invalid target url=%q for job=%q: %w", scrapeURL, swc.jobName, err)
	}

	// Multi-tenancy support: an optional __tenant_id__ label selects the auth token.
	var at *auth.Token
	tenantID := labels.Get("__tenant_id__")
	if len(tenantID) > 0 {
		newToken, err := auth.NewToken(tenantID)
		if err != nil {
			return nil, fmt.Errorf("cannot parse __tenant_id__=%q for job=%q: %w", tenantID, swc.jobName, err)
		}
		at = newToken
	}

	// Read __scrape_interval__ and __scrape_timeout__ from labels.
	scrapeInterval := swc.scrapeInterval
	if s := labels.Get("__scrape_interval__"); len(s) > 0 {
		d, err := promutils.ParseDuration(s)
		if err != nil {
			return nil, fmt.Errorf("cannot parse __scrape_interval__=%q: %w", s, err)
		}
		scrapeInterval = d
	}
	scrapeTimeout := swc.scrapeTimeout
	if s := labels.Get("__scrape_timeout__"); len(s) > 0 {
		d, err := promutils.ParseDuration(s)
		if err != nil {
			return nil, fmt.Errorf("cannot parse __scrape_timeout__=%q: %w", s, err)
		}
		scrapeTimeout = d
	}
	// Read series_limit option from __series_limit__ label.
	// See https://docs.victoriametrics.com/vmagent.html#cardinality-limiter
	seriesLimit := swc.seriesLimit
	if s := labels.Get("__series_limit__"); len(s) > 0 {
		n, err := strconv.Atoi(s)
		if err != nil {
			return nil, fmt.Errorf("cannot parse __series_limit__=%q: %w", s, err)
		}
		seriesLimit = n
	}
	// Read stream_parse option from __stream_parse__ label.
	// See https://docs.victoriametrics.com/vmagent.html#stream-parsing-mode
	streamParse := swc.streamParse
	if s := labels.Get("__stream_parse__"); len(s) > 0 {
		b, err := strconv.ParseBool(s)
		if err != nil {
			return nil, fmt.Errorf("cannot parse __stream_parse__=%q: %w", s, err)
		}
		streamParse = b
	}
	// Remove labels with "__" prefix according to https://www.robustperception.io/life-of-a-label/
	labels.RemoveLabelsWithDoubleUnderscorePrefix()
	// Add missing "instance" label according to https://www.robustperception.io/life-of-a-label
	if labels.Get("instance") == "" {
		labels.Add("instance", address)
	}
	// Remove references to deleted labels, so GC could clean strings for label name and label value past len(labels.Labels).
	// This should reduce memory usage when relabeling creates big number of temporary labels with long names and/or values.
	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/825 for details.
	labelsCopy := labels.Clone()
	// Sort labels in alphabetical order of their names.
	labelsCopy.Sort()
	// Reduce memory usage by interning all the strings in labels.
	labelsCopy.InternStrings()

	sw := &ScrapeWork{
		ScrapeURL:            scrapeURL,
		ScrapeInterval:       scrapeInterval,
		ScrapeTimeout:        scrapeTimeout,
		HonorLabels:          swc.honorLabels,
		HonorTimestamps:      swc.honorTimestamps,
		DenyRedirects:        swc.denyRedirects,
		OriginalLabels:       originalLabels,
		Labels:               labelsCopy,
		ExternalLabels:       swc.externalLabels,
		ProxyURL:             swc.proxyURL,
		ProxyAuthConfig:      swc.proxyAuthConfig,
		AuthConfig:           swc.authConfig,
		RelabelConfigs:       swc.relabelConfigs,
		MetricRelabelConfigs: swc.metricRelabelConfigs,
		SampleLimit:          swc.sampleLimit,
		DisableCompression:   swc.disableCompression,
		DisableKeepAlive:     swc.disableKeepAlive,
		StreamParse:          streamParse,
		ScrapeAlignInterval:  swc.scrapeAlignInterval,
		ScrapeOffset:         swc.scrapeOffset,
		SeriesLimit:          seriesLimit,
		NoStaleMarkers:       swc.noStaleMarkers,
		AuthToken:            at,
		jobNameOriginal:      swc.jobName,
	}
	return sw, nil
}
2022-11-30 06:22:12 +01:00
func mergeLabels ( dst * promutils . Labels , swc * scrapeWorkConfig , target string , extraLabels , metaLabels * promutils . Labels ) {
if n := dst . Len ( ) ; n > 0 {
logger . Panicf ( "BUG: len(dst.Labels) must be 0; got %d" , n )
2022-04-20 14:25:41 +02:00
}
2020-02-23 12:35:47 +01:00
// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
2022-11-30 06:22:12 +01:00
dst . Add ( "job" , swc . jobName )
dst . Add ( "__address__" , target )
dst . Add ( "__scheme__" , swc . scheme )
dst . Add ( "__metrics_path__" , swc . metricsPath )
dst . Add ( "__scrape_interval__" , swc . scrapeIntervalString )
dst . Add ( "__scrape_timeout__" , swc . scrapeTimeoutString )
2021-09-12 12:33:39 +02:00
for k , args := range swc . params {
2020-02-23 12:35:47 +01:00
if len ( args ) == 0 {
continue
}
k = "__param_" + k
v := args [ 0 ]
2022-11-30 06:22:12 +01:00
dst . Add ( k , v )
2020-02-23 12:35:47 +01:00
}
2022-11-30 06:22:12 +01:00
dst . AddFrom ( extraLabels )
dst . AddFrom ( metaLabels )
dst . RemoveDuplicates ( )
2020-02-23 12:35:47 +01:00
}
// Default scrape interval and timeout applied when the corresponding options
// are missing from both the global section and the scrape config.
// The values match Prometheus' documented defaults (scrape_interval: 1m,
// scrape_timeout: 10s) — presumably kept in sync intentionally; verify against
// upstream Prometheus docs when changing.
const (
	defaultScrapeInterval = time.Minute
	defaultScrapeTimeout  = 10 * time.Second
)