package promscrape

import (
	"context"
	"crypto/tls"
	"flag"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"strings"
	"time"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape/discoveryutils"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/proxy"
	"github.com/VictoriaMetrics/fasthttp"
	"github.com/VictoriaMetrics/metrics"
)

var (
	maxScrapeSize = flagutil.NewBytes("promscrape.maxScrapeSize", 16*1024*1024, "The maximum size of scrape response in bytes to process from Prometheus targets. "+
		"Bigger responses are rejected")
	maxResponseHeadersSize = flagutil.NewBytes("promscrape.maxResponseHeadersSize", 4096, "The maximum size of http response headers from Prometheus scrape targets")
	disableCompression     = flag.Bool("promscrape.disableCompression", false, "Whether to disable sending 'Accept-Encoding: gzip' request headers to all the scrape targets. "+
		"This may reduce CPU usage on scrape targets at the cost of higher network bandwidth utilization. "+
		"It is possible to set 'disable_compression: true' individually per each 'scrape_config' section in '-promscrape.config' for fine-grained control")
	disableKeepAlive = flag.Bool("promscrape.disableKeepAlive", false, "Whether to disable HTTP keep-alive connections when scraping all the targets. "+
		"This may be useful when targets have no support for HTTP keep-alive connections. "+
		"It is possible to set 'disable_keepalive: true' individually per each 'scrape_config' section in '-promscrape.config' for fine-grained control. "+
		"Note that disabling HTTP keep-alive may increase load on both vmagent and scrape targets")
	streamParse = flag.Bool("promscrape.streamParse", false, "Whether to enable stream parsing for metrics obtained from scrape targets. This may be useful "+
		"for reducing memory usage when millions of metrics are exposed per each scrape target. "+
		"It is possible to set 'stream_parse: true' individually per each 'scrape_config' section in '-promscrape.config' for fine-grained control")
)
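
// The per-'scrape_config' overrides mentioned in the flag descriptions above are set in the
// config file referenced by -promscrape.config. A sketch of such an entry (the job name and
// the target below are hypothetical):
//
//	scrape_configs:
//	  - job_name: heavy-target
//	    disable_compression: true
//	    disable_keepalive: true
//	    stream_parse: true
//	    static_configs:
//	      - targets: ["host123:8428"]

// client is a scrape client for a single target.
// It holds a fasthttp-based client (hc) for the common case and a net/http-based client (sc)
// used when stream parsing is enabled for the target.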
type client struct {
	// hc is the default client optimized for the common case of scraping targets with a moderate number of metrics.
	hc *fasthttp.HostClient

	// sc (aka `stream client`) is used instead of hc if ScrapeWork.StreamParse is set.
	// It may be useful for scraping targets with millions of metrics per target.
	sc *http.Client

	ctx context.Context

	scrapeURL               string
	scrapeTimeoutSecondsStr string
	hostPort                string
	requestURI              string

	setHeaders              func(req *http.Request) error
	setProxyHeaders         func(req *http.Request) error
	setFasthttpHeaders      func(req *fasthttp.Request) error
	setFasthttpProxyHeaders func(req *fasthttp.Request) error

	denyRedirects      bool
	disableCompression bool
	disableKeepAlive   bool
}
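
// addMissingPort adds the default port (":443" for TLS, ":80" otherwise) to addr if it has no port yet.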
func addMissingPort(addr string, isTLS bool) string {
	if strings.Contains(addr, ":") {
		return addr
	}
	if isTLS {
		return concatTwoStrings(addr, ":443")
	}
	return concatTwoStrings(addr, ":80")
}
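
// concatTwoStrings concatenates x and y via a pooled byte buffer and interns the result,
// so identical host:port strings share the same memory.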
func concatTwoStrings(x, y string) string {
	bb := bbPool.Get()
	b := bb.B[:0]
	b = append(b, x...)
	b = append(b, y...)
	s := bytesutil.InternBytes(b)
	bb.B = b
	bbPool.Put(bb)
	return s
}
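
// scrapeUserAgent is the User-Agent header value sent with scrape requests.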
const scrapeUserAgent = "vm_promscrape"
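
// newClient returns a new scrape client for the given sw.
//
// The returned client uses hc (fasthttp) for regular scrapes and sc (net/http) for stream parsing,
// and routes requests via the proxy from sw.ProxyURL when it is configured.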
func newClient(ctx context.Context, sw *ScrapeWork) (*client, error) {
	var u fasthttp.URI
	u.Update(sw.ScrapeURL)
	hostPort := string(u.Host())
	dialAddr := hostPort
	requestURI := string(u.RequestURI())
	isTLS := string(u.Scheme()) == "https"
	var tlsCfg *tls.Config
	if isTLS {
		tlsCfg = sw.AuthConfig.NewTLSConfig()
	}
	setProxyHeaders := func(req *http.Request) error { return nil }
	setFasthttpProxyHeaders := func(req *fasthttp.Request) error { return nil }
	proxyURL := sw.ProxyURL
	if !isTLS && proxyURL.IsHTTPOrHTTPS() {
		// Send the full sw.ScrapeURL in requests to a proxy host for non-TLS scrape targets
		// like the net/http package from Go does.
		// See https://en.wikipedia.org/wiki/Proxy_server#Web_proxy_servers
		pu := proxyURL.GetURL()
		dialAddr = pu.Host
		requestURI = sw.ScrapeURL
		isTLS = pu.Scheme == "https"
		if isTLS {
			tlsCfg = sw.ProxyAuthConfig.NewTLSConfig()
		}
		proxyURLOrig := proxyURL
		setProxyHeaders = func(req *http.Request) error {
			return proxyURLOrig.SetHeaders(sw.ProxyAuthConfig, req)
		}
		setFasthttpProxyHeaders = func(req *fasthttp.Request) error {
			return proxyURLOrig.SetFasthttpHeaders(sw.ProxyAuthConfig, req)
		}
		proxyURL = &proxy.URL{}
	}
	hostPort = addMissingPort(hostPort, isTLS)
	dialAddr = addMissingPort(dialAddr, isTLS)
	dialFunc, err := newStatDialFunc(proxyURL, sw.ProxyAuthConfig)
	if err != nil {
		return nil, fmt.Errorf("cannot create dial func: %w", err)
	}
	hc := &fasthttp.HostClient{
		Addr: dialAddr,
		// Name is used in the User-Agent request header.
		Name:                         scrapeUserAgent,
		Dial:                         dialFunc,
		IsTLS:                        isTLS,
		TLSConfig:                    tlsCfg,
		MaxIdleConnDuration:          2 * sw.ScrapeInterval,
		ReadTimeout:                  sw.ScrapeTimeout,
		WriteTimeout:                 10 * time.Second,
		MaxResponseBodySize:          maxScrapeSize.IntN(),
		MaxIdempotentRequestAttempts: 1,
		ReadBufferSize:               maxResponseHeadersSize.IntN(),
	}
	var sc *http.Client
	var proxyURLFunc func(*http.Request) (*url.URL, error)
	if pu := sw.ProxyURL.GetURL(); pu != nil {
		proxyURLFunc = http.ProxyURL(pu)
	}
	sc = &http.Client{
		Transport: &http.Transport{
			TLSClientConfig:        tlsCfg,
			Proxy:                  proxyURLFunc,
			TLSHandshakeTimeout:    10 * time.Second,
			IdleConnTimeout:        2 * sw.ScrapeInterval,
			DisableCompression:     *disableCompression || sw.DisableCompression,
			DisableKeepAlives:      *disableKeepAlive || sw.DisableKeepAlive,
			DialContext:            statStdDial,
			MaxIdleConnsPerHost:    100,
			MaxResponseHeaderBytes: int64(maxResponseHeadersSize.N),
		},
		Timeout: sw.ScrapeTimeout,
	}
	if sw.DenyRedirects {
		sc.CheckRedirect = func(req *http.Request, via []*http.Request) error {
			return http.ErrUseLastResponse
		}
	}

	return &client{
		hc:  hc,
		ctx: ctx,
		sc:  sc,

		scrapeURL:               sw.ScrapeURL,
		scrapeTimeoutSecondsStr: fmt.Sprintf("%.3f", sw.ScrapeTimeout.Seconds()),
		hostPort:                hostPort,
		requestURI:              requestURI,
		setHeaders:              func(req *http.Request) error { return sw.AuthConfig.SetHeaders(req, true) },
		setProxyHeaders:         setProxyHeaders,
		setFasthttpHeaders:      func(req *fasthttp.Request) error { return sw.AuthConfig.SetFasthttpHeaders(req, true) },
		setFasthttpProxyHeaders: setFasthttpProxyHeaders,
		denyRedirects:           sw.DenyRedirects,
		disableCompression:      sw.DisableCompression,
		disableKeepAlive:        sw.DisableKeepAlive,
	}, nil
}
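
// GetStreamReader performs the scrape via the net/http client and returns a streamReader
// over the response body. The caller must release the reader via MustClose().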
func (c *client) GetStreamReader() (*streamReader, error) {
	deadline := time.Now().Add(c.sc.Timeout)
	ctx, cancel := context.WithDeadline(c.ctx, deadline)
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, c.scrapeURL, nil)
	if err != nil {
		cancel()
		return nil, fmt.Errorf("cannot create request for %q: %w", c.scrapeURL, err)
	}
	// The following `Accept` header has been copied from Prometheus sources.
	// See https://github.com/prometheus/prometheus/blob/f9d21f10ecd2a343a381044f131ea4e46381ce09/scrape/scrape.go#L532 .
	// This is needed as a workaround for scraping stupid Java-based servers such as Spring Boot.
	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/608 for details.
	// Do not bloat the `Accept` header with OpenMetrics, since it looks like a dead standard now.
	req.Header.Set("Accept", "text/plain;version=0.0.4;q=1,*/*;q=0.1")
	// Set X-Prometheus-Scrape-Timeout-Seconds like Prometheus does, since it is used by some exporters such as PushProx.
	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1179#issuecomment-813117162
	req.Header.Set("X-Prometheus-Scrape-Timeout-Seconds", c.scrapeTimeoutSecondsStr)
	req.Header.Set("User-Agent", scrapeUserAgent)
	err = c.setHeaders(req)
	if err != nil {
		cancel()
		return nil, fmt.Errorf("failed to set request headers for %q: %w", c.scrapeURL, err)
	}
	err = c.setProxyHeaders(req)
	if err != nil {
		cancel()
		return nil, fmt.Errorf("failed to set proxy request headers for %q: %w", c.scrapeURL, err)
	}
	scrapeRequests.Inc()
	resp, err := c.sc.Do(req)
	if err != nil {
		cancel()
		return nil, fmt.Errorf("cannot scrape %q: %w", c.scrapeURL, err)
	}
	if resp.StatusCode != http.StatusOK {
		metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_scrapes_total{status_code="%d"}`, resp.StatusCode)).Inc()
		respBody, _ := io.ReadAll(resp.Body)
		_ = resp.Body.Close()
		cancel()
		return nil, fmt.Errorf("unexpected status code returned when scraping %q: %d; expecting %d; response body: %q",
			c.scrapeURL, resp.StatusCode, http.StatusOK, respBody)
	}
	scrapesOK.Inc()
	return &streamReader{
		r:           resp.Body,
		cancel:      cancel,
		scrapeURL:   c.scrapeURL,
		maxBodySize: int64(c.hc.MaxResponseBodySize),
	}, nil
}

// isStatusRedirect checks whether the fasthttp status code is a redirect, in the same way the standard net/http client does.
func isStatusRedirect(statusCode int) bool {
	switch statusCode {
	case 301, 302, 303, 307, 308:
		return true
	}
	return false
}
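
// ReadData performs the scrape via the fasthttp client and returns the (ungzipped) response body in dst.
// If dst is empty, the response buffer is swapped into dst to avoid an extra copy.
//
// A hypothetical call site (the real callers live elsewhere in this package):
//
//	body, err := c.ReadData(body[:0])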
func (c *client) ReadData(dst []byte) ([]byte, error) {
	deadline := time.Now().Add(c.hc.ReadTimeout)
	req := fasthttp.AcquireRequest()
	req.SetRequestURI(c.requestURI)
	req.Header.SetHost(c.hostPort)
	// The following `Accept` header has been copied from Prometheus sources.
	// See https://github.com/prometheus/prometheus/blob/f9d21f10ecd2a343a381044f131ea4e46381ce09/scrape/scrape.go#L532 .
	// This is needed as a workaround for scraping stupid Java-based servers such as Spring Boot.
	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/608 for details.
	// Do not bloat the `Accept` header with OpenMetrics, since it looks like a dead standard now.
	req.Header.Set("Accept", "text/plain;version=0.0.4;q=1,*/*;q=0.1")
	// Set X-Prometheus-Scrape-Timeout-Seconds like Prometheus does, since it is used by some exporters such as PushProx.
	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1179#issuecomment-813117162
	req.Header.Set("X-Prometheus-Scrape-Timeout-Seconds", c.scrapeTimeoutSecondsStr)
	err := c.setFasthttpHeaders(req)
	if err != nil {
		return nil, fmt.Errorf("failed to set request headers for %q: %w", c.scrapeURL, err)
	}
	err = c.setFasthttpProxyHeaders(req)
	if err != nil {
		return nil, fmt.Errorf("failed to set proxy request headers for %q: %w", c.scrapeURL, err)
	}
	if !*disableCompression && !c.disableCompression {
		req.Header.Set("Accept-Encoding", "gzip")
	}
	if *disableKeepAlive || c.disableKeepAlive {
		req.SetConnectionClose()
	}
	resp := fasthttp.AcquireResponse()
	swapResponseBodies := len(dst) == 0
	if swapResponseBodies {
		// An optimization: write the response directly to dst.
		// This should reduce memory usage when scraping big targets.
		dst = resp.SwapBody(dst)
	}

	ctx, cancel := context.WithDeadline(c.ctx, deadline)
	defer cancel()

	err = doRequestWithPossibleRetry(ctx, c.hc, req, resp)
	statusCode := resp.StatusCode()
	redirectsCount := 0
	for err == nil && isStatusRedirect(statusCode) {
		if redirectsCount > 5 {
			err = fmt.Errorf("too many redirects")
			break
		}
		if c.denyRedirects {
			err = fmt.Errorf("cannot follow redirects if `follow_redirects: false` is set")
			break
		}
		// It is expected that the redirect is made on the same host.
		// Otherwise it won't work.
		location := resp.Header.Peek("Location")
		if len(location) == 0 {
			err = fmt.Errorf("missing Location header")
			break
		}
		req.URI().UpdateBytes(location)
		err = doRequestWithPossibleRetry(ctx, c.hc, req, resp)
		statusCode = resp.StatusCode()
		redirectsCount++
	}
	if swapResponseBodies {
		dst = resp.SwapBody(dst)
	}
	fasthttp.ReleaseRequest(req)
	if err != nil {
		fasthttp.ReleaseResponse(resp)
		if err == fasthttp.ErrTimeout {
			scrapesTimedout.Inc()
			return dst, fmt.Errorf("error when scraping %q with timeout %s: %w", c.scrapeURL, c.hc.ReadTimeout, err)
		}
		if err == fasthttp.ErrBodyTooLarge {
			maxScrapeSizeExceeded.Inc()
			return dst, fmt.Errorf("the response from %q exceeds -promscrape.maxScrapeSize=%d; "+
				"either reduce the response size for the target or increase -promscrape.maxScrapeSize", c.scrapeURL, maxScrapeSize.N)
		}
		return dst, fmt.Errorf("error when scraping %q: %w", c.scrapeURL, err)
	}
	if ce := resp.Header.Peek("Content-Encoding"); string(ce) == "gzip" {
		var err error
		if swapResponseBodies {
			zb := gunzipBufPool.Get()
			zb.B, err = fasthttp.AppendGunzipBytes(zb.B[:0], dst)
			dst = append(dst[:0], zb.B...)
			gunzipBufPool.Put(zb)
		} else {
			dst, err = fasthttp.AppendGunzipBytes(dst, resp.Body())
		}
		if err != nil {
			fasthttp.ReleaseResponse(resp)
			scrapesGunzipFailed.Inc()
			return dst, fmt.Errorf("cannot ungzip response from %q: %w", c.scrapeURL, err)
		}
		scrapesGunzipped.Inc()
	} else if !swapResponseBodies {
		dst = append(dst, resp.Body()...)
	}
	fasthttp.ReleaseResponse(resp)
	if len(dst) > c.hc.MaxResponseBodySize {
		maxScrapeSizeExceeded.Inc()
		return dst, fmt.Errorf("the response from %q exceeds -promscrape.maxScrapeSize=%d (the actual response size is %d bytes); "+
			"either reduce the response size for the target or increase -promscrape.maxScrapeSize", c.scrapeURL, maxScrapeSize.N, len(dst))
	}
	if statusCode != fasthttp.StatusOK {
		metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_scrapes_total{status_code="%d"}`, statusCode)).Inc()
		return dst, fmt.Errorf("unexpected status code returned when scraping %q: %d; expecting %d; response body: %q",
			c.scrapeURL, statusCode, fasthttp.StatusOK, dst)
	}
	scrapesOK.Inc()
	return dst, nil
}

var gunzipBufPool bytesutil.ByteBufferPool

var (
	maxScrapeSizeExceeded = metrics.NewCounter(`vm_promscrape_max_scrape_size_exceeded_errors_total`)
	scrapesTimedout       = metrics.NewCounter(`vm_promscrape_scrapes_timed_out_total`)
	scrapesOK             = metrics.NewCounter(`vm_promscrape_scrapes_total{status_code="200"}`)
	scrapesGunzipped      = metrics.NewCounter(`vm_promscrape_scrapes_gunziped_total`)
	scrapesGunzipFailed   = metrics.NewCounter(`vm_promscrape_scrapes_gunzip_failed_total`)
	scrapeRequests        = metrics.NewCounter(`vm_promscrape_scrape_requests_total`)
	scrapeRetries         = metrics.NewCounter(`vm_promscrape_scrape_retries_total`)
)
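
// doRequestWithPossibleRetry executes req via hc, retrying with exponential backoff
// on 429 (Too Many Requests) responses and on closed-connection / broken-pipe errors
// until ctx is canceled or its deadline is reached.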
func doRequestWithPossibleRetry(ctx context.Context, hc *fasthttp.HostClient, req *fasthttp.Request, resp *fasthttp.Response) error {
	scrapeRequests.Inc()

	var reqErr error
	// attempt returns true if the request execution is completed and a retry is not required.
	attempt := func() bool {
		// Use DoCtx instead of Do in order to support context cancellation
		reqErr = hc.DoCtx(ctx, req, resp)
		if reqErr == nil {
			statusCode := resp.StatusCode()
			if statusCode != fasthttp.StatusTooManyRequests {
				return true
			}
		} else if reqErr != fasthttp.ErrConnectionClosed && !strings.Contains(reqErr.Error(), "broken pipe") {
			return true
		}
		return false
	}
	if attempt() {
		return reqErr
	}

	// The first attempt was unsuccessful. Use exponential backoff for further attempts.
	// Perform the second attempt immediately after the first attempt - this should help
	// in cases when the remote side closes the keep-alive connection before the first attempt.
	// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/3293
	sleepTime := time.Second
	// It is expected that the deadline is already set to ctx, so the loop below
	// should eventually finish if all the attempt() calls are unsuccessful.
	for {
		scrapeRetries.Inc()
		if attempt() {
			return reqErr
		}
		sleepTime += sleepTime
		if !discoveryutils.SleepCtx(ctx, sleepTime) {
			return reqErr
		}
	}
}
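
// streamReader wraps a scrape response body, limits the number of bytes read
// to maxBodySize and cancels the associated request context on MustClose.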
type streamReader struct {
	r           io.ReadCloser
	cancel      context.CancelFunc
	bytesRead   int64
	scrapeURL   string
	maxBodySize int64
}

func (sr *streamReader) Read(p []byte) (int, error) {
	n, err := sr.r.Read(p)
	sr.bytesRead += int64(n)
	if err == nil && sr.bytesRead > sr.maxBodySize {
		maxScrapeSizeExceeded.Inc()
		err = fmt.Errorf("the response from %q exceeds -promscrape.maxScrapeSize=%d; "+
			"either reduce the response size for the target or increase -promscrape.maxScrapeSize", sr.scrapeURL, sr.maxBodySize)
	}
	return n, err
}

func (sr *streamReader) MustClose() {
	sr.cancel()
	if err := sr.r.Close(); err != nil {
		logger.Errorf("cannot close reader: %s", err)
	}
}