Mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git, synced 2025-01-05 22:32:20 +01:00
lib/promscrape: show real timestamp and real duration for the scrape on /targets page

Previously the scrape duration could be negative when the calculated scrape timestamp drifted away from the real scrape timestamp.
This commit is contained in:
parent 43830b1699
commit e3999ac010
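For context, a minimal sketch of the bug (not part of the commit; all names are illustrative). The scrape loop advances its calculated timestamp by a fixed scrapeInterval per tick and only re-syncs it to the wall clock when the drift exceeds 10% of the interval, so the calculated timestamp can run slightly ahead of the real scrape time. Measuring the duration against the calculated timestamp can then yield a negative value:

package main

import "fmt"

func main() {
	// Timestamps in milliseconds, as in scrapeWork.
	calculatedTimestamp := int64(10_000) // aligned scrape timestamp, drifted ahead
	realStart := int64(9_950)            // the scrape actually started 50ms earlier
	endTimestamp := realStart + 30       // the scrape took 30ms of real time

	// Old: measured against the calculated timestamp -> -0.020s
	fmt.Printf("old duration: %.3fs\n", float64(endTimestamp-calculatedTimestamp)/1e3)
	// New: measured against the real start -> 0.030s
	fmt.Printf("new duration: %.3fs\n", float64(endTimestamp-realStart)/1e3)
}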
@@ -3,6 +3,7 @@ package promscrape
 import (
 	"flag"
 	"fmt"
+	"math"
 	"strings"
 	"time"
 
@@ -162,7 +163,7 @@ func (sw *scrapeWork) run(stopCh <-chan struct{}) {
 	case <-timer.C:
 		ticker = time.NewTicker(scrapeInterval)
 		timestamp = time.Now().UnixNano() / 1e6
-		sw.scrapeAndLogError(timestamp)
+		sw.scrapeAndLogError(timestamp, timestamp)
 	}
 	defer ticker.Stop()
 	for {
@@ -170,13 +171,13 @@ func (sw *scrapeWork) run(stopCh <-chan struct{}) {
 		select {
 		case <-stopCh:
 			return
-		case <-ticker.C:
-			t := time.Now().UnixNano() / 1e6
-			if d := t - timestamp; d > 0 && float64(d)/float64(scrapeInterval.Milliseconds()) > 0.1 {
+		case tt := <-ticker.C:
+			t := tt.UnixNano() / 1e6
+			if d := math.Abs(float64(t - timestamp)); d > 0 && d/float64(scrapeInterval.Milliseconds()) > 0.1 {
 				// Too big jitter. Adjust timestamp
 				timestamp = t
 			}
-			sw.scrapeAndLogError(timestamp)
+			sw.scrapeAndLogError(timestamp, t)
 		}
 	}
 }
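A note on the hunk above (a reading, not an authoritative description): the drift check now compares the ticker's own fire time tt against the calculated timestamp, and math.Abs makes the 10%-of-interval threshold symmetric, so the timestamp is re-synced whether it lags behind or runs ahead of the real tick; the old check (d > 0) only caught drift in one direction. A standalone sketch of the adjusted check, with illustrative names:

package main

import (
	"fmt"
	"math"
	"time"
)

// adjustTimestamp re-syncs the calculated timestamp (ms) to the real tick
// time t when the drift in either direction exceeds 10% of the interval.
// Illustrative helper; the commit inlines this logic in scrapeWork.run.
func adjustTimestamp(timestamp, t int64, scrapeInterval time.Duration) int64 {
	if d := math.Abs(float64(t - timestamp)); d > 0 && d/float64(scrapeInterval.Milliseconds()) > 0.1 {
		return t // too much jitter: re-sync to the real tick time
	}
	return timestamp // small jitter: keep the stable, aligned timestamp
}

func main() {
	interval := 10 * time.Second
	fmt.Println(adjustTimestamp(10_000, 10_500, interval)) // 0.5s drift: keeps 10000
	fmt.Println(adjustTimestamp(10_000, 8_500, interval))  // 1.5s drift behind: returns 8500 (the old check missed this case)
}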
@@ -187,8 +188,8 @@ func (sw *scrapeWork) logError(s string) {
 	}
 }
 
-func (sw *scrapeWork) scrapeAndLogError(timestamp int64) {
-	if err := sw.scrapeInternal(timestamp); err != nil && !*suppressScrapeErrors {
+func (sw *scrapeWork) scrapeAndLogError(scrapeTimestamp, realTimestamp int64) {
+	if err := sw.scrapeInternal(scrapeTimestamp, realTimestamp); err != nil && !*suppressScrapeErrors {
 		logger.Errorf("error when scraping %q from job %q with labels %s: %s", sw.Config.ScrapeURL, sw.Config.Job(), sw.Config.LabelsString(), err)
 	}
 }
@@ -202,11 +203,11 @@ var (
 	pushDataDuration = metrics.NewHistogram("vm_promscrape_push_data_duration_seconds")
 )
 
-func (sw *scrapeWork) scrapeInternal(timestamp int64) error {
+func (sw *scrapeWork) scrapeInternal(scrapeTimestamp, realTimestamp int64) error {
 	var err error
 	sw.bodyBuf, err = sw.ReadData(sw.bodyBuf[:0])
 	endTimestamp := time.Now().UnixNano() / 1e6
-	duration := float64(endTimestamp-timestamp) / 1e3
+	duration := float64(endTimestamp-realTimestamp) / 1e3
 	scrapeDuration.Update(duration)
 	scrapeResponseSize.Update(float64(len(sw.bodyBuf)))
 	up := 1
@@ -221,7 +222,7 @@ func (sw *scrapeWork) scrapeInternal(timestamp int64) error {
 	samplesScraped := len(srcRows)
 	scrapedSamples.Update(float64(samplesScraped))
 	for i := range srcRows {
-		sw.addRowToTimeseries(&srcRows[i], timestamp, true)
+		sw.addRowToTimeseries(&srcRows[i], scrapeTimestamp, true)
 	}
 	sw.rows.Reset()
 	if sw.Config.SampleLimit > 0 && len(sw.writeRequest.Timeseries) > sw.Config.SampleLimit {
@@ -231,18 +232,18 @@ func (sw *scrapeWork) scrapeInternal(timestamp int64) error {
 	}
 	samplesPostRelabeling := len(sw.writeRequest.Timeseries)
 	seriesAdded := sw.getSeriesAdded()
-	sw.addAutoTimeseries("up", float64(up), timestamp)
-	sw.addAutoTimeseries("scrape_duration_seconds", duration, timestamp)
-	sw.addAutoTimeseries("scrape_samples_scraped", float64(samplesScraped), timestamp)
-	sw.addAutoTimeseries("scrape_samples_post_metric_relabeling", float64(samplesPostRelabeling), timestamp)
-	sw.addAutoTimeseries("scrape_series_added", float64(seriesAdded), timestamp)
+	sw.addAutoTimeseries("up", float64(up), scrapeTimestamp)
+	sw.addAutoTimeseries("scrape_duration_seconds", duration, scrapeTimestamp)
+	sw.addAutoTimeseries("scrape_samples_scraped", float64(samplesScraped), scrapeTimestamp)
+	sw.addAutoTimeseries("scrape_samples_post_metric_relabeling", float64(samplesPostRelabeling), scrapeTimestamp)
+	sw.addAutoTimeseries("scrape_series_added", float64(seriesAdded), scrapeTimestamp)
 	startTime := time.Now()
 	sw.PushData(&sw.writeRequest)
 	pushDataDuration.UpdateDuration(startTime)
 	prompbmarshal.ResetWriteRequest(&sw.writeRequest)
 	sw.labels = sw.labels[:0]
 	sw.samples = sw.samples[:0]
-	tsmGlobal.Update(&sw.Config, sw.ScrapeGroup, up == 1, timestamp, int64(duration*1000), err)
+	tsmGlobal.Update(&sw.Config, sw.ScrapeGroup, up == 1, realTimestamp, int64(duration*1000), err)
 	return err
 }
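Taken together, the scrapeInternal changes give the two timestamps distinct roles: the aligned scrapeTimestamp still stamps every sample, including the auto-generated up and scrape_* series, so sample spacing stays regular, while realTimestamp drives the duration calculation and the tsmGlobal.Update call that backs the /targets page. A rough sketch of the resulting loop shape (illustrative and simplified, not the exact run method):

package sketch

import "time"

// runScrapeLoop sketches the two-timestamp convention after this commit;
// names and structure are simplified from scrapeWork.run, not the real code.
func runScrapeLoop(scrape func(scrapeTimestamp, realTimestamp int64) error,
	scrapeInterval time.Duration, stopCh <-chan struct{}) {
	ticker := time.NewTicker(scrapeInterval)
	defer ticker.Stop()
	timestamp := time.Now().UnixNano() / 1e6 // aligned sample timestamp, ms
	for {
		select {
		case <-stopCh:
			return
		case tt := <-ticker.C:
			t := tt.UnixNano() / 1e6 // real tick time, ms
			// Samples keep the aligned timestamp; the real timestamp is
			// what /targets shows and what the duration is measured from.
			_ = scrape(timestamp, t)
			timestamp += scrapeInterval.Milliseconds()
		}
	}
}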

@@ -39,7 +39,7 @@ func TestScrapeWorkScrapeInternalFailure(t *testing.T) {
 	}
 
 	timestamp := int64(123)
-	if err := sw.scrapeInternal(timestamp); err == nil {
+	if err := sw.scrapeInternal(timestamp, timestamp); err == nil {
 		t.Fatalf("expecting non-nil error")
 	}
 	if pushDataErr != nil {
@@ -79,7 +79,7 @@ func TestScrapeWorkScrapeInternalSuccess(t *testing.T) {
 	}
 
 	timestamp := int64(123)
-	if err := sw.scrapeInternal(timestamp); err != nil {
+	if err := sw.scrapeInternal(timestamp, timestamp); err != nil {
 		t.Fatalf("unexpected error: %s", err)
 	}
 	if pushDataErr != nil {

@@ -41,7 +41,7 @@ vm_tcplistener_write_calls_total{name="https", addr=":443"} 132356
 	sw.PushData = func(wr *prompbmarshal.WriteRequest) {}
 	timestamp := int64(0)
 	for pb.Next() {
-		if err := sw.scrapeInternal(timestamp); err != nil {
+		if err := sw.scrapeInternal(timestamp, timestamp); err != nil {
 			panic(fmt.Errorf("unexpected error: %w", err))
 		}
 		timestamp++