Mirror of https://github.com/VictoriaMetrics/VictoriaMetrics.git
all: show timeRange in traces in human-readable format instead of timestamps in milliseconds
Commit 6386f117c8 (parent 4b41a05ca7)
@@ -178,6 +178,12 @@ func (ec *EvalConfig) mayCache() bool {
 	return true
 }
 
+func (ec *EvalConfig) timeRangeString() string {
+	start := storage.TimestampToHumanReadableFormat(ec.Start)
+	end := storage.TimestampToHumanReadableFormat(ec.End)
+	return fmt.Sprintf("[%s..%s]", start, end)
+}
+
 func (ec *EvalConfig) getSharedTimestamps() []int64 {
 	ec.timestampsOnce.Do(ec.timestampsInit)
 	return ec.timestamps
@@ -213,7 +219,7 @@ func evalExpr(qt *querytracer.Tracer, ec *EvalConfig, e metricsql.Expr) ([]*time
 	if qt.Enabled() {
 		query := e.AppendString(nil)
 		mayCache := ec.mayCache()
-		qt = qt.NewChild("eval: query=%s, timeRange=[%d..%d], step=%d, mayCache=%v", query, ec.Start, ec.End, ec.Step, mayCache)
+		qt = qt.NewChild("eval: query=%s, timeRange=%s, step=%d, mayCache=%v", query, ec.timeRangeString(), ec.Step, mayCache)
 	}
 	rv, err := evalExprInternal(qt, ec, e)
 	if err != nil {
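
For illustration only (these values are not from the commit: the start timestamp reuses the example from the changelog entry further down, while the one-hour range and 15s step are made up), this is roughly how the eval trace label changes shape:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Illustrative values: the start timestamp comes from the changelog
	// example below; the one-hour range and the step are made up.
	start, end, step := int64(1656325974506), int64(1656329574506), int64(15000)
	layout := "2006-01-02T15:04:05.999Z"

	// Old-style trace label: raw milliseconds since the Unix epoch.
	fmt.Printf("timeRange=[%d..%d], step=%d\n", start, end, step)

	// New-style trace label: the same range formatted as UTC timestamps.
	fmt.Printf("timeRange=[%s..%s], step=%d\n",
		time.UnixMilli(start).UTC().Format(layout),
		time.UnixMilli(end).UTC().Format(layout),
		step)
	// Output:
	// timeRange=[1656325974506..1656329574506], step=15000
	// timeRange=[2022-06-27T10:32:54.506Z..2022-06-27T11:32:54.506Z], step=15000
}
```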
@@ -875,10 +881,12 @@ func evalRollupFuncWithMetricExpr(qt *querytracer.Tracer, ec *EvalConfig, funcNa
 	expr metricsql.Expr, me *metricsql.MetricExpr, iafc *incrementalAggrFuncContext, windowExpr *metricsql.DurationExpr) ([]*timeseries, error) {
 	var rollupMemorySize int64
 	window := windowExpr.Duration(ec.Step)
-	qt = qt.NewChild("rollup %s(): timeRange=[%d..%d], step=%d, window=%d", funcName, ec.Start, ec.End, ec.Step, window)
-	defer func() {
-		qt.Donef("neededMemoryBytes=%d", rollupMemorySize)
-	}()
+	if qt.Enabled() {
+		qt = qt.NewChild("rollup %s(): timeRange=%s, step=%d, window=%d", funcName, ec.timeRangeString(), ec.Step, window)
+		defer func() {
+			qt.Donef("neededMemoryBytes=%d", rollupMemorySize)
+		}()
+	}
 	if me.IsEmpty() {
 		return evalNumber(ec, nan), nil
 	}
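
Besides switching the format, this hunk also wraps the child-trace creation in `if qt.Enabled()`, presumably so `ec.timeRangeString()` is only computed when a trace is actually being recorded. A minimal sketch of that guard pattern, using a hypothetical stand-in type rather than the real `querytracer.Tracer`:

```go
package main

import "fmt"

// tracer is a hypothetical stand-in for querytracer.Tracer used only in this
// sketch: a nil tracer counts as disabled and silently drops messages.
type tracer struct{ msgs []string }

func (t *tracer) Enabled() bool { return t != nil }

func (t *tracer) Printf(format string, args ...interface{}) {
	if t == nil {
		return
	}
	t.msgs = append(t.msgs, fmt.Sprintf(format, args...))
}

// expensiveTimeRange stands in for ec.timeRangeString(): formatting two
// timestamps has a cost, so it is worth skipping when tracing is off.
func expensiveTimeRange() string {
	return "[2022-06-27T10:32:54.506Z..2022-06-27T11:32:54.506Z]"
}

func main() {
	var qt *tracer // nil: tracing disabled
	if qt.Enabled() {
		// Not reached for a nil tracer, so expensiveTimeRange() is never evaluated.
		qt.Printf("rollup sum_over_time(): timeRange=%s", expensiveTimeRange())
	}
	fmt.Println("tracing disabled, nothing recorded")
}
```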
@@ -156,7 +156,7 @@ func ResetRollupResultCache() {
 func (rrc *rollupResultCache) Get(qt *querytracer.Tracer, ec *EvalConfig, expr metricsql.Expr, window int64) (tss []*timeseries, newStart int64) {
 	if qt.Enabled() {
 		query := expr.AppendString(nil)
-		qt = qt.NewChild("rollup cache get: query=%s, timeRange=[%d..%d], step=%d, window=%d", query, ec.Start, ec.End, ec.Step, window)
+		qt = qt.NewChild("rollup cache get: query=%s, timeRange=%s, step=%d, window=%d", query, ec.timeRangeString(), ec.Step, window)
 		defer qt.Done()
 	}
 	if !ec.mayCache() {
@@ -243,7 +243,11 @@ func (rrc *rollupResultCache) Get(qt *querytracer.Tracer, ec *EvalConfig, expr m
 
 	timestamps = tss[0].Timestamps
 	newStart = timestamps[len(timestamps)-1] + ec.Step
-	qt.Printf("return %d series on a timeRange=[%d..%d]", len(tss), ec.Start, newStart-ec.Step)
+	if qt.Enabled() {
+		startString := storage.TimestampToHumanReadableFormat(ec.Start)
+		endString := storage.TimestampToHumanReadableFormat(newStart - ec.Step)
+		qt.Printf("return %d series on a timeRange=[%s..%s]", len(tss), startString, endString)
+	}
 	return tss, newStart
 }
 
@@ -252,7 +256,7 @@ var resultBufPool bytesutil.ByteBufferPool
 func (rrc *rollupResultCache) Put(qt *querytracer.Tracer, ec *EvalConfig, expr metricsql.Expr, window int64, tss []*timeseries) {
 	if qt.Enabled() {
 		query := expr.AppendString(nil)
-		qt = qt.NewChild("rollup cache put: query=%s, timeRange=[%d..%d], step=%d, window=%d, series=%d", query, ec.Start, ec.End, ec.Step, window, len(tss))
+		qt = qt.NewChild("rollup cache put: query=%s, timeRange=%s, step=%d, window=%d, series=%d", query, ec.timeRangeString(), ec.Step, window, len(tss))
 		defer qt.Done()
 	}
 	if len(tss) == 0 || !ec.mayCache() {
@@ -302,7 +306,11 @@ func (rrc *rollupResultCache) Put(qt *querytracer.Tracer, ec *EvalConfig, expr m
 	start := timestamps[0]
 	end := timestamps[len(timestamps)-1]
 	if mi.CoversTimeRange(start, end) {
-		qt.Printf("series on the given timeRange=[%d..%d] already exist in the cache", start, end)
+		if qt.Enabled() {
+			startString := storage.TimestampToHumanReadableFormat(start)
+			endString := storage.TimestampToHumanReadableFormat(end)
+			qt.Printf("series on the given timeRange=[%s..%s] already exist in the cache", startString, endString)
+		}
 		return
 	}
 
@@ -315,7 +323,11 @@ func (rrc *rollupResultCache) Put(qt *querytracer.Tracer, ec *EvalConfig, expr m
 		qt.Printf("cannot store series in the cache, since they would occupy more than %d bytes", maxMarshaledSize)
 		return
 	}
-	qt.Printf("marshal %d series on a timeRange=[%d..%d] into %d bytes", len(tss), start, end, len(resultBuf.B))
+	if qt.Enabled() {
+		startString := storage.TimestampToHumanReadableFormat(start)
+		endString := storage.TimestampToHumanReadableFormat(end)
+		qt.Printf("marshal %d series on a timeRange=[%s..%s] into %d bytes", len(tss), startString, endString, len(resultBuf.B))
+	}
 	compressedResultBuf := resultBufPool.Get()
 	defer resultBufPool.Put(compressedResultBuf)
 	compressedResultBuf.B = encoding.CompressZSTDLevel(compressedResultBuf.B[:0], resultBuf.B, 1)
@@ -32,6 +32,8 @@ scrape_configs:
   - targets: ["host123:8080"]
 ```
 
+* FEATURE: [query tracing](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#query-tracing): show timestamps in query traces in human-readable format (aka `RFC3339` in UTC timezone) instead of milliseconds since Unix epoch. For example, `2022-06-27T10:32:54.506Z` instead of `1656325974506`.
+
 * BUGFIX: limit max memory occupied by the cache, which stores parsed regular expressions. Previously too long regular expressions passed in [MetricsQL queries](https://docs.victoriametrics.com/MetricsQL.html) could result in big amounts of used memory (e.g. multiple gigabytes). Now the max cache size for parsed regexps is limited to a few megabytes.
 * BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): make sure that [stale markers](https://docs.victoriametrics.com/vmagent.html#prometheus-staleness-markers) are generated with the actual timestamp when an unsuccessful scrape occurs. This should prevent possible time series overlap on scrape target restart in dynamic environments such as Kubernetes.
 * BUGFIX: [VictoriaMetrics cluster](https://docs.victoriametrics.com/Cluster-VictoriaMetrics.html): assume that the response is complete if `-search.denyPartialResponse` is enabled and up to `-replicationFactor - 1` `vmstorage` nodes are unavailable. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1767).
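
The changelog entry calls the new format "RFC3339 in UTC timezone"; it is close to, but not exactly, Go's `time.RFC3339` layout, since the trace format keeps millisecond precision. A quick standard-library sketch of the difference, reusing the changelog's example value:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.UnixMilli(1656325974506).UTC() // example value from the changelog entry

	// The plain RFC3339 layout has no fractional-seconds field.
	fmt.Println(t.Format(time.RFC3339)) // 2022-06-27T10:32:54Z

	// The trace format introduced by this commit keeps millisecond precision.
	fmt.Println(t.Format("2006-01-02T15:04:05.999Z")) // 2022-06-27T10:32:54.506Z
}
```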
|
@ -383,7 +383,9 @@ func (sq *SearchQuery) String() string {
|
|||||||
for i, tfs := range sq.TagFilterss {
|
for i, tfs := range sq.TagFilterss {
|
||||||
a[i] = tagFiltersToString(tfs)
|
a[i] = tagFiltersToString(tfs)
|
||||||
}
|
}
|
||||||
return fmt.Sprintf("accountID=%d, projectID=%d, filters=%s, timeRange=[%d..%d]", sq.AccountID, sq.ProjectID, a, sq.MinTimestamp, sq.MaxTimestamp)
|
start := TimestampToHumanReadableFormat(sq.MinTimestamp)
|
||||||
|
end := TimestampToHumanReadableFormat(sq.MaxTimestamp)
|
||||||
|
return fmt.Sprintf("accountID=%d, projectID=%d, filters=%s, timeRange=[%s..%s]", sq.AccountID, sq.ProjectID, a, start, end)
|
||||||
}
|
}
|
||||||
|
|
||||||
func tagFiltersToString(tfs []TagFilter) string {
|
func tagFiltersToString(tfs []TagFilter) string {
|
||||||
|
@@ -26,7 +26,15 @@ type TimeRange struct {
 }
 
 func (tr *TimeRange) String() string {
-	return fmt.Sprintf("[%d..%d]", tr.MinTimestamp, tr.MaxTimestamp)
+	start := TimestampToHumanReadableFormat(tr.MinTimestamp)
+	end := TimestampToHumanReadableFormat(tr.MaxTimestamp)
+	return fmt.Sprintf("[%s..%s]", start, end)
+}
+
+// TimestampToHumanReadableFormat converts the given timestamp to human-readable format.
+func TimestampToHumanReadableFormat(timestamp int64) string {
+	t := timestampToTime(timestamp).UTC()
+	return t.Format("2006-01-02T15:04:05.999Z")
+}
 
 // timestampToPartitionName returns partition name for the given timestamp.
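
One property of the `.999` fraction in the layout used by `TimestampToHumanReadableFormat` above: trailing zeros (and the dot itself, for whole seconds) are omitted from the output. A small sketch that mimics the helper with the standard library in place of the package-private `timestampToTime`, assuming (as the changelog entry states) that timestamps are milliseconds since the Unix epoch:

```go
package main

import (
	"fmt"
	"time"
)

// humanReadable mirrors the new helper for this sketch only; it is not the
// storage package function itself and assumes millisecond timestamps.
func humanReadable(timestamp int64) string {
	t := time.UnixMilli(timestamp).UTC()
	return t.Format("2006-01-02T15:04:05.999Z")
}

func main() {
	fmt.Println(humanReadable(1656325974506)) // 2022-06-27T10:32:54.506Z
	fmt.Println(humanReadable(1656325974000)) // 2022-06-27T10:32:54Z (fraction omitted for whole seconds)
	fmt.Println(humanReadable(1656325974500)) // 2022-06-27T10:32:54.5Z (trailing zeros trimmed)
}
```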