vendor: update github.com/VictoriaMetrics/metrics from v1.13.1 to v1.14.0

The new version switches from log-linear histograms to log-based histograms,
which provide up to 3.6 times better accuracy.
Aliaksandr Valialkin 2021-02-15 14:32:57 +02:00
parent 9f5ac603a7
commit 4e39bf148c
6 changed files with 161 additions and 168 deletions
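Background for the accuracy claim above (an illustration, not part of the commit): the old scheme in the vendored histogram.go split each power-of-ten decade into 18 linear buckets of width 0.5*10^e (decimalMultiplier = 2, bucketSize = 9*2), so near the bottom of a decade a bucket spans roughly 50% of the value. The new scheme uses 18 log-spaced buckets per decade with a constant step of 10^(1/18), roughly 13.6% per bucket, which is presumably where the "up to 3.6 times better accuracy" figure comes from (0.50 / 0.136 is about 3.6-3.7). The standalone Go sketch below reproduces the new bucket boundaries and shows why the test fixtures further down switch from values like "1.5e0" to "1.292e+00".

package main

import (
	"fmt"
	"math"
)

// Sketch only: reproduce the new log-based bucket boundaries
// (18 buckets per decade, constant ratio 10^(1/18)).
func main() {
	const bucketsPerDecimal = 18
	bucketMultiplier := math.Pow(10, 1.0/bucketsPerDecimal) // about 1.1365

	// Boundaries starting at 1.0, formatted the same way as the new
	// vmrange/le labels; matches the updated test fixtures:
	// 1.000e+00, 1.136e+00, 1.292e+00, 1.468e+00, 1.668e+00, ...
	v := 1.0
	for i := 0; i < 5; i++ {
		fmt.Printf("%.3e\n", v)
		v *= bucketMultiplier
	}
}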


@@ -3669,8 +3669,9 @@ func TestExecSuccess(t *testing.T) {
t.Run(`histogram(scalar)`, func(t *testing.T) {
t.Parallel()
q := `sort(histogram(123)+(
label_set(0, "le", "1.0e2"),
label_set(0, "le", "1.5e2"),
label_set(0, "le", "1.000e+02"),
label_set(0, "le", "1.136e+02"),
label_set(0, "le", "1.292e+02"),
label_set(1, "le", "+Inf"),
))`
r1 := netstorage.Result{
@@ -3681,7 +3682,7 @@ func TestExecSuccess(t *testing.T) {
r1.MetricName.Tags = []storage.Tag{
{
Key: []byte("le"),
Value: []byte("1.0e2"),
Value: []byte("1.136e+02"),
},
}
r2 := netstorage.Result{
@@ -3692,7 +3693,7 @@ func TestExecSuccess(t *testing.T) {
r2.MetricName.Tags = []storage.Tag{
{
Key: []byte("le"),
Value: []byte("1.5e2"),
Value: []byte("1.292e+02"),
},
}
r3 := netstorage.Result{
@@ -3716,9 +3717,9 @@ func TestExecSuccess(t *testing.T) {
label_set(1.1, "xx", "yy"),
alias(1.15, "foobar"),
))+(
label_set(0, "le", "9.5e-1"),
label_set(0, "le", "1.0e0"),
label_set(0, "le", "1.5e0"),
label_set(0, "le", "8.799e-01"),
label_set(0, "le", "1.000e+00"),
label_set(0, "le", "1.292e+00"),
label_set(1, "le", "+Inf"),
))`
r1 := netstorage.Result{
@@ -3729,7 +3730,7 @@ func TestExecSuccess(t *testing.T) {
r1.MetricName.Tags = []storage.Tag{
{
Key: []byte("le"),
Value: []byte("9.5e-1"),
Value: []byte("8.799e-01"),
},
}
r2 := netstorage.Result{
@@ -3740,7 +3741,7 @@ func TestExecSuccess(t *testing.T) {
r2.MetricName.Tags = []storage.Tag{
{
Key: []byte("le"),
Value: []byte("1.0e0"),
Value: []byte("1.000e+00"),
},
}
r3 := netstorage.Result{
@@ -3751,7 +3752,7 @@ func TestExecSuccess(t *testing.T) {
r3.MetricName.Tags = []storage.Tag{
{
Key: []byte("le"),
Value: []byte("1.5e0"),
Value: []byte("1.292e+00"),
},
}
r4 := netstorage.Result{
@@ -4021,10 +4022,10 @@ func TestExecSuccess(t *testing.T) {
})
t.Run(`histogram_over_time`, func(t *testing.T) {
t.Parallel()
q := `sort(histogram_over_time(alias(label_set(rand(0)*1.3+1.1, "foo", "bar"), "xxx")[200s:5s]))`
q := `sort_by_label(histogram_over_time(alias(label_set(rand(0)*1.3+1.1, "foo", "bar"), "xxx")[200s:5s]), "vmrange")`
r1 := netstorage.Result{
MetricName: metricNameExpected,
Values: []float64{14, 16, 12, 13, 15, 11},
Values: []float64{1, 2, 2, 2, nan, 1},
Timestamps: timestampsExpected,
}
r1.MetricName.Tags = []storage.Tag{
@@ -4034,12 +4035,12 @@ func TestExecSuccess(t *testing.T) {
},
{
Key: []byte("vmrange"),
Value: []byte("2.0e0...2.5e0"),
Value: []byte("1.000e+00...1.136e+00"),
},
}
r2 := netstorage.Result{
MetricName: metricNameExpected,
Values: []float64{13, 14, 12, 8, 12, 13},
Values: []float64{3, 3, 4, 2, 8, 3},
Timestamps: timestampsExpected,
}
r2.MetricName.Tags = []storage.Tag{
@@ -4049,12 +4050,12 @@ func TestExecSuccess(t *testing.T) {
},
{
Key: []byte("vmrange"),
Value: []byte("1.0e0...1.5e0"),
Value: []byte("1.136e+00...1.292e+00"),
},
}
r3 := netstorage.Result{
MetricName: metricNameExpected,
Values: []float64{13, 10, 16, 19, 13, 16},
Values: []float64{7, 7, 5, 3, 3, 9},
Timestamps: timestampsExpected,
}
r3.MetricName.Tags = []storage.Tag{
@@ -4064,46 +4065,111 @@ func TestExecSuccess(t *testing.T) {
},
{
Key: []byte("vmrange"),
Value: []byte("1.5e0...2.0e0"),
Value: []byte("1.292e+00...1.468e+00"),
},
}
resultExpected := []netstorage.Result{r1, r2, r3}
r4 := netstorage.Result{
MetricName: metricNameExpected,
Values: []float64{7, 4, 6, 5, 6, 4},
Timestamps: timestampsExpected,
}
r4.MetricName.Tags = []storage.Tag{
{
Key: []byte("foo"),
Value: []byte("bar"),
},
{
Key: []byte("vmrange"),
Value: []byte("1.468e+00...1.668e+00"),
},
}
r5 := netstorage.Result{
MetricName: metricNameExpected,
Values: []float64{6, 6, 9, 13, 7, 7},
Timestamps: timestampsExpected,
}
r5.MetricName.Tags = []storage.Tag{
{
Key: []byte("foo"),
Value: []byte("bar"),
},
{
Key: []byte("vmrange"),
Value: []byte("1.668e+00...1.896e+00"),
},
}
r6 := netstorage.Result{
MetricName: metricNameExpected,
Values: []float64{5, 9, 4, 6, 7, 9},
Timestamps: timestampsExpected,
}
r6.MetricName.Tags = []storage.Tag{
{
Key: []byte("foo"),
Value: []byte("bar"),
},
{
Key: []byte("vmrange"),
Value: []byte("1.896e+00...2.154e+00"),
},
}
r7 := netstorage.Result{
MetricName: metricNameExpected,
Values: []float64{11, 9, 10, 9, 9, 7},
Timestamps: timestampsExpected,
}
r7.MetricName.Tags = []storage.Tag{
{
Key: []byte("foo"),
Value: []byte("bar"),
},
{
Key: []byte("vmrange"),
Value: []byte("2.154e+00...2.448e+00"),
},
}
resultExpected := []netstorage.Result{r1, r2, r3, r4, r5, r6, r7}
f(q, resultExpected)
})
t.Run(`sum(histogram_over_time) by (vmrange)`, func(t *testing.T) {
t.Parallel()
q := `sort(sum(histogram_over_time(alias(label_set(rand(0)*1.3+1.1, "foo", "bar"), "xxx")[200s:5s])) by (vmrange))`
q := `sort_desc(
buckets_limit(
3,
sum(histogram_over_time(alias(label_set(rand(0)*1.3+1.1, "foo", "bar"), "xxx")[200s:5s])) by (vmrange)
)
)`
r1 := netstorage.Result{
MetricName: metricNameExpected,
Values: []float64{14, 16, 12, 13, 15, 11},
Values: []float64{40, 40, 40, 40, 40, 40},
Timestamps: timestampsExpected,
}
r1.MetricName.Tags = []storage.Tag{
{
Key: []byte("vmrange"),
Value: []byte("2.0e0...2.5e0"),
Key: []byte("le"),
Value: []byte("+Inf"),
},
}
r2 := netstorage.Result{
MetricName: metricNameExpected,
Values: []float64{13, 14, 12, 8, 12, 13},
Values: []float64{24, 22, 26, 25, 24, 24},
Timestamps: timestampsExpected,
}
r2.MetricName.Tags = []storage.Tag{
{
Key: []byte("vmrange"),
Value: []byte("1.0e0...1.5e0"),
Key: []byte("le"),
Value: []byte("1.896e+00"),
},
}
r3 := netstorage.Result{
MetricName: metricNameExpected,
Values: []float64{13, 10, 16, 19, 13, 16},
Values: []float64{11, 12, 11, 7, 11, 13},
Timestamps: timestampsExpected,
}
r3.MetricName.Tags = []storage.Tag{
{
Key: []byte("vmrange"),
Value: []byte("1.5e0...2.0e0"),
Key: []byte("le"),
Value: []byte("1.468e+00"),
},
}
resultExpected := []netstorage.Result{r1, r2, r3}
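A quick sanity check on the histogram_over_time changes above (not part of the diff): the sampled values rand(0)*1.3+1.1 lie in [1.1, 2.4). The old buckets covering that range were 0.5 wide (1.0e0...1.5e0, 1.5e0...2.0e0, 2.0e0...2.5e0), hence three expected series; the new buckets step by 10^(1/18), so the same range now spans about seven buckets (r1 through r7), as the sketch below estimates.

package main

import (
	"fmt"
	"math"
)

// Sketch: how many new log-based buckets the value range [1.1, 2.4) spans.
func main() {
	const bucketsPerDecimal = 18
	lo, hi := 1.1, 2.4
	widths := math.Log10(hi/lo) * bucketsPerDecimal
	// Prints roughly 6.1, i.e. the values spread over about 7 buckets,
	// matching the vmranges from 1.000e+00...1.136e+00 up to
	// 2.154e+00...2.448e+00 in the updated test.
	fmt.Printf("range spans about %.1f bucket widths\n", widths)
}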
@@ -4125,7 +4191,7 @@ func TestExecSuccess(t *testing.T) {
q := `topk_max(1, histogram_over_time(alias(label_set(rand(0)*1.3+1.1, "foo", "bar"), "xxx")[200s:5s]))`
r := netstorage.Result{
MetricName: metricNameExpected,
Values: []float64{13, 10, 16, 19, 13, 16},
Values: []float64{6, 6, 9, 13, 7, 7},
Timestamps: timestampsExpected,
}
r.MetricName.Tags = []storage.Tag{
@@ -4135,7 +4201,7 @@ func TestExecSuccess(t *testing.T) {
},
{
Key: []byte("vmrange"),
Value: []byte("1.5e0...2.0e0"),
Value: []byte("1.668e+00...1.896e+00"),
},
}
resultExpected := []netstorage.Result{r}

go.mod

@@ -8,7 +8,7 @@ require (
// Do not use the original github.com/valyala/fasthttp because of issues
// like https://github.com/valyala/fasthttp/commit/996610f021ff45fdc98c2ce7884d5fa4e7f9199b
github.com/VictoriaMetrics/fasthttp v1.0.12
github.com/VictoriaMetrics/metrics v1.13.1
github.com/VictoriaMetrics/metrics v1.14.0
github.com/VictoriaMetrics/metricsql v0.10.1
github.com/aws/aws-sdk-go v1.37.7
github.com/cespare/xxhash/v2 v2.1.1

go.sum

@@ -85,8 +85,8 @@ github.com/VictoriaMetrics/fastcache v1.5.7/go.mod h1:ptDBkNMQI4RtmVo8VS/XwRY6Ro
github.com/VictoriaMetrics/fasthttp v1.0.12 h1:Ag0E119yrH4BTxVyjKD9TeiSImtG9bUcg/stItLJhSE=
github.com/VictoriaMetrics/fasthttp v1.0.12/go.mod h1:3SeUL4zwB/p/a9aEeRc6gdlbrtNHXBJR6N376EgiSHU=
github.com/VictoriaMetrics/metrics v1.12.2/go.mod h1:Z1tSfPfngDn12bTfZSCqArT3OPY3u88J12hSoOhuiRE=
github.com/VictoriaMetrics/metrics v1.13.1 h1:1S9QrbXLPrcDBYLiDNIqWk9AC/lk5Ptk8eIjDIFFDsQ=
github.com/VictoriaMetrics/metrics v1.13.1/go.mod h1:Z1tSfPfngDn12bTfZSCqArT3OPY3u88J12hSoOhuiRE=
github.com/VictoriaMetrics/metrics v1.14.0 h1:yvyEVo7cPN2Hv+Hrm1zPTA1f/squmEZTq6xtPH/8F64=
github.com/VictoriaMetrics/metrics v1.14.0/go.mod h1:Z1tSfPfngDn12bTfZSCqArT3OPY3u88J12hSoOhuiRE=
github.com/VictoriaMetrics/metricsql v0.10.1 h1:wLl/YbMmBGFPyLKMfqNLC333iygibosSM5iSvlH2B4A=
github.com/VictoriaMetrics/metricsql v0.10.1/go.mod h1:ylO7YITho/Iw6P71oEaGyHbO94bGoGtzWfLGqFhMIg8=
github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM=


@@ -844,9 +844,9 @@ func (s *Storage) mustSaveAndStopCache(c *workingsetcache.Cache, info, name stri
func nextRetentionDuration(retentionMsecs int64) time.Duration {
// Round retentionMsecs to days. This guarantees that per-day inverted index works as expected.
retentionMsecs = ((retentionMsecs+msecPerDay-1)/msecPerDay)*msecPerDay
retentionMsecs = ((retentionMsecs + msecPerDay - 1) / msecPerDay) * msecPerDay
t := time.Now().UnixNano() / 1e6
deadline := ((t+retentionMsecs-1)/retentionMsecs)*retentionMsecs
deadline := ((t + retentionMsecs - 1) / retentionMsecs) * retentionMsecs
// Schedule the deadline to +4 hours from the next retention period start.
// This should prevent possible double deletion of indexdb
// due to time drift - see https://github.com/VictoriaMetrics/VictoriaMetrics/issues/248 .
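For illustration only (not code from this commit): both expressions above are integer ceil-roundings, first rounding the retention up to whole days, then taking the next multiple of the retention as the deadline. The sample values in the sketch below are made up.

package main

import "fmt"

// Sketch of the rounding used in nextRetentionDuration.
func main() {
	const msecPerDay int64 = 24 * 3600 * 1000

	retentionMsecs := 31 * msecPerDay // e.g. a 31-day retention
	t := int64(1613383977000)         // "now" in milliseconds since the epoch

	// Round the retention up to whole days (a no-op here, already 31 days).
	retentionMsecs = ((retentionMsecs + msecPerDay - 1) / msecPerDay) * msecPerDay
	// Next multiple of the retention at or after t.
	deadline := ((t + retentionMsecs - 1) / retentionMsecs) * retentionMsecs

	fmt.Println("next retention deadline (ms):", deadline)
}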


@@ -9,14 +9,15 @@ import (
)
const (
e10Min = -9
e10Max = 18
decimalMultiplier = 2
bucketSize = 9 * decimalMultiplier
bucketsCount = e10Max - e10Min
decimalPrecision = 1e-12
e10Min = -9
e10Max = 18
bucketsPerDecimal = 18
decimalBucketsCount = e10Max - e10Min
bucketsCount = decimalBucketsCount * bucketsPerDecimal
)
var bucketMultiplier = math.Pow(10, 1.0/bucketsPerDecimal)
// Histogram is a histogram for non-negative values with automatically created buckets.
//
// See https://medium.com/@valyala/improving-histogram-usability-for-prometheus-and-grafana-bc7e5df0e350
@@ -48,9 +49,8 @@ type Histogram struct {
// mu guarantees synchronous update for all the counters and sum.
mu sync.Mutex
buckets [bucketsCount]*histogramBucket
decimalBuckets [decimalBucketsCount]*[bucketsPerDecimal]uint64
zeros uint64
lower uint64
upper uint64
@@ -65,15 +65,14 @@ func (h *Histogram) Reset() {
}
func (h *Histogram) resetLocked() {
for _, hb := range h.buckets[:] {
if hb == nil {
for _, db := range h.decimalBuckets[:] {
if db == nil {
continue
}
for offset := range hb.counts[:] {
hb.counts[offset] = 0
for i := range db[:] {
db[i] = 0
}
}
h.zeros = 0
h.lower = 0
h.upper = 0
}
@@ -86,31 +85,31 @@ func (h *Histogram) Update(v float64) {
// Skip NaNs and negative values.
return
}
bucketIdx, offset := getBucketIdxAndOffset(v)
bucketIdx := (math.Log10(v) - e10Min) * bucketsPerDecimal
idx := uint(bucketIdx)
if bucketIdx == float64(idx) {
// Edge case for 10^n values, which must go to the lower bucket
// according to Prometheus logic for `le`-based histograms.
idx--
}
decimalBucketIdx := idx / bucketsPerDecimal
offset := idx % bucketsPerDecimal
h.mu.Lock()
h.updateLocked(v, bucketIdx, offset)
h.mu.Unlock()
}
func (h *Histogram) updateLocked(v float64, bucketIdx int, offset uint) {
h.sum += v
if bucketIdx < 0 {
// Special cases for zero, too small or too big value
if offset == 0 {
h.zeros++
} else if offset == 1 {
h.lower++
} else {
h.upper++
h.lower++
} else if bucketIdx >= bucketsCount {
h.upper++
} else {
db := h.decimalBuckets[decimalBucketIdx]
if db == nil {
var b [bucketsPerDecimal]uint64
db = &b
h.decimalBuckets[decimalBucketIdx] = db
}
return
db[offset]++
}
hb := h.buckets[bucketIdx]
if hb == nil {
hb = &histogramBucket{}
h.buckets[bucketIdx] = hb
}
hb.counts[offset]++
h.mu.Unlock()
}
// VisitNonZeroBuckets calls f for all buckets with non-zero counters.
@@ -121,38 +120,25 @@ func (h *Histogram) updateLocked(v float64, bucketIdx int, offset uint) {
// with `le` (less or equal) labels.
func (h *Histogram) VisitNonZeroBuckets(f func(vmrange string, count uint64)) {
h.mu.Lock()
h.visitNonZeroBucketsLocked(f)
h.mu.Unlock()
}
func (h *Histogram) visitNonZeroBucketsLocked(f func(vmrange string, count uint64)) {
if h.zeros > 0 {
vmrange := getVMRange(-1, 0)
f(vmrange, h.zeros)
}
if h.lower > 0 {
vmrange := getVMRange(-1, 1)
f(vmrange, h.lower)
f(lowerBucketRange, h.lower)
}
for bucketIdx, hb := range h.buckets[:] {
if hb == nil {
for decimalBucketIdx, db := range h.decimalBuckets[:] {
if db == nil {
continue
}
for offset, count := range hb.counts[:] {
for offset, count := range db[:] {
if count > 0 {
vmrange := getVMRange(bucketIdx, uint(offset))
bucketIdx := decimalBucketIdx*bucketsPerDecimal + offset
vmrange := getVMRange(bucketIdx)
f(vmrange, count)
}
}
}
if h.upper > 0 {
vmrange := getVMRange(-1, 2)
f(vmrange, h.upper)
f(upperBucketRange, h.upper)
}
}
type histogramBucket struct {
counts [bucketSize]uint64
h.mu.Unlock()
}
// NewHistogram creates and returns new histogram with the given name.
@@ -193,43 +179,27 @@ func (h *Histogram) UpdateDuration(startTime time.Time) {
h.Update(d)
}
func getVMRange(bucketIdx int, offset uint) string {
func getVMRange(bucketIdx int) string {
bucketRangesOnce.Do(initBucketRanges)
if bucketIdx < 0 {
if offset > 2 {
panic(fmt.Errorf("BUG: offset must be in range [0...2] for negative bucketIdx; got %d", offset))
}
return bucketRanges[offset]
}
idx := 3 + uint(bucketIdx)*bucketSize + offset
return bucketRanges[idx]
return bucketRanges[bucketIdx]
}
func initBucketRanges() {
bucketRanges[0] = "0...0"
bucketRanges[1] = fmt.Sprintf("0...%.1fe%d", 1.0, e10Min)
bucketRanges[2] = fmt.Sprintf("%.1fe%d...+Inf", 1.0, e10Max)
idx := 3
start := fmt.Sprintf("%.1fe%d", 1.0, e10Min)
for bucketIdx := 0; bucketIdx < bucketsCount; bucketIdx++ {
for offset := 0; offset < bucketSize; offset++ {
e10 := e10Min + bucketIdx
m := 1 + float64(offset+1)/decimalMultiplier
if math.Abs(m-10) < decimalPrecision {
m = 1
e10++
}
end := fmt.Sprintf("%.1fe%d", m, e10)
bucketRanges[idx] = start + "..." + end
idx++
start = end
}
v := math.Pow10(e10Min)
start := fmt.Sprintf("%.3e", v)
for i := 0; i < bucketsCount; i++ {
v *= bucketMultiplier
end := fmt.Sprintf("%.3e", v)
bucketRanges[i] = start + "..." + end
start = end
}
}
var (
// 3 additional buckets for zero, lower and upper.
bucketRanges [3 + bucketsCount*bucketSize]string
lowerBucketRange = fmt.Sprintf("0...%.3e", math.Pow10(e10Min))
upperBucketRange = fmt.Sprintf("%.3e...+Inf", math.Pow10(e10Max))
bucketRanges [bucketsCount]string
bucketRangesOnce sync.Once
)
@@ -238,21 +208,21 @@ func (h *Histogram) marshalTo(prefix string, w io.Writer) {
h.VisitNonZeroBuckets(func(vmrange string, count uint64) {
tag := fmt.Sprintf("vmrange=%q", vmrange)
metricName := addTag(prefix, tag)
name, filters := splitMetricName(metricName)
fmt.Fprintf(w, "%s_bucket%s %d\n", name, filters, count)
name, labels := splitMetricName(metricName)
fmt.Fprintf(w, "%s_bucket%s %d\n", name, labels, count)
countTotal += count
})
if countTotal == 0 {
return
}
name, filters := splitMetricName(prefix)
name, labels := splitMetricName(prefix)
sum := h.getSum()
if float64(int64(sum)) == sum {
fmt.Fprintf(w, "%s_sum%s %d\n", name, filters, int64(sum))
fmt.Fprintf(w, "%s_sum%s %d\n", name, labels, int64(sum))
} else {
fmt.Fprintf(w, "%s_sum%s %g\n", name, filters, sum)
fmt.Fprintf(w, "%s_sum%s %g\n", name, labels, sum)
}
fmt.Fprintf(w, "%s_count%s %d\n", name, filters, countTotal)
fmt.Fprintf(w, "%s_count%s %d\n", name, labels, countTotal)
}
func (h *Histogram) getSum() float64 {
@@ -261,46 +231,3 @@ func (h *Histogram) getSum() float64 {
h.mu.Unlock()
return sum
}
func getBucketIdxAndOffset(v float64) (int, uint) {
if v < 0 {
panic(fmt.Errorf("BUG: v must be positive; got %g", v))
}
if v == 0 {
return -1, 0
}
if math.IsInf(v, 1) {
return -1, 2
}
e10 := int(math.Floor(math.Log10(v)))
bucketIdx := e10 - e10Min
if bucketIdx < 0 {
return -1, 1
}
if bucketIdx >= bucketsCount {
if bucketIdx == bucketsCount && math.Abs(math.Pow10(e10)-v) < decimalPrecision {
// Adjust m to be on par with Prometheus 'le' buckets (aka 'less or equal')
return bucketsCount - 1, bucketSize - 1
}
return -1, 2
}
m := ((v / math.Pow10(e10)) - 1) * decimalMultiplier
offset := int(m)
if offset < 0 {
offset = 0
} else if offset >= bucketSize {
offset = bucketSize - 1
}
if math.Abs(float64(offset)-m) < decimalPrecision {
// Adjust offset to be on par with Prometheus 'le' buckets (aka 'less or equal')
offset--
if offset < 0 {
bucketIdx--
offset = bucketSize - 1
if bucketIdx < 0 {
return -1, 1
}
}
}
return bucketIdx, uint(offset)
}
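The removed getBucketIdxAndOffset above is the old log-linear index computation; the new Update derives the index directly from log10(v). Below is a minimal standalone sketch of the new index math (an illustration using the constants from the diff; the zero/lower/upper special cases are omitted), including the 10^n edge case that keeps exact powers of ten in the lower bucket, consistent with the le="1.000e+02" fixture in the test changes.

package main

import (
	"fmt"
	"math"
)

// Sketch of the new bucket-index computation from Update/updateLocked.
func main() {
	const (
		e10Min            = -9
		bucketsPerDecimal = 18
	)
	for _, v := range []float64{99, 100, 101} { // values inside the normal bucket range
		bucketIdx := (math.Log10(v) - e10Min) * bucketsPerDecimal
		idx := uint(bucketIdx)
		if bucketIdx == float64(idx) {
			// Exact powers of 10 go to the lower bucket, per the
			// Prometheus `le` (less-or-equal) convention noted in the diff.
			idx--
		}
		fmt.Printf("v=%v -> decimalBucketIdx=%d offset=%d\n",
			v, idx/bucketsPerDecimal, idx%bucketsPerDecimal)
	}
}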

vendor/modules.txt

@@ -14,7 +14,7 @@ github.com/VictoriaMetrics/fastcache
github.com/VictoriaMetrics/fasthttp
github.com/VictoriaMetrics/fasthttp/fasthttputil
github.com/VictoriaMetrics/fasthttp/stackless
# github.com/VictoriaMetrics/metrics v1.13.1
# github.com/VictoriaMetrics/metrics v1.14.0
github.com/VictoriaMetrics/metrics
# github.com/VictoriaMetrics/metricsql v0.10.1
github.com/VictoriaMetrics/metricsql