chore: remove redundant words (#6348)

(cherry picked from commit 9289c7512d)
This commit is contained in:
yumeiyin 2024-05-29 20:08:38 +08:00 committed by hagen1778
parent 189af53142
commit 95b8cf76f8
No known key found for this signature in database
GPG Key ID: 3BF75F3741CA9640
3 changed files with 3 additions and 3 deletions

View File

@@ -34,7 +34,7 @@ func testPushWriteRequest(t *testing.T, rowsCount, expectedBlockLenProm, expecte
return true
}
if !tryPushWriteRequest(wr, pushBlock, isVMRemoteWrite) {
-		t.Fatalf("cannot push data to to remote storage")
+		t.Fatalf("cannot push data to remote storage")
}
if math.Abs(float64(pushBlockLen-expectedBlockLen)/float64(expectedBlockLen)*100) > tolerancePrc {
t.Fatalf("unexpected block len for rowsCount=%d, isVMRemoteWrite=%v; got %d bytes; expecting %d bytes +- %.0f%%",

View File

@@ -26,7 +26,7 @@ type Retention struct {
FirstOrder string
SecondOrder string
AggTime string
-	// The actual ranges will will attempt to query (as offsets from now)
+	// The actual ranges will attempt to query (as offsets from now)
QueryRanges []TimeRange
}

View File

@@ -1073,7 +1073,7 @@ This may lead to the following issues:
since they ignore the first sample in a new time series.
- Unexpected spikes for [total](#total) and [increase](#increase) outputs, since they assume that new time series start from 0.
-These issues can be be fixed in the following ways:
+These issues can be fixed in the following ways:
- By increasing the `interval` option at [stream aggregation config](#stream-aggregation-config), so it covers the expected
delays in data ingestion pipelines.