app/vmselect: de-duplicate data exported via /api/v1/export/csv by default

Previously the exported data wasn't de-duplicated.
Now it is possible to export the raw data without de-duplication
by passing the `reduce_mem_usage=1` query arg to `/api/v1/export/csv`.

See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1837
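
For illustration, a hedged sketch of how the two export modes could be requested from Go. The listen address, series selector, and CSV field list below are assumptions made for the example, not values taken from this commit; see the VictoriaMetrics docs on CSV export for the exact `format` and `match[]` query args.

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

// exportCSV requests /api/v1/export/csv from a VictoriaMetrics instance.
// Setting rawData to true adds reduce_mem_usage=1, which skips the default
// de-duplication and streams the raw data instead.
func exportCSV(baseURL, match, format string, rawData bool) (string, error) {
	q := url.Values{}
	q.Set("match[]", match)
	q.Set("format", format)
	if rawData {
		q.Set("reduce_mem_usage", "1")
	}
	resp, err := http.Get(baseURL + "/api/v1/export/csv?" + q.Encode())
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	return string(body), nil
}

func main() {
	// http://localhost:8428 and the selector/field list are assumed values.
	csv, err := exportCSV("http://localhost:8428", `up`,
		"__name__,__value__,__timestamp__:unix_s", true)
	if err != nil {
		panic(err)
	}
	fmt.Println(csv)
}
```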
Aliaksandr Valialkin 2021-12-17 10:56:03 +02:00
parent f30ed13155
commit 193331d522
No known key found for this signature in database
GPG Key ID: A72BEC6CD3D0DED1
5 changed files with 58 additions and 28 deletions


@@ -837,7 +837,7 @@ unix timestamp in seconds or [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) val
 The exported CSV data can be imported to VictoriaMetrics via [/api/v1/import/csv](#how-to-import-csv-data).
-The [deduplication](#deduplication) isn't applied for the data exported in CSV. It is expected that the de-duplication is performed during data import.
+The [deduplication](#deduplication) is applied for the data exported in CSV by default. It is possible to export raw data without de-duplication by passing `reduce_mem_usage=1` query arg to `/api/v1/export/csv`.
 
 ### How to export data in native format
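
For context, the de-duplication mentioned above refers to VictoriaMetrics keeping a single raw sample per `-dedup.minScrapeInterval`-aligned interval. The snippet below is a simplified sketch of that idea for already-sorted samples; it is an illustration only, not the storage-level implementation used by VictoriaMetrics.

```go
package main

import "fmt"

// deduplicate keeps a single sample (the last one) per interval-sized bucket.
// Simplified illustration only; timestamps are assumed to be sorted ascending
// and expressed in the same unit as interval (e.g. milliseconds).
func deduplicate(timestamps []int64, values []float64, interval int64) ([]int64, []float64) {
	if interval <= 0 || len(timestamps) == 0 {
		return timestamps, values
	}
	dstTS := make([]int64, 0, len(timestamps))
	dstVals := make([]float64, 0, len(values))
	for i, ts := range timestamps {
		bucket := ts - ts%interval
		if n := len(dstTS); n > 0 && dstTS[n-1]-dstTS[n-1]%interval == bucket {
			// Same bucket as the previously kept sample: keep only the latest one.
			dstTS[n-1] = ts
			dstVals[n-1] = values[i]
			continue
		}
		dstTS = append(dstTS, ts)
		dstVals = append(dstVals, values[i])
	}
	return dstTS, dstVals
}

func main() {
	ts := []int64{1000, 1500, 2000, 2300, 3100}
	vals := []float64{1, 2, 3, 4, 5}
	// With a 1000ms interval only the last sample in each bucket survives.
	dts, dvals := deduplicate(ts, vals, 1000)
	fmt.Println(dts, dvals) // [1500 2300 3100] [2 4 5]
}
```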


@@ -129,6 +129,7 @@ func ExportCSVHandler(startTime time.Time, w http.ResponseWriter, r *http.Reques
 	if err != nil {
 		return err
 	}
+	reduceMemUsage := searchutils.GetBool(r, "reduce_mem_usage")
 	deadline := searchutils.GetDeadlineForExport(r, startTime)
 	tagFilterss, err := getTagFilterssFromRequest(r)
 	if err != nil {
@@ -140,30 +141,58 @@ func ExportCSVHandler(startTime time.Time, w http.ResponseWriter, r *http.Reques
 	defer bufferedwriter.Put(bw)
 	resultsCh := make(chan *quicktemplate.ByteBuffer, cgroup.AvailableCPUs())
-	doneCh := make(chan error)
-	go func() {
-		err := netstorage.ExportBlocks(sq, deadline, func(mn *storage.MetricName, b *storage.Block, tr storage.TimeRange) error {
-			if err := bw.Error(); err != nil {
-				return err
-			}
-			if err := b.UnmarshalData(); err != nil {
-				return fmt.Errorf("cannot unmarshal block during export: %s", err)
-			}
-			xb := exportBlockPool.Get().(*exportBlock)
-			xb.mn = mn
-			xb.timestamps, xb.values = b.AppendRowsWithTimeRangeFilter(xb.timestamps[:0], xb.values[:0], tr)
-			if len(xb.timestamps) > 0 {
-				bb := quicktemplate.AcquireByteBuffer()
-				WriteExportCSVLine(bb, xb, fieldNames)
-				resultsCh <- bb
-			}
-			xb.reset()
-			exportBlockPool.Put(xb)
-			return nil
-		})
-		close(resultsCh)
-		doneCh <- err
-	}()
+	writeCSVLine := func(xb *exportBlock) {
+		if len(xb.timestamps) == 0 {
+			return
+		}
+		bb := quicktemplate.AcquireByteBuffer()
+		WriteExportCSVLine(bb, xb, fieldNames)
+		resultsCh <- bb
+	}
+	doneCh := make(chan error, 1)
+	if !reduceMemUsage {
+		rss, err := netstorage.ProcessSearchQuery(sq, true, deadline)
+		if err != nil {
+			return fmt.Errorf("cannot fetch data for %q: %w", sq, err)
+		}
+		go func() {
+			err := rss.RunParallel(func(rs *netstorage.Result, workerID uint) error {
+				if err := bw.Error(); err != nil {
+					return err
+				}
+				xb := exportBlockPool.Get().(*exportBlock)
+				xb.mn = &rs.MetricName
+				xb.timestamps = rs.Timestamps
+				xb.values = rs.Values
+				writeCSVLine(xb)
+				xb.reset()
+				exportBlockPool.Put(xb)
+				return nil
+			})
+			close(resultsCh)
+			doneCh <- err
+		}()
+	} else {
+		go func() {
+			err := netstorage.ExportBlocks(sq, deadline, func(mn *storage.MetricName, b *storage.Block, tr storage.TimeRange) error {
+				if err := bw.Error(); err != nil {
+					return err
+				}
+				if err := b.UnmarshalData(); err != nil {
+					return fmt.Errorf("cannot unmarshal block during export: %s", err)
+				}
+				xb := exportBlockPool.Get().(*exportBlock)
+				xb.mn = mn
+				xb.timestamps, xb.values = b.AppendRowsWithTimeRangeFilter(xb.timestamps[:0], xb.values[:0], tr)
+				writeCSVLine(xb)
+				xb.reset()
+				exportBlockPool.Put(xb)
+				return nil
+			})
+			close(resultsCh)
+			doneCh <- err
+		}()
+	}
 	// Consume all the data from resultsCh.
 	for bb := range resultsCh {
 		// Do not check for error in bw.Write, since this error is checked inside netstorage.ExportBlocks above.
@@ -360,7 +389,7 @@ func exportHandler(w http.ResponseWriter, matches []string, etfs [][]storage.Tag
 	defer bufferedwriter.Put(bw)
 	resultsCh := make(chan *quicktemplate.ByteBuffer, cgroup.AvailableCPUs())
-	doneCh := make(chan error)
+	doneCh := make(chan error, 1)
 	if !reduceMemUsage {
 		rss, err := netstorage.ProcessSearchQuery(sq, true, deadline)
 		if err != nil {
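
Note that in both `ExportCSVHandler` and `exportHandler` above, `doneCh` is now created with a buffer of 1. A buffered done channel lets the producer goroutine deliver its final error and exit without having to wait for a reader, which is the usual motivation for this pattern. Below is a minimal standalone sketch of that idea; the identifiers are illustrative, not taken from the handlers.

```go
package main

import (
	"errors"
	"fmt"
)

// produce sends a few results and then reports its final status on done.
func produce(results chan<- int, done chan<- error) {
	for i := 0; i < 3; i++ {
		results <- i
	}
	close(results)
	// With cap(done) == 1 this send never blocks, so the goroutine
	// cannot leak even if nobody ends up reading done.
	done <- errors.New("producer finished")
}

func main() {
	results := make(chan int, 2)
	done := make(chan error, 1) // buffered, as in the handlers above
	go produce(results, done)
	for v := range results {
		fmt.Println("got", v)
	}
	fmt.Println(<-done)
}
```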


@@ -24,6 +24,7 @@ sort: 15
 * BUGFIX: [vmui](https://docs.victoriametrics.com/#vmui): fix navigation over query history with `Ctrl+up/down` and fix zoom relatively to the cursor position. See [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/1936).
 * BUGFIX: deduplicate samples more thoroughly if [deduplication](https://docs.victoriametrics.com/#deduplication) is enabled. Previously some duplicate samples may be left on disk for time series with high churn rate. This may result in bigger storage space requirements.
 * BUGFIX: [vmagent](https://docs.victoriametrics.com/vmagent.html): follow up to 5 redirects when `follow_redirects: true` is set for a particular scrape config. Previously only a single redirect was performed in this case. It is expected these redirects are performed to the original hostname. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1945).
+* BUGFIX: de-duplicate data exported via [/api/v1/export/csv](https://docs.victoriametrics.com/#how-to-export-csv-data) by default if [deduplication](https://docs.victoriametrics.com/#deduplication) is enabled. The de-duplication can be disabled by passing `reduce_mem_usage=1` query arg to `/api/v1/export/csv`. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/1837).
 
 ## [v1.70.0](https://github.com/VictoriaMetrics/VictoriaMetrics/releases/tag/v1.70.0)


@@ -837,7 +837,7 @@ unix timestamp in seconds or [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) val
 The exported CSV data can be imported to VictoriaMetrics via [/api/v1/import/csv](#how-to-import-csv-data).
-The [deduplication](#deduplication) isn't applied for the data exported in CSV. It is expected that the de-duplication is performed during data import.
+The [deduplication](#deduplication) is applied for the data exported in CSV by default. It is possible to export raw data without de-duplication by passing `reduce_mem_usage=1` query arg to `/api/v1/export/csv`.
 
 ### How to export data in native format


@@ -841,7 +841,7 @@ unix timestamp in seconds or [RFC3339](https://www.ietf.org/rfc/rfc3339.txt) val
 The exported CSV data can be imported to VictoriaMetrics via [/api/v1/import/csv](#how-to-import-csv-data).
-The [deduplication](#deduplication) isn't applied for the data exported in CSV. It is expected that the de-duplication is performed during data import.
+The [deduplication](#deduplication) is applied for the data exported in CSV by default. It is possible to export raw data without de-duplication by passing `reduce_mem_usage=1` query arg to `/api/v1/export/csv`.
 
 ### How to export data in native format