Originally implemented here: https://github.com/VictoriaMetrics/VictoriaMetrics/pull/5797

---------

Signed-off-by: hagen1778 <roman@victoriametrics.com>
Co-authored-by: khushijain21 <khushij393@gmail.com>
This commit is contained in:
parent 4034d081f4
commit bb1279bfc4
```diff
@@ -123,15 +123,20 @@ var (
 )
 
 const (
-	otsdbAddr        = "otsdb-addr"
-	otsdbConcurrency = "otsdb-concurrency"
-	otsdbQueryLimit  = "otsdb-query-limit"
-	otsdbOffsetDays  = "otsdb-offset-days"
-	otsdbHardTSStart = "otsdb-hard-ts-start"
-	otsdbRetentions  = "otsdb-retentions"
-	otsdbFilters     = "otsdb-filters"
-	otsdbNormalize   = "otsdb-normalize"
-	otsdbMsecsTime   = "otsdb-msecstime"
+	otsdbAddr               = "otsdb-addr"
+	otsdbConcurrency        = "otsdb-concurrency"
+	otsdbQueryLimit         = "otsdb-query-limit"
+	otsdbOffsetDays         = "otsdb-offset-days"
+	otsdbHardTSStart        = "otsdb-hard-ts-start"
+	otsdbRetentions         = "otsdb-retentions"
+	otsdbFilters            = "otsdb-filters"
+	otsdbNormalize          = "otsdb-normalize"
+	otsdbMsecsTime          = "otsdb-msecstime"
+	otsdbCertFile           = "otsdb-cert-file"
+	otsdbKeyFile            = "otsdb-key-file"
+	otsdbCAFile             = "otsdb-CA-file"
+	otsdbServerName         = "otsdb-server-name"
+	otsdbInsecureSkipVerify = "otsdb-insecure-skip-verify"
 )
 
 var (
```
```diff
@@ -191,6 +196,27 @@ var (
 			Value: false,
 			Usage: "Whether to normalize all data received to lower case before forwarding to VictoriaMetrics",
 		},
+		&cli.StringFlag{
+			Name:  otsdbCertFile,
+			Usage: "Optional path to client-side TLS certificate file to use when connecting to -otsdb-addr",
+		},
+		&cli.StringFlag{
+			Name:  otsdbKeyFile,
+			Usage: "Optional path to client-side TLS key to use when connecting to -otsdb-addr",
+		},
+		&cli.StringFlag{
+			Name:  otsdbCAFile,
+			Usage: "Optional path to TLS CA file to use for verifying connections to -otsdb-addr. By default, system CA is used",
+		},
+		&cli.StringFlag{
+			Name:  otsdbServerName,
+			Usage: "Optional TLS server name to use for connections to -otsdb-addr. By default, the server name from otsdbAddr is used",
+		},
+		&cli.BoolFlag{
+			Name:  otsdbInsecureSkipVerify,
+			Usage: "Whether to skip tls verification when connecting to -otsdb-addr",
+			Value: false,
+		},
 	}
 )
 
```
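Taken together, the new flags mirror the client-side TLS options vmctl already exposes for InfluxDB and Remote Read migrations. A hypothetical invocation using them might look like the following; the endpoint, certificate paths and retention string are illustrative placeholders, not values from this commit:

```sh
./vmctl opentsdb \
  --otsdb-addr=https://opentsdb.example.com:4242 \
  --otsdb-retentions=sum-1m-avg:1h:30d \
  --otsdb-cert-file=/etc/vmctl/client.crt \
  --otsdb-key-file=/etc/vmctl/client.key \
  --otsdb-CA-file=/etc/vmctl/ca.crt \
  --otsdb-server-name=opentsdb.example.com
```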
```diff
@@ -50,8 +50,20 @@ func main() {
 			Action: func(c *cli.Context) error {
 				fmt.Println("OpenTSDB import mode")
 
+				// create Transport with given TLS config
+				certFile := c.String(otsdbCertFile)
+				keyFile := c.String(otsdbKeyFile)
+				caFile := c.String(otsdbCAFile)
+				serverName := c.String(otsdbServerName)
+				insecureSkipVerify := c.Bool(otsdbInsecureSkipVerify)
+				addr := c.String(otsdbAddr)
+
+				tr, err := httputils.Transport(addr, certFile, caFile, keyFile, serverName, insecureSkipVerify)
+				if err != nil {
+					return fmt.Errorf("failed to create Transport: %s", err)
+				}
 				oCfg := opentsdb.Config{
-					Addr:      c.String(otsdbAddr),
+					Addr:      addr,
 					Limit:     c.Int(otsdbQueryLimit),
 					Offset:    c.Int64(otsdbOffsetDays),
 					HardTS:    c.Int64(otsdbHardTSStart),
```
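The `httputils.Transport` helper called above is not part of this diff. As a rough sketch of what such a helper typically does — load an optional client certificate and CA bundle into a `tls.Config` and wrap it in an `*http.Transport` — consider the following; the function name, the omission of the `addr` parameter, and all internals are assumptions, not the repository's actual implementation:

```go
package transportsketch

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"net/http"
	"os"
)

// newTransport is a hypothetical stand-in for a helper like
// httputils.Transport. It loads an optional client certificate and
// an optional CA bundle, then wraps the resulting tls.Config in an
// *http.Transport suitable for use with http.Client.
func newTransport(certFile, keyFile, caFile, serverName string, insecureSkipVerify bool) (*http.Transport, error) {
	tlsCfg := &tls.Config{
		ServerName:         serverName,
		InsecureSkipVerify: insecureSkipVerify,
	}
	if certFile != "" {
		// Client-side certificate and key for mutual TLS.
		cert, err := tls.LoadX509KeyPair(certFile, keyFile)
		if err != nil {
			return nil, fmt.Errorf("cannot load client certificate from %q and %q: %w", certFile, keyFile, err)
		}
		tlsCfg.Certificates = []tls.Certificate{cert}
	}
	if caFile != "" {
		// Custom CA bundle; when omitted, the system CA pool is used,
		// matching the -otsdb-CA-file flag description above.
		pem, err := os.ReadFile(caFile)
		if err != nil {
			return nil, fmt.Errorf("cannot read CA file %q: %w", caFile, err)
		}
		pool := x509.NewCertPool()
		if !pool.AppendCertsFromPEM(pem) {
			return nil, fmt.Errorf("cannot parse certificates from CA file %q", caFile)
		}
		tlsCfg.RootCAs = pool
	}
	return &http.Transport{TLSClientConfig: tlsCfg}, nil
}
```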
```diff
@@ -59,6 +71,7 @@ func main() {
 					Filters:   c.StringSlice(otsdbFilters),
 					Normalize: c.Bool(otsdbNormalize),
 					MsecsTime: c.Bool(otsdbMsecsTime),
+					Transport: tr,
 				}
 				otsdbClient, err := opentsdb.NewClient(oCfg)
 				if err != nil {
```
```diff
@@ -47,6 +47,8 @@ type Client struct {
 	Normalize bool
 	HardTS    int64
 	MsecsTime bool
+
+	c *http.Client
 }
 
 // Config contains fields required
```
```diff
@@ -60,6 +62,7 @@ type Config struct {
 	Filters   []string
 	Normalize bool
 	MsecsTime bool
+	Transport *http.Transport
 }
 
 // TimeRange contains data about time ranges to query
```
```diff
@@ -107,7 +110,8 @@ type Metric struct {
 // FindMetrics discovers all metrics that OpenTSDB knows about (given a filter)
 // e.g. /api/suggest?type=metrics&q=system&max=100000
 func (c Client) FindMetrics(q string) ([]string, error) {
-	resp, err := http.Get(q)
+
+	resp, err := c.c.Get(q)
 	if err != nil {
 		return nil, fmt.Errorf("failed to send GET request to %q: %s", q, err)
 	}
```
```diff
@@ -131,7 +135,7 @@ func (c Client) FindMetrics(q string) ([]string, error) {
 // e.g. /api/search/lookup?m=system.load5&limit=1000000
 func (c Client) FindSeries(metric string) ([]Meta, error) {
 	q := fmt.Sprintf("%s/api/search/lookup?m=%s&limit=%d", c.Addr, metric, c.Limit)
-	resp, err := http.Get(q)
+	resp, err := c.c.Get(q)
 	if err != nil {
 		return nil, fmt.Errorf("failed to set GET request to %q: %s", q, err)
 	}
```
```diff
@@ -184,7 +188,7 @@ func (c Client) GetData(series Meta, rt RetentionMeta, start int64, end int64, m
 		series.Metric, tagStr)
 
 	q := fmt.Sprintf("%s/api/query?%s", c.Addr, queryStr)
-	resp, err := http.Get(q)
+	resp, err := c.c.Get(q)
 	if err != nil {
 		return Metric{}, fmt.Errorf("failed to send GET request to %q: %s", q, err)
 	}
```
```diff
@@ -325,6 +329,7 @@ func NewClient(cfg Config) (*Client, error) {
 		Normalize: cfg.Normalize,
 		HardTS:    cfg.HardTS,
 		MsecsTime: cfg.MsecsTime,
+		c:         &http.Client{Transport: cfg.Transport},
 	}
 	return client, nil
 }
```
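With `Config.Transport` and the private `http.Client` in place, a TLS-aware OpenTSDB client follows directly from the types shown above. A minimal sketch with placeholder config values; the import path below is a guess at the in-repo package (it is internal to the repository, not importable from outside), and `http.DefaultTransport` stands in for a transport built by the TLS helper:

```go
package main

import (
	"fmt"
	"log"
	"net/http"

	// Assumed in-repo import path for vmctl's OpenTSDB package.
	"github.com/VictoriaMetrics/VictoriaMetrics/app/vmctl/opentsdb"
)

func main() {
	// A real caller would pass the *http.Transport produced by the
	// TLS-aware helper; the default transport stands in here.
	tr := http.DefaultTransport.(*http.Transport)

	cfg := opentsdb.Config{
		Addr:      "https://opentsdb.example.com:4242", // placeholder endpoint
		Limit:     1000,                                // placeholder query limit
		Filters:   []string{"system"},                  // placeholder filter
		Normalize: true,
		MsecsTime: false,
		Transport: tr,
	}
	client, err := opentsdb.NewClient(cfg)
	if err != nil {
		log.Fatalf("failed to create OpenTSDB client: %s", err)
	}

	// FindMetrics takes a full suggest-API URL, per the doc comment above.
	q := fmt.Sprintf("%s/api/suggest?type=metrics&q=system&max=100000", cfg.Addr)
	metrics, err := client.FindMetrics(q)
	if err != nil {
		log.Fatalf("failed to list metrics: %s", err)
	}
	fmt.Println("discovered metrics:", metrics)
}
```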
```diff
@@ -34,8 +34,7 @@ See also [LTS releases](https://docs.victoriametrics.com/LTS-releases.html).
 * FEATURE: [Single-node VictoriaMetrics](https://docs.victoriametrics.com/) and `vmstorage` in [VictoriaMetrics cluster](https://docs.victoriametrics.com/cluster-victoriametrics/): expose `vm_last_partition_parts` [metrics](https://docs.victoriametrics.com/#monitoring), which show the number of [parts in the latest partition](https://docs.victoriametrics.com/#storage). These metrics may help debugging query performance slowdown related to the increased number of parts in the last partition, since usually all the ingested data is written to the last partition and all the queries are performed over the recently ingested data, e.g. the last partition.
 * FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): add support for `client_id` option into [kuma_sd_configs](https://docs.victoriametrics.com/sd_configs/#kuma_sd_configs) in the same way as Prometheus does. See [this pull request](https://github.com/prometheus/prometheus/pull/13278).
 * FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): add support for `enable_compression` option in [scrape_configs](https://docs.victoriametrics.com/sd_configs/#scrape_configs) in order to be compatible with Prometheus scrape configs. See [this pull request](https://github.com/prometheus/prometheus/pull/13166) and [this feature request](https://github.com/prometheus/prometheus/issues/12319). Note that `vmagent` was always supporting [`disable_compression` option](https://docs.victoriametrics.com/vmagent/#scrape_config-enhancements) before Prometheus added `enable_compression` option.
-* FEATURE: [vmctl](https://docs.victoriametrics.com/vmctl.html): support client-side TLS configuration for [InfluxDB](https://docs.victoriametrics.com/vmctl/#migrating-data-from-influxdb-1x). See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5748). Thanks to @khushijain21 for [the pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/5783).
-* FEATURE: [vmctl](https://docs.victoriametrics.com/vmctl.html): support client-side TLS configuration for [Remote Read protocol](https://docs.victoriametrics.com/vmctl/#migrating-data-by-remote-read-protocol). See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5748). Thanks to @khushijain21 for [the pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/5798).
+* FEATURE: [vmctl](https://docs.victoriametrics.com/vmctl.html): support client-side TLS configuration for [InfluxDB](https://docs.victoriametrics.com/vmctl/#migrating-data-from-influxdb-1x), [Remote Read protocol](https://docs.victoriametrics.com/vmctl/#migrating-data-by-remote-read-protocol) and [OpenTSDB](https://docs.victoriametrics.com/vmctl/#migrating-data-from-opentsdb). See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5748). Thanks to @khushijain21 for pull requests [1](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/5783), [2](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/5798), [3](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/5797).
 * FEATURE: [vmui](https://docs.victoriametrics.com/#vmui): preserve [`WITH` templates](https://play.victoriametrics.com/select/accounting/1/6a716b0f-38bc-4856-90ce-448fd713e3fe/expand-with-exprs) when clicking the `prettify query` button at the right side of query input field. See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/5383).
 * FEATURE: [vmalert](https://docs.victoriametrics.com/#vmalert): support filtering by group, rule or labels in [vmalert's UI](https://docs.victoriametrics.com/vmalert/#web) for `/groups` and `/alerts` pages. See [the pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/5791) by @victoramsantos.
```
````diff
@@ -207,6 +207,14 @@ http://opentsdb:4242/api/query?start=721h-ago&end=720h-ago&m=sum:1m-avg-none:<series>
 
 Chunking the data like this means each individual query returns faster, so we can start populating data into VictoriaMetrics quicker.
 
+### Configuration
+
+Run the following command to get all configuration options:
+
+```sh
+./vmctl opentsdb --help
+```
+
 ### Restarting OpenTSDB migrations
 
 One important note for OpenTSDB migration: Queries/HBase scans can "get stuck" within OpenTSDB itself. This can cause instability and performance issues within an OpenTSDB cluster, so stopping the migrator to deal with it may be necessary. Because of this, we provide the timestamp we started collecting data from at the beginning of the run. You can stop and restart the importer using this "hard timestamp" to ensure you collect data from the same time range over multiple runs.
````
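For example, a restart that pins the original start time could look like the following; the address, retention string and timestamp are placeholders, and whether the timestamp is in seconds or milliseconds depends on the `-otsdb-msecstime` setting:

```sh
# First run: vmctl prints the hard timestamp it started collecting from.
./vmctl opentsdb --otsdb-addr=http://opentsdb:4242 --otsdb-retentions=sum-1m-avg:1h:30d

# Restarted run: pin the same time range with the printed timestamp.
./vmctl opentsdb --otsdb-addr=http://opentsdb:4242 --otsdb-retentions=sum-1m-avg:1h:30d \
  --otsdb-hard-ts-start=1704067200
```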