diff --git a/README.md b/README.md index e4ba31a4c..63eb1e071 100644 --- a/README.md +++ b/README.md @@ -86,6 +86,7 @@ VictoriaMetrics has the following prominent features: * [Arbitrary CSV data](#how-to-import-csv-data). * [Native binary format](#how-to-import-data-in-native-format). * [DataDog agent or DogStatsD](#how-to-send-data-from-datadog-agent). + * [OpenTelemetry metrics format](#sending-data-via-opentelemetry). * It supports powerful [stream aggregation](https://docs.victoriametrics.com/stream-aggregation.html), which can be used as a [statsd](https://github.com/statsd/statsd) alternative. * It supports metrics [relabeling](#relabeling). * It can deal with [high cardinality issues](https://docs.victoriametrics.com/FAQ.html#what-is-high-cardinality) and @@ -1173,6 +1174,7 @@ Additionally, VictoriaMetrics can accept metrics via the following popular data * DataDog `submit metrics` API. See [these docs](#how-to-send-data-from-datadog-agent) for details. * InfluxDB line protocol. See [these docs](#how-to-send-data-from-influxdb-compatible-agents-such-as-telegraf) for details. * Graphite plaintext protocol. See [these docs](#how-to-send-data-from-graphite-compatible-agents-such-as-statsd) for details. +* OpenTelemetry http API. See [these docs](#sending-data-via-opentelemetry) for details. * OpenTSDB telnet put protocol. See [these docs](#sending-data-via-telnet-put-protocol) for details. * OpenTSDB http `/api/put` protocol. See [these docs](#sending-opentsdb-data-via-http-apiput-requests) for details. * `/api/v1/import` for importing data obtained from [/api/v1/export](#how-to-export-data-in-json-line-format). @@ -1356,6 +1358,13 @@ Note that it could be required to flush response cache after importing historica VictoriaMetrics also may scrape Prometheus targets - see [these docs](#how-to-scrape-prometheus-exporters-such-as-node-exporter). +## Sending data via OpenTelemetry + +VictoriaMetrics supports data ingestion via [OpenTelemetry protocol for metrics](https://github.com/open-telemetry/opentelemetry-specification/blob/ffddc289462dfe0c2041e3ca42a7b1df805706de/specification/metrics/data-model.md) at the `/opentelemetry/api/v1/push` path. + +VictoriaMetrics expects `protobuf`-encoded requests at `/opentelemetry/api/v1/push`. +Set HTTP request header `Content-Encoding: gzip` when sending gzip-compressed data to `/opentelemetry/api/v1/push`. + ## Relabeling VictoriaMetrics supports Prometheus-compatible relabeling for all the ingested metrics if `-relabelConfig` command-line flag points diff --git a/app/vmagent/README.md b/app/vmagent/README.md index 7f2498ce4..400908205 100644 --- a/app/vmagent/README.md +++ b/app/vmagent/README.md @@ -93,6 +93,7 @@ additionally to pull-based Prometheus-compatible targets' scraping: * DataDog "submit metrics" API. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-datadog-agent). * InfluxDB line protocol via `http://:8429/write`. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-influxdb-compatible-agents-such-as-telegraf). * Graphite plaintext protocol if `-graphiteListenAddr` command-line flag is set. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-graphite-compatible-agents-such-as-statsd). +* OpenTelemetry http API. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#sending-data-via-opentelemetry).
* OpenTSDB telnet and http protocols if `-opentsdbListenAddr` command-line flag is set. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-opentsdb-compatible-agents). * Prometheus remote write protocol via `http://:8429/api/v1/write`. * JSON lines import protocol via `http://:8429/api/v1/import`. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-import-data-in-json-line-format). diff --git a/app/vmagent/main.go b/app/vmagent/main.go index a0db615b8..51fc5cf6b 100644 --- a/app/vmagent/main.go +++ b/app/vmagent/main.go @@ -16,6 +16,7 @@ import ( "github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/graphite" "github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/influx" "github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/native" + "github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/opentelemetry" "github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/opentsdb" "github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/opentsdbhttp" "github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/prometheusimport" @@ -308,6 +309,15 @@ func requestHandler(w http.ResponseWriter, r *http.Request) bool { influxQueryRequests.Inc() influxutils.WriteDatabaseNames(w) return true + case "/opentelemetry/api/v1/push": + opentelemetryPushRequests.Inc() + if err := opentelemetry.InsertHandler(nil, r); err != nil { + opentelemetryPushErrors.Inc() + httpserver.Errorf(w, r, "%s", err) + return true + } + w.WriteHeader(http.StatusOK) + return true case "/datadog/api/v1/series": datadogWriteRequests.Inc() if err := datadog.InsertHandlerForHTTP(nil, r); err != nil { @@ -499,6 +509,15 @@ func processMultitenantRequest(w http.ResponseWriter, r *http.Request, path stri influxQueryRequests.Inc() influxutils.WriteDatabaseNames(w) return true + case "opentelemetry/api/v1/push": + opentelemetryPushRequests.Inc() + if err := opentelemetry.InsertHandler(at, r); err != nil { + opentelemetryPushErrors.Inc() + httpserver.Errorf(w, r, "%s", err) + return true + } + w.WriteHeader(http.StatusOK) + return true case "datadog/api/v1/series": datadogWriteRequests.Inc() if err := datadog.InsertHandlerForHTTP(at, r); err != nil { @@ -568,6 +587,9 @@ var ( datadogIntakeRequests = metrics.NewCounter(`vmagent_http_requests_total{path="/datadog/intake", protocol="datadog"}`) datadogMetadataRequests = metrics.NewCounter(`vmagent_http_requests_total{path="/datadog/api/v1/metadata", protocol="datadog"}`) + opentelemetryPushRequests = metrics.NewCounter(`vmagent_http_requests_total{path="/opentelemetry/api/v1/push", protocol="opentelemetry"}`) + opentelemetryPushErrors = metrics.NewCounter(`vmagent_http_request_errors_total{path="/opentelemetry/api/v1/push", protocol="opentelemetry"}`) + promscrapeTargetsRequests = metrics.NewCounter(`vmagent_http_requests_total{path="/targets"}`) promscrapeServiceDiscoveryRequests = metrics.NewCounter(`vmagent_http_requests_total{path="/service-discovery"}`) diff --git a/app/vmagent/opentelemetry/request_handler.go b/app/vmagent/opentelemetry/request_handler.go new file mode 100644 index 000000000..047978149 --- /dev/null +++ b/app/vmagent/opentelemetry/request_handler.go @@ -0,0 +1,65 @@ +package opentelemetry + +import ( + "net/http" + + "github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/common" + "github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/remotewrite" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/auth" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal" + parserCommon 
"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentelemetry/stream" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/tenantmetrics" + "github.com/VictoriaMetrics/metrics" +) + +var ( + rowsInserted = metrics.NewCounter(`vmagent_rows_inserted_total{type="opentelemetry"}`) + rowsTenantInserted = tenantmetrics.NewCounterMap(`vmagent_tenant_inserted_rows_total{type="opentelemetry"}`) + rowsPerInsert = metrics.NewHistogram(`vmagent_rows_per_insert{type="opentelemetry"}`) +) + +// InsertHandler processes opentelemetry metrics. +func InsertHandler(at *auth.Token, req *http.Request) error { + extraLabels, err := parserCommon.GetExtraLabels(req) + if err != nil { + return err + } + isGzipped := req.Header.Get("Content-Encoding") == "gzip" + return stream.ParseStream(req.Body, isGzipped, func(tss []prompbmarshal.TimeSeries) error { + return insertRows(at, tss, extraLabels) + }) +} + +func insertRows(at *auth.Token, tss []prompbmarshal.TimeSeries, extraLabels []prompbmarshal.Label) error { + ctx := common.GetPushCtx() + defer common.PutPushCtx(ctx) + + rowsTotal := 0 + tssDst := ctx.WriteRequest.Timeseries[:0] + labels := ctx.Labels[:0] + samples := ctx.Samples[:0] + for i := range tss { + ts := &tss[i] + rowsTotal += len(ts.Samples) + labelsLen := len(labels) + labels = append(labels, ts.Labels...) + labels = append(labels, extraLabels...) + samplesLen := len(samples) + samples = append(samples, ts.Samples...) + tssDst = append(tssDst, prompbmarshal.TimeSeries{ + Labels: labels[labelsLen:], + Samples: samples[samplesLen:], + }) + } + ctx.WriteRequest.Timeseries = tssDst + ctx.Labels = labels + ctx.Samples = samples + remotewrite.Push(at, &ctx.WriteRequest) + rowsInserted.Add(rowsTotal) + if at != nil { + rowsTenantInserted.Get(at).Add(rowsTotal) + } + rowsPerInsert.Update(float64(rowsTotal)) + return nil +} diff --git a/app/vminsert/main.go b/app/vminsert/main.go index 39ee61158..ee86f0eb5 100644 --- a/app/vminsert/main.go +++ b/app/vminsert/main.go @@ -15,6 +15,7 @@ import ( "github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/graphite" "github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/influx" "github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/native" + "github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/opentelemetry" "github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/opentsdb" "github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/opentsdbhttp" "github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/prometheusimport" @@ -210,6 +211,15 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool { addInfluxResponseHeaders(w) influxutils.WriteDatabaseNames(w) return true + case "/opentelemetry/api/v1/push": + opentelemetryPushRequests.Inc() + if err := opentelemetry.InsertHandler(r); err != nil { + opentelemetryPushErrors.Inc() + httpserver.Errorf(w, r, "%s", err) + return true + } + w.WriteHeader(http.StatusOK) + return true case "/datadog/api/v1/series": datadogWriteRequests.Inc() if err := datadog.InsertHandlerForHTTP(r); err != nil { @@ -344,6 +354,9 @@ var ( datadogIntakeRequests = metrics.NewCounter(`vm_http_requests_total{path="/datadog/intake", protocol="datadog"}`) datadogMetadataRequests = metrics.NewCounter(`vm_http_requests_total{path="/datadog/api/v1/metadata", protocol="datadog"}`) + opentelemetryPushRequests = metrics.NewCounter(`vm_http_requests_total{path="/opentelemetry/api/v1/push", protocol="opentelemetry"}`) + opentelemetryPushErrors = 
metrics.NewCounter(`vm_http_request_errors_total{path="/opentelemetry/api/v1/push", protocol="opentelemetry"}`) + promscrapeTargetsRequests = metrics.NewCounter(`vm_http_requests_total{path="/targets"}`) promscrapeServiceDiscoveryRequests = metrics.NewCounter(`vm_http_requests_total{path="/service-discovery"}`) diff --git a/app/vminsert/opentelemetry/request_handler.go b/app/vminsert/opentelemetry/request_handler.go new file mode 100644 index 000000000..245b63f97 --- /dev/null +++ b/app/vminsert/opentelemetry/request_handler.go @@ -0,0 +1,74 @@ +package opentelemetry + +import ( + "net/http" + + "github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/common" + "github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/relabel" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal" + parserCommon "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentelemetry/stream" + "github.com/VictoriaMetrics/metrics" +) + +var ( + rowsInserted = metrics.NewCounter(`vm_rows_inserted_total{type="opentelemetry"}`) + rowsPerInsert = metrics.NewHistogram(`vm_rows_per_insert{type="opentelemetry"}`) +) + +// InsertHandler processes opentelemetry metrics. +func InsertHandler(req *http.Request) error { + extraLabels, err := parserCommon.GetExtraLabels(req) + if err != nil { + return err + } + isGzipped := req.Header.Get("Content-Encoding") == "gzip" + return stream.ParseStream(req.Body, isGzipped, func(tss []prompbmarshal.TimeSeries) error { + return insertRows(tss, extraLabels) + }) +} + +func insertRows(tss []prompbmarshal.TimeSeries, extraLabels []prompbmarshal.Label) error { + ctx := common.GetInsertCtx() + defer common.PutInsertCtx(ctx) + + rowsLen := 0 + for i := range tss { + rowsLen += len(tss[i].Samples) + } + ctx.Reset(rowsLen) + rowsTotal := 0 + hasRelabeling := relabel.HasRelabeling() + for i := range tss { + ts := &tss[i] + rowsTotal += len(ts.Samples) + ctx.Labels = ctx.Labels[:0] + for _, label := range ts.Labels { + ctx.AddLabel(label.Name, label.Value) + } + for _, label := range extraLabels { + ctx.AddLabel(label.Name, label.Value) + } + if hasRelabeling { + ctx.ApplyRelabeling() + } + if len(ctx.Labels) == 0 { + // Skip metric without labels. + continue + } + ctx.SortLabelsIfNeeded() + var metricNameRaw []byte + var err error + samples := ts.Samples + for i := range samples { + r := &samples[i] + metricNameRaw, err = ctx.WriteDataPointExt(metricNameRaw, ctx.Labels, r.Timestamp, r.Value) + if err != nil { + return err + } + } + } + rowsInserted.Add(rowsTotal) + rowsPerInsert.Update(float64(rowsTotal)) + return ctx.FlushBufs() +} diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md index 6635b7038..5cb1cc45f 100644 --- a/docs/CHANGELOG.md +++ b/docs/CHANGELOG.md @@ -49,6 +49,7 @@ The previous behavior can be restored in the following ways: - `WITH (f(window, step, off) = m[window:step] offset off) f(5m, 10s, 1h)` is automatically transformed to `m[5m:10s] offset 1h` Thanks to @lujiajing1126 for the initial idea and [implementation](https://github.com/VictoriaMetrics/metricsql/pull/13). See [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4025). * FEATURE: [vmui](https://docs.victoriametrics.com/#vmui): added a new page with the list of currently running queries. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4598) and [these docs](https://docs.victoriametrics.com/#active-queries). 
+* FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): add support for data ingestion via [OpenTelemetry protocol](https://opentelemetry.io/docs/reference/specification/metrics/). See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#sending-data-via-opentelemetry), [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/2424) and [this pull request](https://github.com/VictoriaMetrics/VictoriaMetrics/pull/2570). * FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): allow sharding outgoing time series among the configured remote storage systems. This can be useful for building horizontally scalable [stream aggregation](https://docs.victoriametrics.com/stream-aggregation.html), when samples for the same time series must be aggregated by the same `vmagent` instance at the second level. See [these docs](https://docs.victoriametrics.com/vmagent.html#sharding-among-remote-storages) and [this feature request](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4637) for details. * FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): allow configuring staleness interval in [stream aggregation](https://docs.victoriametrics.com/stream-aggregation.html) config. See [this issue](https://github.com/VictoriaMetrics/VictoriaMetrics/issues/4667) for details. * FEATURE: [vmagent](https://docs.victoriametrics.com/vmagent.html): allow specifying a list of [series selectors](https://docs.victoriametrics.com/keyConcepts.html#filtering) inside `if` option of relabeling rules. The corresponding relabeling rule is executed when at least a single series selector matches. See [these docs](https://docs.victoriametrics.com/vmagent.html#relabeling-enhancements). diff --git a/docs/Cluster-VictoriaMetrics.md b/docs/Cluster-VictoriaMetrics.md index a365ba6fd..af7289dbb 100644 --- a/docs/Cluster-VictoriaMetrics.md +++ b/docs/Cluster-VictoriaMetrics.md @@ -335,13 +335,14 @@ Check practical examples of VictoriaMetrics API [here](https://docs.victoriametr The `` can be set to `multitenant` string, e.g. `http://:8480/insert/multitenant/`. Such urls accept data from multiple tenants specified via `vm_account_id` and `vm_project_id` labels. See [multitenancy via labels](#multitenancy-via-labels) for more details. - `` may have the following values: - - `prometheus` and `prometheus/api/v1/write` - for inserting data with [Prometheus remote write API](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write). + - `prometheus` and `prometheus/api/v1/write` - for ingesting data with [Prometheus remote write API](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write). - `prometheus/api/v1/import` - for importing data obtained via `api/v1/export` at `vmselect` (see below), JSON line format. - `prometheus/api/v1/import/native` - for importing data obtained via `api/v1/export/native` on `vmselect` (see below). - `prometheus/api/v1/import/csv` - for importing arbitrary CSV data. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-import-csv-data) for details. - `prometheus/api/v1/import/prometheus` - for importing data in [Prometheus text exposition format](https://github.com/prometheus/docs/blob/master/content/docs/instrumenting/exposition_formats.md#text-based-format) and in [OpenMetrics format](https://github.com/OpenObservability/OpenMetrics/blob/master/specification/OpenMetrics.md). 
This endpoint also supports [Pushgateway protocol](https://github.com/prometheus/pushgateway#url). See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-import-data-in-prometheus-exposition-format) for details. - - `datadog/api/v1/series` - for inserting data with [DataDog submit metrics API](https://docs.datadoghq.com/api/latest/metrics/#submit-metrics). See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-datadog-agent) for details. - - `influx/write` and `influx/api/v2/write` - for inserting data with [InfluxDB line protocol](https://docs.influxdata.com/influxdb/v1.7/write_protocols/line_protocol_tutorial/). See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-influxdb-compatible-agents-such-as-telegraf) for details. + - `opentelemetry/api/v1/push` - for ingesting data via [OpenTelemetry protocol for metrics](https://github.com/open-telemetry/opentelemetry-specification/blob/ffddc289462dfe0c2041e3ca42a7b1df805706de/specification/metrics/data-model.md). See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#sending-data-via-opentelemetry). + - `datadog/api/v1/series` - for ingesting data with [DataDog submit metrics API](https://docs.datadoghq.com/api/latest/metrics/#submit-metrics). See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-datadog-agent) for details. + - `influx/write` and `influx/api/v2/write` - for ingesting data with [InfluxDB line protocol](https://docs.influxdata.com/influxdb/v1.7/write_protocols/line_protocol_tutorial/). See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-influxdb-compatible-agents-such-as-telegraf) for details. - `opentsdb/api/put` - for accepting [OpenTSDB HTTP /api/put requests](http://opentsdb.net/docs/build/html/api_http/put.html). This handler is disabled by default. It is exposed on a distinct TCP address set via `-opentsdbHTTPListenAddr` command-line flag. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#sending-opentsdb-data-via-http-apiput-requests) for details. - URLs for [Prometheus querying API](https://prometheus.io/docs/prometheus/latest/querying/api/): `http://:8481/select//prometheus/`, where: diff --git a/docs/README.md b/docs/README.md index 17d13545c..52d1de851 100644 --- a/docs/README.md +++ b/docs/README.md @@ -89,6 +89,7 @@ VictoriaMetrics has the following prominent features: * [Arbitrary CSV data](#how-to-import-csv-data). * [Native binary format](#how-to-import-data-in-native-format). * [DataDog agent or DogStatsD](#how-to-send-data-from-datadog-agent). + * [OpenTelemetry metrics format](#sending-data-via-opentelemetry). * It supports powerful [stream aggregation](https://docs.victoriametrics.com/stream-aggregation.html), which can be used as a [statsd](https://github.com/statsd/statsd) alternative. * It supports metrics [relabeling](#relabeling). * It can deal with [high cardinality issues](https://docs.victoriametrics.com/FAQ.html#what-is-high-cardinality) and @@ -1176,6 +1177,7 @@ Additionally, VictoriaMetrics can accept metrics via the following popular data * DataDog `submit metrics` API. See [these docs](#how-to-send-data-from-datadog-agent) for details. * InfluxDB line protocol. See [these docs](#how-to-send-data-from-influxdb-compatible-agents-such-as-telegraf) for details. * Graphite plaintext protocol.
See [these docs](#how-to-send-data-from-graphite-compatible-agents-such-as-statsd) for details. +* OpenTelemetry http API. See [these docs](#sending-data-via-opentelemetry) for details. * OpenTSDB telnet put protocol. See [these docs](#sending-data-via-telnet-put-protocol) for details. * OpenTSDB http `/api/put` protocol. See [these docs](#sending-opentsdb-data-via-http-apiput-requests) for details. * `/api/v1/import` for importing data obtained from [/api/v1/export](#how-to-export-data-in-json-line-format). @@ -1359,6 +1361,13 @@ Note that it could be required to flush response cache after importing historica VictoriaMetrics also may scrape Prometheus targets - see [these docs](#how-to-scrape-prometheus-exporters-such-as-node-exporter). +## Sending data via OpenTelemetry + +VictoriaMetrics supports data ingestion via [OpenTelemetry protocol for metrics](https://github.com/open-telemetry/opentelemetry-specification/blob/ffddc289462dfe0c2041e3ca42a7b1df805706de/specification/metrics/data-model.md) at the `/opentelemetry/api/v1/push` path. + +VictoriaMetrics expects `protobuf`-encoded requests at `/opentelemetry/api/v1/push`. +Set HTTP request header `Content-Encoding: gzip` when sending gzip-compressed data to `/opentelemetry/api/v1/push`. + ## Relabeling VictoriaMetrics supports Prometheus-compatible relabeling for all the ingested metrics if `-relabelConfig` command-line flag points diff --git a/docs/Single-server-VictoriaMetrics.md b/docs/Single-server-VictoriaMetrics.md index cdd5c3e37..2c3517e9b 100644 --- a/docs/Single-server-VictoriaMetrics.md +++ b/docs/Single-server-VictoriaMetrics.md @@ -97,6 +97,7 @@ VictoriaMetrics has the following prominent features: * [Arbitrary CSV data](#how-to-import-csv-data). * [Native binary format](#how-to-import-data-in-native-format). * [DataDog agent or DogStatsD](#how-to-send-data-from-datadog-agent). + * [OpenTelemetry metrics format](#sending-data-via-opentelemetry). * It supports powerful [stream aggregation](https://docs.victoriametrics.com/stream-aggregation.html), which can be used as a [statsd](https://github.com/statsd/statsd) alternative. * It supports metrics [relabeling](#relabeling). * It can deal with [high cardinality issues](https://docs.victoriametrics.com/FAQ.html#what-is-high-cardinality) and @@ -1184,6 +1185,7 @@ Additionally, VictoriaMetrics can accept metrics via the following popular data * DataDog `submit metrics` API. See [these docs](#how-to-send-data-from-datadog-agent) for details. * InfluxDB line protocol. See [these docs](#how-to-send-data-from-influxdb-compatible-agents-such-as-telegraf) for details. * Graphite plaintext protocol. See [these docs](#how-to-send-data-from-graphite-compatible-agents-such-as-statsd) for details. +* OpenTelemetry http API. See [these docs](#sending-data-via-opentelemetry) for details. * OpenTSDB telnet put protocol. See [these docs](#sending-data-via-telnet-put-protocol) for details. * OpenTSDB http `/api/put` protocol. See [these docs](#sending-opentsdb-data-via-http-apiput-requests) for details. * `/api/v1/import` for importing data obtained from [/api/v1/export](#how-to-export-data-in-json-line-format). @@ -1367,6 +1369,13 @@ Note that it could be required to flush response cache after importing historica VictoriaMetrics also may scrape Prometheus targets - see [these docs](#how-to-scrape-prometheus-exporters-such-as-node-exporter).
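The `Sending data via OpenTelemetry` sections added above describe the whole HTTP contract: a protobuf-encoded OTLP metrics payload is POSTed to `/opentelemetry/api/v1/push`, and gzip-compressed bodies must carry a `Content-Encoding: gzip` header. Below is a minimal Go client sketch of that contract, assuming the payload bytes come from an OTLP metrics exporter or from marshaling the `pb` types added later in this diff; the `pushOTLP` helper, the target port `8428` (single-node default; `vmagent` listens on `8429`) and the `Content-Type` value are illustrative, not part of this change:

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"net/http"
)

// pushOTLP gzip-compresses a protobuf-encoded OTLP metrics payload and sends
// it to the VictoriaMetrics OpenTelemetry ingestion endpoint.
func pushOTLP(vmAddr string, payload []byte) error {
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	if _, err := zw.Write(payload); err != nil {
		return err
	}
	if err := zw.Close(); err != nil {
		return err
	}
	req, err := http.NewRequest(http.MethodPost, vmAddr+"/opentelemetry/api/v1/push", &buf)
	if err != nil {
		return err
	}
	// The docs only require protobuf encoding plus this header for gzipped
	// bodies; the Content-Type is set here for clarity.
	req.Header.Set("Content-Type", "application/x-protobuf")
	req.Header.Set("Content-Encoding", "gzip")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status: %s", resp.Status)
	}
	return nil
}

func main() {
	// An empty payload keeps the sketch self-contained; a real client would
	// pass serialized OTLP metrics here.
	if err := pushOTLP("http://localhost:8428", nil); err != nil {
		fmt.Println("push failed:", err)
	}
}
```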
+## Sending data via OpenTelemetry + +VictoriaMetrics supports data ingestion via [OpenTelemetry protocol for metrics](https://github.com/open-telemetry/opentelemetry-specification/blob/ffddc289462dfe0c2041e3ca42a7b1df805706de/specification/metrics/data-model.md) at the `/opentelemetry/api/v1/push` path. + +VictoriaMetrics expects `protobuf`-encoded requests at `/opentelemetry/api/v1/push`. +Set HTTP request header `Content-Encoding: gzip` when sending gzip-compressed data to `/opentelemetry/api/v1/push`. + ## Relabeling VictoriaMetrics supports Prometheus-compatible relabeling for all the ingested metrics if `-relabelConfig` command-line flag points diff --git a/docs/vmagent.md b/docs/vmagent.md index c2011f2dc..585ada019 100644 --- a/docs/vmagent.md +++ b/docs/vmagent.md @@ -104,6 +104,7 @@ additionally to pull-based Prometheus-compatible targets' scraping: * DataDog "submit metrics" API. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-datadog-agent). * InfluxDB line protocol via `http://:8429/write`. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-influxdb-compatible-agents-such-as-telegraf). * Graphite plaintext protocol if `-graphiteListenAddr` command-line flag is set. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-graphite-compatible-agents-such-as-statsd). +* OpenTelemetry http API. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#sending-data-via-opentelemetry). * OpenTSDB telnet and http protocols if `-opentsdbListenAddr` command-line flag is set. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-send-data-from-opentsdb-compatible-agents). * Prometheus remote write protocol via `http://:8429/api/v1/write`. * JSON lines import protocol via `http://:8429/api/v1/import`. See [these docs](https://docs.victoriametrics.com/Single-server-VictoriaMetrics.html#how-to-import-data-in-json-line-format). diff --git a/lib/protoparser/opentelemetry/pb/common.pb.go b/lib/protoparser/opentelemetry/pb/common.pb.go new file mode 100644 index 000000000..6e2b25c29 --- /dev/null +++ b/lib/protoparser/opentelemetry/pb/common.pb.go @@ -0,0 +1,120 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.12 +// source: lib/protoparser/opentelemetry/proto/common.proto + +package pb + +// AnyValue is used to represent any type of attribute value. AnyValue may contain a +// primitive value such as a string or integer or it may contain an arbitrary nested +// object containing arrays, key-value lists and primitives. +type AnyValue struct { + unknownFields []byte + + // The value is one of the listed fields. It is valid for all values to be unspecified + // in which case this AnyValue is considered to be "empty".
+ // + // Types that are assignable to Value: + // + // *AnyValue_StringValue + // *AnyValue_BoolValue + // *AnyValue_IntValue + // *AnyValue_DoubleValue + // *AnyValue_ArrayValue + // *AnyValue_KvlistValue + // *AnyValue_BytesValue + Value isAnyValue_Value `protobuf_oneof:"value"` +} + +type isAnyValue_Value interface { + isAnyValue_Value() +} + +type AnyValue_StringValue struct { + StringValue string `protobuf:"bytes,1,opt,name=string_value,json=stringValue,proto3,oneof"` +} + +type AnyValue_BoolValue struct { + BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof"` +} + +type AnyValue_IntValue struct { + IntValue int64 `protobuf:"varint,3,opt,name=int_value,json=intValue,proto3,oneof"` +} + +type AnyValue_DoubleValue struct { + DoubleValue float64 `protobuf:"fixed64,4,opt,name=double_value,json=doubleValue,proto3,oneof"` +} + +type AnyValue_ArrayValue struct { + ArrayValue *ArrayValue `protobuf:"bytes,5,opt,name=array_value,json=arrayValue,proto3,oneof"` +} + +type AnyValue_KvlistValue struct { + KvlistValue *KeyValueList `protobuf:"bytes,6,opt,name=kvlist_value,json=kvlistValue,proto3,oneof"` +} + +type AnyValue_BytesValue struct { + BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof"` +} + +func (*AnyValue_StringValue) isAnyValue_Value() {} + +func (*AnyValue_BoolValue) isAnyValue_Value() {} + +func (*AnyValue_IntValue) isAnyValue_Value() {} + +func (*AnyValue_DoubleValue) isAnyValue_Value() {} + +func (*AnyValue_ArrayValue) isAnyValue_Value() {} + +func (*AnyValue_KvlistValue) isAnyValue_Value() {} + +func (*AnyValue_BytesValue) isAnyValue_Value() {} + +// ArrayValue is a list of AnyValue messages. We need ArrayValue as a message +// since oneof in AnyValue does not allow repeated fields. +type ArrayValue struct { + unknownFields []byte + // Array of values. The array may be empty (contain 0 elements). + Values []*AnyValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` +} + +// KeyValueList is a list of KeyValue messages. We need KeyValueList as a message +// since `oneof` in AnyValue does not allow repeated fields. Everywhere else where we need +// a list of KeyValue messages (e.g. in Span) we use `repeated KeyValue` directly to +// avoid unnecessary extra wrapping (which slows down the protocol). The 2 approaches +// are semantically equivalent. +type KeyValueList struct { + unknownFields []byte + + // A collection of key/value pairs. The list may be empty (may + // contain 0 elements). + // The keys MUST be unique (it is not allowed to have more than one + // value with the same key). + Values []*KeyValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` +} + +// KeyValue is a key-value pair that is used to store Span attributes, Link +// attributes, etc. +type KeyValue struct { + unknownFields []byte + + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value *AnyValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} diff --git a/lib/protoparser/opentelemetry/pb/common_vtproto.pb.go b/lib/protoparser/opentelemetry/pb/common_vtproto.pb.go new file mode 100644 index 000000000..15036b2e7 --- /dev/null +++ b/lib/protoparser/opentelemetry/pb/common_vtproto.pb.go @@ -0,0 +1,1079 @@ +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
+// protoc-gen-go-vtproto version: v0.4.0 +// source: lib/protoparser/opentelemetry/proto/common.proto + +package pb + +import ( + binary "encoding/binary" + fmt "fmt" + io "io" + math "math" + bits "math/bits" +) + +func (m *AnyValue) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AnyValue) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *AnyValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if vtmsg, ok := m.Value.(interface { + MarshalToSizedBufferVT([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + return len(dAtA) - i, nil +} + +func (m *AnyValue_StringValue) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *AnyValue_StringValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.StringValue) + copy(dAtA[i:], m.StringValue) + i = encodeVarint(dAtA, i, uint64(len(m.StringValue))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} +func (m *AnyValue_BoolValue) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *AnyValue_BoolValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.BoolValue { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + return len(dAtA) - i, nil +} +func (m *AnyValue_IntValue) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *AnyValue_IntValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + i := len(dAtA) + i = encodeVarint(dAtA, i, uint64(m.IntValue)) + i-- + dAtA[i] = 0x18 + return len(dAtA) - i, nil +} +func (m *AnyValue_DoubleValue) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *AnyValue_DoubleValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + i := len(dAtA) + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.DoubleValue)))) + i-- + dAtA[i] = 0x21 + return len(dAtA) - i, nil +} +func (m *AnyValue_ArrayValue) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *AnyValue_ArrayValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ArrayValue != nil { + size, err := m.ArrayValue.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *AnyValue_KvlistValue) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *AnyValue_KvlistValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + i := len(dAtA) + if m.KvlistValue != nil { + size, err := m.KvlistValue.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + 
return len(dAtA) - i, nil +} +func (m *AnyValue_BytesValue) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *AnyValue_BytesValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.BytesValue) + copy(dAtA[i:], m.BytesValue) + i = encodeVarint(dAtA, i, uint64(len(m.BytesValue))) + i-- + dAtA[i] = 0x3a + return len(dAtA) - i, nil +} +func (m *ArrayValue) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ArrayValue) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ArrayValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Values) > 0 { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Values[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *KeyValueList) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KeyValueList) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *KeyValueList) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Values) > 0 { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Values[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *KeyValue) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KeyValue) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *KeyValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Value != nil { + size, err := m.Value.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarint(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarint(dAtA []byte, offset int, v uint64) int { + offset -= sov(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = 
uint8(v) + return base +} +func (m *AnyValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if vtmsg, ok := m.Value.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *AnyValue_StringValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.StringValue) + n += 1 + l + sov(uint64(l)) + return n +} +func (m *AnyValue_BoolValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} +func (m *AnyValue_IntValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sov(uint64(m.IntValue)) + return n +} +func (m *AnyValue_DoubleValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 9 + return n +} +func (m *AnyValue_ArrayValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ArrayValue != nil { + l = m.ArrayValue.SizeVT() + n += 1 + l + sov(uint64(l)) + } + return n +} +func (m *AnyValue_KvlistValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.KvlistValue != nil { + l = m.KvlistValue.SizeVT() + n += 1 + l + sov(uint64(l)) + } + return n +} +func (m *AnyValue_BytesValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.BytesValue) + n += 1 + l + sov(uint64(l)) + return n +} +func (m *ArrayValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Values) > 0 { + for _, e := range m.Values { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *KeyValueList) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Values) > 0 { + for _, e := range m.Values { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *KeyValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Value != nil { + l = m.Value.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func sov(x uint64) (n int) { + return (bits.Len64(x|1) + 6) / 7 +} +func soz(x uint64) (n int) { + return sov(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *AnyValue) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AnyValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AnyValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > 
l { + return io.ErrUnexpectedEOF + } + m.Value = &AnyValue_StringValue{StringValue: string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Value = &AnyValue_BoolValue{BoolValue: b} + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IntValue", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Value = &AnyValue_IntValue{IntValue: v} + case 4: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field DoubleValue", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = &AnyValue_DoubleValue{DoubleValue: float64(math.Float64frombits(v))} + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ArrayValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if oneof, ok := m.Value.(*AnyValue_ArrayValue); ok { + if err := oneof.ArrayValue.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + } else { + v := &ArrayValue{} + if err := v.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &AnyValue_ArrayValue{ArrayValue: v} + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KvlistValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if oneof, ok := m.Value.(*AnyValue_KvlistValue); ok { + if err := oneof.KvlistValue.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + } else { + v := &KeyValueList{} + if err := v.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &AnyValue_KvlistValue{KvlistValue: v} + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BytesValue", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + 
} + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := make([]byte, postIndex-iNdEx) + copy(v, dAtA[iNdEx:postIndex]) + m.Value = &AnyValue_BytesValue{BytesValue: v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ArrayValue) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ArrayValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ArrayValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, &AnyValue{}) + if err := m.Values[len(m.Values)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KeyValueList) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KeyValueList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KeyValueList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, &KeyValue{}) + if err := m.Values[len(m.Values)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KeyValue) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KeyValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KeyValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Value == nil { + m.Value = &AnyValue{} + } + if err := m.Value.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} + +func skip(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLength + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroup + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLength + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLength = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflow = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroup = fmt.Errorf("proto: unexpected end of group") +) diff --git a/lib/protoparser/opentelemetry/pb/helpers.go b/lib/protoparser/opentelemetry/pb/helpers.go new file mode 100644 index 000000000..b8683101b --- /dev/null +++ b/lib/protoparser/opentelemetry/pb/helpers.go @@ -0,0 +1,69 @@ +package pb + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "math" + "strconv" +) + +// FormatString formats strings +func (x *AnyValue) FormatString() string { + switch v := x.Value.(type) { + case *AnyValue_StringValue: + return v.StringValue + + case *AnyValue_BoolValue: + return strconv.FormatBool(v.BoolValue) + + case *AnyValue_DoubleValue: + return float64AsString(v.DoubleValue) + + case *AnyValue_IntValue: + return strconv.FormatInt(v.IntValue, 10) + + case *AnyValue_KvlistValue: + jsonStr, _ := json.Marshal(v.KvlistValue.Values) + return string(jsonStr) + + case *AnyValue_BytesValue: + return base64.StdEncoding.EncodeToString(v.BytesValue) + + case *AnyValue_ArrayValue: + jsonStr, _ := json.Marshal(v.ArrayValue.Values) + return string(jsonStr) + + default: + return "" + } +} + +func float64AsString(f float64) string { + if math.IsInf(f, 0) || math.IsNaN(f) { + return fmt.Sprintf("json: unsupported value: %s", strconv.FormatFloat(f, 'g', -1, 64)) + } + + // Convert as if by ES6 number to string conversion. + // This matches most other JSON generators. + // See golang.org/issue/6384 and golang.org/issue/14135. + // Like fmt %g, but the exponent cutoffs are different + // and exponents themselves are not padded to two digits. 
+ scratch := [64]byte{} + b := scratch[:0] + abs := math.Abs(f) + fmt := byte('f') + if abs != 0 && (abs < 1e-6 || abs >= 1e21) { + fmt = 'e' + } + b = strconv.AppendFloat(b, f, fmt, -1, 64) + if fmt == 'e' { + // clean up e-09 to e-9 + n := len(b) + if n >= 4 && b[n-4] == 'e' && b[n-3] == '-' && b[n-2] == '0' { + b[n-2] = b[n-1] + b = b[:n-1] + } + } + return string(b) +} diff --git a/lib/protoparser/opentelemetry/pb/metrics.pb.go b/lib/protoparser/opentelemetry/pb/metrics.pb.go new file mode 100644 index 000000000..57cf5ea33 --- /dev/null +++ b/lib/protoparser/opentelemetry/pb/metrics.pb.go @@ -0,0 +1,736 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.12 +// source: lib/protoparser/opentelemetry/proto/metrics.proto + +package pb + +// AggregationTemporality defines how a metric aggregator reports aggregated +// values. It describes how those values relate to the time interval over +// which they are aggregated. +type AggregationTemporality int32 + +const ( + // UNSPECIFIED is the default AggregationTemporality, it MUST not be used. + AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED AggregationTemporality = 0 + // DELTA is an AggregationTemporality for a metric aggregator which reports + // changes since last report time. Successive metrics contain aggregation of + // values from continuous and non-overlapping intervals. + // + // The values for a DELTA metric are based only on the time interval + // associated with one measurement cycle. There is no dependency on + // previous measurements like is the case for CUMULATIVE metrics. + // + // For example, consider a system measuring the number of requests that + // it receives and reports the sum of these requests every second as a + // DELTA metric: + // + // 1. The system starts receiving at time=t_0. + // 2. A request is received, the system measures 1 request. + // 3. A request is received, the system measures 1 request. + // 4. A request is received, the system measures 1 request. + // 5. The 1 second collection cycle ends. A metric is exported for the + // number of requests received over the interval of time t_0 to + // t_0+1 with a value of 3. + // 6. A request is received, the system measures 1 request. + // 7. A request is received, the system measures 1 request. + // 8. The 1 second collection cycle ends. A metric is exported for the + // number of requests received over the interval of time t_0+1 to + // t_0+2 with a value of 2. + AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA AggregationTemporality = 1 + // CUMULATIVE is an AggregationTemporality for a metric aggregator which + // reports changes since a fixed start time. This means that current values + // of a CUMULATIVE metric depend on all previous measurements since the + // start time. Because of this, the sender is required to retain this state + // in some form. 
If this state is lost or invalidated, the CUMULATIVE metric + // values MUST be reset and a new fixed start time following the last + // reported measurement time sent MUST be used. + // + // For example, consider a system measuring the number of requests that + // it receives and reports the sum of these requests every second as a + // CUMULATIVE metric: + // + // 1. The system starts receiving at time=t_0. + // 2. A request is received, the system measures 1 request. + // 3. A request is received, the system measures 1 request. + // 4. A request is received, the system measures 1 request. + // 5. The 1 second collection cycle ends. A metric is exported for the + // number of requests received over the interval of time t_0 to + // t_0+1 with a value of 3. + // 6. A request is received, the system measures 1 request. + // 7. A request is received, the system measures 1 request. + // 8. The 1 second collection cycle ends. A metric is exported for the + // number of requests received over the interval of time t_0 to + // t_0+2 with a value of 5. + // 9. The system experiences a fault and loses state. + // 10. The system recovers and resumes receiving at time=t_1. + // 11. A request is received, the system measures 1 request. + // 12. The 1 second collection cycle ends. A metric is exported for the + // number of requests received over the interval of time t_1 to + // t_1+1 with a value of 1. + // + // Note: Even though it is valid to use CUMULATIVE when reporting changes + // since the last report time, it is not recommended. This may cause problems + // for systems that do not use start_time to determine when the aggregation + // value was reset (e.g. Prometheus). + AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE AggregationTemporality = 2 +) + +// Enum value maps for AggregationTemporality. +var ( + AggregationTemporality_name = map[int32]string{ + 0: "AGGREGATION_TEMPORALITY_UNSPECIFIED", + 1: "AGGREGATION_TEMPORALITY_DELTA", + 2: "AGGREGATION_TEMPORALITY_CUMULATIVE", + } + AggregationTemporality_value = map[string]int32{ + "AGGREGATION_TEMPORALITY_UNSPECIFIED": 0, + "AGGREGATION_TEMPORALITY_DELTA": 1, + "AGGREGATION_TEMPORALITY_CUMULATIVE": 2, + } +) + +func (x AggregationTemporality) Enum() *AggregationTemporality { + p := new(AggregationTemporality) + *p = x + return p +} + +// DataPointFlags is defined as a protobuf 'uint32' type and is to be used as a +// bit-field representing 32 distinct boolean flags. Each flag defined in this +// enum is a bit-mask. To test the presence of a single flag in the flags of +// a data point, for example, use an expression like: +// +// (point.flags & FLAG_NO_RECORDED_VALUE) == FLAG_NO_RECORDED_VALUE +type DataPointFlags int32 + +const ( + DataPointFlags_FLAG_NONE DataPointFlags = 0 + // This DataPoint is valid but has no recorded value. This value + // SHOULD be used to reflect explicitly missing data in a series, as + // for an equivalent to the Prometheus "staleness marker". + DataPointFlags_FLAG_NO_RECORDED_VALUE DataPointFlags = 1 +) + +// Enum value maps for DataPointFlags.
+var ( + DataPointFlags_name = map[int32]string{ + 0: "FLAG_NONE", + 1: "FLAG_NO_RECORDED_VALUE", + } + DataPointFlags_value = map[string]int32{ + "FLAG_NONE": 0, + "FLAG_NO_RECORDED_VALUE": 1, + } +) + +func (x DataPointFlags) Enum() *DataPointFlags { + p := new(DataPointFlags) + *p = x + return p +} + +// MetricsData represents the metrics data that can be stored in a persistent +// storage, OR can be embedded by other protocols that transfer OTLP metrics +// data but do not implement the OTLP protocol. +// +// The main difference between this message and collector protocol is that +// in this message there will not be any "control" or "metadata" specific to +// OTLP protocol. +// +// When new fields are added into this message, the OTLP request MUST be updated +// as well. +type MetricsData struct { + unknownFields []byte + + // An array of ResourceMetrics. + // For data coming from a single resource this array will typically contain + // one element. Intermediary nodes that receive data from multiple origins + // typically batch the data before forwarding further and in that case this + // array will contain multiple elements. + ResourceMetrics []*ResourceMetrics `protobuf:"bytes,1,rep,name=resource_metrics,json=resourceMetrics,proto3" json:"resource_metrics,omitempty"` +} + +// A collection of ScopeMetrics from a Resource. +type ResourceMetrics struct { + unknownFields []byte + + // The resource for the metrics in this message. + // If this field is not set then no resource info is known. + Resource *Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` + // A list of metrics that originate from a resource. + ScopeMetrics []*ScopeMetrics `protobuf:"bytes,2,rep,name=scope_metrics,json=scopeMetrics,proto3" json:"scope_metrics,omitempty"` + // This schema_url applies to the data in the "resource" field. It does not apply + // to the data in the "scope_metrics" field which have their own schema_url field. + SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` +} + +// A collection of Metrics produced by a Scope. +type ScopeMetrics struct { + unknownFields []byte + + // A list of metrics that originate from an instrumentation library. + Metrics []*Metric `protobuf:"bytes,2,rep,name=metrics,proto3" json:"metrics,omitempty"` + // This schema_url applies to all metrics in the "metrics" field. + SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` +} + +// Defines a Metric which has one or more timeseries. The following is a +// brief summary of the Metric data model. For more details, see: +// +// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md +// +// The data model and relation between entities is shown in the +// diagram below. Here, "DataPoint" is the term used to refer to any +// one of the specific data point value types, and "points" is the term used +// to refer to any one of the lists of points contained in the Metric. +// +// - Metric is composed of metadata and data. +// +// - Metadata part contains a name, description, and unit. +// +// - Data is one of the possible types (Sum, Gauge, Histogram, Summary). +// +// - DataPoint contains timestamps, attributes, and one of the possible value type +// fields. +// +// Metric +// +------------+ +// |name | +// |description | +// |unit | +------------------------------------+ +// |data |---> |Gauge, Sum, Histogram, Summary, ...
| +// +------------+ +------------------------------------+ +// +// Data [One of Gauge, Sum, Histogram, Summary, ...] +// +-----------+ +// |... | // Metadata about the Data. +// |points |--+ +// +-----------+ | +// | +---------------------------+ +// | |DataPoint 1 | +// v |+------+------+ +------+ | +// +-----+ ||label |label |...|label | | +// | 1 |-->||value1|value2|...|valueN| | +// +-----+ |+------+------+ +------+ | +// | . | |+-----+ | +// | . | ||value| | +// | . | |+-----+ | +// | . | +---------------------------+ +// | . | . +// | . | . +// | . | . +// | . | +---------------------------+ +// | . | |DataPoint M | +// +-----+ |+------+------+ +------+ | +// | M |-->||label |label |...|label | | +// +-----+ ||value1|value2|...|valueN| | +// |+------+------+ +------+ | +// |+-----+ | +// ||value| | +// |+-----+ | +// +---------------------------+ +// +// Each distinct type of DataPoint represents the output of a specific +// aggregation function, the result of applying the DataPoint's +// associated function of to one or more measurements. +// +// All DataPoint types have three common fields: +// - Attributes includes key-value pairs associated with the data point +// - TimeUnixNano is required, set to the end time of the aggregation +// - StartTimeUnixNano is optional, but strongly encouraged for DataPoints +// having an AggregationTemporality field, as discussed below. +// +// Both TimeUnixNano and StartTimeUnixNano values are expressed as +// UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. +// +// # TimeUnixNano +// +// This field is required, having consistent interpretation across +// DataPoint types. TimeUnixNano is the moment corresponding to when +// the data point's aggregate value was captured. +// +// Data points with the 0 value for TimeUnixNano SHOULD be rejected +// by consumers. +// +// # StartTimeUnixNano +// +// StartTimeUnixNano in general allows detecting when a sequence of +// observations is unbroken. This field indicates to consumers the +// start time for points with cumulative and delta +// AggregationTemporality, and it should be included whenever possible +// to support correct rate calculation. Although it may be omitted +// when the start time is truly unknown, setting StartTimeUnixNano is +// strongly encouraged. +type Metric struct { + unknownFields []byte + + // name of the metric, including its DNS name prefix. It must be unique. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // description of the metric, which can be used in documentation. + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + // unit in which the metric value is reported. Follows the format + // described by http://unitsofmeasure.org/ucum.html. + Unit string `protobuf:"bytes,3,opt,name=unit,proto3" json:"unit,omitempty"` + // Data determines the aggregation type (if any) of the metric, what is the + // reported value type for the data points, as well as the relatationship to + // the time interval over which they are reported. 
+ // + // Types that are assignable to Data: + // + // *Metric_Gauge + // *Metric_Sum + // *Metric_Histogram + // *Metric_ExponentialHistogram + // *Metric_Summary + Data isMetric_Data `protobuf_oneof:"data"` +} + +type isMetric_Data interface { + isMetric_Data() +} + +type Metric_Gauge struct { + Gauge *Gauge `protobuf:"bytes,5,opt,name=gauge,proto3,oneof"` +} + +type Metric_Sum struct { + Sum *Sum `protobuf:"bytes,7,opt,name=sum,proto3,oneof"` +} + +type Metric_Histogram struct { + Histogram *Histogram `protobuf:"bytes,9,opt,name=histogram,proto3,oneof"` +} + +type Metric_ExponentialHistogram struct { + ExponentialHistogram *ExponentialHistogram `protobuf:"bytes,10,opt,name=exponential_histogram,json=exponentialHistogram,proto3,oneof"` +} + +type Metric_Summary struct { + Summary *Summary `protobuf:"bytes,11,opt,name=summary,proto3,oneof"` +} + +func (*Metric_Gauge) isMetric_Data() {} + +func (*Metric_Sum) isMetric_Data() {} + +func (*Metric_Histogram) isMetric_Data() {} + +func (*Metric_ExponentialHistogram) isMetric_Data() {} + +func (*Metric_Summary) isMetric_Data() {} + +// Gauge represents the type of a scalar metric that always exports the +// "current value" for every data point. It should be used for an "unknown" +// aggregation. +// +// A Gauge does not support different aggregation temporalities. Given the +// aggregation is unknown, points cannot be combined using the same +// aggregation, regardless of aggregation temporalities. Therefore, +// AggregationTemporality is not included. Consequently, this also means +// "StartTimeUnixNano" is ignored for all data points. +type Gauge struct { + unknownFields []byte + + DataPoints []*NumberDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` +} + +// Sum represents the type of a scalar metric that is calculated as a sum of all +// reported measurements over a time interval. +type Sum struct { + unknownFields []byte + + DataPoints []*NumberDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` + // aggregation_temporality describes if the aggregator reports delta changes + // since last report time, or cumulative changes since a fixed start time. + AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.AggregationTemporality" json:"aggregation_temporality,omitempty"` + // If "true", the sum is monotonic. + IsMonotonic bool `protobuf:"varint,3,opt,name=is_monotonic,json=isMonotonic,proto3" json:"is_monotonic,omitempty"` +} + +// Histogram represents the type of a metric that is calculated by aggregating +// as a Histogram of all reported measurements over a time interval. +type Histogram struct { + unknownFields []byte + + DataPoints []*HistogramDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` + // aggregation_temporality describes if the aggregator reports delta changes + // since last report time, or cumulative changes since a fixed start time. + AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.AggregationTemporality" json:"aggregation_temporality,omitempty"` +} + +// ExponentialHistogram represents the type of a metric that is calculated by aggregating +// as an ExponentialHistogram of all reported double measurements over a time interval.
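+ // + // Added illustrative note: bucket boundaries are powers of base = 2^(2^-scale), + // so scale = 0 gives base 2 and buckets (1, 2], (2, 4], (4, 8], ...; see the + // scale field of ExponentialHistogramDataPoint below.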
+type ExponentialHistogram struct { + unknownFields []byte + + DataPoints []*ExponentialHistogramDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` + // aggregation_temporality describes if the aggregator reports delta changes + // since last report time, or cumulative changes since a fixed start time. + AggregationTemporality AggregationTemporality `protobuf:"varint,2,opt,name=aggregation_temporality,json=aggregationTemporality,proto3,enum=opentelemetry.AggregationTemporality" json:"aggregation_temporality,omitempty"` +} + +// Summary metric data are used to convey quantile summaries, +// a Prometheus (see: https://prometheus.io/docs/concepts/metric_types/#summary) +// and OpenMetrics (see: https://github.com/OpenObservability/OpenMetrics/blob/4dbf6075567ab43296eed941037c12951faafb92/protos/prometheus.proto#L45) +// data type. These data points cannot always be merged in a meaningful way. +// While they can be useful in some applications, histogram data points are +// recommended for new applications. +type Summary struct { + unknownFields []byte + + DataPoints []*SummaryDataPoint `protobuf:"bytes,1,rep,name=data_points,json=dataPoints,proto3" json:"data_points,omitempty"` +} + +// NumberDataPoint is a single data point in a timeseries that describes the +// time-varying scalar value of a metric. +type NumberDataPoint struct { + unknownFields []byte + + // The set of key/value pairs that uniquely identify the timeseries to + // which this point belongs. The list may be empty (may contain 0 elements). + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attributes []*KeyValue `protobuf:"bytes,7,rep,name=attributes,proto3" json:"attributes,omitempty"` + // StartTimeUnixNano is optional but strongly encouraged, see the + // detailed comments above Metric. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"` + // TimeUnixNano is required, see the detailed comments above Metric. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` + // The value itself. A point is considered invalid when one of the recognized + // value fields is not present inside this oneof. + // + // Types that are assignable to Value: + // + // *NumberDataPoint_AsDouble + // *NumberDataPoint_AsInt + Value isNumberDataPoint_Value `protobuf_oneof:"value"` + // (Optional) List of exemplars collected from + // measurements that were used to form the data point + Exemplars []*Exemplar `protobuf:"bytes,5,rep,name=exemplars,proto3" json:"exemplars,omitempty"` + // Flags that apply to this specific data point. See DataPointFlags + // for the available flags and their meaning.
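+ // + // For example (added note), the staleness marker on a data point p can be + // detected with: + // + // (p.Flags & uint32(DataPointFlags_FLAG_NO_RECORDED_VALUE)) != 0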
+ Flags uint32 `protobuf:"varint,8,opt,name=flags,proto3" json:"flags,omitempty"` +} + +type isNumberDataPoint_Value interface { + isNumberDataPoint_Value() +} + +type NumberDataPoint_AsDouble struct { + AsDouble float64 `protobuf:"fixed64,4,opt,name=as_double,json=asDouble,proto3,oneof"` +} + +type NumberDataPoint_AsInt struct { + AsInt int64 `protobuf:"fixed64,6,opt,name=as_int,json=asInt,proto3,oneof"` +} + +func (*NumberDataPoint_AsDouble) isNumberDataPoint_Value() {} + +func (*NumberDataPoint_AsInt) isNumberDataPoint_Value() {} + +// HistogramDataPoint is a single data point in a timeseries that describes the +// time-varying values of a Histogram. A Histogram contains summary statistics +// for a population of values; it may optionally contain the distribution of +// those values across a set of buckets. +// +// If the histogram contains the distribution of values, then both +// "explicit_bounds" and "bucket_counts" fields must be defined. +// If the histogram does not contain the distribution of values, then both +// "explicit_bounds" and "bucket_counts" must be omitted and only "count" and +// "sum" are known. +type HistogramDataPoint struct { + unknownFields []byte + + // The set of key/value pairs that uniquely identify the timeseries to + // which this point belongs. The list may be empty (may contain 0 elements). + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attributes []*KeyValue `protobuf:"bytes,9,rep,name=attributes,proto3" json:"attributes,omitempty"` + // StartTimeUnixNano is optional but strongly encouraged, see the + // detailed comments above Metric. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"` + // TimeUnixNano is required, see the detailed comments above Metric. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` + // count is the number of values in the population. Must be non-negative. This + // value must be equal to the sum of the "count" fields in buckets if a + // histogram is provided. + Count uint64 `protobuf:"fixed64,4,opt,name=count,proto3" json:"count,omitempty"` + // sum of the values in the population. If count is zero then this field + // must be zero. + // + // Note: Sum should only be filled out when measuring non-negative discrete + // events, and is assumed to be monotonic over the values of these events. + // Negative events *can* be recorded, but sum should not be filled out when + // doing so. This is specifically to enforce compatibility w/ OpenMetrics, + // see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram + Sum *float64 `protobuf:"fixed64,5,opt,name=sum,proto3,oneof" json:"sum,omitempty"` + // bucket_counts is an optional field that contains the count values of the + // histogram for each bucket. + // + // The sum of the bucket_counts must equal the value in the count field. + // + // The number of elements in bucket_counts array must be one greater than + // the number of elements in explicit_bounds array.
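+ // + // For example (added illustrative note): explicit_bounds = [0.1, 1, 10] implies + // four bucket_counts entries, counting values in (-infinity, 0.1], (0.1, 1], + // (1, 10] and (10, +infinity) respectively.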
+ BucketCounts []uint64 `protobuf:"fixed64,6,rep,packed,name=bucket_counts,json=bucketCounts,proto3" json:"bucket_counts,omitempty"` + // explicit_bounds specifies buckets with explicitly defined bounds for values. + // + // The boundaries for bucket at index i are: + // + // (-infinity, explicit_bounds[i]] for i == 0 + // (explicit_bounds[i-1], explicit_bounds[i]] for 0 < i < size(explicit_bounds) + // (explicit_bounds[i-1], +infinity) for i == size(explicit_bounds) + // + // The values in the explicit_bounds array must be strictly increasing. + // + // Histogram buckets are inclusive of their upper boundary, except the last + // bucket where the boundary is at infinity. This format is intentionally + // compatible with the OpenMetrics histogram definition. + ExplicitBounds []float64 `protobuf:"fixed64,7,rep,packed,name=explicit_bounds,json=explicitBounds,proto3" json:"explicit_bounds,omitempty"` + // (Optional) List of exemplars collected from + // measurements that were used to form the data point + Exemplars []*Exemplar `protobuf:"bytes,8,rep,name=exemplars,proto3" json:"exemplars,omitempty"` + // Flags that apply to this specific data point. See DataPointFlags + // for the available flags and their meaning. + Flags uint32 `protobuf:"varint,10,opt,name=flags,proto3" json:"flags,omitempty"` + // min is the minimum value over (start_time, end_time]. + Min *float64 `protobuf:"fixed64,11,opt,name=min,proto3,oneof" json:"min,omitempty"` + // max is the maximum value over (start_time, end_time]. + Max *float64 `protobuf:"fixed64,12,opt,name=max,proto3,oneof" json:"max,omitempty"` +} + +// ExponentialHistogramDataPoint is a single data point in a timeseries that describes the +// time-varying values of an ExponentialHistogram of double values. An ExponentialHistogram contains +// summary statistics for a population of values; it may optionally contain the +// distribution of those values across a set of buckets. +type ExponentialHistogramDataPoint struct { + unknownFields []byte + + // The set of key/value pairs that uniquely identify the timeseries to + // which this point belongs. The list may be empty (may contain 0 elements). + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attributes []*KeyValue `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes,omitempty"` + // StartTimeUnixNano is optional but strongly encouraged, see the + // detailed comments above Metric. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"` + // TimeUnixNano is required, see the detailed comments above Metric. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` + // count is the number of values in the population. Must be + // non-negative. This value must be equal to the sum of the "bucket_counts" + // values in the positive and negative Buckets plus the "zero_count" field. + Count uint64 `protobuf:"fixed64,4,opt,name=count,proto3" json:"count,omitempty"` + // sum of the values in the population. If count is zero then this field + // must be zero.
+ // + // Note: Sum should only be filled out when measuring non-negative discrete + // events, and is assumed to be monotonic over the values of these events. + // Negative events *can* be recorded, but sum should not be filled out when + // doing so. This is specifically to enforce compatibility w/ OpenMetrics, + // see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram + Sum *float64 `protobuf:"fixed64,5,opt,name=sum,proto3,oneof" json:"sum,omitempty"` + // scale describes the resolution of the histogram. Boundaries are + // located at powers of the base, where: + // + // base = (2^(2^-scale)) + // + // The histogram bucket identified by `index`, a signed integer, + // contains values that are greater than (base^index) and + // less than or equal to (base^(index+1)). + // + // The positive and negative ranges of the histogram are expressed + // separately. Negative values are mapped by their absolute value + // into the negative range using the same scale as the positive range. + // + // scale is not restricted by the protocol, as the permissible + // values depend on the range of the data. + Scale int32 `protobuf:"zigzag32,6,opt,name=scale,proto3" json:"scale,omitempty"` + // zero_count is the count of values that are either exactly zero or + // within the region considered zero by the instrumentation at the + // tolerated degree of precision. This bucket stores values that + // cannot be expressed using the standard exponential formula as + // well as values that have been rounded to zero. + // + // Implementations MAY consider the zero bucket to have probability + // mass equal to (zero_count / count). + ZeroCount uint64 `protobuf:"fixed64,7,opt,name=zero_count,json=zeroCount,proto3" json:"zero_count,omitempty"` + // positive carries the positive range of exponential bucket counts. + Positive *ExponentialHistogramDataPoint_Buckets `protobuf:"bytes,8,opt,name=positive,proto3" json:"positive,omitempty"` + // negative carries the negative range of exponential bucket counts. + Negative *ExponentialHistogramDataPoint_Buckets `protobuf:"bytes,9,opt,name=negative,proto3" json:"negative,omitempty"` + // Flags that apply to this specific data point. See DataPointFlags + // for the available flags and their meaning. + Flags uint32 `protobuf:"varint,10,opt,name=flags,proto3" json:"flags,omitempty"` + // (Optional) List of exemplars collected from + // measurements that were used to form the data point + Exemplars []*Exemplar `protobuf:"bytes,11,rep,name=exemplars,proto3" json:"exemplars,omitempty"` + // min is the minimum value over (start_time, end_time]. + Min *float64 `protobuf:"fixed64,12,opt,name=min,proto3,oneof" json:"min,omitempty"` + // max is the maximum value over (start_time, end_time]. + Max *float64 `protobuf:"fixed64,13,opt,name=max,proto3,oneof" json:"max,omitempty"` +} + +// SummaryDataPoint is a single data point in a timeseries that describes the +// time-varying values of a Summary metric. +type SummaryDataPoint struct { + unknownFields []byte + + // The set of key/value pairs that uniquely identify the timeseries to + // which this point belongs. The list may be empty (may contain 0 elements). + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attributes []*KeyValue `protobuf:"bytes,7,rep,name=attributes,proto3" json:"attributes,omitempty"` + // StartTimeUnixNano is optional but strongly encouraged, see the + // detailed comments above Metric.
+ // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + StartTimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"` + // TimeUnixNano is required, see the detailed comments above Metric. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + TimeUnixNano uint64 `protobuf:"fixed64,3,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` + // count is the number of values in the population. Must be non-negative. + Count uint64 `protobuf:"fixed64,4,opt,name=count,proto3" json:"count,omitempty"` + // sum of the values in the population. If count is zero then this field + // must be zero. + // + // Note: Sum should only be filled out when measuring non-negative discrete + // events, and is assumed to be monotonic over the values of these events. + // Negative events *can* be recorded, but sum should not be filled out when + // doing so. This is specifically to enforce compatibility w/ OpenMetrics, + // see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#summary + Sum float64 `protobuf:"fixed64,5,opt,name=sum,proto3" json:"sum,omitempty"` + // (Optional) list of values at different quantiles of the distribution calculated + // from the current snapshot. The quantiles must be strictly increasing. + QuantileValues []*SummaryDataPoint_ValueAtQuantile `protobuf:"bytes,6,rep,name=quantile_values,json=quantileValues,proto3" json:"quantile_values,omitempty"` + // Flags that apply to this specific data point. See DataPointFlags + // for the available flags and their meaning. + Flags uint32 `protobuf:"varint,8,opt,name=flags,proto3" json:"flags,omitempty"` +} + +// A representation of an exemplar, which is a sample input measurement. +// Exemplars also hold information about the environment when the measurement +// was recorded, for example the span and trace ID of the active span when the +// exemplar was recorded. +type Exemplar struct { + unknownFields []byte + + // The set of key/value pairs that were filtered out by the aggregator, but + // recorded alongside the original measurement. Only key/value pairs that were + // filtered out by the aggregator should be included + FilteredAttributes []*KeyValue `protobuf:"bytes,7,rep,name=filtered_attributes,json=filteredAttributes,proto3" json:"filtered_attributes,omitempty"` + // time_unix_nano is the exact time when this exemplar was recorded + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + TimeUnixNano uint64 `protobuf:"fixed64,2,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` + // The value of the measurement that was recorded. An exemplar is + // considered invalid when one of the recognized value fields is not present + // inside this oneof. + // + // Types that are assignable to Value: + // + // *Exemplar_AsDouble + // *Exemplar_AsInt + Value isExemplar_Value `protobuf_oneof:"value"` + // (Optional) Span ID of the exemplar trace. + // span_id may be missing if the measurement is not recorded inside a trace + // or if the trace is not sampled. + SpanId []byte `protobuf:"bytes,4,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` + // (Optional) Trace ID of the exemplar trace. + // trace_id may be missing if the measurement is not recorded inside a trace + // or if the trace is not sampled. 
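+ // When present, span IDs are 8 bytes and trace IDs are 16 bytes, per the + // OpenTelemetry trace specification (added note).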
+ TraceId []byte `protobuf:"bytes,5,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` +} + +type isExemplar_Value interface { + isExemplar_Value() +} + +type Exemplar_AsDouble struct { + AsDouble float64 `protobuf:"fixed64,3,opt,name=as_double,json=asDouble,proto3,oneof"` +} + +type Exemplar_AsInt struct { + AsInt int64 `protobuf:"fixed64,6,opt,name=as_int,json=asInt,proto3,oneof"` +} + +func (*Exemplar_AsDouble) isExemplar_Value() {} + +func (*Exemplar_AsInt) isExemplar_Value() {} + +// Buckets are a set of bucket counts, encoded in a contiguous array +// of counts. +type ExponentialHistogramDataPoint_Buckets struct { + unknownFields []byte + + // Offset is the bucket index of the first entry in the bucket_counts array. + // + // Note: This uses a varint encoding as a simple form of compression. + Offset int32 `protobuf:"zigzag32,1,opt,name=offset,proto3" json:"offset,omitempty"` + // Count is an array of counts, where count[i] carries the count + // of the bucket at index (offset+i). count[i] is the count of + // values greater than base^(offset+i) and less than or equal to + // base^(offset+i+1). + // + // Note: By contrast, the explicit HistogramDataPoint uses + // fixed64. This field is expected to have many buckets, + // especially zeros, so uint64 has been selected to ensure + // varint encoding. + BucketCounts []uint64 `protobuf:"varint,2,rep,packed,name=bucket_counts,json=bucketCounts,proto3" json:"bucket_counts,omitempty"` +} + +// Represents the value at a given quantile of a distribution. +// +// To record Min and Max values, the following conventions are used: +// - The 1.0 quantile is equivalent to the maximum value observed. +// - The 0.0 quantile is equivalent to the minimum value observed. +// +// See the following issue for more context: +// https://github.com/open-telemetry/opentelemetry-proto/issues/125 +type SummaryDataPoint_ValueAtQuantile struct { + unknownFields []byte + + // The quantile of a distribution. Must be in the interval + // [0.0, 1.0]. + Quantile float64 `protobuf:"fixed64,1,opt,name=quantile,proto3" json:"quantile,omitempty"` + // The value at the given quantile of a distribution. + // + // Quantile values must NOT be negative. + Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"` +} diff --git a/lib/protoparser/opentelemetry/pb/metrics_service.pb.go b/lib/protoparser/opentelemetry/pb/metrics_service.pb.go new file mode 100644 index 000000000..b7ef375fe --- /dev/null +++ b/lib/protoparser/opentelemetry/pb/metrics_service.pb.go @@ -0,0 +1,32 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.12 +// source: lib/protoparser/opentelemetry/proto/metrics_service.proto + +package pb + +type ExportMetricsServiceRequest struct { + unknownFields []byte + + // An array of ResourceMetrics.
+ // For data coming from a single resource this array will typically contain one + // element. Intermediary nodes (such as OpenTelemetry Collector) that receive + // data from multiple origins typically batch the data before forwarding further and + // in that case this array will contain multiple elements. + ResourceMetrics []*ResourceMetrics `protobuf:"bytes,1,rep,name=resource_metrics,json=resourceMetrics,proto3" json:"resource_metrics,omitempty"` +} diff --git a/lib/protoparser/opentelemetry/pb/metrics_service_vtproto.pb.go b/lib/protoparser/opentelemetry/pb/metrics_service_vtproto.pb.go new file mode 100644 index 000000000..32aac2867 --- /dev/null +++ b/lib/protoparser/opentelemetry/pb/metrics_service_vtproto.pb.go @@ -0,0 +1,157 @@ +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// protoc-gen-go-vtproto version: v0.4.0 +// source: lib/protoparser/opentelemetry/proto/metrics_service.proto + +package pb + +import ( + fmt "fmt" + io "io" +) + +func (m *ExportMetricsServiceRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExportMetricsServiceRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ExportMetricsServiceRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ResourceMetrics) > 0 { + for iNdEx := len(m.ResourceMetrics) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.ResourceMetrics[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ExportMetricsServiceRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ResourceMetrics) > 0 { + for _, e := range m.ResourceMetrics { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ExportMetricsServiceRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExportMetricsServiceRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExportMetricsServiceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceMetrics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
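// Each occurrence of field 1 carries one length-delimited ResourceMetrics + // message; decode it and append it to the list (added comment). +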
m.ResourceMetrics = append(m.ResourceMetrics, &ResourceMetrics{}) + if err := m.ResourceMetrics[len(m.ResourceMetrics)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/lib/protoparser/opentelemetry/pb/metrics_vtproto.pb.go b/lib/protoparser/opentelemetry/pb/metrics_vtproto.pb.go new file mode 100644 index 000000000..f89703fcf --- /dev/null +++ b/lib/protoparser/opentelemetry/pb/metrics_vtproto.pb.go @@ -0,0 +1,4331 @@ +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// protoc-gen-go-vtproto version: v0.4.0 +// source: lib/protoparser/opentelemetry/proto/metrics.proto + +package pb + +import ( + binary "encoding/binary" + fmt "fmt" + io "io" + math "math" +) + +func (m *MetricsData) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetricsData) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *MetricsData) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ResourceMetrics) > 0 { + for iNdEx := len(m.ResourceMetrics) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.ResourceMetrics[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ResourceMetrics) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceMetrics) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ResourceMetrics) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.SchemaUrl) > 0 { + i -= len(m.SchemaUrl) + copy(dAtA[i:], m.SchemaUrl) + i = encodeVarint(dAtA, i, uint64(len(m.SchemaUrl))) + i-- + dAtA[i] = 0x1a + } + if len(m.ScopeMetrics) > 0 { + for iNdEx := len(m.ScopeMetrics) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.ScopeMetrics[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + if m.Resource != nil { + size, err := m.Resource.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ScopeMetrics) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return 
nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScopeMetrics) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ScopeMetrics) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.SchemaUrl) > 0 { + i -= len(m.SchemaUrl) + copy(dAtA[i:], m.SchemaUrl) + i = encodeVarint(dAtA, i, uint64(len(m.SchemaUrl))) + i-- + dAtA[i] = 0x1a + } + if len(m.Metrics) > 0 { + for iNdEx := len(m.Metrics) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Metrics[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + } + } + return len(dAtA) - i, nil +} + +func (m *Metric) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Metric) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Metric) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if vtmsg, ok := m.Data.(interface { + MarshalToSizedBufferVT([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.Unit) > 0 { + i -= len(m.Unit) + copy(dAtA[i:], m.Unit) + i = encodeVarint(dAtA, i, uint64(len(m.Unit))) + i-- + dAtA[i] = 0x1a + } + if len(m.Description) > 0 { + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarint(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Metric_Gauge) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Metric_Gauge) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Gauge != nil { + size, err := m.Gauge.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *Metric_Sum) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Metric_Sum) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Sum != nil { + size, err := m.Sum.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + return len(dAtA) - i, nil +} +func (m *Metric_Histogram) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Metric_Histogram) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Histogram != nil { + size, err := 
m.Histogram.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + return len(dAtA) - i, nil +} +func (m *Metric_ExponentialHistogram) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Metric_ExponentialHistogram) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ExponentialHistogram != nil { + size, err := m.ExponentialHistogram.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x52 + } + return len(dAtA) - i, nil +} +func (m *Metric_Summary) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Metric_Summary) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Summary != nil { + size, err := m.Summary.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x5a + } + return len(dAtA) - i, nil +} +func (m *Gauge) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Gauge) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Gauge) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.DataPoints) > 0 { + for iNdEx := len(m.DataPoints) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.DataPoints[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Sum) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Sum) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Sum) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.IsMonotonic { + i-- + if m.IsMonotonic { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.AggregationTemporality != 0 { + i = encodeVarint(dAtA, i, uint64(m.AggregationTemporality)) + i-- + dAtA[i] = 0x10 + } + if len(m.DataPoints) > 0 { + for iNdEx := len(m.DataPoints) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.DataPoints[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Histogram) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + 
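// The buffer is filled from the end and was pre-sized exactly, so n equals + // size and dAtA[:n] is the complete encoding (added comment). +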
return dAtA[:n], nil +} + +func (m *Histogram) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Histogram) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.AggregationTemporality != 0 { + i = encodeVarint(dAtA, i, uint64(m.AggregationTemporality)) + i-- + dAtA[i] = 0x10 + } + if len(m.DataPoints) > 0 { + for iNdEx := len(m.DataPoints) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.DataPoints[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ExponentialHistogram) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExponentialHistogram) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ExponentialHistogram) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.AggregationTemporality != 0 { + i = encodeVarint(dAtA, i, uint64(m.AggregationTemporality)) + i-- + dAtA[i] = 0x10 + } + if len(m.DataPoints) > 0 { + for iNdEx := len(m.DataPoints) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.DataPoints[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Summary) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Summary) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Summary) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.DataPoints) > 0 { + for iNdEx := len(m.DataPoints) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.DataPoints[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *NumberDataPoint) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NumberDataPoint) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *NumberDataPoint) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + 
copy(dAtA[i:], m.unknownFields) + } + if vtmsg, ok := m.Value.(interface { + MarshalToSizedBufferVT([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if m.Flags != 0 { + i = encodeVarint(dAtA, i, uint64(m.Flags)) + i-- + dAtA[i] = 0x40 + } + if len(m.Attributes) > 0 { + for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Attributes[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + } + if len(m.Exemplars) > 0 { + for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Exemplars[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x2a + } + } + if m.TimeUnixNano != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano)) + i-- + dAtA[i] = 0x19 + } + if m.StartTimeUnixNano != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.StartTimeUnixNano)) + i-- + dAtA[i] = 0x11 + } + return len(dAtA) - i, nil +} + +func (m *NumberDataPoint_AsDouble) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *NumberDataPoint_AsDouble) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + i := len(dAtA) + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.AsDouble)))) + i-- + dAtA[i] = 0x21 + return len(dAtA) - i, nil +} +func (m *NumberDataPoint_AsInt) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *NumberDataPoint_AsInt) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + i := len(dAtA) + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.AsInt)) + i-- + dAtA[i] = 0x31 + return len(dAtA) - i, nil +} +func (m *HistogramDataPoint) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HistogramDataPoint) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *HistogramDataPoint) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Max != nil { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.Max)))) + i-- + dAtA[i] = 0x61 + } + if m.Min != nil { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.Min)))) + i-- + dAtA[i] = 0x59 + } + if m.Flags != 0 { + i = encodeVarint(dAtA, i, uint64(m.Flags)) + i-- + dAtA[i] = 0x50 + } + if len(m.Attributes) > 0 { + for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Attributes[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + } + if len(m.Exemplars) > 0 { + for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Exemplars[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] 
= 0x42 + } + } + if len(m.ExplicitBounds) > 0 { + for iNdEx := len(m.ExplicitBounds) - 1; iNdEx >= 0; iNdEx-- { + f1 := math.Float64bits(float64(m.ExplicitBounds[iNdEx])) + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(f1)) + } + i = encodeVarint(dAtA, i, uint64(len(m.ExplicitBounds)*8)) + i-- + dAtA[i] = 0x3a + } + if len(m.BucketCounts) > 0 { + for iNdEx := len(m.BucketCounts) - 1; iNdEx >= 0; iNdEx-- { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.BucketCounts[iNdEx])) + } + i = encodeVarint(dAtA, i, uint64(len(m.BucketCounts)*8)) + i-- + dAtA[i] = 0x32 + } + if m.Sum != nil { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.Sum)))) + i-- + dAtA[i] = 0x29 + } + if m.Count != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.Count)) + i-- + dAtA[i] = 0x21 + } + if m.TimeUnixNano != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano)) + i-- + dAtA[i] = 0x19 + } + if m.StartTimeUnixNano != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.StartTimeUnixNano)) + i-- + dAtA[i] = 0x11 + } + return len(dAtA) - i, nil +} + +func (m *ExponentialHistogramDataPoint_Buckets) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExponentialHistogramDataPoint_Buckets) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ExponentialHistogramDataPoint_Buckets) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.BucketCounts) > 0 { + var pksize2 int + for _, num := range m.BucketCounts { + pksize2 += sov(uint64(num)) + } + i -= pksize2 + j1 := i + for _, num := range m.BucketCounts { + for num >= 1<<7 { + dAtA[j1] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j1++ + } + dAtA[j1] = uint8(num) + j1++ + } + i = encodeVarint(dAtA, i, uint64(pksize2)) + i-- + dAtA[i] = 0x12 + } + if m.Offset != 0 { + i = encodeVarint(dAtA, i, uint64((uint32(m.Offset)<<1)^uint32((m.Offset>>31)))) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ExponentialHistogramDataPoint) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExponentialHistogramDataPoint) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ExponentialHistogramDataPoint) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Max != nil { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.Max)))) + i-- + dAtA[i] = 0x69 + } + if m.Min != nil { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.Min)))) + i-- + dAtA[i] = 0x61 + } + if len(m.Exemplars) > 0 { + for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- { + size, err := 
m.Exemplars[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x5a + } + } + if m.Flags != 0 { + i = encodeVarint(dAtA, i, uint64(m.Flags)) + i-- + dAtA[i] = 0x50 + } + if m.Negative != nil { + size, err := m.Negative.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x4a + } + if m.Positive != nil { + size, err := m.Positive.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x42 + } + if m.ZeroCount != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.ZeroCount)) + i-- + dAtA[i] = 0x39 + } + if m.Scale != 0 { + i = encodeVarint(dAtA, i, uint64((uint32(m.Scale)<<1)^uint32((m.Scale>>31)))) + i-- + dAtA[i] = 0x30 + } + if m.Sum != nil { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.Sum)))) + i-- + dAtA[i] = 0x29 + } + if m.Count != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.Count)) + i-- + dAtA[i] = 0x21 + } + if m.TimeUnixNano != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano)) + i-- + dAtA[i] = 0x19 + } + if m.StartTimeUnixNano != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.StartTimeUnixNano)) + i-- + dAtA[i] = 0x11 + } + if len(m.Attributes) > 0 { + for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Attributes[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *SummaryDataPoint_ValueAtQuantile) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SummaryDataPoint_ValueAtQuantile) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SummaryDataPoint_ValueAtQuantile) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Value != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x11 + } + if m.Quantile != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Quantile)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *SummaryDataPoint) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SummaryDataPoint) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SummaryDataPoint) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Flags != 0 { + i = encodeVarint(dAtA, i, uint64(m.Flags)) + i-- + dAtA[i] = 0x40 
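+ // Fields are marshaled back-to-front: encodeVarint writes the value, then
+ // the key byte is placed before it; 0x40 = field 8 (Flags), wire type 0 (varint).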
+ } + if len(m.Attributes) > 0 { + for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Attributes[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + } + if len(m.QuantileValues) > 0 { + for iNdEx := len(m.QuantileValues) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.QuantileValues[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + } + if m.Sum != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Sum)))) + i-- + dAtA[i] = 0x29 + } + if m.Count != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.Count)) + i-- + dAtA[i] = 0x21 + } + if m.TimeUnixNano != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano)) + i-- + dAtA[i] = 0x19 + } + if m.StartTimeUnixNano != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.StartTimeUnixNano)) + i-- + dAtA[i] = 0x11 + } + return len(dAtA) - i, nil +} + +func (m *Exemplar) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Exemplar) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Exemplar) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if vtmsg, ok := m.Value.(interface { + MarshalToSizedBufferVT([]byte) (int, error) + }); ok { + size, err := vtmsg.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + } + if len(m.FilteredAttributes) > 0 { + for iNdEx := len(m.FilteredAttributes) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.FilteredAttributes[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + } + if len(m.TraceId) > 0 { + i -= len(m.TraceId) + copy(dAtA[i:], m.TraceId) + i = encodeVarint(dAtA, i, uint64(len(m.TraceId))) + i-- + dAtA[i] = 0x2a + } + if len(m.SpanId) > 0 { + i -= len(m.SpanId) + copy(dAtA[i:], m.SpanId) + i = encodeVarint(dAtA, i, uint64(len(m.SpanId))) + i-- + dAtA[i] = 0x22 + } + if m.TimeUnixNano != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano)) + i-- + dAtA[i] = 0x11 + } + return len(dAtA) - i, nil +} + +func (m *Exemplar_AsDouble) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Exemplar_AsDouble) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + i := len(dAtA) + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.AsDouble)))) + i-- + dAtA[i] = 0x19 + return len(dAtA) - i, nil +} +func (m *Exemplar_AsInt) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Exemplar_AsInt) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + i := len(dAtA) + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.AsInt)) + i-- + dAtA[i] = 0x31 + return len(dAtA) - i, nil +} +func (m *MetricsData) SizeVT() (n int) { + if m == nil { 
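+ // A nil message encodes to zero bytes, mirroring the nil check in MarshalVT.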
+ return 0 + } + var l int + _ = l + if len(m.ResourceMetrics) > 0 { + for _, e := range m.ResourceMetrics { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ResourceMetrics) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Resource != nil { + l = m.Resource.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if len(m.ScopeMetrics) > 0 { + for _, e := range m.ScopeMetrics { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + l = len(m.SchemaUrl) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ScopeMetrics) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Metrics) > 0 { + for _, e := range m.Metrics { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + l = len(m.SchemaUrl) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *Metric) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Description) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Unit) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if vtmsg, ok := m.Data.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + n += len(m.unknownFields) + return n +} + +func (m *Metric_Gauge) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Gauge != nil { + l = m.Gauge.SizeVT() + n += 1 + l + sov(uint64(l)) + } + return n +} +func (m *Metric_Sum) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Sum != nil { + l = m.Sum.SizeVT() + n += 1 + l + sov(uint64(l)) + } + return n +} +func (m *Metric_Histogram) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Histogram != nil { + l = m.Histogram.SizeVT() + n += 1 + l + sov(uint64(l)) + } + return n +} +func (m *Metric_ExponentialHistogram) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ExponentialHistogram != nil { + l = m.ExponentialHistogram.SizeVT() + n += 1 + l + sov(uint64(l)) + } + return n +} +func (m *Metric_Summary) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Summary != nil { + l = m.Summary.SizeVT() + n += 1 + l + sov(uint64(l)) + } + return n +} +func (m *Gauge) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.DataPoints) > 0 { + for _, e := range m.DataPoints { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *Sum) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.DataPoints) > 0 { + for _, e := range m.DataPoints { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if m.AggregationTemporality != 0 { + n += 1 + sov(uint64(m.AggregationTemporality)) + } + if m.IsMonotonic { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *Histogram) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.DataPoints) > 0 { + for _, e := range m.DataPoints { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if m.AggregationTemporality != 0 { + n += 1 + sov(uint64(m.AggregationTemporality)) + } + n += len(m.unknownFields) + return n +} + +func (m *ExponentialHistogram) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.DataPoints) > 0 { + for _, e := range m.DataPoints { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if 
m.AggregationTemporality != 0 { + n += 1 + sov(uint64(m.AggregationTemporality)) + } + n += len(m.unknownFields) + return n +} + +func (m *Summary) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.DataPoints) > 0 { + for _, e := range m.DataPoints { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *NumberDataPoint) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StartTimeUnixNano != 0 { + n += 9 + } + if m.TimeUnixNano != 0 { + n += 9 + } + if vtmsg, ok := m.Value.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + if len(m.Exemplars) > 0 { + for _, e := range m.Exemplars { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if len(m.Attributes) > 0 { + for _, e := range m.Attributes { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if m.Flags != 0 { + n += 1 + sov(uint64(m.Flags)) + } + n += len(m.unknownFields) + return n +} + +func (m *NumberDataPoint_AsDouble) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 9 + return n +} +func (m *NumberDataPoint_AsInt) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 9 + return n +} +func (m *HistogramDataPoint) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StartTimeUnixNano != 0 { + n += 9 + } + if m.TimeUnixNano != 0 { + n += 9 + } + if m.Count != 0 { + n += 9 + } + if m.Sum != nil { + n += 9 + } + if len(m.BucketCounts) > 0 { + n += 1 + sov(uint64(len(m.BucketCounts)*8)) + len(m.BucketCounts)*8 + } + if len(m.ExplicitBounds) > 0 { + n += 1 + sov(uint64(len(m.ExplicitBounds)*8)) + len(m.ExplicitBounds)*8 + } + if len(m.Exemplars) > 0 { + for _, e := range m.Exemplars { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if len(m.Attributes) > 0 { + for _, e := range m.Attributes { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if m.Flags != 0 { + n += 1 + sov(uint64(m.Flags)) + } + if m.Min != nil { + n += 9 + } + if m.Max != nil { + n += 9 + } + n += len(m.unknownFields) + return n +} + +func (m *ExponentialHistogramDataPoint_Buckets) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Offset != 0 { + n += 1 + soz(uint64(m.Offset)) + } + if len(m.BucketCounts) > 0 { + l = 0 + for _, e := range m.BucketCounts { + l += sov(uint64(e)) + } + n += 1 + sov(uint64(l)) + l + } + n += len(m.unknownFields) + return n +} + +func (m *ExponentialHistogramDataPoint) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Attributes) > 0 { + for _, e := range m.Attributes { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if m.StartTimeUnixNano != 0 { + n += 9 + } + if m.TimeUnixNano != 0 { + n += 9 + } + if m.Count != 0 { + n += 9 + } + if m.Sum != nil { + n += 9 + } + if m.Scale != 0 { + n += 1 + soz(uint64(m.Scale)) + } + if m.ZeroCount != 0 { + n += 9 + } + if m.Positive != nil { + l = m.Positive.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.Negative != nil { + l = m.Negative.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.Flags != 0 { + n += 1 + sov(uint64(m.Flags)) + } + if len(m.Exemplars) > 0 { + for _, e := range m.Exemplars { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if m.Min != nil { + n += 9 + } + if m.Max != nil { + n += 9 + } + n += len(m.unknownFields) + return n +} + +func (m *SummaryDataPoint_ValueAtQuantile) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Quantile != 0 { + n += 9 + } + if 
m.Value != 0 { + n += 9 + } + n += len(m.unknownFields) + return n +} + +func (m *SummaryDataPoint) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StartTimeUnixNano != 0 { + n += 9 + } + if m.TimeUnixNano != 0 { + n += 9 + } + if m.Count != 0 { + n += 9 + } + if m.Sum != 0 { + n += 9 + } + if len(m.QuantileValues) > 0 { + for _, e := range m.QuantileValues { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if len(m.Attributes) > 0 { + for _, e := range m.Attributes { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if m.Flags != 0 { + n += 1 + sov(uint64(m.Flags)) + } + n += len(m.unknownFields) + return n +} + +func (m *Exemplar) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TimeUnixNano != 0 { + n += 9 + } + if vtmsg, ok := m.Value.(interface{ SizeVT() int }); ok { + n += vtmsg.SizeVT() + } + l = len(m.SpanId) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.TraceId) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.FilteredAttributes) > 0 { + for _, e := range m.FilteredAttributes { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *Exemplar_AsDouble) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 9 + return n +} +func (m *Exemplar_AsInt) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 9 + return n +} +func (m *MetricsData) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricsData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricsData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceMetrics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceMetrics = append(m.ResourceMetrics, &ResourceMetrics{}) + if err := m.ResourceMetrics[len(m.ResourceMetrics)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
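+ // The skipped bytes were appended to unknownFields above; advance past them so decoding resumes at the next tag.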
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceMetrics) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceMetrics: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceMetrics: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resource == nil { + m.Resource = &Resource{} + } + if err := m.Resource.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScopeMetrics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ScopeMetrics = append(m.ScopeMetrics, &ScopeMetrics{}) + if err := m.ScopeMetrics[len(m.ScopeMetrics)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SchemaUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScopeMetrics) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScopeMetrics: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScopeMetrics: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Metrics = append(m.Metrics, &Metric{}) + if err := m.Metrics[len(m.Metrics)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SchemaUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
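+ // iNdEx was reset to preIndex first, so skippy covers the field's tag as well as its payload.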
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Metric) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Metric: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Metric: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Unit", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Unit = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Gauge", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if oneof, ok := m.Data.(*Metric_Gauge); ok { + if err := oneof.Gauge.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + } else { + v := &Gauge{} + if err := v.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Data = 
&Metric_Gauge{Gauge: v} + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if oneof, ok := m.Data.(*Metric_Sum); ok { + if err := oneof.Sum.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + } else { + v := &Sum{} + if err := v.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Data = &Metric_Sum{Sum: v} + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Histogram", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if oneof, ok := m.Data.(*Metric_Histogram); ok { + if err := oneof.Histogram.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + } else { + v := &Histogram{} + if err := v.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Data = &Metric_Histogram{Histogram: v} + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExponentialHistogram", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if oneof, ok := m.Data.(*Metric_ExponentialHistogram); ok { + if err := oneof.ExponentialHistogram.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + } else { + v := &ExponentialHistogram{} + if err := v.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Data = &Metric_ExponentialHistogram{ExponentialHistogram: v} + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Summary", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if oneof, ok := m.Data.(*Metric_Summary); ok { + if err := oneof.Summary.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + } else { + v := &Summary{} + if err := v.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Data = &Metric_Summary{Summary: v} + } + iNdEx = postIndex + default: 
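+ // Field numbers outside the known set (1-3, 5, 7, 9-11) are skipped generically and preserved in unknownFields.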
+ iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Gauge) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Gauge: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Gauge: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DataPoints = append(m.DataPoints, &NumberDataPoint{}) + if err := m.DataPoints[len(m.DataPoints)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Sum) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Sum: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Sum: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DataPoints = append(m.DataPoints, &NumberDataPoint{}) + if err := m.DataPoints[len(m.DataPoints)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType) + } + m.AggregationTemporality = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AggregationTemporality |= AggregationTemporality(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsMonotonic", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IsMonotonic = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Histogram) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Histogram: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Histogram: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DataPoints = append(m.DataPoints, &HistogramDataPoint{}) + if err := m.DataPoints[len(m.DataPoints)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType) + } + m.AggregationTemporality = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AggregationTemporality |= AggregationTemporality(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExponentialHistogram) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExponentialHistogram: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExponentialHistogram: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DataPoints = append(m.DataPoints, &ExponentialHistogramDataPoint{}) + if err := m.DataPoints[len(m.DataPoints)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AggregationTemporality", wireType) + } + m.AggregationTemporality = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AggregationTemporality |= AggregationTemporality(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Summary) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Summary: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Summary: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DataPoints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DataPoints = append(m.DataPoints, &SummaryDataPoint{}) + if err := m.DataPoints[len(m.DataPoints)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NumberDataPoint) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NumberDataPoint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NumberDataPoint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType) + } + m.StartTimeUnixNano = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.StartTimeUnixNano = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 3: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType) + } + m.TimeUnixNano = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.TimeUnixNano = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 4: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field AsDouble", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = &NumberDataPoint_AsDouble{AsDouble: float64(math.Float64frombits(v))} + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Exemplars = append(m.Exemplars, &Exemplar{}) + if err := m.Exemplars[len(m.Exemplars)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field AsInt", wireType) + } + var v int64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = int64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = &NumberDataPoint_AsInt{AsInt: v} + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = append(m.Attributes, &KeyValue{}) + if err := m.Attributes[len(m.Attributes)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType) + } + m.Flags = 0 + 
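+ // Decode a base-128 varint: accumulate 7 payload bits per byte until a byte with the 0x80 continuation bit clear terminates the value.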
for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Flags |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HistogramDataPoint) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HistogramDataPoint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HistogramDataPoint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType) + } + m.StartTimeUnixNano = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.StartTimeUnixNano = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 3: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType) + } + m.TimeUnixNano = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.TimeUnixNano = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 4: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + m.Count = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.Count = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 5: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.Sum = &v2 + case 6: + if wireType == 1 { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.BucketCounts = append(m.BucketCounts, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + elementCount = packedLen / 8 + if elementCount != 0 && len(m.BucketCounts) == 0 { + m.BucketCounts = make([]uint64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.BucketCounts = append(m.BucketCounts, v) + } + } else { + return fmt.Errorf("proto: 
wrong wireType = %d for field BucketCounts", wireType) + } + case 7: + if wireType == 1 { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.ExplicitBounds = append(m.ExplicitBounds, v2) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + elementCount = packedLen / 8 + if elementCount != 0 && len(m.ExplicitBounds) == 0 { + m.ExplicitBounds = make([]float64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.ExplicitBounds = append(m.ExplicitBounds, v2) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field ExplicitBounds", wireType) + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Exemplars = append(m.Exemplars, &Exemplar{}) + if err := m.Exemplars[len(m.Exemplars)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = append(m.Attributes, &KeyValue{}) + if err := m.Attributes[len(m.Attributes)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType) + } + m.Flags = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Flags |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 11: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.Min = &v2 + case 12: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) + } + var 
v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.Max = &v2 + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExponentialHistogramDataPoint_Buckets) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExponentialHistogramDataPoint_Buckets: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExponentialHistogramDataPoint_Buckets: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31)) + m.Offset = v + case 2: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.BucketCounts = append(m.BucketCounts, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.BucketCounts) == 0 { + m.BucketCounts = make([]uint64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.BucketCounts = append(m.BucketCounts, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field BucketCounts", wireType) + } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExponentialHistogramDataPoint) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExponentialHistogramDataPoint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExponentialHistogramDataPoint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = append(m.Attributes, &KeyValue{}) + if err := m.Attributes[len(m.Attributes)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType) + } + m.StartTimeUnixNano = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.StartTimeUnixNano = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 3: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType) + } + m.TimeUnixNano = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.TimeUnixNano = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 4: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + m.Count = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.Count = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 5: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.Sum = &v2 + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Scale", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + v = int32((uint32(v) >> 1) ^ uint32(((v&1)<<31)>>31)) + m.Scale = v + case 7: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field ZeroCount", wireType) + } + m.ZeroCount = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.ZeroCount = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Positive", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
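+ // shift >= 64 means the varint ran past 10 bytes and cannot fit in a uint64.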
return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Positive == nil { + m.Positive = &ExponentialHistogramDataPoint_Buckets{} + } + if err := m.Positive.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Negative", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Negative == nil { + m.Negative = &ExponentialHistogramDataPoint_Buckets{} + } + if err := m.Negative.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType) + } + m.Flags = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Flags |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Exemplars = append(m.Exemplars, &Exemplar{}) + if err := m.Exemplars[len(m.Exemplars)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.Min = &v2 + case 13: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + v2 := float64(math.Float64frombits(v)) + m.Max = &v2 + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SummaryDataPoint_ValueAtQuantile) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SummaryDataPoint_ValueAtQuantile: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SummaryDataPoint_ValueAtQuantile: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Quantile", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Quantile = float64(math.Float64frombits(v)) + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SummaryDataPoint) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SummaryDataPoint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SummaryDataPoint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType) + } + m.StartTimeUnixNano = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.StartTimeUnixNano = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 3: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType) + } + m.TimeUnixNano = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.TimeUnixNano = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 4: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + m.Count = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.Count = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 5: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Sum", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = 
uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Sum = float64(math.Float64frombits(v)) + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field QuantileValues", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.QuantileValues = append(m.QuantileValues, &SummaryDataPoint_ValueAtQuantile{}) + if err := m.QuantileValues[len(m.QuantileValues)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = append(m.Attributes, &KeyValue{}) + if err := m.Attributes[len(m.Attributes)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType) + } + m.Flags = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Flags |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Exemplar) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Exemplar: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Exemplar: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType) + } + m.TimeUnixNano = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.TimeUnixNano = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 3: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field AsDouble", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = &Exemplar_AsDouble{AsDouble: float64(math.Float64frombits(v))} + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SpanId = append(m.SpanId[:0], dAtA[iNdEx:postIndex]...) + if m.SpanId == nil { + m.SpanId = []byte{} + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TraceId = append(m.TraceId[:0], dAtA[iNdEx:postIndex]...) 
+ if m.TraceId == nil { + m.TraceId = []byte{} + } + iNdEx = postIndex + case 6: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field AsInt", wireType) + } + var v int64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = int64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = &Exemplar_AsInt{AsInt: v} + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FilteredAttributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FilteredAttributes = append(m.FilteredAttributes, &KeyValue{}) + if err := m.FilteredAttributes[len(m.FilteredAttributes)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} diff --git a/lib/protoparser/opentelemetry/pb/resource.pb.go b/lib/protoparser/opentelemetry/pb/resource.pb.go new file mode 100644 index 000000000..a89c817b2 --- /dev/null +++ b/lib/protoparser/opentelemetry/pb/resource.pb.go @@ -0,0 +1,48 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.12 +// source: lib/protoparser/opentelemetry/proto/resource.proto + +package pb + +// Resource information. +type Resource struct { + unknownFields []byte + + // Set of attributes that describe the resource. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attributes []*KeyValue `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes,omitempty"` + // dropped_attributes_count is the number of dropped attributes. If the value is 0, then + // no attributes were dropped. 
+ DroppedAttributesCount uint32 `protobuf:"varint,2,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` +} + +func (x *Resource) GetAttributes() []*KeyValue { + if x != nil { + return x.Attributes + } + return nil +} + +func (x *Resource) GetDroppedAttributesCount() uint32 { + if x != nil { + return x.DroppedAttributesCount + } + return 0 +} diff --git a/lib/protoparser/opentelemetry/pb/resource_vtproto.pb.go b/lib/protoparser/opentelemetry/pb/resource_vtproto.pb.go new file mode 100644 index 000000000..27eb573e0 --- /dev/null +++ b/lib/protoparser/opentelemetry/pb/resource_vtproto.pb.go @@ -0,0 +1,184 @@ +// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. +// protoc-gen-go-vtproto version: v0.4.0 +// source: lib/protoparser/opentelemetry/proto/resource.proto + +package pb + +import ( + fmt "fmt" + io "io" +) + +func (m *Resource) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Resource) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Resource) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.DroppedAttributesCount != 0 { + i = encodeVarint(dAtA, i, uint64(m.DroppedAttributesCount)) + i-- + dAtA[i] = 0x10 + } + if len(m.Attributes) > 0 { + for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Attributes[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Resource) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Attributes) > 0 { + for _, e := range m.Attributes { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if m.DroppedAttributesCount != 0 { + n += 1 + sov(uint64(m.DroppedAttributesCount)) + } + n += len(m.unknownFields) + return n +} + +func (m *Resource) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Resource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Resource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = 
append(m.Attributes, &KeyValue{})
+			if err := m.Attributes[len(m.Attributes)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType)
+			}
+			m.DroppedAttributesCount = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflow
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.DroppedAttributesCount |= uint32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skip(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLength
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
diff --git a/lib/protoparser/opentelemetry/proto/README.md b/lib/protoparser/opentelemetry/proto/README.md
new file mode 100644
index 000000000..13cba6806
--- /dev/null
+++ b/lib/protoparser/opentelemetry/proto/README.md
@@ -0,0 +1,32 @@
+# OpenTelemetry proto files
+
+Content copied from https://github.com/open-telemetry/opentelemetry-proto/tree/main/opentelemetry/proto
+
+## Requirements
+- protoc binary [link](http://google.github.io/proto-lens/installing-protoc.html)
+- protoc-gen-go plugin [link](https://developers.google.com/protocol-buffers/docs/reference/go-generated)
+- custom marshaller plugin [link](https://github.com/planetscale/vtprotobuf)
+
+## Modifications
+
+The original proto files were modified as follows:
+1) changed the package name to `opentelemetry`.
+2) changed the import paths to match the new directory names.
+3) changed `go_package` to `opentelemetry/pb`.
+
+
+## How to generate pb files
+
+Run the following command:
+```bash
+export GOBIN=~/go/bin
+protoc -I=. --go_out=./lib/protoparser/opentelemetry --go-vtproto_out=./lib/protoparser/opentelemetry --plugin protoc-gen-go-vtproto="$GOBIN/protoc-gen-go-vtproto" --go-vtproto_opt=features=marshal+unmarshal+size lib/protoparser/opentelemetry/proto/*.proto
+```
+
+The generated code will be placed at `lib/protoparser/opentelemetry/opentelemetry/`.
+
+Then edit it manually:
+
+1) remove all external imports
+2) remove all unneeded methods
+3) replace `unknownFields` with `unknownFields []byte`
\ No newline at end of file
diff --git a/lib/protoparser/opentelemetry/proto/common.proto b/lib/protoparser/opentelemetry/proto/common.proto
new file mode 100644
index 000000000..751778622
--- /dev/null
+++ b/lib/protoparser/opentelemetry/proto/common.proto
@@ -0,0 +1,67 @@
+// Copyright 2019, OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
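To illustrate what the hand-edited `pb` package ends up providing, here is a minimal round-trip sketch. It is not part of the patch; it assumes the usual generated oneof wrapper type `pb.AnyValue_StringValue`, alongside the `MarshalVT`/`UnmarshalVT` methods and getters that appear in this diff:

```go
package main

import (
	"fmt"
	"log"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentelemetry/pb"
)

func main() {
	// Build a Resource with a single attribute and round-trip it through
	// the generated vtproto marshaling code.
	src := &pb.Resource{
		Attributes: []*pb.KeyValue{{
			Key:   "service.name",
			Value: &pb.AnyValue{Value: &pb.AnyValue_StringValue{StringValue: "demo"}},
		}},
	}
	data, err := src.MarshalVT()
	if err != nil {
		log.Fatalf("cannot marshal: %v", err)
	}
	var dst pb.Resource
	if err := dst.UnmarshalVT(data); err != nil {
		log.Fatalf("cannot unmarshal: %v", err)
	}
	fmt.Println(dst.GetAttributes()[0].Key) // Output: service.name
}
```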
+ +syntax = "proto3"; + +package opentelemetry; + +option csharp_namespace = "OpenTelemetry.Proto.Common.V1"; +option java_multiple_files = true; +option java_package = "io.opentelemetry.proto.common.v1"; +option java_outer_classname = "CommonProto"; +option go_package = "opentelemetry/pb"; + +// AnyValue is used to represent any type of attribute value. AnyValue may contain a +// primitive value such as a string or integer or it may contain an arbitrary nested +// object containing arrays, key-value lists and primitives. +message AnyValue { + // The value is one of the listed fields. It is valid for all values to be unspecified + // in which case this AnyValue is considered to be "empty". + oneof value { + string string_value = 1; + bool bool_value = 2; + int64 int_value = 3; + double double_value = 4; + ArrayValue array_value = 5; + KeyValueList kvlist_value = 6; + bytes bytes_value = 7; + } +} + +// ArrayValue is a list of AnyValue messages. We need ArrayValue as a message +// since oneof in AnyValue does not allow repeated fields. +message ArrayValue { + // Array of values. The array may be empty (contain 0 elements). + repeated AnyValue values = 1; +} + +// KeyValueList is a list of KeyValue messages. We need KeyValueList as a message +// since `oneof` in AnyValue does not allow repeated fields. Everywhere else where we need +// a list of KeyValue messages (e.g. in Span) we use `repeated KeyValue` directly to +// avoid unnecessary extra wrapping (which slows down the protocol). The 2 approaches +// are semantically equivalent. +message KeyValueList { + // A collection of key/value pairs of key-value pairs. The list may be empty (may + // contain 0 elements). + // The keys MUST be unique (it is not allowed to have more than one + // value with the same key). + repeated KeyValue values = 1; +} + +// KeyValue is a key-value pair that is used to store Span attributes, Link +// attributes, etc. +message KeyValue { + string key = 1; + AnyValue value = 2; +} diff --git a/lib/protoparser/opentelemetry/proto/metrics.proto b/lib/protoparser/opentelemetry/proto/metrics.proto new file mode 100644 index 000000000..b2dae7c14 --- /dev/null +++ b/lib/protoparser/opentelemetry/proto/metrics.proto @@ -0,0 +1,661 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package opentelemetry; + +import "lib/protoparser/opentelemetry/proto/common.proto"; +import "lib/protoparser/opentelemetry/proto/resource.proto"; + +option csharp_namespace = "OpenTelemetry.Proto.Metrics.V1"; +option java_multiple_files = true; +option java_package = "io.opentelemetry.proto.metrics.v1"; +option java_outer_classname = "MetricsProto"; +option go_package = "opentelemetry/pb"; + +// MetricsData represents the metrics data that can be stored in a persistent +// storage, OR can be embedded by other protocols that transfer OTLP metrics +// data but do not implement the OTLP protocol. 
+// +// The main difference between this message and collector protocol is that +// in this message there will not be any "control" or "metadata" specific to +// OTLP protocol. +// +// When new fields are added into this message, the OTLP request MUST be updated +// as well. +message MetricsData { + // An array of ResourceMetrics. + // For data coming from a single resource this array will typically contain + // one element. Intermediary nodes that receive data from multiple origins + // typically batch the data before forwarding further and in that case this + // array will contain multiple elements. + repeated ResourceMetrics resource_metrics = 1; +} + +// A collection of ScopeMetrics from a Resource. +message ResourceMetrics { + reserved 1000; + + // The resource for the metrics in this message. + // If this field is not set then no resource info is known. + Resource resource = 1; + + // A list of metrics that originate from a resource. + repeated ScopeMetrics scope_metrics = 2; + + // This schema_url applies to the data in the "resource" field. It does not apply + // to the data in the "scope_metrics" field which have their own schema_url field. + string schema_url = 3; +} + +// A collection of Metrics produced by an Scope. +message ScopeMetrics { + // A list of metrics that originate from an instrumentation library. + repeated Metric metrics = 2; + + // This schema_url applies to all metrics in the "metrics" field. + string schema_url = 3; +} + +// Defines a Metric which has one or more timeseries. The following is a +// brief summary of the Metric data model. For more details, see: +// +// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md +// +// +// The data model and relation between entities is shown in the +// diagram below. Here, "DataPoint" is the term used to refer to any +// one of the specific data point value types, and "points" is the term used +// to refer to any one of the lists of points contained in the Metric. +// +// - Metric is composed of a metadata and data. +// - Metadata part contains a name, description, unit. +// - Data is one of the possible types (Sum, Gauge, Histogram, Summary). +// - DataPoint contains timestamps, attributes, and one of the possible value type +// fields. +// +// Metric +// +------------+ +// |name | +// |description | +// |unit | +------------------------------------+ +// |data |---> |Gauge, Sum, Histogram, Summary, ... | +// +------------+ +------------------------------------+ +// +// Data [One of Gauge, Sum, Histogram, Summary, ...] +// +-----------+ +// |... | // Metadata about the Data. +// |points |--+ +// +-----------+ | +// | +---------------------------+ +// | |DataPoint 1 | +// v |+------+------+ +------+ | +// +-----+ ||label |label |...|label | | +// | 1 |-->||value1|value2|...|valueN| | +// +-----+ |+------+------+ +------+ | +// | . | |+-----+ | +// | . | ||value| | +// | . | |+-----+ | +// | . | +---------------------------+ +// | . | . +// | . | . +// | . | . +// | . | +---------------------------+ +// | . | |DataPoint M | +// +-----+ |+------+------+ +------+ | +// | M |-->||label |label |...|label | | +// +-----+ ||value1|value2|...|valueN| | +// |+------+------+ +------+ | +// |+-----+ | +// ||value| | +// |+-----+ | +// +---------------------------+ +// +// Each distinct type of DataPoint represents the output of a specific +// aggregation function, the result of applying the DataPoint's +// associated function of to one or more measurements. 
+// +// All DataPoint types have three common fields: +// - Attributes includes key-value pairs associated with the data point +// - TimeUnixNano is required, set to the end time of the aggregation +// - StartTimeUnixNano is optional, but strongly encouraged for DataPoints +// having an AggregationTemporality field, as discussed below. +// +// Both TimeUnixNano and StartTimeUnixNano values are expressed as +// UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. +// +// # TimeUnixNano +// +// This field is required, having consistent interpretation across +// DataPoint types. TimeUnixNano is the moment corresponding to when +// the data point's aggregate value was captured. +// +// Data points with the 0 value for TimeUnixNano SHOULD be rejected +// by consumers. +// +// # StartTimeUnixNano +// +// StartTimeUnixNano in general allows detecting when a sequence of +// observations is unbroken. This field indicates to consumers the +// start time for points with cumulative and delta +// AggregationTemporality, and it should be included whenever possible +// to support correct rate calculation. Although it may be omitted +// when the start time is truly unknown, setting StartTimeUnixNano is +// strongly encouraged. +message Metric { + reserved 4, 6, 8; + + // name of the metric, including its DNS name prefix. It must be unique. + string name = 1; + + // description of the metric, which can be used in documentation. + string description = 2; + + // unit in which the metric value is reported. Follows the format + // described by http://unitsofmeasure.org/ucum.html. + string unit = 3; + + // Data determines the aggregation type (if any) of the metric, what is the + // reported value type for the data points, as well as the relatationship to + // the time interval over which they are reported. + oneof data { + Gauge gauge = 5; + Sum sum = 7; + Histogram histogram = 9; + ExponentialHistogram exponential_histogram = 10; + Summary summary = 11; + } +} + +// Gauge represents the type of a scalar metric that always exports the +// "current value" for every data point. It should be used for an "unknown" +// aggregation. +// +// A Gauge does not support different aggregation temporalities. Given the +// aggregation is unknown, points cannot be combined using the same +// aggregation, regardless of aggregation temporalities. Therefore, +// AggregationTemporality is not included. Consequently, this also means +// "StartTimeUnixNano" is ignored for all data points. +message Gauge { + repeated NumberDataPoint data_points = 1; +} + +// Sum represents the type of a scalar metric that is calculated as a sum of all +// reported measurements over a time interval. +message Sum { + repeated NumberDataPoint data_points = 1; + + // aggregation_temporality describes if the aggregator reports delta changes + // since last report time, or cumulative changes since a fixed start time. + AggregationTemporality aggregation_temporality = 2; + + // If "true" means that the sum is monotonic. + bool is_monotonic = 3; +} + +// Histogram represents the type of a metric that is calculated by aggregating +// as a Histogram of all reported measurements over a time interval. +message Histogram { + repeated HistogramDataPoint data_points = 1; + + // aggregation_temporality describes if the aggregator reports delta changes + // since last report time, or cumulative changes since a fixed start time. 
+ AggregationTemporality aggregation_temporality = 2; +} + +// ExponentialHistogram represents the type of a metric that is calculated by aggregating +// as a ExponentialHistogram of all reported double measurements over a time interval. +message ExponentialHistogram { + repeated ExponentialHistogramDataPoint data_points = 1; + + // aggregation_temporality describes if the aggregator reports delta changes + // since last report time, or cumulative changes since a fixed start time. + AggregationTemporality aggregation_temporality = 2; +} + +// Summary metric data are used to convey quantile summaries, +// a Prometheus (see: https://prometheus.io/docs/concepts/metric_types/#summary) +// and OpenMetrics (see: https://github.com/OpenObservability/OpenMetrics/blob/4dbf6075567ab43296eed941037c12951faafb92/protos/prometheus.proto#L45) +// data type. These data points cannot always be merged in a meaningful way. +// While they can be useful in some applications, histogram data points are +// recommended for new applications. +message Summary { + repeated SummaryDataPoint data_points = 1; +} + +// AggregationTemporality defines how a metric aggregator reports aggregated +// values. It describes how those values relate to the time interval over +// which they are aggregated. +enum AggregationTemporality { + // UNSPECIFIED is the default AggregationTemporality, it MUST not be used. + AGGREGATION_TEMPORALITY_UNSPECIFIED = 0; + + // DELTA is an AggregationTemporality for a metric aggregator which reports + // changes since last report time. Successive metrics contain aggregation of + // values from continuous and non-overlapping intervals. + // + // The values for a DELTA metric are based only on the time interval + // associated with one measurement cycle. There is no dependency on + // previous measurements like is the case for CUMULATIVE metrics. + // + // For example, consider a system measuring the number of requests that + // it receives and reports the sum of these requests every second as a + // DELTA metric: + // + // 1. The system starts receiving at time=t_0. + // 2. A request is received, the system measures 1 request. + // 3. A request is received, the system measures 1 request. + // 4. A request is received, the system measures 1 request. + // 5. The 1 second collection cycle ends. A metric is exported for the + // number of requests received over the interval of time t_0 to + // t_0+1 with a value of 3. + // 6. A request is received, the system measures 1 request. + // 7. A request is received, the system measures 1 request. + // 8. The 1 second collection cycle ends. A metric is exported for the + // number of requests received over the interval of time t_0+1 to + // t_0+2 with a value of 2. + AGGREGATION_TEMPORALITY_DELTA = 1; + + // CUMULATIVE is an AggregationTemporality for a metric aggregator which + // reports changes since a fixed start time. This means that current values + // of a CUMULATIVE metric depend on all previous measurements since the + // start time. Because of this, the sender is required to retain this state + // in some form. If this state is lost or invalidated, the CUMULATIVE metric + // values MUST be reset and a new fixed start time following the last + // reported measurement time sent MUST be used. + // + // For example, consider a system measuring the number of requests that + // it receives and reports the sum of these requests every second as a + // CUMULATIVE metric: + // + // 1. The system starts receiving at time=t_0. + // 2. 
A request is received, the system measures 1 request. + // 3. A request is received, the system measures 1 request. + // 4. A request is received, the system measures 1 request. + // 5. The 1 second collection cycle ends. A metric is exported for the + // number of requests received over the interval of time t_0 to + // t_0+1 with a value of 3. + // 6. A request is received, the system measures 1 request. + // 7. A request is received, the system measures 1 request. + // 8. The 1 second collection cycle ends. A metric is exported for the + // number of requests received over the interval of time t_0 to + // t_0+2 with a value of 5. + // 9. The system experiences a fault and loses state. + // 10. The system recovers and resumes receiving at time=t_1. + // 11. A request is received, the system measures 1 request. + // 12. The 1 second collection cycle ends. A metric is exported for the + // number of requests received over the interval of time t_1 to + // t_0+1 with a value of 1. + // + // Note: Even though, when reporting changes since last report time, using + // CUMULATIVE is valid, it is not recommended. This may cause problems for + // systems that do not use start_time to determine when the aggregation + // value was reset (e.g. Prometheus). + AGGREGATION_TEMPORALITY_CUMULATIVE = 2; +} + +// DataPointFlags is defined as a protobuf 'uint32' type and is to be used as a +// bit-field representing 32 distinct boolean flags. Each flag defined in this +// enum is a bit-mask. To test the presence of a single flag in the flags of +// a data point, for example, use an expression like: +// +// (point.flags & FLAG_NO_RECORDED_VALUE) == FLAG_NO_RECORDED_VALUE +// +enum DataPointFlags { + FLAG_NONE = 0; + + // This DataPoint is valid but has no recorded value. This value + // SHOULD be used to reflect explicitly missing data in a series, as + // for an equivalent to the Prometheus "staleness marker". + FLAG_NO_RECORDED_VALUE = 1; + + // Bits 2-31 are reserved for future use. +} + +// NumberDataPoint is a single data point in a timeseries that describes the +// time-varying scalar value of a metric. +message NumberDataPoint { + reserved 1; + + // The set of key/value pairs that uniquely identify the timeseries from + // where this point belongs. The list may be empty (may contain 0 elements). + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + repeated KeyValue attributes = 7; + + // StartTimeUnixNano is optional but strongly encouraged, see the + // the detailed comments above Metric. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + fixed64 start_time_unix_nano = 2; + + // TimeUnixNano is required, see the detailed comments above Metric. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + fixed64 time_unix_nano = 3; + + // The value itself. A point is considered invalid when one of the recognized + // value fields is not present inside this oneof. + oneof value { + double as_double = 4; + sfixed64 as_int = 6; + } + + // (Optional) List of exemplars collected from + // measurements that were used to form the data point + repeated Exemplar exemplars = 5; + + // Flags that apply to this specific data point. See DataPointFlags + // for the available flags and their meaning. + uint32 flags = 8; +} + +// HistogramDataPoint is a single data point in a timeseries that describes the +// time-varying values of a Histogram. 
A Histogram contains summary statistics +// for a population of values, it may optionally contain the distribution of +// those values across a set of buckets. +// +// If the histogram contains the distribution of values, then both +// "explicit_bounds" and "bucket counts" fields must be defined. +// If the histogram does not contain the distribution of values, then both +// "explicit_bounds" and "bucket_counts" must be omitted and only "count" and +// "sum" are known. +message HistogramDataPoint { + reserved 1; + + // The set of key/value pairs that uniquely identify the timeseries from + // where this point belongs. The list may be empty (may contain 0 elements). + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + repeated KeyValue attributes = 9; + + // StartTimeUnixNano is optional but strongly encouraged, see the + // the detailed comments above Metric. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + fixed64 start_time_unix_nano = 2; + + // TimeUnixNano is required, see the detailed comments above Metric. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + fixed64 time_unix_nano = 3; + + // count is the number of values in the population. Must be non-negative. This + // value must be equal to the sum of the "count" fields in buckets if a + // histogram is provided. + fixed64 count = 4; + + // sum of the values in the population. If count is zero then this field + // must be zero. + // + // Note: Sum should only be filled out when measuring non-negative discrete + // events, and is assumed to be monotonic over the values of these events. + // Negative events *can* be recorded, but sum should not be filled out when + // doing so. This is specifically to enforce compatibility w/ OpenMetrics, + // see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram + optional double sum = 5; + + // bucket_counts is an optional field contains the count values of histogram + // for each bucket. + // + // The sum of the bucket_counts must equal the value in the count field. + // + // The number of elements in bucket_counts array must be by one greater than + // the number of elements in explicit_bounds array. + repeated fixed64 bucket_counts = 6; + + // explicit_bounds specifies buckets with explicitly defined bounds for values. + // + // The boundaries for bucket at index i are: + // + // (-infinity, explicit_bounds[i]] for i == 0 + // (explicit_bounds[i-1], explicit_bounds[i]] for 0 < i < size(explicit_bounds) + // (explicit_bounds[i-1], +infinity) for i == size(explicit_bounds) + // + // The values in the explicit_bounds array must be strictly increasing. + // + // Histogram buckets are inclusive of their upper boundary, except the last + // bucket where the boundary is at infinity. This format is intentionally + // compatible with the OpenMetrics histogram definition. + repeated double explicit_bounds = 7; + + // (Optional) List of exemplars collected from + // measurements that were used to form the data point + repeated Exemplar exemplars = 8; + + // Flags that apply to this specific data point. See DataPointFlags + // for the available flags and their meaning. + uint32 flags = 10; + + // min is the minimum value over (start_time, end_time]. + optional double min = 11; + + // max is the maximum value over (start_time, end_time]. 
+ optional double max = 12; +} + +// ExponentialHistogramDataPoint is a single data point in a timeseries that describes the +// time-varying values of a ExponentialHistogram of double values. A ExponentialHistogram contains +// summary statistics for a population of values, it may optionally contain the +// distribution of those values across a set of buckets. +// +message ExponentialHistogramDataPoint { + // The set of key/value pairs that uniquely identify the timeseries from + // where this point belongs. The list may be empty (may contain 0 elements). + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + repeated KeyValue attributes = 1; + + // StartTimeUnixNano is optional but strongly encouraged, see the + // the detailed comments above Metric. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + fixed64 start_time_unix_nano = 2; + + // TimeUnixNano is required, see the detailed comments above Metric. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + fixed64 time_unix_nano = 3; + + // count is the number of values in the population. Must be + // non-negative. This value must be equal to the sum of the "bucket_counts" + // values in the positive and negative Buckets plus the "zero_count" field. + fixed64 count = 4; + + // sum of the values in the population. If count is zero then this field + // must be zero. + // + // Note: Sum should only be filled out when measuring non-negative discrete + // events, and is assumed to be monotonic over the values of these events. + // Negative events *can* be recorded, but sum should not be filled out when + // doing so. This is specifically to enforce compatibility w/ OpenMetrics, + // see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram + optional double sum = 5; + + // scale describes the resolution of the histogram. Boundaries are + // located at powers of the base, where: + // + // base = (2^(2^-scale)) + // + // The histogram bucket identified by `index`, a signed integer, + // contains values that are greater than (base^index) and + // less than or equal to (base^(index+1)). + // + // The positive and negative ranges of the histogram are expressed + // separately. Negative values are mapped by their absolute value + // into the negative range using the same scale as the positive range. + // + // scale is not restricted by the protocol, as the permissible + // values depend on the range of the data. + sint32 scale = 6; + + // zero_count is the count of values that are either exactly zero or + // within the region considered zero by the instrumentation at the + // tolerated degree of precision. This bucket stores values that + // cannot be expressed using the standard exponential formula as + // well as values that have been rounded to zero. + // + // Implementations MAY consider the zero bucket to have probability + // mass equal to (zero_count / count). + fixed64 zero_count = 7; + + // positive carries the positive range of exponential bucket counts. + Buckets positive = 8; + + // negative carries the negative range of exponential bucket counts. + Buckets negative = 9; + + // Buckets are a set of bucket counts, encoded in a contiguous array + // of counts. + message Buckets { + // Offset is the bucket index of the first entry in the bucket_counts array. + // + // Note: This uses a varint encoding as a simple form of compression. 
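The scale/base relationship described above is easier to see with concrete numbers. The following standalone sketch (not part of the patch) computes the `(lower, upper]` boundaries of an exponential-histogram bucket from its scale and index, using the formula `base = 2^(2^-scale)` from the comment:

```go
package main

import (
	"fmt"
	"math"
)

// bucketBounds returns the (lower, upper] boundaries of the exponential
// histogram bucket at the given index for the given scale.
func bucketBounds(scale, index int32) (lower, upper float64) {
	base := math.Pow(2, math.Pow(2, -float64(scale)))
	return math.Pow(base, float64(index)), math.Pow(base, float64(index+1))
}

func main() {
	// At scale 0 the base is 2, so bucket 0 covers (1, 2] and bucket 3 covers (8, 16].
	for _, idx := range []int32{0, 3} {
		lo, hi := bucketBounds(0, idx)
		fmt.Printf("scale=0 index=%d -> (%g, %g]\n", idx, lo, hi)
	}
	// At scale 1 the base is sqrt(2) ~= 1.414, giving twice the bucket resolution.
	lo, hi := bucketBounds(1, 0)
	fmt.Printf("scale=1 index=0 -> (%g, %g]\n", lo, hi)
}
```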
+ sint32 offset = 1; + + // Count is an array of counts, where count[i] carries the count + // of the bucket at index (offset+i). count[i] is the count of + // values greater than base^(offset+i) and less or equal to than + // base^(offset+i+1). + // + // Note: By contrast, the explicit HistogramDataPoint uses + // fixed64. This field is expected to have many buckets, + // especially zeros, so uint64 has been selected to ensure + // varint encoding. + repeated uint64 bucket_counts = 2; + } + + // Flags that apply to this specific data point. See DataPointFlags + // for the available flags and their meaning. + uint32 flags = 10; + + // (Optional) List of exemplars collected from + // measurements that were used to form the data point + repeated Exemplar exemplars = 11; + + // min is the minimum value over (start_time, end_time]. + optional double min = 12; + + // max is the maximum value over (start_time, end_time]. + optional double max = 13; +} + +// SummaryDataPoint is a single data point in a timeseries that describes the +// time-varying values of a Summary metric. +message SummaryDataPoint { + reserved 1; + + // The set of key/value pairs that uniquely identify the timeseries from + // where this point belongs. The list may be empty (may contain 0 elements). + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + repeated KeyValue attributes = 7; + + // StartTimeUnixNano is optional but strongly encouraged, see the + // the detailed comments above Metric. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + fixed64 start_time_unix_nano = 2; + + // TimeUnixNano is required, see the detailed comments above Metric. + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + fixed64 time_unix_nano = 3; + + // count is the number of values in the population. Must be non-negative. + fixed64 count = 4; + + // sum of the values in the population. If count is zero then this field + // must be zero. + // + // Note: Sum should only be filled out when measuring non-negative discrete + // events, and is assumed to be monotonic over the values of these events. + // Negative events *can* be recorded, but sum should not be filled out when + // doing so. This is specifically to enforce compatibility w/ OpenMetrics, + // see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#summary + double sum = 5; + + // Represents the value at a given quantile of a distribution. + // + // To record Min and Max values following conventions are used: + // - The 1.0 quantile is equivalent to the maximum value observed. + // - The 0.0 quantile is equivalent to the minimum value observed. + // + // See the following issue for more context: + // https://github.com/open-telemetry/opentelemetry-proto/issues/125 + message ValueAtQuantile { + // The quantile of a distribution. Must be in the interval + // [0.0, 1.0]. + double quantile = 1; + + // The value at the given quantile of a distribution. + // + // Quantile values must NOT be negative. + double value = 2; + } + + // (Optional) list of values at different quantiles of the distribution calculated + // from the current snapshot. The quantiles must be strictly increasing. + repeated ValueAtQuantile quantile_values = 6; + + // Flags that apply to this specific data point. See DataPointFlags + // for the available flags and their meaning. 
+ uint32 flags = 8; +} + +// A representation of an exemplar, which is a sample input measurement. +// Exemplars also hold information about the environment when the measurement +// was recorded, for example the span and trace ID of the active span when the +// exemplar was recorded. +message Exemplar { + reserved 1; + + // The set of key/value pairs that were filtered out by the aggregator, but + // recorded alongside the original measurement. Only key/value pairs that were + // filtered out by the aggregator should be included + repeated KeyValue filtered_attributes = 7; + + // time_unix_nano is the exact time when this exemplar was recorded + // + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January + // 1970. + fixed64 time_unix_nano = 2; + + // The value of the measurement that was recorded. An exemplar is + // considered invalid when one of the recognized value fields is not present + // inside this oneof. + oneof value { + double as_double = 3; + sfixed64 as_int = 6; + } + + // (Optional) Span ID of the exemplar trace. + // span_id may be missing if the measurement is not recorded inside a trace + // or if the trace is not sampled. + bytes span_id = 4; + + // (Optional) Trace ID of the exemplar trace. + // trace_id may be missing if the measurement is not recorded inside a trace + // or if the trace is not sampled. + bytes trace_id = 5; +} diff --git a/lib/protoparser/opentelemetry/proto/metrics_service.proto b/lib/protoparser/opentelemetry/proto/metrics_service.proto new file mode 100644 index 000000000..505f67682 --- /dev/null +++ b/lib/protoparser/opentelemetry/proto/metrics_service.proto @@ -0,0 +1,30 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package opentelemetry; + +import "lib/protoparser/opentelemetry/proto/metrics.proto"; + +option go_package = "opentelemetry/pb"; + +message ExportMetricsServiceRequest { + // An array of ResourceMetrics. + // For data coming from a single resource this array will typically contain one + // element. Intermediary nodes (such as OpenTelemetry Collector) that receive + // data from multiple origins typically batch the data before forwarding further and + // in that case this array will contain multiple elements. + repeated ResourceMetrics resource_metrics = 1; +} diff --git a/lib/protoparser/opentelemetry/proto/resource.proto b/lib/protoparser/opentelemetry/proto/resource.proto new file mode 100644 index 000000000..572ccf1b6 --- /dev/null +++ b/lib/protoparser/opentelemetry/proto/resource.proto @@ -0,0 +1,37 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package opentelemetry;
+
+import "lib/protoparser/opentelemetry/proto/common.proto";
+
+option csharp_namespace = "OpenTelemetry.Proto.Resource.V1";
+option java_multiple_files = true;
+option java_package = "io.opentelemetry.proto.resource.v1";
+option java_outer_classname = "ResourceProto";
+option go_package = "opentelemetry/pb";
+
+// Resource information.
+message Resource {
+  // Set of attributes that describe the resource.
+  // Attribute keys MUST be unique (it is not allowed to have more than one
+  // attribute with the same key).
+  repeated KeyValue attributes = 1;
+
+  // dropped_attributes_count is the number of dropped attributes. If the value is 0, then
+  // no attributes were dropped.
+  uint32 dropped_attributes_count = 2;
+}
diff --git a/lib/protoparser/opentelemetry/stream/streamparser.go b/lib/protoparser/opentelemetry/stream/streamparser.go
new file mode 100644
index 000000000..f5b79d5d8
--- /dev/null
+++ b/lib/protoparser/opentelemetry/stream/streamparser.go
@@ -0,0 +1,301 @@
+package stream
+
+import (
+	"fmt"
+	"io"
+	"strconv"
+	"sync"
+
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/decimal"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/logger"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentelemetry/pb"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter"
+	"github.com/VictoriaMetrics/metrics"
+)
+
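Before the parser itself, a brief usage sketch (not part of the patch): an HTTP handler could feed a request body into `ParseStream` roughly as follows. The handler name, route, and port are purely illustrative; only the `ParseStream` signature comes from this diff:

```go
package main

import (
	"log"
	"net/http"
	"strings"

	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
	"github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentelemetry/stream"
)

// handlePush is a hypothetical handler showing the ParseStream calling convention.
func handlePush(w http.ResponseWriter, r *http.Request) {
	// gzip-compressed payloads are announced via the Content-Encoding header.
	isGzipped := strings.EqualFold(r.Header.Get("Content-Encoding"), "gzip")
	err := stream.ParseStream(r.Body, isGzipped, func(tss []prompbmarshal.TimeSeries) error {
		// Forward tss to storage here. Do not retain tss after returning:
		// the underlying slices are pooled and will be reused.
		return nil
	})
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	w.WriteHeader(http.StatusOK)
}

func main() {
	http.HandleFunc("/push", handlePush) // route name is illustrative only
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```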
+func ParseStream(r io.Reader, isGzipped bool, callback func(tss []prompbmarshal.TimeSeries) error) error { + wcr := writeconcurrencylimiter.GetReader(r) + defer writeconcurrencylimiter.PutReader(wcr) + r = wcr + + if isGzipped { + zr, err := common.GetGzipReader(r) + if err != nil { + return fmt.Errorf("cannot read gzip-compressed OpenTelemetry protocol data: %w", err) + } + defer common.PutGzipReader(zr) + r = zr + } + + wr := getWriteContext() + defer putWriteContext(wr) + req, err := wr.readAndUnpackRequest(r) + if err != nil { + return fmt.Errorf("cannot unpack OpenTelemetry metrics: %w", err) + } + wr.parseRequestToTss(req) + + if err := callback(wr.tss); err != nil { + return fmt.Errorf("error when processing OpenTelemetry samples: %w", err) + } + + return nil +} + +func (wr *writeContext) appendSamplesFromScopeMetrics(sc *pb.ScopeMetrics) { + for _, m := range sc.Metrics { + if len(m.Name) == 0 { + // skip metrics without names + continue + } + switch t := m.Data.(type) { + case *pb.Metric_Gauge: + for _, p := range t.Gauge.DataPoints { + wr.appendSampleFromNumericPoint(m.Name, p) + } + case *pb.Metric_Sum: + if t.Sum.AggregationTemporality != pb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE { + rowsDroppedUnsupportedSum.Inc() + continue + } + for _, p := range t.Sum.DataPoints { + wr.appendSampleFromNumericPoint(m.Name, p) + } + case *pb.Metric_Summary: + for _, p := range t.Summary.DataPoints { + wr.appendSamplesFromSummary(m.Name, p) + } + case *pb.Metric_Histogram: + if t.Histogram.AggregationTemporality != pb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE { + rowsDroppedUnsupportedHistogram.Inc() + continue + } + for _, p := range t.Histogram.DataPoints { + wr.appendSamplesFromHistogram(m.Name, p) + } + default: + rowsDroppedUnsupportedMetricType.Inc() + logger.Warnf("unsupported type %T for metric %q", t, m.Name) + } + } +} + +// appendSampleFromNumericPoint appends p to wr.tss +func (wr *writeContext) appendSampleFromNumericPoint(metricName string, p *pb.NumberDataPoint) { + var v float64 + switch t := p.Value.(type) { + case *pb.NumberDataPoint_AsInt: + v = float64(t.AsInt) + case *pb.NumberDataPoint_AsDouble: + v = t.AsDouble + } + + t := int64(p.TimeUnixNano / 1e6) + isStale := (p.Flags)&uint32(1) != 0 + wr.pointLabels = appendAttributesToPromLabels(wr.pointLabels[:0], p.Attributes) + + wr.appendSample(metricName, t, v, isStale) +} + +// appendSamplesFromSummary appends summary p to wr.tss +func (wr *writeContext) appendSamplesFromSummary(metricName string, p *pb.SummaryDataPoint) { + t := int64(p.TimeUnixNano / 1e6) + isStale := (p.Flags)&uint32(1) != 0 + wr.pointLabels = appendAttributesToPromLabels(wr.pointLabels[:0], p.Attributes) + + wr.appendSample(metricName+"_sum", t, p.Sum, isStale) + wr.appendSample(metricName+"_count", t, float64(p.Count), isStale) + for _, q := range p.QuantileValues { + qValue := strconv.FormatFloat(q.Quantile, 'f', -1, 64) + wr.appendSampleWithExtraLabel(metricName, "quantile", qValue, t, q.Value, isStale) + } +} + +// appendSamplesFromHistogram appends histogram p to wr.tss +func (wr *writeContext) appendSamplesFromHistogram(metricName string, p *pb.HistogramDataPoint) { + if len(p.BucketCounts) == 0 { + // nothing to append + return + } + if len(p.BucketCounts) != len(p.ExplicitBounds)+1 { + // fast path, broken data format + logger.Warnf("opentelemetry bad histogram format: %q, size of buckets: %d, size of bounds: %d", metricName, len(p.BucketCounts), len(p.ExplicitBounds)) + return + } + + t := int64(p.TimeUnixNano 
+
+func (wr *writeContext) appendSamplesFromScopeMetrics(sc *pb.ScopeMetrics) {
+    for _, m := range sc.Metrics {
+        if len(m.Name) == 0 {
+            // skip metrics without names
+            continue
+        }
+        switch t := m.Data.(type) {
+        case *pb.Metric_Gauge:
+            for _, p := range t.Gauge.DataPoints {
+                wr.appendSampleFromNumericPoint(m.Name, p)
+            }
+        case *pb.Metric_Sum:
+            // only cumulative sums map cleanly to Prometheus counters; delta temporality is dropped
+            if t.Sum.AggregationTemporality != pb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE {
+                rowsDroppedUnsupportedSum.Inc()
+                continue
+            }
+            for _, p := range t.Sum.DataPoints {
+                wr.appendSampleFromNumericPoint(m.Name, p)
+            }
+        case *pb.Metric_Summary:
+            for _, p := range t.Summary.DataPoints {
+                wr.appendSamplesFromSummary(m.Name, p)
+            }
+        case *pb.Metric_Histogram:
+            if t.Histogram.AggregationTemporality != pb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE {
+                rowsDroppedUnsupportedHistogram.Inc()
+                continue
+            }
+            for _, p := range t.Histogram.DataPoints {
+                wr.appendSamplesFromHistogram(m.Name, p)
+            }
+        default:
+            rowsDroppedUnsupportedMetricType.Inc()
+            logger.Warnf("unsupported type %T for metric %q", t, m.Name)
+        }
+    }
+}
+
+// appendSampleFromNumericPoint appends p to wr.tss
+func (wr *writeContext) appendSampleFromNumericPoint(metricName string, p *pb.NumberDataPoint) {
+    var v float64
+    switch t := p.Value.(type) {
+    case *pb.NumberDataPoint_AsInt:
+        v = float64(t.AsInt)
+    case *pb.NumberDataPoint_AsDouble:
+        v = t.AsDouble
+    }
+
+    // convert OpenTelemetry nanosecond timestamps to Prometheus milliseconds
+    t := int64(p.TimeUnixNano / 1e6)
+    // the least significant bit of Flags is the OpenTelemetry "no recorded value" flag, i.e. a staleness marker
+    isStale := p.Flags&1 != 0
+    wr.pointLabels = appendAttributesToPromLabels(wr.pointLabels[:0], p.Attributes)
+
+    wr.appendSample(metricName, t, v, isStale)
+}
+
+// appendSamplesFromSummary appends summary p to wr.tss
+func (wr *writeContext) appendSamplesFromSummary(metricName string, p *pb.SummaryDataPoint) {
+    t := int64(p.TimeUnixNano / 1e6)
+    isStale := p.Flags&1 != 0
+    wr.pointLabels = appendAttributesToPromLabels(wr.pointLabels[:0], p.Attributes)
+
+    wr.appendSample(metricName+"_sum", t, p.Sum, isStale)
+    wr.appendSample(metricName+"_count", t, float64(p.Count), isStale)
+    for _, q := range p.QuantileValues {
+        qValue := strconv.FormatFloat(q.Quantile, 'f', -1, 64)
+        wr.appendSampleWithExtraLabel(metricName, "quantile", qValue, t, q.Value, isStale)
+    }
+}
+
+// appendSamplesFromHistogram appends histogram p to wr.tss
+func (wr *writeContext) appendSamplesFromHistogram(metricName string, p *pb.HistogramDataPoint) {
+    if len(p.BucketCounts) == 0 {
+        // nothing to append
+        return
+    }
+    if len(p.BucketCounts) != len(p.ExplicitBounds)+1 {
+        // broken data format: there must be one bucket per explicit bound plus a trailing +Inf bucket
+        logger.Warnf("opentelemetry bad histogram format: %q, size of buckets: %d, size of bounds: %d", metricName, len(p.BucketCounts), len(p.ExplicitBounds))
+        return
+    }
+
+    t := int64(p.TimeUnixNano / 1e6)
+    isStale := p.Flags&1 != 0
+    wr.pointLabels = appendAttributesToPromLabels(wr.pointLabels[:0], p.Attributes)
+
+    if p.Sum != nil {
+        // Sum is optional (a pointer) in the OpenTelemetry data model; skip the _sum series when it is missing
+        wr.appendSample(metricName+"_sum", t, *p.Sum, isStale)
+    }
+    wr.appendSample(metricName+"_count", t, float64(p.Count), isStale)
+
+    // OpenTelemetry buckets hold per-bucket counts, while Prometheus "le" buckets are cumulative,
+    // so accumulate the counts while emitting one series per bound plus the +Inf bucket.
+    var cumulative uint64
+    for index, bound := range p.ExplicitBounds {
+        cumulative += p.BucketCounts[index]
+        boundLabelValue := strconv.FormatFloat(bound, 'f', -1, 64)
+        wr.appendSampleWithExtraLabel(metricName+"_bucket", "le", boundLabelValue, t, float64(cumulative), isStale)
+    }
+    cumulative += p.BucketCounts[len(p.BucketCounts)-1]
+    wr.appendSampleWithExtraLabel(metricName+"_bucket", "le", "+Inf", t, float64(cumulative), isStale)
+}
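+
+// For example (numbers taken from the tests below): a cumulative histogram point
+// with ExplicitBounds=[0.1, 0.5, 1, 5] and BucketCounts=[0, 5, 10, 0, 0] becomes
+//
+//    my-histogram_bucket{le="0.1"}  0
+//    my-histogram_bucket{le="0.5"}  5
+//    my-histogram_bucket{le="1"}    15
+//    my-histogram_bucket{le="5"}    15
+//    my-histogram_bucket{le="+Inf"} 15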
+
+// appendSample appends sample with the given metricName to wr.tss
+func (wr *writeContext) appendSample(metricName string, t int64, v float64, isStale bool) {
+    wr.appendSampleWithExtraLabel(metricName, "", "", t, v, isStale)
+}
+
+// appendSampleWithExtraLabel appends sample with the given metricName and the given (labelName=labelValue) extra label to wr.tss
+func (wr *writeContext) appendSampleWithExtraLabel(metricName, labelName, labelValue string, t int64, v float64, isStale bool) {
+    if isStale {
+        v = decimal.StaleNaN
+    }
+    if t <= 0 {
+        // Set the current timestamp if t isn't set.
+        t = int64(fasttime.UnixTimestamp()) * 1000
+    }
+
+    labelsPool := wr.labelsPool
+    labelsLen := len(labelsPool)
+    labelsPool = append(labelsPool, prompbmarshal.Label{
+        Name:  "__name__",
+        Value: metricName,
+    })
+    labelsPool = append(labelsPool, wr.baseLabels...)
+    labelsPool = append(labelsPool, wr.pointLabels...)
+    if labelName != "" && labelValue != "" {
+        labelsPool = append(labelsPool, prompbmarshal.Label{
+            Name:  labelName,
+            Value: labelValue,
+        })
+    }
+
+    samplesPool := wr.samplesPool
+    samplesLen := len(samplesPool)
+    samplesPool = append(samplesPool, prompbmarshal.Sample{
+        Timestamp: t,
+        Value:     v,
+    })
+
+    // the new series references slices of the shared pools in order to reduce allocations
+    wr.tss = append(wr.tss, prompbmarshal.TimeSeries{
+        Labels:  labelsPool[labelsLen:],
+        Samples: samplesPool[samplesLen:],
+    })
+
+    wr.labelsPool = labelsPool
+    wr.samplesPool = samplesPool
+
+    rowsRead.Inc()
+}
+
+// appendAttributesToPromLabels appends attributes to dst and returns the result.
+//
+// Attribute keys are copied as-is; keys such as "host.name" keep their dots,
+// so callers may need to sanitize or relabel them for Prometheus compatibility.
+func appendAttributesToPromLabels(dst []prompbmarshal.Label, attributes []*pb.KeyValue) []prompbmarshal.Label {
+    for _, at := range attributes {
+        dst = append(dst, prompbmarshal.Label{
+            Name:  at.Key,
+            Value: at.Value.FormatString(),
+        })
+    }
+    return dst
+}
+
+type writeContext struct {
+    // bb holds the original protobuf data, which must be parsed
+    bb bytesutil.ByteBuffer
+
+    // tss holds parsed time series
+    tss []prompbmarshal.TimeSeries
+
+    // baseLabels are labels, which must be added to all the ingested samples
+    baseLabels []prompbmarshal.Label
+
+    // pointLabels are labels, which must be added to the ingested OpenTelemetry points
+    pointLabels []prompbmarshal.Label
+
+    // pools are used for reducing memory allocations when parsing time series
+    labelsPool  []prompbmarshal.Label
+    samplesPool []prompbmarshal.Sample
+}
+
+func (wr *writeContext) reset() {
+    wr.bb.Reset()
+
+    tss := wr.tss
+    for i := range tss {
+        ts := &tss[i]
+        ts.Labels = nil
+        ts.Samples = nil
+    }
+    wr.tss = tss[:0]
+
+    wr.baseLabels = resetLabels(wr.baseLabels)
+    wr.pointLabels = resetLabels(wr.pointLabels)
+
+    wr.labelsPool = resetLabels(wr.labelsPool)
+    wr.samplesPool = wr.samplesPool[:0]
+}
+
+func resetLabels(labels []prompbmarshal.Label) []prompbmarshal.Label {
+    for i := range labels {
+        label := &labels[i]
+        label.Name = ""
+        label.Value = ""
+    }
+    return labels[:0]
+}
+
+func (wr *writeContext) readAndUnpackRequest(r io.Reader) (*pb.ExportMetricsServiceRequest, error) {
+    if _, err := wr.bb.ReadFrom(r); err != nil {
+        return nil, fmt.Errorf("cannot read request: %w", err)
+    }
+    var req pb.ExportMetricsServiceRequest
+    if err := req.UnmarshalVT(wr.bb.B); err != nil {
+        return nil, fmt.Errorf("cannot unmarshal request from %d bytes: %w", len(wr.bb.B), err)
+    }
+    return &req, nil
+}
+
+func (wr *writeContext) parseRequestToTss(req *pb.ExportMetricsServiceRequest) {
+    for _, rm := range req.ResourceMetrics {
+        if rm.Resource == nil {
+            // skip metrics without resource part.
+            continue
+        }
+        // resource-level attributes become base labels, added to every series of this resource
+        wr.baseLabels = appendAttributesToPromLabels(wr.baseLabels[:0], rm.Resource.Attributes)
+        for _, sc := range rm.ScopeMetrics {
+            wr.appendSamplesFromScopeMetrics(sc)
+        }
+    }
+}
+
+var wrPool sync.Pool
+
+func getWriteContext() *writeContext {
+    v := wrPool.Get()
+    if v == nil {
+        return &writeContext{}
+    }
+    return v.(*writeContext)
+}
+
+func putWriteContext(wr *writeContext) {
+    wr.reset()
+    wrPool.Put(wr)
+}
+
+var (
+    rowsRead                         = metrics.NewCounter(`vm_protoparser_rows_read_total{type="opentelemetry"}`)
+    rowsDroppedUnsupportedHistogram  = metrics.NewCounter(`vm_protoparser_rows_dropped_total{type="opentelemetry",reason="unsupported_histogram_aggregation"}`)
+    rowsDroppedUnsupportedSum        = metrics.NewCounter(`vm_protoparser_rows_dropped_total{type="opentelemetry",reason="unsupported_sum_aggregation"}`)
+    rowsDroppedUnsupportedMetricType = metrics.NewCounter(`vm_protoparser_rows_dropped_total{type="opentelemetry",reason="unsupported_metric_type"}`)
+)
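+
+// The counters above make dropped data observable. For example, unsupported
+// delta-temporality series can be detected with a query along these lines
+// (a MetricsQL sketch, not part of this file):
+//
+//    increase(vm_protoparser_rows_dropped_total{type="opentelemetry"}[1h]) > 0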
diff --git a/lib/protoparser/opentelemetry/stream/streamparser_test.go b/lib/protoparser/opentelemetry/stream/streamparser_test.go
new file mode 100644
index 000000000..6b4f4c9f8
--- /dev/null
+++ b/lib/protoparser/opentelemetry/stream/streamparser_test.go
@@ -0,0 +1,317 @@
+package stream
+
+import (
+    "bytes"
+    "compress/gzip"
+    "fmt"
+    "reflect"
+    "sort"
+    "testing"
+    "time"
+
+    "github.com/VictoriaMetrics/VictoriaMetrics/lib/fasttime"
+    "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
+    "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentelemetry/pb"
+)
+
+func TestParseStream(t *testing.T) {
+    f := func(samples []*pb.Metric, tssExpected []prompbmarshal.TimeSeries) {
+        t.Helper()
+
+        checkSeries := func(tss []prompbmarshal.TimeSeries) error {
+            if len(tss) != len(tssExpected) {
+                return fmt.Errorf("unexpected time series count, got: %d, want: %d", len(tss), len(tssExpected))
+            }
+            sortByMetricName(tss)
+            sortByMetricName(tssExpected)
+            for i := 0; i < len(tss); i++ {
+                ts := tss[i]
+                tsExpected := tssExpected[i]
+                if len(ts.Labels) != len(tsExpected.Labels) {
+                    return fmt.Errorf("idx: %d, unexpected labels count, got: %d, want: %d", i, len(ts.Labels), len(tsExpected.Labels))
+                }
+                sortLabels(ts.Labels)
+                sortLabels(tsExpected.Labels)
+                for j, label := range ts.Labels {
+                    labelExpected := tsExpected.Labels[j]
+                    if !reflect.DeepEqual(label, labelExpected) {
+                        return fmt.Errorf("idx: %d, label idx: %d, unequal label pairs, \ngot: \n%s, \nwant: \n%s",
+                            i, j, prettifyLabel(label), prettifyLabel(labelExpected))
+                    }
+                }
+                if len(ts.Samples) != len(tsExpected.Samples) {
+                    return fmt.Errorf("idx: %d, unexpected samples count, got: %d, want: %d", i, len(ts.Samples), len(tsExpected.Samples))
+                }
+                for j, sample := range ts.Samples {
+                    sampleExpected := tsExpected.Samples[j]
+                    if !reflect.DeepEqual(sample, sampleExpected) {
+                        return fmt.Errorf("idx: %d, sample idx: %d, unequal sample pairs, \ngot: \n%s,\nwant: \n%s",
+                            i, j, prettifySample(sample), prettifySample(sampleExpected))
+                    }
+                }
+            }
+            return nil
+        }
+
+        req := &pb.ExportMetricsServiceRequest{
+            ResourceMetrics: []*pb.ResourceMetrics{
+                generateOTLPSamples(samples),
+            },
+        }
+
+        // Verify protobuf parsing
+        pbData, err := req.MarshalVT()
+        if err != nil {
+            t.Fatalf("cannot marshal to protobuf: %s", err)
+        }
+        if err := checkParseStream(pbData, checkSeries); err != nil {
+            t.Fatalf("cannot parse protobuf: %s", err)
+        }
+    }
+
+    jobLabelValue := prompbmarshal.Label{
+        Name:  "job",
+        Value: "vm",
+    }
+    leLabel := func(value string) prompbmarshal.Label {
+        return prompbmarshal.Label{
+            Name:  "le",
+            Value: value,
+        }
+    }
+    kvLabel := func(k, v string) prompbmarshal.Label {
+        return prompbmarshal.Label{
+            Name:  k,
+            Value: v,
+        }
+    }
+
+    // Test all metric types
+    f(
+        []*pb.Metric{
+            generateGauge("my-gauge"),
+            generateHistogram("my-histogram"),
+            generateSum("my-sum"),
+            generateSummary("my-summary"),
+        },
+        []prompbmarshal.TimeSeries{
+            newPromPBTs("my-gauge", 15000, 15.0, jobLabelValue, kvLabel("label1", "value1")),
+            newPromPBTs("my-histogram_count", 30000, 15.0, jobLabelValue, kvLabel("label2", "value2")),
+            newPromPBTs("my-histogram_sum", 30000, 30.0, jobLabelValue, kvLabel("label2", "value2")),
+            newPromPBTs("my-histogram_bucket", 30000, 0.0, jobLabelValue, kvLabel("label2", "value2"), leLabel("0.1")),
+            newPromPBTs("my-histogram_bucket", 30000, 5.0, jobLabelValue, kvLabel("label2", "value2"), leLabel("0.5")),
+            newPromPBTs("my-histogram_bucket", 30000, 15.0, jobLabelValue, kvLabel("label2", "value2"), leLabel("1")),
+            newPromPBTs("my-histogram_bucket", 30000, 15.0, jobLabelValue, kvLabel("label2", "value2"), leLabel("5")),
+            newPromPBTs("my-histogram_bucket", 30000, 15.0, jobLabelValue, kvLabel("label2", "value2"), leLabel("+Inf")),
+            newPromPBTs("my-sum", 150000, 15.5, jobLabelValue, kvLabel("label5", "value5")),
+            newPromPBTs("my-summary_sum", 35000, 32.5, jobLabelValue, kvLabel("label6", "value6")),
+            newPromPBTs("my-summary_count", 35000, 5.0, jobLabelValue, kvLabel("label6", "value6")),
+            newPromPBTs("my-summary", 35000, 7.5, jobLabelValue, kvLabel("label6", "value6"), kvLabel("quantile", "0.1")),
+            newPromPBTs("my-summary", 35000, 10.0, jobLabelValue, kvLabel("label6", "value6"), kvLabel("quantile", "0.5")),
+            newPromPBTs("my-summary", 35000, 15.0, jobLabelValue, kvLabel("label6", "value6"), kvLabel("quantile", "1")),
+        })
+
+    // Test gauge
+    f(
+        []*pb.Metric{
+            generateGauge("my-gauge"),
+        },
+        []prompbmarshal.TimeSeries{
+            newPromPBTs("my-gauge", 15000, 15.0, jobLabelValue, kvLabel("label1", "value1")),
+        },
+    )
+}
+
+func checkParseStream(data []byte, checkSeries func(tss []prompbmarshal.TimeSeries) error) error {
+    // Verify parsing without compression
+    if err := ParseStream(bytes.NewBuffer(data), false, checkSeries); err != nil {
+        return fmt.Errorf("error when parsing data: %w", err)
+    }
+
+    // Verify parsing with compression
+    var bb bytes.Buffer
+    zw := gzip.NewWriter(&bb)
+    if _, err := zw.Write(data); err != nil {
+        return fmt.Errorf("cannot compress data: %w", err)
+    }
+    if err := zw.Close(); err != nil {
+        return fmt.Errorf("cannot close gzip writer: %w", err)
+    }
+    if err := ParseStream(&bb, true, checkSeries); err != nil {
+        return fmt.Errorf("error when parsing compressed data: %w", err)
+    }
+
+    return nil
+}
+
+func attributesFromKV(k, v string) []*pb.KeyValue {
+    return []*pb.KeyValue{
+        {
+            Key: k,
+            Value: &pb.AnyValue{
+                Value: &pb.AnyValue_StringValue{
+                    StringValue: v,
+                },
+            },
+        },
+    }
+}
+
+func generateGauge(name string) *pb.Metric {
+    points := []*pb.NumberDataPoint{
+        {
+            Attributes:   attributesFromKV("label1", "value1"),
+            Value:        &pb.NumberDataPoint_AsInt{AsInt: 15},
+            TimeUnixNano: uint64(15 * time.Second),
+        },
+    }
+    return &pb.Metric{
+        Name: name,
+        Data: &pb.Metric_Gauge{
+            Gauge: &pb.Gauge{
+                DataPoints: points,
+            },
+        },
+    }
+}
+
+func generateHistogram(name string) *pb.Metric {
+    points := []*pb.HistogramDataPoint{
+        {
+            Attributes:     attributesFromKV("label2", "value2"),
+            Count:          15,
+            Sum:            func() *float64 { v := 30.0; return &v }(), // Sum is optional (a pointer) in OTLP
+            ExplicitBounds: []float64{0.1, 0.5, 1.0, 5.0},
+            BucketCounts:   []uint64{0, 5, 10, 0, 0},
+            TimeUnixNano:   uint64(30 * time.Second),
+        },
+    }
+    return &pb.Metric{
+        Name: name,
+        Data: &pb.Metric_Histogram{
+            Histogram: &pb.Histogram{
+                AggregationTemporality: pb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE,
+                DataPoints:             points,
+            },
+        },
+    }
+}
+
+func generateSum(name string) *pb.Metric {
+    points := []*pb.NumberDataPoint{
+        {
+            Attributes:   attributesFromKV("label5", "value5"),
+            Value:        &pb.NumberDataPoint_AsDouble{AsDouble: 15.5},
+            TimeUnixNano: uint64(150 * time.Second),
+        },
+    }
+    return &pb.Metric{
+        Name: name,
+        Data: &pb.Metric_Sum{
+            Sum: &pb.Sum{
+                AggregationTemporality: pb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE,
+                DataPoints:             points,
+            },
+        },
+    }
+}
+
+func generateSummary(name string) *pb.Metric {
+    points := []*pb.SummaryDataPoint{
+        {
+            Attributes:   attributesFromKV("label6", "value6"),
+            TimeUnixNano: uint64(35 * time.Second),
+            Sum:          32.5,
+            Count:        5,
+            QuantileValues: []*pb.SummaryDataPoint_ValueAtQuantile{
+                {
+                    Quantile: 0.1,
+                    Value:    7.5,
+                },
+                {
+                    Quantile: 0.5,
+                    Value:    10.0,
+                },
+                {
+                    Quantile: 1.0,
+                    Value:    15.0,
+                },
+            },
+        },
+    }
+    return &pb.Metric{
+        Name: name,
+        Data: &pb.Metric_Summary{
+            Summary: &pb.Summary{
+                DataPoints: points,
+            },
+        },
+    }
+}
+
+func generateOTLPSamples(srcs []*pb.Metric) *pb.ResourceMetrics {
+    otlpMetrics := &pb.ResourceMetrics{
+        Resource: &pb.Resource{
+            Attributes: attributesFromKV("job", "vm"),
+        },
+    }
+    otlpMetrics.ScopeMetrics = []*pb.ScopeMetrics{
+        {
+            Metrics: append([]*pb.Metric{}, srcs...),
+        },
+    }
+    return otlpMetrics
+}
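+
+// Note how the "job" resource attribute attached here surfaces as a label on
+// every expected series in TestParseStream above - the parser merges
+// resource-level attributes into the label set of each data point.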
+
+func newPromPBTs(metricName string, t int64, v float64, extraLabels ...prompbmarshal.Label) prompbmarshal.TimeSeries {
+    if t <= 0 {
+        // Set the current timestamp if t isn't set.
+        t = int64(fasttime.UnixTimestamp()) * 1000
+    }
+    ts := prompbmarshal.TimeSeries{
+        Labels: []prompbmarshal.Label{
+            {
+                Name:  "__name__",
+                Value: metricName,
+            },
+        },
+        Samples: []prompbmarshal.Sample{
+            {
+                Value:     v,
+                Timestamp: t,
+            },
+        },
+    }
+    ts.Labels = append(ts.Labels, extraLabels...)
+    return ts
+}
+
+func prettifyLabel(label prompbmarshal.Label) string {
+    return fmt.Sprintf("name=%q value=%q", label.Name, label.Value)
+}
+
+func prettifySample(sample prompbmarshal.Sample) string {
+    return fmt.Sprintf("sample=%f timestamp: %d", sample.Value, sample.Timestamp)
+}
+
+func sortByMetricName(tss []prompbmarshal.TimeSeries) {
+    sort.Slice(tss, func(i, j int) bool {
+        a := getMetricName(tss[i].Labels)
+        b := getMetricName(tss[j].Labels)
+        if a != b {
+            return a < b
+        }
+        // Break ties via the full label set, so series sharing a metric name
+        // (e.g. histogram buckets) are compared in a deterministic order.
+        return fmt.Sprintf("%v", tss[i].Labels) < fmt.Sprintf("%v", tss[j].Labels)
+    })
+}
+
+func getMetricName(labels []prompbmarshal.Label) string {
+    for _, l := range labels {
+        if l.Name == "__name__" {
+            return l.Value
+        }
+    }
+    return ""
+}
+
+func sortLabels(labels []prompbmarshal.Label) {
+    sort.Slice(labels, func(i, j int) bool {
+        return labels[i].Name < labels[j].Name
+    })
+}
diff --git a/lib/protoparser/opentelemetry/stream/streamparser_timing_test.go b/lib/protoparser/opentelemetry/stream/streamparser_timing_test.go
new file mode 100644
index 000000000..368c0309d
--- /dev/null
+++ b/lib/protoparser/opentelemetry/stream/streamparser_timing_test.go
@@ -0,0 +1,39 @@
+package stream
+
+import (
+    "bytes"
+    "testing"
+
+    "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
+    "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentelemetry/pb"
+)
+
+func BenchmarkParseStream(b *testing.B) {
+    samples := []*pb.Metric{
+        generateGauge("my-gauge"),
+        generateHistogram("my-histogram"),
+        generateSum("my-sum"),
+        generateSummary("my-summary"),
+    }
+    b.SetBytes(1)
+    b.ReportAllocs()
+    b.RunParallel(func(p *testing.PB) {
+        // marshal the request once per goroutine; ParseStream is the code under test
+        pbRequest := pb.ExportMetricsServiceRequest{
+            ResourceMetrics: []*pb.ResourceMetrics{generateOTLPSamples(samples)},
+        }
+        data, err := pbRequest.MarshalVT()
+        if err != nil {
+            b.Fatalf("cannot marshal data: %s", err)
+        }
+
+        for p.Next() {
+            err := ParseStream(bytes.NewBuffer(data), false, func(tss []prompbmarshal.TimeSeries) error {
+                return nil
+            })
+            if err != nil {
+                b.Fatalf("cannot parse stream: %s", err)
+            }
+        }
+    })
+}
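+
+// The new tests and the benchmark can be run with the standard Go tooling,
+// e.g. (a sketch; adjust the package path to the checkout layout):
+//
+//    go test ./lib/protoparser/opentelemetry/stream/
+//    go test -bench=BenchmarkParseStream ./lib/protoparser/opentelemetry/stream/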