diff --git a/docs/README.md b/docs/README.md
index d5ec365dea..c09e77ab07 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -1627,6 +1627,25 @@ Set HTTP request header `Content-Encoding: gzip` when sending gzip-compressed da
 VictoriaMetrics stores the ingested OpenTelemetry [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) as is without any transformations. Pass `-opentelemetry.usePrometheusNaming` command-line flag to VictoriaMetrics for automatic conversion of metric names and labels into Prometheus-compatible format.
 
+Using the following exporter configuration in the OpenTelemetry Collector allows you to send metrics to VictoriaMetrics:
+
+```yaml
+exporters:
+  otlphttp/victoriametrics:
+    compression: gzip
+    encoding: proto
+    endpoint: http://<victoria-metrics-service>.<namespace>.svc.cluster.local:<port>/opentelemetry
+```
+Remember to add the exporter to the desired service pipeline in order to activate it.
+```yaml
+service:
+  pipelines:
+    metrics:
+      exporters:
+        - otlphttp/victoriametrics
+      receivers:
+        - otlp
+```
 See [How to use OpenTelemetry metrics with VictoriaMetrics](https://docs.victoriametrics.com/guides/getting-started-with-opentelemetry/).
 
 ## JSON line format
diff --git a/docs/Single-server-VictoriaMetrics.md b/docs/Single-server-VictoriaMetrics.md
index 1536a9b72a..32da7583ed 100644
--- a/docs/Single-server-VictoriaMetrics.md
+++ b/docs/Single-server-VictoriaMetrics.md
@@ -1635,6 +1635,25 @@ Set HTTP request header `Content-Encoding: gzip` when sending gzip-compressed da
 VictoriaMetrics stores the ingested OpenTelemetry [raw samples](https://docs.victoriametrics.com/keyconcepts/#raw-samples) as is without any transformations. Pass `-opentelemetry.usePrometheusNaming` command-line flag to VictoriaMetrics for automatic conversion of metric names and labels into Prometheus-compatible format.
 
+Using the following exporter configuration in the OpenTelemetry Collector allows you to send metrics to VictoriaMetrics:
+
+```yaml
+exporters:
+  otlphttp/victoriametrics:
+    compression: gzip
+    encoding: proto
+    endpoint: http://<victoria-metrics-service>.<namespace>.svc.cluster.local:<port>/opentelemetry
+```
+Remember to add the exporter to the desired service pipeline in order to activate it.
+```yaml
+service:
+  pipelines:
+    metrics:
+      exporters:
+        - otlphttp/victoriametrics
+      receivers:
+        - otlp
+```
 See [How to use OpenTelemetry metrics with VictoriaMetrics](https://docs.victoriametrics.com/guides/getting-started-with-opentelemetry/).
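+
+For applications that push OTLP metrics without a collector in between, the same endpoint can be targeted directly from an OpenTelemetry SDK. The following Go sketch is illustrative only: it assumes a VictoriaMetrics instance at `localhost:8428`, and the `example.requests` counter is a made-up name.
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
+	"go.opentelemetry.io/otel/sdk/metric"
+)
+
+func main() {
+	ctx := context.Background()
+	// The otlphttp exporter above appends /v1/metrics to its base endpoint,
+	// so pushing directly to VictoriaMetrics uses /opentelemetry/v1/metrics.
+	exporter, err := otlpmetrichttp.New(ctx,
+		otlpmetrichttp.WithEndpoint("localhost:8428"), // assumed local VictoriaMetrics address
+		otlpmetrichttp.WithURLPath("/opentelemetry/v1/metrics"),
+		otlpmetrichttp.WithInsecure(), // plain HTTP; remove once TLS is configured
+	)
+	if err != nil {
+		log.Fatal(err)
+	}
+	provider := metric.NewMeterProvider(
+		metric.WithReader(metric.NewPeriodicReader(exporter)),
+	)
+	counter, err := provider.Meter("example").Int64Counter("example.requests")
+	if err != nil {
+		log.Fatal(err)
+	}
+	counter.Add(ctx, 1)
+	// Shutdown flushes the pending data point to VictoriaMetrics.
+	if err := provider.Shutdown(ctx); err != nil {
+		log.Fatal(err)
+	}
+}
+```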
 ## JSON line format
diff --git a/docs/guides/getting-started-with-opentelemetry-app.go-collector.example b/docs/guides/getting-started-with-opentelemetry-app.go-collector.example
new file mode 100644
index 0000000000..24bc6e72c0
--- /dev/null
+++ b/docs/guides/getting-started-with-opentelemetry-app.go-collector.example
@@ -0,0 +1,223 @@
+package main
+
+import (
+	"context"
+	"errors"
+	"io"
+	"log"
+	"math/rand"
+	"net"
+	"net/http"
+	"os"
+	"os/signal"
+	"strconv"
+	"time"
+
+	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
+	otelmetric "go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/propagation"
+	"go.opentelemetry.io/otel/sdk/metric"
+	"go.opentelemetry.io/otel/sdk/resource"
+	"go.opentelemetry.io/otel/sdk/trace"
+	semconv "go.opentelemetry.io/otel/semconv/v1.25.0"
+)
+
+func main() {
+	if err := run(); err != nil {
+		log.Fatalln(err)
+	}
+}
+
+func run() (err error) {
+	// Handle SIGINT (CTRL+C) gracefully.
+	ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt)
+	defer stop()
+
+	// Set up OpenTelemetry.
+	otelShutdown, err := setupOTelSDK(ctx)
+	if err != nil {
+		return
+	}
+	// Handle shutdown properly so nothing leaks.
+	defer func() {
+		err = errors.Join(err, otelShutdown(context.Background()))
+	}()
+
+	// Start HTTP server.
+	srv := &http.Server{
+		Addr:         ":8080",
+		BaseContext:  func(_ net.Listener) context.Context { return ctx },
+		ReadTimeout:  time.Second,
+		WriteTimeout: 10 * time.Second,
+		Handler:      newHTTPHandler(),
+	}
+	srvErr := make(chan error, 1)
+	go func() {
+		srvErr <- srv.ListenAndServe()
+	}()
+
+	// Wait for interruption.
+	select {
+	case err = <-srvErr:
+		// Error when starting HTTP server.
+		return
+	case <-ctx.Done():
+		// Wait for first CTRL+C.
+		// Stop receiving signal notifications as soon as possible.
+		stop()
+	}
+
+	// When Shutdown is called, ListenAndServe immediately returns ErrServerClosed.
+	err = srv.Shutdown(context.Background())
+	return
+}
+
+func newHTTPHandler() http.Handler {
+	mux := http.NewServeMux()
+
+	// handleFunc is a replacement for mux.HandleFunc
+	// which enriches the handler's HTTP instrumentation with the pattern as the http.route.
+	handleFunc := func(pattern string, handlerFunc func(http.ResponseWriter, *http.Request)) {
+		// Configure the "http.route" for the HTTP instrumentation.
+		handler := otelhttp.WithRouteTag(pattern, http.HandlerFunc(handlerFunc))
+		mux.Handle(pattern, handler)
+	}
+
+	// Register handlers.
+	handleFunc("/rolldice", rolldice)
+
+	// Add HTTP instrumentation for the whole server.
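+	// otelhttp.NewHandler wraps the mux so each incoming request gets a
+	// server span and the standard HTTP server metrics recorded, in
+	// addition to the per-route instrumentation configured above.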
+	handler := otelhttp.NewHandler(mux, "/")
+	return handler
+}
+
+var (
+	tracer  = otel.Tracer("rolldice")
+	meter   = otel.Meter("rolldice")
+	rollCnt otelmetric.Int64Counter
+)
+
+func init() {
+	var err error
+	rollCnt, err = meter.Int64Counter("dice.rolls",
+		otelmetric.WithDescription("The number of rolls by roll value"),
+		otelmetric.WithUnit("{roll}"))
+	if err != nil {
+		panic(err)
+	}
+}
+
+func rolldice(w http.ResponseWriter, r *http.Request) {
+	ctx, span := tracer.Start(r.Context(), "roll")
+	defer span.End()
+
+	roll := 1 + rand.Intn(6)
+
+	rollValueAttr := attribute.Int("roll.value", roll)
+	span.SetAttributes(rollValueAttr)
+	rollCnt.Add(ctx, 1, otelmetric.WithAttributes(rollValueAttr))
+
+	resp := strconv.Itoa(roll) + "\n"
+	if _, err := io.WriteString(w, resp); err != nil {
+		log.Printf("Write failed: %v\n", err)
+	}
+}
+
+// setupOTelSDK bootstraps the OpenTelemetry pipeline.
+// If it does not return an error, make sure to call shutdown for proper cleanup.
+func setupOTelSDK(ctx context.Context) (shutdown func(context.Context) error, err error) {
+	var shutdownFuncs []func(context.Context) error
+
+	// shutdown calls cleanup functions registered via shutdownFuncs.
+	// The errors from the calls are joined.
+	// Each registered cleanup will be invoked once.
+	shutdown = func(ctx context.Context) error {
+		var err error
+		for _, fn := range shutdownFuncs {
+			err = errors.Join(err, fn(ctx))
+		}
+		shutdownFuncs = nil
+		return err
+	}
+
+	// handleErr calls shutdown for cleanup and makes sure that all errors are returned.
+	handleErr := func(inErr error) {
+		err = errors.Join(inErr, shutdown(ctx))
+	}
+
+	// Set up propagator.
+	prop := newPropagator()
+	otel.SetTextMapPropagator(prop)
+
+	// Set up trace provider.
+	tracerProvider, err := newTraceProvider(ctx)
+	if err != nil {
+		handleErr(err)
+		return
+	}
+	shutdownFuncs = append(shutdownFuncs, tracerProvider.Shutdown)
+	otel.SetTracerProvider(tracerProvider)
+
+	// Set up meter provider.
+	meterProvider, err := newMeterProvider(ctx)
+	if err != nil {
+		handleErr(err)
+		return
+	}
+	shutdownFuncs = append(shutdownFuncs, meterProvider.Shutdown)
+	otel.SetMeterProvider(meterProvider)
+
+	return
+}
+
+func newPropagator() propagation.TextMapPropagator {
+	return propagation.NewCompositeTextMapPropagator(
+		propagation.TraceContext{},
+		propagation.Baggage{},
+	)
+}
+
+func newTraceProvider(ctx context.Context) (*trace.TracerProvider, error) {
+	traceExporter, err := otlptracehttp.New(ctx, otlptracehttp.WithInsecure(), otlptracehttp.WithEndpoint("localhost:4318"))
+	if err != nil {
+		return nil, err
+	}
+
+	traceProvider := trace.NewTracerProvider(
+		trace.WithBatcher(traceExporter,
+			// Default is 5s. Set to 1s for demonstrative purposes.
+			trace.WithBatchTimeout(time.Second)),
+	)
+	return traceProvider, nil
+}
+
+func newMeterProvider(ctx context.Context) (*metric.MeterProvider, error) {
+	metricExporter, err := otlpmetrichttp.New(ctx, otlpmetrichttp.WithInsecure(), otlpmetrichttp.WithEndpoint("localhost:4318"))
+	if err != nil {
+		return nil, err
+	}
+	// Alternatively, print metrics to stdout for debugging
+	// (requires importing go.opentelemetry.io/otel/exporters/stdout/stdoutmetric):
+	// metricExporter, err := stdoutmetric.New()
+	// if err != nil { return nil, err }
+	res, err := resource.Merge(resource.Default(),
+		resource.NewWithAttributes(semconv.SchemaURL,
+			semconv.ServiceName("dice-roller"),
+			semconv.ServiceVersion("0.1.0"),
+		))
+	if err != nil {
+		return nil, err
+	}
+	meterProvider := metric.NewMeterProvider(
+		metric.WithResource(res),
+		metric.WithReader(metric.NewPeriodicReader(metricExporter,
+			// Default is 1m. Set to 3s for demonstrative purposes.
+			metric.WithInterval(3*time.Second))),
+	)
+
+	return meterProvider, nil
+}
diff --git a/docs/guides/getting-started-with-opentelemetry.md b/docs/guides/getting-started-with-opentelemetry.md
index e8f444516e..7d5afe9124 100644
--- a/docs/guides/getting-started-with-opentelemetry.md
+++ b/docs/guides/getting-started-with-opentelemetry.md
@@ -55,23 +55,37 @@ helm repo update
 
 # add values
 cat << EOF > values.yaml
+mode: deployment
+image:
+  repository: "otel/opentelemetry-collector-contrib"
 presets:
   clusterMetrics:
     enabled: true
 config:
+  receivers:
+    otlp:
+      protocols:
+        grpc:
+          endpoint: 0.0.0.0:4317
+        http:
+          endpoint: 0.0.0.0:4318
   exporters:
-    prometheusremotewrite:
-      endpoint: "http://victoria-metrics-victoria-metrics-single-server.default.svc.cluster.local:8428/api/v1/write"
+    otlphttp/victoriametrics:
+      compression: gzip
+      encoding: proto
+      endpoint: http://victoria-metrics-victoria-metrics-single-server.default.svc.cluster.local:8428/opentelemetry
+      tls:
+        insecure: true
   service:
-    pipelines:
-      metrics:
-        receivers: [otlp]
-        processors: []
-        exporters: [prometheusremotewrite]
+    pipelines:
+      metrics:
+        receivers: [otlp]
+        processors: []
+        exporters: [otlphttp/victoriametrics]
 EOF
 
 # install helm chart
-helm upgrade -i otl-collector open-telemetry/opentelemetry-collector --set mode=deployment -f values.yaml
+helm upgrade -i otl-collector open-telemetry/opentelemetry-collector -f values.yaml
 
 # check if pod is healthy
 kubectl get pod
@@ -79,13 +93,78 @@ NAME                                                     READY   STATUS    RESTA
 otl-collector-opentelemetry-collector-7467bbb559-2pq2n   1/1     Running   0          23m
 
 # forward port to local machine to verify metrics are ingested
-kubectl port-forward victoria-metrics-victoria-metrics-single-server-0 8428
+kubectl port-forward service/victoria-metrics-victoria-metrics-single-server 8428
 
 # check metric `k8s_container_ready` via browser
 http://localhost:8428/vmui/#/?g0.expr=k8s_container_ready
+
+# forward port to local machine to set up the opentelemetry-collector locally
+kubectl port-forward service/otl-collector-opentelemetry-collector 4318
+
 ```
 The full version of possible configuration options could be found in [OpenTelemetry docs](https://opentelemetry.io/docs/collector/configuration/).
 
+## Sending to VictoriaMetrics via OpenTelemetry
+Metrics can be sent to VictoriaMetrics via OpenTelemetry instrumentation libraries. You can use any compatible OpenTelemetry instrumentation [clients](https://opentelemetry.io/docs/languages/).
+In our example, we'll create a web server in [Golang](https://go.dev/) and instrument it with metrics.
+
+### Building the Go application instrumented with metrics
+Copy the Go file from [here](/guides/getting-started-with-opentelemetry-app.go-collector.example). This will give you a basic implementation of a dice-roll web server with the OpenTelemetry Collector URLs pointing to `localhost:4318`.
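+
+Before building the full app, you can sanity-check the port-forwarded collector with a short one-shot program. This is an optional sketch, not part of the guide's example file: it assumes the collector is still reachable at `localhost:4318` and emits a single made-up `pipeline.check` counter that should appear in vmui shortly afterwards.
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp"
+	"go.opentelemetry.io/otel/sdk/metric"
+)
+
+func main() {
+	ctx := context.Background()
+	// OTLP over HTTP to the locally port-forwarded collector.
+	exp, err := otlpmetrichttp.New(ctx,
+		otlpmetrichttp.WithInsecure(),
+		otlpmetrichttp.WithEndpoint("localhost:4318"))
+	if err != nil {
+		log.Fatal(err)
+	}
+	mp := metric.NewMeterProvider(metric.WithReader(metric.NewPeriodicReader(exp)))
+	counter, err := mp.Meter("check").Int64Counter("pipeline.check")
+	if err != nil {
+		log.Fatal(err)
+	}
+	counter.Add(ctx, 1)
+	// Shutdown flushes the data point through the collector into VictoriaMetrics.
+	if err := mp.Shutdown(ctx); err != nil {
+		log.Fatal(err)
+	}
+}
+```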
+In the same directory run the following command to create the `go.mod` file:
+```sh
+go mod init vm/otel
+```
+
+For demo purposes, we'll add the following dependencies to the `go.mod` file:
+```go
+
+require (
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0
+	go.opentelemetry.io/otel v1.27.0
+	go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.27.0
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0
+	go.opentelemetry.io/otel/metric v1.27.0
+	go.opentelemetry.io/otel/sdk v1.27.0
+	go.opentelemetry.io/otel/sdk/metric v1.27.0
+)
+
+require (
+	github.com/cenkalti/backoff/v4 v4.3.0 // indirect
+	github.com/felixge/httpsnoop v1.0.4 // indirect
+	github.com/go-logr/logr v1.4.1 // indirect
+	github.com/go-logr/stdr v1.2.2 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 // indirect
+	go.opentelemetry.io/otel/trace v1.27.0 // indirect
+	go.opentelemetry.io/proto/otlp v1.2.0 // indirect
+	golang.org/x/net v0.25.0 // indirect
+	golang.org/x/sys v0.20.0 // indirect
+	golang.org/x/text v0.15.0 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20240515191416-fc5f0ca64291 // indirect
+	google.golang.org/grpc v1.64.0 // indirect
+	google.golang.org/protobuf v1.34.1 // indirect
+)
+```
+
+Once you have these in your `go.mod` file, you can run the following command to download the dependencies:
+```sh
+go mod tidy
+```
+
+Now you can run the application:
+```sh
+go run .
+```
+
+### Test metrics ingestion
+By default, the application will be available at `localhost:8080`. You can start sending requests to the `/rolldice` endpoint to generate metrics. The following command will send 20 requests to the `/rolldice` endpoint:
+```sh
+for i in `seq 1 20`; do curl http://localhost:8080/rolldice; done
+```
+
+After a few seconds you should start to see the ingested metrics in vmui: visit `http://localhost:8428/vmui/#/?g0.expr=dice.rolls` in your browser, or query the `dice.rolls` metric manually in the vmui interface.
+
 ## Direct metrics push
 
 Metrics could be ingested into VictoriaMetrics directly with HTTP requests. You can use any compatible OpenTelemetry
diff --git a/docs/guides/vmui-dice-roll.webp b/docs/guides/vmui-dice-roll.webp
new file mode 100644
index 0000000000..300cca63f4
Binary files /dev/null and b/docs/guides/vmui-dice-roll.webp differ