From 04762344c67aabbec4ce1fcb0ae12e3bcc72b20d Mon Sep 17 00:00:00 2001 From: Aliaksandr Valialkin Date: Sun, 23 Feb 2020 13:35:47 +0200 Subject: [PATCH] app/vmagent: initial implementation for vmagent --- .gitignore | 1 + Makefile | 12 +- README.md | 19 +- app/vmagent/Makefile | 73 + app/vmagent/README.md | 151 + app/vmagent/common/push_ctx.go | 70 + app/vmagent/deployment/Dockerfile | 8 + app/vmagent/graphite/request_handler.go | 65 + app/vmagent/influx/request_handler.go | 152 + app/vmagent/main.go | 149 + app/vmagent/opentsdb/request_handler.go | 65 + app/vmagent/opentsdbhttp/request_handler.go | 64 + .../promremotewrite/request_handler.go | 67 + app/vmagent/remotewrite/client.go | 267 ++ app/vmagent/remotewrite/pendingseries.go | 191 ++ app/vmagent/remotewrite/relabel.go | 108 + app/vmagent/remotewrite/remotewrite.go | 127 + app/vmagent/remotewrite/statconn.go | 71 + app/vmagent/vmimport/request_handler.go | 70 + app/vminsert/common/insert_ctx_pool.go | 36 + app/vminsert/graphite/request_handler.go | 150 +- app/vminsert/influx/request_handler.go | 122 +- app/vminsert/main.go | 27 +- app/vminsert/opentsdb/request_handler.go | 149 +- app/vminsert/opentsdbhttp/request_handler.go | 149 +- app/vminsert/prompush/push.go | 97 + .../promremotewrite/request_handler.go | 47 + app/vminsert/vmimport/request_handler.go | 80 +- go.mod | 2 + go.sum | 8 +- lib/filestream/filestream.go | 10 +- lib/flagutil/array.go | 28 + lib/flagutil/array_test.go | 34 + lib/httpserver/metrics.go | 24 +- .../ingestserver}/graphite/server.go | 21 +- .../ingestserver}/opentsdb/listener_switch.go | 0 .../ingestserver}/opentsdb/server.go | 16 +- .../ingestserver}/opentsdbhttp/server.go | 33 +- lib/netutil/tcplistener.go | 5 + lib/persistentqueue/fastqueue.go | 153 + lib/persistentqueue/fastqueue_test.go | 274 ++ lib/persistentqueue/fastqueue_timing_test.go | 66 + lib/persistentqueue/persistentqueue.go | 489 +++ lib/persistentqueue/persistentqueue_test.go | 436 +++ .../persistentqueue_timing_test.go | 69 + lib/prompbmarshal/remote.pb.go | 79 + lib/prompbmarshal/remote.proto | 82 + lib/prompbmarshal/types.pb.go | 216 ++ lib/prompbmarshal/types.proto | 85 + lib/prompbmarshal/util.go | 30 + lib/promrelabel/config.go | 129 + lib/promrelabel/config_test.go | 163 + lib/promrelabel/relabel.go | 268 ++ lib/promrelabel/relabel_test.go | 630 ++++ lib/promrelabel/relabel_timing_test.go | 567 ++++ lib/promrelabel/sort.go | 37 + lib/promrelabel/sort_test.go | 43 + lib/promrelabel/testdata/invalid_config.yml | 3 + .../testdata/relabel_configs_valid.yml | 20 + lib/promscrape/client.go | 134 + lib/promscrape/config.go | 588 ++++ lib/promscrape/config_test.go | 1096 +++++++ lib/promscrape/scraper.go | 243 ++ lib/promscrape/scrapework.go | 261 ++ lib/promscrape/scrapework_test.go | 348 +++ lib/promscrape/scrapework_timing_test.go | 50 + lib/promscrape/statconn.go | 71 + lib/promscrape/targetstatus.go | 127 + .../testdata/empty_target_file_sd.yml | 1 + lib/promscrape/testdata/file_sd.json | 8 + lib/promscrape/testdata/file_sd_1.yml | 3 + lib/promscrape/testdata/password.txt | 1 + lib/promscrape/testdata/prometheus.yml | 6 + lib/promscrape/testdata/ssl-cert-snakeoil.key | 28 + lib/promscrape/testdata/ssl-cert-snakeoil.pem | 17 + .../protoparser}/common/gzip_reader.go | 0 .../protoparser}/common/lines_reader.go | 0 .../protoparser}/common/lines_reader_test.go | 0 lib/protoparser/graphite/streamparser.go | 130 + lib/protoparser/influx/streamparser.go | 153 + lib/protoparser/opentsdb/streamparser.go | 128 + lib/protoparser/opentsdbhttp/parser.go 
| 2 +- lib/protoparser/opentsdbhttp/streamparser.go | 127 + lib/protoparser/prometheus/parser.go | 24 +- .../promremotewrite/streamparser.go | 75 +- .../protoparser}/vmimport/parser.go | 0 .../protoparser}/vmimport/parser_test.go | 0 .../vmimport/parser_timing_test.go | 0 lib/protoparser/vmimport/streamparser.go | 111 + .../concurrencylimiter.go | 5 +- .../klauspost/compress/zlib/reader.go | 183 ++ .../klauspost/compress/zlib/writer.go | 201 ++ vendor/github.com/valyala/fasthttp/.gitignore | 3 + .../github.com/valyala/fasthttp/.travis.yml | 55 + vendor/github.com/valyala/fasthttp/LICENSE | 9 + vendor/github.com/valyala/fasthttp/README.md | 585 ++++ .../github.com/valyala/fasthttp/SECURITY.md | 115 + vendor/github.com/valyala/fasthttp/TODO | 4 + vendor/github.com/valyala/fasthttp/args.go | 588 ++++ .../github.com/valyala/fasthttp/bytesconv.go | 385 +++ .../valyala/fasthttp/bytesconv_32.go | 7 + .../valyala/fasthttp/bytesconv_64.go | 7 + .../valyala/fasthttp/bytesconv_table.go | 10 + vendor/github.com/valyala/fasthttp/client.go | 2370 ++++++++++++++ .../github.com/valyala/fasthttp/coarseTime.go | 13 + .../github.com/valyala/fasthttp/compress.go | 438 +++ vendor/github.com/valyala/fasthttp/cookie.go | 550 ++++ vendor/github.com/valyala/fasthttp/doc.go | 37 + .../valyala/fasthttp/fasthttputil/doc.go | 2 + .../valyala/fasthttp/fasthttputil/ecdsa.key | 5 + .../valyala/fasthttp/fasthttputil/ecdsa.pem | 10 + .../fasthttputil/inmemory_listener.go | 97 + .../fasthttp/fasthttputil/pipeconns.go | 313 ++ .../valyala/fasthttp/fasthttputil/rsa.key | 28 + .../valyala/fasthttp/fasthttputil/rsa.pem | 17 + vendor/github.com/valyala/fasthttp/fs.go | 1273 ++++++++ vendor/github.com/valyala/fasthttp/fuzzit.sh | 22 + vendor/github.com/valyala/fasthttp/go.mod | 11 + vendor/github.com/valyala/fasthttp/go.sum | 13 + vendor/github.com/valyala/fasthttp/header.go | 2342 ++++++++++++++ vendor/github.com/valyala/fasthttp/headers.go | 164 + vendor/github.com/valyala/fasthttp/http.go | 1844 +++++++++++ .../github.com/valyala/fasthttp/lbclient.go | 165 + vendor/github.com/valyala/fasthttp/methods.go | 14 + vendor/github.com/valyala/fasthttp/nocopy.go | 11 + .../github.com/valyala/fasthttp/peripconn.go | 100 + vendor/github.com/valyala/fasthttp/server.go | 2587 ++++++++++++++++ .../valyala/fasthttp/ssl-cert-snakeoil.key | 28 + .../valyala/fasthttp/ssl-cert-snakeoil.pem | 17 + .../valyala/fasthttp/stackless/doc.go | 3 + .../valyala/fasthttp/stackless/func.go | 79 + .../valyala/fasthttp/stackless/writer.go | 138 + vendor/github.com/valyala/fasthttp/status.go | 176 ++ vendor/github.com/valyala/fasthttp/stream.go | 54 + vendor/github.com/valyala/fasthttp/strings.go | 85 + .../github.com/valyala/fasthttp/tcpdialer.go | 473 +++ vendor/github.com/valyala/fasthttp/timer.go | 54 + vendor/github.com/valyala/fasthttp/uri.go | 582 ++++ .../github.com/valyala/fasthttp/uri_unix.go | 12 + .../valyala/fasthttp/uri_windows.go | 12 + .../github.com/valyala/fasthttp/userdata.go | 71 + .../github.com/valyala/fasthttp/workerpool.go | 249 ++ vendor/gopkg.in/yaml.v2/.travis.yml | 16 + vendor/gopkg.in/yaml.v2/LICENSE | 201 ++ vendor/gopkg.in/yaml.v2/LICENSE.libyaml | 31 + vendor/gopkg.in/yaml.v2/NOTICE | 13 + vendor/gopkg.in/yaml.v2/README.md | 133 + vendor/gopkg.in/yaml.v2/apic.go | 739 +++++ vendor/gopkg.in/yaml.v2/decode.go | 815 +++++ vendor/gopkg.in/yaml.v2/emitterc.go | 1685 ++++++++++ vendor/gopkg.in/yaml.v2/encode.go | 390 +++ vendor/gopkg.in/yaml.v2/go.mod | 5 + vendor/gopkg.in/yaml.v2/parserc.go | 1095 +++++++ 
vendor/gopkg.in/yaml.v2/readerc.go | 412 +++ vendor/gopkg.in/yaml.v2/resolve.go | 258 ++ vendor/gopkg.in/yaml.v2/scannerc.go | 2711 +++++++++++++++++ vendor/gopkg.in/yaml.v2/sorter.go | 113 + vendor/gopkg.in/yaml.v2/writerc.go | 26 + vendor/gopkg.in/yaml.v2/yaml.go | 466 +++ vendor/gopkg.in/yaml.v2/yamlh.go | 739 +++++ vendor/gopkg.in/yaml.v2/yamlprivateh.go | 173 ++ vendor/modules.txt | 7 + 162 files changed, 36290 insertions(+), 693 deletions(-) create mode 100644 app/vmagent/Makefile create mode 100644 app/vmagent/README.md create mode 100644 app/vmagent/common/push_ctx.go create mode 100644 app/vmagent/deployment/Dockerfile create mode 100644 app/vmagent/graphite/request_handler.go create mode 100644 app/vmagent/influx/request_handler.go create mode 100644 app/vmagent/main.go create mode 100644 app/vmagent/opentsdb/request_handler.go create mode 100644 app/vmagent/opentsdbhttp/request_handler.go create mode 100644 app/vmagent/promremotewrite/request_handler.go create mode 100644 app/vmagent/remotewrite/client.go create mode 100644 app/vmagent/remotewrite/pendingseries.go create mode 100644 app/vmagent/remotewrite/relabel.go create mode 100644 app/vmagent/remotewrite/remotewrite.go create mode 100644 app/vmagent/remotewrite/statconn.go create mode 100644 app/vmagent/vmimport/request_handler.go create mode 100644 app/vminsert/common/insert_ctx_pool.go create mode 100644 app/vminsert/prompush/push.go create mode 100644 app/vminsert/promremotewrite/request_handler.go create mode 100644 lib/flagutil/array.go create mode 100644 lib/flagutil/array_test.go rename {app/vminsert => lib/ingestserver}/graphite/server.go (81%) rename {app/vminsert => lib/ingestserver}/opentsdb/listener_switch.go (100%) rename {app/vminsert => lib/ingestserver}/opentsdb/server.go (89%) rename {app/vminsert => lib/ingestserver}/opentsdbhttp/server.go (70%) create mode 100644 lib/persistentqueue/fastqueue.go create mode 100644 lib/persistentqueue/fastqueue_test.go create mode 100644 lib/persistentqueue/fastqueue_timing_test.go create mode 100644 lib/persistentqueue/persistentqueue.go create mode 100644 lib/persistentqueue/persistentqueue_test.go create mode 100644 lib/persistentqueue/persistentqueue_timing_test.go create mode 100644 lib/prompbmarshal/remote.pb.go create mode 100644 lib/prompbmarshal/remote.proto create mode 100644 lib/prompbmarshal/types.pb.go create mode 100644 lib/prompbmarshal/types.proto create mode 100644 lib/prompbmarshal/util.go create mode 100644 lib/promrelabel/config.go create mode 100644 lib/promrelabel/config_test.go create mode 100644 lib/promrelabel/relabel.go create mode 100644 lib/promrelabel/relabel_test.go create mode 100644 lib/promrelabel/relabel_timing_test.go create mode 100644 lib/promrelabel/sort.go create mode 100644 lib/promrelabel/sort_test.go create mode 100644 lib/promrelabel/testdata/invalid_config.yml create mode 100644 lib/promrelabel/testdata/relabel_configs_valid.yml create mode 100644 lib/promscrape/client.go create mode 100644 lib/promscrape/config.go create mode 100644 lib/promscrape/config_test.go create mode 100644 lib/promscrape/scraper.go create mode 100644 lib/promscrape/scrapework.go create mode 100644 lib/promscrape/scrapework_test.go create mode 100644 lib/promscrape/scrapework_timing_test.go create mode 100644 lib/promscrape/statconn.go create mode 100644 lib/promscrape/targetstatus.go create mode 100644 lib/promscrape/testdata/empty_target_file_sd.yml create mode 100644 lib/promscrape/testdata/file_sd.json create mode 100644 
lib/promscrape/testdata/file_sd_1.yml create mode 100644 lib/promscrape/testdata/password.txt create mode 100644 lib/promscrape/testdata/prometheus.yml create mode 100644 lib/promscrape/testdata/ssl-cert-snakeoil.key create mode 100644 lib/promscrape/testdata/ssl-cert-snakeoil.pem rename {app/vminsert => lib/protoparser}/common/gzip_reader.go (100%) rename {app/vminsert => lib/protoparser}/common/lines_reader.go (100%) rename {app/vminsert => lib/protoparser}/common/lines_reader_test.go (100%) create mode 100644 lib/protoparser/graphite/streamparser.go create mode 100644 lib/protoparser/influx/streamparser.go create mode 100644 lib/protoparser/opentsdb/streamparser.go create mode 100644 lib/protoparser/opentsdbhttp/streamparser.go rename app/vminsert/prometheus/request_handler.go => lib/protoparser/promremotewrite/streamparser.go (59%) rename {app/vminsert => lib/protoparser}/vmimport/parser.go (100%) rename {app/vminsert => lib/protoparser}/vmimport/parser_test.go (100%) rename {app/vminsert => lib/protoparser}/vmimport/parser_timing_test.go (100%) create mode 100644 lib/protoparser/vmimport/streamparser.go rename {app/vminsert/concurrencylimiter => lib/writeconcurrencylimiter}/concurrencylimiter.go (92%) create mode 100644 vendor/github.com/klauspost/compress/zlib/reader.go create mode 100644 vendor/github.com/klauspost/compress/zlib/writer.go create mode 100644 vendor/github.com/valyala/fasthttp/.gitignore create mode 100644 vendor/github.com/valyala/fasthttp/.travis.yml create mode 100644 vendor/github.com/valyala/fasthttp/LICENSE create mode 100644 vendor/github.com/valyala/fasthttp/README.md create mode 100644 vendor/github.com/valyala/fasthttp/SECURITY.md create mode 100644 vendor/github.com/valyala/fasthttp/TODO create mode 100644 vendor/github.com/valyala/fasthttp/args.go create mode 100644 vendor/github.com/valyala/fasthttp/bytesconv.go create mode 100644 vendor/github.com/valyala/fasthttp/bytesconv_32.go create mode 100644 vendor/github.com/valyala/fasthttp/bytesconv_64.go create mode 100644 vendor/github.com/valyala/fasthttp/bytesconv_table.go create mode 100644 vendor/github.com/valyala/fasthttp/client.go create mode 100644 vendor/github.com/valyala/fasthttp/coarseTime.go create mode 100644 vendor/github.com/valyala/fasthttp/compress.go create mode 100644 vendor/github.com/valyala/fasthttp/cookie.go create mode 100644 vendor/github.com/valyala/fasthttp/doc.go create mode 100644 vendor/github.com/valyala/fasthttp/fasthttputil/doc.go create mode 100644 vendor/github.com/valyala/fasthttp/fasthttputil/ecdsa.key create mode 100644 vendor/github.com/valyala/fasthttp/fasthttputil/ecdsa.pem create mode 100644 vendor/github.com/valyala/fasthttp/fasthttputil/inmemory_listener.go create mode 100644 vendor/github.com/valyala/fasthttp/fasthttputil/pipeconns.go create mode 100644 vendor/github.com/valyala/fasthttp/fasthttputil/rsa.key create mode 100644 vendor/github.com/valyala/fasthttp/fasthttputil/rsa.pem create mode 100644 vendor/github.com/valyala/fasthttp/fs.go create mode 100644 vendor/github.com/valyala/fasthttp/fuzzit.sh create mode 100644 vendor/github.com/valyala/fasthttp/go.mod create mode 100644 vendor/github.com/valyala/fasthttp/go.sum create mode 100644 vendor/github.com/valyala/fasthttp/header.go create mode 100644 vendor/github.com/valyala/fasthttp/headers.go create mode 100644 vendor/github.com/valyala/fasthttp/http.go create mode 100644 vendor/github.com/valyala/fasthttp/lbclient.go create mode 100644 vendor/github.com/valyala/fasthttp/methods.go create mode 100644 
vendor/github.com/valyala/fasthttp/nocopy.go create mode 100644 vendor/github.com/valyala/fasthttp/peripconn.go create mode 100644 vendor/github.com/valyala/fasthttp/server.go create mode 100644 vendor/github.com/valyala/fasthttp/ssl-cert-snakeoil.key create mode 100644 vendor/github.com/valyala/fasthttp/ssl-cert-snakeoil.pem create mode 100644 vendor/github.com/valyala/fasthttp/stackless/doc.go create mode 100644 vendor/github.com/valyala/fasthttp/stackless/func.go create mode 100644 vendor/github.com/valyala/fasthttp/stackless/writer.go create mode 100644 vendor/github.com/valyala/fasthttp/status.go create mode 100644 vendor/github.com/valyala/fasthttp/stream.go create mode 100644 vendor/github.com/valyala/fasthttp/strings.go create mode 100644 vendor/github.com/valyala/fasthttp/tcpdialer.go create mode 100644 vendor/github.com/valyala/fasthttp/timer.go create mode 100644 vendor/github.com/valyala/fasthttp/uri.go create mode 100644 vendor/github.com/valyala/fasthttp/uri_unix.go create mode 100644 vendor/github.com/valyala/fasthttp/uri_windows.go create mode 100644 vendor/github.com/valyala/fasthttp/userdata.go create mode 100644 vendor/github.com/valyala/fasthttp/workerpool.go create mode 100644 vendor/gopkg.in/yaml.v2/.travis.yml create mode 100644 vendor/gopkg.in/yaml.v2/LICENSE create mode 100644 vendor/gopkg.in/yaml.v2/LICENSE.libyaml create mode 100644 vendor/gopkg.in/yaml.v2/NOTICE create mode 100644 vendor/gopkg.in/yaml.v2/README.md create mode 100644 vendor/gopkg.in/yaml.v2/apic.go create mode 100644 vendor/gopkg.in/yaml.v2/decode.go create mode 100644 vendor/gopkg.in/yaml.v2/emitterc.go create mode 100644 vendor/gopkg.in/yaml.v2/encode.go create mode 100644 vendor/gopkg.in/yaml.v2/go.mod create mode 100644 vendor/gopkg.in/yaml.v2/parserc.go create mode 100644 vendor/gopkg.in/yaml.v2/readerc.go create mode 100644 vendor/gopkg.in/yaml.v2/resolve.go create mode 100644 vendor/gopkg.in/yaml.v2/scannerc.go create mode 100644 vendor/gopkg.in/yaml.v2/sorter.go create mode 100644 vendor/gopkg.in/yaml.v2/writerc.go create mode 100644 vendor/gopkg.in/yaml.v2/yaml.go create mode 100644 vendor/gopkg.in/yaml.v2/yamlh.go create mode 100644 vendor/gopkg.in/yaml.v2/yamlprivateh.go diff --git a/.gitignore b/.gitignore index 12bcf1ea3..ba509278a 100644 --- a/.gitignore +++ b/.gitignore @@ -8,6 +8,7 @@ *.swp /gocache-for-docker /victoria-metrics-data +/vmagent-remotewrite-data /vmstorage-data /vmselect-cache /package/temp-deb-* diff --git a/Makefile b/Makefile index 350daf7b9..5fd444831 100644 --- a/Makefile +++ b/Makefile @@ -11,7 +11,10 @@ endif GO_BUILDINFO = -X '$(PKG_PREFIX)/lib/buildinfo.Version=$(APP_NAME)-$(shell date -u +'%Y%m%d-%H%M%S')-$(BUILDINFO_TAG)' all: \ - victoria-metrics-prod + victoria-metrics-prod \ + vmagent-prod \ + vmbackup-prod \ + vmrestore-prod include app/*/Makefile include deployment/*/Makefile @@ -21,15 +24,18 @@ clean: publish: \ publish-victoria-metrics \ + publish-vmagent \ publish-vmbackup \ publish-vmrestore package: \ package-victoria-metrics \ + package-vmagent \ package-vmbackup \ package-vmrestore vmutils: \ + vmagent \ vmbackup \ vmrestore @@ -42,9 +48,10 @@ release-victoria-metrics: victoria-metrics-prod sha256sum victoria-metrics-$(PKG_TAG).tar.gz > victoria-metrics-$(PKG_TAG)_checksums.txt release-vmutils: \ + vmagent-prod \ vmbackup-prod \ vmrestore-prod - cd bin && tar czf vmutils-$(PKG_TAG).tar.gz vmbackup-prod vmrestore-prod && \ + cd bin && tar czf vmutils-$(PKG_TAG).tar.gz vmagent-prod vmbackup-prod vmrestore-prod && \ sha256sum 
vmutils-$(PKG_TAG).tar.gz > vmutils-$(PKG_TAG)_checksums.txt pprof-cpu: @@ -70,6 +77,7 @@ errcheck: install-errcheck errcheck -exclude=errcheck_excludes.txt ./app/vminsert/... errcheck -exclude=errcheck_excludes.txt ./app/vmselect/... errcheck -exclude=errcheck_excludes.txt ./app/vmstorage/... + errcheck -exclude=errcheck_excludes.txt ./app/vmagent/... errcheck -exclude=errcheck_excludes.txt ./app/vmbackup/... errcheck -exclude=errcheck_excludes.txt ./app/vmrestore/... errcheck -exclude=errcheck_excludes.txt ./app/vmalert/... diff --git a/README.md b/README.md index cc615c1ed..90296c02c 100644 --- a/README.md +++ b/README.md @@ -53,7 +53,9 @@ Cluster version is available [here](https://github.com/VictoriaMetrics/VictoriaM to S3 or GCS with [vmbackup](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/app/vmbackup/README.md) / [vmrestore](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/app/vmrestore/README.md). See [this article](https://medium.com/@valyala/speeding-up-backups-for-big-time-series-databases-533c1a927883) for more details. * Storage is protected from corruption on unclean shutdown (i.e. OOM, hardware reset or `kill -9`) thanks to [the storage architecture](https://medium.com/@valyala/how-victoriametrics-makes-instant-snapshots-for-multi-terabyte-time-series-data-e1f3fb0e0282). -* Supports metrics' ingestion and [backfilling](#backfilling) via the following protocols: +* Supports metrics' scraping, ingestion and [backfilling](#backfilling) via the following protocols: + * [Metrics from Prometheus exporters](https://github.com/prometheus/docs/blob/master/content/docs/instrumenting/exposition_formats.md#text-based-format) + such as [node_exporter](https://github.com/prometheus/node_exporter). See [these docs](#how-to-scrape-prometheus-exporters-such-as-node-exporter) for details. * [Prometheus remote write API](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write) * [InfluxDB line protocol](#how-to-send-data-from-influxdb-compatible-agents-such-as-telegraf) * [Graphite plaintext protocol](#how-to-send-data-from-graphite-compatible-agents-such-as-statsd) with [tags](https://graphite.readthedocs.io/en/latest/tags.html#carbon) @@ -75,6 +77,7 @@ Cluster version is available [here](https://github.com/VictoriaMetrics/VictoriaM - [Grafana setup](#grafana-setup) - [How to upgrade VictoriaMetrics?](#how-to-upgrade-victoriametrics) - [How to apply new config to VictoriaMetrics?](#how-to-apply-new-config-to-victoriametrics) + - [How to scrape Prometheus exporters such as node_exporter?](#how-to-scrape-prometheus-exporters-such-as-node-exporter) - [How to send data from InfluxDB-compatible agents such as Telegraf?](#how-to-send-data-from-influxdb-compatible-agents-such-as-telegraf) - [How to send data from Graphite-compatible agents such as StatsD?](#how-to-send-data-from-graphite-compatible-agents-such-as-statsd) - [Querying Graphite data](#querying-graphite-data) @@ -238,6 +241,20 @@ Prometheus doesn't drop data during VictoriaMetrics restart. See [this article](https://grafana.com/blog/2019/03/25/whats-new-in-prometheus-2.8-wal-based-remote-write/) for details. +### How to scrape Prometheus exporters such as [node-exporter](https://github.com/prometheus/node_exporter)? + +VictoriaMetrics can be used as drop-in replacement for Prometheus for scraping targets configured in `prometheus.yml` config file according to [the specification](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#configuration-file). 
+Just set `-promscrape.config` command-line flag to the path to `prometheus.yml` config - and VictoriaMetrics should start scraping the configured targets. +Currently the following [scrape_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config) types are supported: + +* [static_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#static_config) +* [file_sd_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#file_sd_config) + +In the future other `*_sd_config` types will be supported. + +See also [vmagent](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/app/vmagent/README.md), which can be used as drop-in replacement for Prometheus. + + ### How to send data from InfluxDB-compatible agents such as [Telegraf](https://www.influxdata.com/time-series-platform/telegraf/)? Just use `http://:8428` url instead of InfluxDB url in agents' configs. diff --git a/app/vmagent/Makefile b/app/vmagent/Makefile new file mode 100644 index 000000000..e2fbfc2b3 --- /dev/null +++ b/app/vmagent/Makefile @@ -0,0 +1,73 @@ +# All these commands must run from repository root. + +vmagent: + APP_NAME=vmagent $(MAKE) app-local + +vmagent-prod: + APP_NAME=vmagent $(MAKE) app-via-docker + +vmagent-pure-prod: + APP_NAME=vmagent $(MAKE) app-via-docker-pure + +vmagent-amd64-prod: + APP_NAME=vmagent $(MAKE) app-via-docker-amd64 + +vmagent-arm-prod: + APP_NAME=vmagent $(MAKE) app-via-docker-arm + +vmagent-arm64-prod: + APP_NAME=vmagent $(MAKE) app-via-docker-arm64 + +vmagent-ppc64le-prod: + APP_NAME=vmagent $(MAKE) app-via-docker-ppc64le + +vmagent-386-prod: + APP_NAME=vmagent $(MAKE) app-via-docker-386 + +package-vmagent: + APP_NAME=vmagent $(MAKE) package-via-docker + +package-vmagent-pure: + APP_NAME=vmagent $(MAKE) package-via-docker-pure + +package-vmagent-amd64: + APP_NAME=vmagent $(MAKE) package-via-docker-amd64 + +package-vmagent-arm: + APP_NAME=vmagent $(MAKE) package-via-docker-arm + +package-vmagent-arm64: + APP_NAME=vmagent $(MAKE) package-via-docker-arm64 + +package-vmagent-ppc64le: + APP_NAME=vmagent $(MAKE) package-via-docker-ppc64le + +package-vmagent-386: + APP_NAME=vmagent $(MAKE) package-via-docker-386 + +publish-vmagent: + APP_NAME=vmagent $(MAKE) publish-via-docker + +run-vmagent: + mkdir -p vmagent-data + DOCKER_OPTS='-v $(shell pwd)/vmagent-data:/vmagent-data' \ + APP_NAME=vmagent \ + $(MAKE) run-via-docker + +vmagent-amd64: + CGO_ENABLED=1 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -mod=vendor -ldflags "$(GO_BUILDINFO)" -o bin/vmagent-amd64 ./app/vmagent + +vmagent-arm: + CGO_ENABLED=0 GOOS=linux GOARCH=arm GO111MODULE=on go build -mod=vendor -ldflags "$(GO_BUILDINFO)" -o bin/vmagent-arm ./app/vmagent + +vmagent-arm64: + CGO_ENABLED=0 GOOS=linux GOARCH=arm64 GO111MODULE=on go build -mod=vendor -ldflags "$(GO_BUILDINFO)" -o bin/vmagent-arm64 ./app/vmagent + +vmagent-ppc64le: + CGO_ENABLED=0 GOOS=linux GOARCH=ppc64le GO111MODULE=on go build -mod=vendor -ldflags "$(GO_BUILDINFO)" -o bin/vmagent-ppc64le ./app/vmagent + +vmagent-386: + CGO_ENABLED=0 GOOS=linux GOARCH=386 GO111MODULE=on go build -mod=vendor -ldflags "$(GO_BUILDINFO)" -o bin/vmagent-386 ./app/vmagent + +vmagent-pure: + APP_NAME=vmagent $(MAKE) app-local-pure diff --git a/app/vmagent/README.md b/app/vmagent/README.md new file mode 100644 index 000000000..4a0c28086 --- /dev/null +++ b/app/vmagent/README.md @@ -0,0 +1,151 @@ +## vmagent + +`vmagent` collects metrics from various sources and pushes them to any remote storage for 
Prometheus +from [this list](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage). +The recommended remote storage is [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics). + + +### Features + +* Can be used as a drop-in replacement for Prometheus for scraping targets such as [node_exporter](https://github.com/prometheus/node_exporter). + Just use the `-promscrape.config=/path/to/prometheus.yml` command-line flag. +* Accepts data via all the ingestion protocols supported by VictoriaMetrics: + * Influx line protocol via `http://:8429/write`. See [these docs](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/README.md#how-to-send-data-from-influxdb-compatible-agents-such-as-telegraf). + * JSON lines import protocol via `http://:8429/api/v1/import`. See [these docs](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/README.md#how-to-import-time-series-data). + * Graphite plaintext protocol if the `-graphiteListenAddr` command-line flag is set. See [these docs](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/README.md#how-to-send-data-from-graphite-compatible-agents-such-as-statsd). + * OpenTSDB telnet and HTTP protocols if the `-opentsdbListenAddr` command-line flag is set. See [these docs](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/README.md#how-to-send-data-from-opentsdb-compatible-agents). + * Prometheus remote write protocol via `http://:8429/api/v1/write`. +* Prometheus relabeling can be applied to all the collected metrics. See [these docs](#relabeling). +* Additional labels can be added to all the collected metrics before sending them to remote storage. + Just pass these labels to the `-remoteWrite.label` command-line flag: `-remoteWrite.label="labelName=labelValue"`. +* Collected metrics can be sent simultaneously to multiple remote storage systems by providing multiple `-remoteWrite.url` args. +* Works in environments with unstable connections to remote storage. If the remote storage is unavailable, the collected metrics + are buffered at `-remoteWrite.tmpDataPath` until free space is available. The buffered metrics are sent to remote storage + as soon as the connection to remote storage is restored. +* Uses lower amounts of RAM, CPU, disk IO and network bandwidth compared to Prometheus. + + +### Quick start + +Just pass the following flags to `vmagent` in order to start scraping Prometheus targets: + +* `-promscrape.config` with the path to the Prometheus config file (it is usually located at `/etc/prometheus/prometheus.yml`) +* `-remoteWrite.url` with the remote storage endpoint that accepts data via the Prometheus remote_write API. + See [the list of supported remote storage systems](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage). + `vmagent` supports sending data to multiple remote storage systems in parallel. Just set multiple `-remoteWrite.url` args. + +Example command line: + +``` +/path/to/vmagent -promscrape.config=/path/to/prometheus.yml -remoteWrite.url=https://victoria-metrics-host:8428/api/v1/write +``` + +If you only need to collect Influx data, the following command line is enough: + +``` +/path/to/vmagent -remoteWrite.url=https://victoria-metrics-host:8428/api/v1/write +``` + +Then send Influx data to `http://vmagent-host:8429/write`. See [these docs](https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/README.md#how-to-send-data-from-influxdb-compatible-agents-such-as-telegraf) for more details.
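+
+The following `prometheus.yml` sketch illustrates the quick start above. The job name and target address are hypothetical placeholders and should be replaced with real scrape targets:
+
+```
+global:
+  scrape_interval: 10s
+
+scrape_configs:
+  # Hypothetical job scraping a single node_exporter instance.
+  - job_name: node_exporter
+    static_configs:
+      - targets: ["localhost:9100"]
+```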
+ + +### How to collect metrics in Prometheus format? + +`vmagent` can be used as a Prometheus replacement for metrics' scraping if the path to a [Prometheus config file](https://prometheus.io/docs/prometheus/latest/configuration/configuration/) +is passed to the `-promscrape.config` command-line flag. This file is usually named `prometheus.yml`. +`vmagent` takes into account the following sections from the [Prometheus config file](https://prometheus.io/docs/prometheus/latest/configuration/configuration/): + +* `global` +* `scrape_configs` + +All the other sections are ignored, including the [remote_write](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write) section. +Use `-remoteWrite.*` command-line flags instead for configuring remote write settings: + +* `-remoteWrite.url` for pointing to remote storage. Data to remote storage can be sent either via HTTP or HTTPS. See `-remoteWrite.tls*` flags for details. +* `-remoteWrite.label` for adding labels to metrics before sending them to remote storage. +* `-remoteWrite.relabelConfig` for applying relabeling to metrics before sending them to remote storage. + +The following scrape types in the [scrape_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config) section are supported: + +* `static_configs` - for scraping statically defined targets. See [these docs](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#static_config) for details. +* `file_sd_configs` - for scraping targets defined in external files aka file-based service discovery. + See [these docs](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#file_sd_config) for details. + +File feature requests at [our issue tracker](https://github.com/VictoriaMetrics/VictoriaMetrics/issues) if you need other service discovery mechanisms to be supported by `vmagent`. + + +### Adding labels to metrics + +Labels can be added to metrics via the following mechanisms: + +* Via the `global -> external_labels` section in the `-promscrape.config` file. These labels are added only to metrics scraped from targets configured in the `-promscrape.config` file. +* Via the `-remoteWrite.label` command-line flag. These labels are added to all the collected metrics before sending them to `-remoteWrite.url`. + + +### Relabeling + +`vmagent` supports [Prometheus relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config). +Additionally, it provides the following extra actions: + +* `replace_all`: replaces all the occurrences of `regex` in the values of `source_labels` with the `replacement` +* `labelmap_all`: replaces all the occurrences of `regex` in all the labels with the `replacement` + +The relabeling can be defined in the following places: + +* At the `scrape_config -> relabel_configs` section in the `-promscrape.config` file. This relabeling is applied when parsing the file during `vmagent` startup + or during config reload after sending the `SIGHUP` signal to `vmagent` via `kill -HUP`. +* At the `scrape_config -> metric_relabel_configs` section in the `-promscrape.config` file. This relabeling is applied to metrics after each scrape for the configured targets. + Changes to this section can be applied after sending the `SIGHUP` signal to `vmagent`. +* At the `-remoteWrite.relabelConfig` file. This relabeling is applied to all the collected metrics before sending them to `-remoteWrite.url`. An example sketch is shown right after this list.
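+
+For illustration, a `-remoteWrite.relabelConfig` file could look as follows. The label names and regexps are made-up examples combining the standard `labeldrop` action with the `replace_all` extra action described above:
+
+```
+# Drop the hypothetical `env_suffix` label from all the metrics.
+- action: labeldrop
+  regex: env_suffix
+# Replace all the dots with underscores in the values of the hypothetical `instance` label.
+- action: replace_all
+  source_labels: ["instance"]
+  target_label: instance
+  regex: "\\."
+  replacement: "_"
+```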
+ +Read more about relabeling in the following articles: + +* [Life of a label](https://www.robustperception.io/life-of-a-label) +* [Discarding targets and timeseries with relabeling](https://www.robustperception.io/relabelling-can-discard-targets-timeseries-and-alerts) +* [Dropping labels at scrape time](https://www.robustperception.io/dropping-metrics-at-scrape-time-with-prometheus) +* [Extracting labels from legacy metric names](https://www.robustperception.io/extracting-labels-from-legacy-metric-names) +* [relabel_configs vs metric_relabel_configs](https://www.robustperception.io/relabel_configs-vs-metric_relabel_configs) + + +### Monitoring + +`vmagent` exports various metrics in Prometheus exposition format at the `/metrics` page. It is recommended to set up regular scraping of this page +either via `vmagent` itself or via Prometheus, so the exported metrics can be analyzed later. + + +### Troubleshooting + +* It is recommended to increase the maximum number of open files in the system (`ulimit -n`) when scraping a big number of targets, + since `vmagent` establishes at least a single TCP connection per target. + +* It is recommended to increase `-remoteWrite.queues` if `vmagent` collects more than 100K samples per second + and the `vmagent_remotewrite_pending_data_bytes` metric exported by `vmagent` at the `/metrics` page constantly grows. + +* `vmagent` buffers scraped data in the `-remoteWrite.tmpDataPath` directory until it is sent to `-remoteWrite.url`. + The directory can grow big when the remote storage is unavailable for extended periods of time. If you don't want + to send all the data from the directory to the remote storage, just stop `vmagent` and delete the directory. + + +### How to build from sources + +It is recommended to use [binary releases](https://github.com/VictoriaMetrics/VictoriaMetrics/releases) - `vmagent` is located in `vmutils-*` archives there. + + +#### Development build + +1. [Install Go](https://golang.org/doc/install). The minimum supported version is Go 1.12. +2. Run `make vmagent` from the root folder of the repository. + It builds the `vmagent` binary and puts it into the `bin` folder. + +#### Production build + +1. [Install docker](https://docs.docker.com/install/). +2. Run `make vmagent-prod` from the root folder of the repository. + It builds the `vmagent-prod` binary and puts it into the `bin` folder. + +#### Building docker images + +Run `make package-vmagent`. It builds the `victoriametrics/vmagent:` docker image locally. +`` is an auto-generated image tag, which depends on the source code in the repository. +The `` may be manually set via `PKG_TAG=foobar make package-vmagent`. + diff --git a/app/vmagent/common/push_ctx.go b/app/vmagent/common/push_ctx.go new file mode 100644 index 000000000..7d2686dd6 --- /dev/null +++ b/app/vmagent/common/push_ctx.go @@ -0,0 +1,70 @@ +package common + +import ( + "runtime" + "sync" + + "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal" +) + +// PushCtx is a context used for populating WriteRequest. +type PushCtx struct { + WriteRequest prompbmarshal.WriteRequest + + // Labels contains flat list of all the labels used in WriteRequest. + Labels []prompbmarshal.Label + + // Samples contains flat list of all the samples used in WriteRequest. + Samples []prompbmarshal.Sample +} + +// Reset resets ctx.
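+//
+// Label and sample references inside WriteRequest.Timeseries are cleared
+// before the slices are truncated, so a pooled ctx doesn't retain pointers
+// to data from the previous push.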
+func (ctx *PushCtx) Reset() { + tss := ctx.WriteRequest.Timeseries + for i := range tss { + ts := &tss[i] + ts.Labels = nil + ts.Samples = nil + } + ctx.WriteRequest.Timeseries = ctx.WriteRequest.Timeseries[:0] + + labels := ctx.Labels + for i := range labels { + label := &labels[i] + label.Name = "" + label.Value = "" + } + ctx.Labels = ctx.Labels[:0] + + ctx.Samples = ctx.Samples[:0] +} + +// GetPushCtx returns PushCtx from pool. +// +// Call PutPushCtx when the ctx is no longer needed. +func GetPushCtx() *PushCtx { + select { + case ctx := <-pushCtxPoolCh: + return ctx + default: + if v := pushCtxPool.Get(); v != nil { + return v.(*PushCtx) + } + return &PushCtx{} + } +} + +// PutPushCtx returns ctx to the pool. +// +// ctx mustn't be used after returning to the pool. +func PutPushCtx(ctx *PushCtx) { + ctx.Reset() + select { + case pushCtxPoolCh <- ctx: + default: + pushCtxPool.Put(ctx) + } +} + +var pushCtxPool sync.Pool +var pushCtxPoolCh = make(chan *PushCtx, runtime.GOMAXPROCS(-1)) diff --git a/app/vmagent/deployment/Dockerfile b/app/vmagent/deployment/Dockerfile new file mode 100644 index 000000000..6bead7b08 --- /dev/null +++ b/app/vmagent/deployment/Dockerfile @@ -0,0 +1,8 @@ +ARG certs_image +FROM $certs_image AS certs +FROM scratch +COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt +ARG src_binary +COPY $src_binary ./vmagent-prod +EXPOSE 8429 +ENTRYPOINT ["/vmagent-prod"] diff --git a/app/vmagent/graphite/request_handler.go b/app/vmagent/graphite/request_handler.go new file mode 100644 index 000000000..fd0d96cd6 --- /dev/null +++ b/app/vmagent/graphite/request_handler.go @@ -0,0 +1,65 @@ +package graphite + +import ( + "io" + + "github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/common" + "github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/remotewrite" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal" + parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/graphite" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter" + "github.com/VictoriaMetrics/metrics" +) + +var ( + rowsInserted = metrics.NewCounter(`vmagent_rows_inserted_total{type="graphite"}`) + rowsPerInsert = metrics.NewHistogram(`vmagent_rows_per_insert{type="graphite"}`) +) + +// InsertHandler processes remote write for graphite plaintext protocol. 
+// +// See https://graphite.readthedocs.io/en/latest/feeding-carbon.html#the-plaintext-protocol +func InsertHandler(r io.Reader) error { + return writeconcurrencylimiter.Do(func() error { + return parser.ParseStream(r, insertRows) + }) +} + +func insertRows(rows []parser.Row) error { + ctx := common.GetPushCtx() + defer common.PutPushCtx(ctx) + + tssDst := ctx.WriteRequest.Timeseries[:0] + labels := ctx.Labels[:0] + samples := ctx.Samples[:0] + for i := range rows { + r := &rows[i] + labelsLen := len(labels) + labels = append(labels, prompbmarshal.Label{ + Name: "__name__", + Value: r.Metric, + }) + for j := range r.Tags { + tag := &r.Tags[j] + labels = append(labels, prompbmarshal.Label{ + Name: tag.Key, + Value: tag.Value, + }) + } + samples = append(samples, prompbmarshal.Sample{ + Value: r.Value, + Timestamp: r.Timestamp, + }) + tssDst = append(tssDst, prompbmarshal.TimeSeries{ + Labels: labels[labelsLen:], + Samples: samples[len(samples)-1:], + }) + } + ctx.WriteRequest.Timeseries = tssDst + ctx.Labels = labels + ctx.Samples = samples + remotewrite.Push(&ctx.WriteRequest) + rowsInserted.Add(len(rows)) + rowsPerInsert.Update(float64(len(rows))) + return nil +} diff --git a/app/vmagent/influx/request_handler.go b/app/vmagent/influx/request_handler.go new file mode 100644 index 000000000..e0ed2ad92 --- /dev/null +++ b/app/vmagent/influx/request_handler.go @@ -0,0 +1,152 @@ +package influx + +import ( + "flag" + "net/http" + "runtime" + "sync" + + "github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/common" + "github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/remotewrite" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal" + parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/influx" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter" + "github.com/VictoriaMetrics/metrics" +) + +var ( + measurementFieldSeparator = flag.String("influxMeasurementFieldSeparator", "_", "Separator for '{measurement}{separator}{field_name}' metric name when inserted via Influx line protocol") + skipSingleField = flag.Bool("influxSkipSingleField", false, "Uses '{measurement}' instead of '{measurement}{separator}{field_name}' for metic name if Influx line contains only a single field") +) + +var ( + rowsInserted = metrics.NewCounter(`vmagent_rows_inserted_total{type="influx"}`) + rowsPerInsert = metrics.NewHistogram(`vmagent_rows_per_insert{type="influx"}`) +) + +// InsertHandler processes remote write for influx line protocol. 
+// +// See https://github.com/influxdata/influxdb/blob/4cbdc197b8117fee648d62e2e5be75c6575352f0/tsdb/README.md +func InsertHandler(req *http.Request) error { + return writeconcurrencylimiter.Do(func() error { + return parser.ParseStream(req, insertRows) + }) +} + +func insertRows(db string, rows []parser.Row) error { + ctx := getPushCtx() + defer putPushCtx(ctx) + + rowsTotal := 0 + tssDst := ctx.ctx.WriteRequest.Timeseries[:0] + labels := ctx.ctx.Labels[:0] + samples := ctx.ctx.Samples[:0] + commonLabels := ctx.commonLabels[:0] + buf := ctx.buf[:0] + for i := range rows { + r := &rows[i] + commonLabels = commonLabels[:0] + hasDBLabel := false + for j := range r.Tags { + tag := &r.Tags[j] + if tag.Key == "db" { + hasDBLabel = true + } + commonLabels = append(commonLabels, prompbmarshal.Label{ + Name: tag.Key, + Value: tag.Value, + }) + } + if len(db) > 0 && !hasDBLabel { + commonLabels = append(commonLabels, prompbmarshal.Label{ + Name: "db", + Value: db, + }) + } + ctx.metricGroupBuf = append(ctx.metricGroupBuf[:0], r.Measurement...) + skipFieldKey := len(r.Fields) == 1 && *skipSingleField + if len(ctx.metricGroupBuf) > 0 && !skipFieldKey { + ctx.metricGroupBuf = append(ctx.metricGroupBuf, *measurementFieldSeparator...) + } + for j := range r.Fields { + f := &r.Fields[j] + bufLen := len(buf) + buf = append(buf, ctx.metricGroupBuf...) + if !skipFieldKey { + buf = append(buf, f.Key...) + } + metricGroup := bytesutil.ToUnsafeString(buf[bufLen:]) + labelsLen := len(labels) + labels = append(labels, prompbmarshal.Label{ + Name: "__name__", + Value: metricGroup, + }) + labels = append(labels, commonLabels...) + samples = append(samples, prompbmarshal.Sample{ + Timestamp: r.Timestamp, + Value: f.Value, + }) + tssDst = append(tssDst, prompbmarshal.TimeSeries{ + Labels: labels[labelsLen:], + Samples: samples[len(samples)-1:], + }) + } + rowsTotal += len(r.Fields) + } + ctx.buf = buf + ctx.ctx.WriteRequest.Timeseries = tssDst + ctx.ctx.Labels = labels + ctx.ctx.Samples = samples + ctx.commonLabels = commonLabels + remotewrite.Push(&ctx.ctx.WriteRequest) + rowsInserted.Add(rowsTotal) + rowsPerInsert.Update(float64(rowsTotal)) + + return nil +} + +type pushCtx struct { + ctx common.PushCtx + commonLabels []prompbmarshal.Label + metricGroupBuf []byte + buf []byte +} + +func (ctx *pushCtx) reset() { + ctx.ctx.Reset() + + commonLabels := ctx.commonLabels + for i := range commonLabels { + label := &commonLabels[i] + label.Name = "" + label.Value = "" + } + + ctx.metricGroupBuf = ctx.metricGroupBuf[:0] + ctx.buf = ctx.buf[:0] +} + +func getPushCtx() *pushCtx { + select { + case ctx := <-pushCtxPoolCh: + return ctx + default: + if v := pushCtxPool.Get(); v != nil { + return v.(*pushCtx) + } + return &pushCtx{} + } +} + +func putPushCtx(ctx *pushCtx) { + ctx.reset() + select { + case pushCtxPoolCh <- ctx: + default: + pushCtxPool.Put(ctx) + } +} + +var pushCtxPool sync.Pool +var pushCtxPoolCh = make(chan *pushCtx, runtime.GOMAXPROCS(-1)) diff --git a/app/vmagent/main.go b/app/vmagent/main.go new file mode 100644 index 000000000..264a0f5a9 --- /dev/null +++ b/app/vmagent/main.go @@ -0,0 +1,149 @@ +package main + +import ( + "flag" + "fmt" + "net/http" + "strings" + "time" + + "github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/graphite" + "github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/influx" + "github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/opentsdb" + "github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/opentsdbhttp" + 
"github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/promremotewrite" + "github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/remotewrite" + "github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/vmimport" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/buildinfo" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/envflag" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver" + graphiteserver "github.com/VictoriaMetrics/VictoriaMetrics/lib/ingestserver/graphite" + opentsdbserver "github.com/VictoriaMetrics/VictoriaMetrics/lib/ingestserver/opentsdb" + opentsdbhttpserver "github.com/VictoriaMetrics/VictoriaMetrics/lib/ingestserver/opentsdbhttp" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/procutil" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter" + "github.com/VictoriaMetrics/metrics" +) + +var ( + httpListenAddr = flag.String("httpListenAddr", ":8429", "TCP address to listen for http connections") + graphiteListenAddr = flag.String("graphiteListenAddr", "", "TCP and UDP address to listen for Graphite plaintext data. Usually :2003 must be set. Doesn't work if empty") + opentsdbListenAddr = flag.String("opentsdbListenAddr", "", "TCP and UDP address to listen for OpentTSDB metrics. "+ + "Telnet put messages and HTTP /api/put messages are simultaneously served on TCP port. "+ + "Usually :4242 must be set. Doesn't work if empty") + opentsdbHTTPListenAddr = flag.String("opentsdbHTTPListenAddr", "", "TCP address to listen for OpentTSDB HTTP put requests. Usually :4242 must be set. Doesn't work if empty") +) + +var ( + graphiteServer *graphiteserver.Server + opentsdbServer *opentsdbserver.Server + opentsdbhttpServer *opentsdbhttpserver.Server +) + +func main() { + envflag.Parse() + buildinfo.Init() + logger.Init() + logger.Infof("starting vmagent at %q...", *httpListenAddr) + startTime := time.Now() + remotewrite.Init() + writeconcurrencylimiter.Init() + if len(*graphiteListenAddr) > 0 { + graphiteServer = graphiteserver.MustStart(*graphiteListenAddr, graphite.InsertHandler) + } + if len(*opentsdbListenAddr) > 0 { + opentsdbServer = opentsdbserver.MustStart(*opentsdbListenAddr, opentsdb.InsertHandler, opentsdbhttp.InsertHandler) + } + if len(*opentsdbHTTPListenAddr) > 0 { + opentsdbhttpServer = opentsdbhttpserver.MustStart(*opentsdbHTTPListenAddr, opentsdbhttp.InsertHandler) + } + + promscrape.Init(remotewrite.Push) + + go httpserver.Serve(*httpListenAddr, requestHandler) + logger.Infof("started vmagent in %.3f seconds", time.Since(startTime).Seconds()) + + sig := procutil.WaitForSigterm() + logger.Infof("received signal %s", sig) + + logger.Infof("gracefully shutting down webservice at %q", *httpListenAddr) + startTime = time.Now() + if err := httpserver.Stop(*httpListenAddr); err != nil { + logger.Fatalf("cannot stop the webservice: %s", err) + } + logger.Infof("successfully shut down the webservice in %.3f seconds", time.Since(startTime).Seconds()) + + promscrape.Stop() + + if len(*graphiteListenAddr) > 0 { + graphiteServer.MustStop() + } + if len(*opentsdbListenAddr) > 0 { + opentsdbServer.MustStop() + } + if len(*opentsdbHTTPListenAddr) > 0 { + opentsdbhttpServer.MustStop() + } + remotewrite.Stop() + + logger.Infof("successfully stopped vmagent in %.3f seconds", time.Since(startTime).Seconds()) +} + +func requestHandler(w http.ResponseWriter, r *http.Request) bool { + path := strings.Replace(r.URL.Path, "//", "/", -1) + switch 
path { + case "/api/v1/write": + prometheusWriteRequests.Inc() + if err := promremotewrite.InsertHandler(r); err != nil { + prometheusWriteErrors.Inc() + httpserver.Errorf(w, "error in %q: %s", r.URL.Path, err) + return true + } + w.WriteHeader(http.StatusNoContent) + return true + case "/targets": + w.Header().Set("Content-Type", "text/plain") + promscrape.WriteHumanReadableTargetsStatus(w) + return true + case "/api/v1/import": + vmimportRequests.Inc() + if err := vmimport.InsertHandler(r); err != nil { + vmimportErrors.Inc() + httpserver.Errorf(w, "error in %q: %s", r.URL.Path, err) + return true + } + w.WriteHeader(http.StatusNoContent) + return true + case "/write", "/api/v2/write": + influxWriteRequests.Inc() + if err := influx.InsertHandler(r); err != nil { + influxWriteErrors.Inc() + httpserver.Errorf(w, "error in %q: %s", r.URL.Path, err) + return true + } + w.WriteHeader(http.StatusNoContent) + return true + case "/query": + // Emulate fake response for influx query. + // This is required for TSBS benchmark. + influxQueryRequests.Inc() + fmt.Fprintf(w, `{"results":[{"series":[{"values":[]}]}]}`) + return true + } + return false +} + +var ( + prometheusWriteRequests = metrics.NewCounter(`vmagent_http_requests_total{path="/api/v1/write", protocol="prometheus"}`) + prometheusWriteErrors = metrics.NewCounter(`vmagent_http_request_errors_total{path="/api/v1/write", protocol="prometheus"}`) + + vmimportRequests = metrics.NewCounter(`vmagent_http_requests_total{path="/api/v1/import", protocol="vm"}`) + vmimportErrors = metrics.NewCounter(`vmagent_http_request_errors_total{path="/api/v1/import", protocol="vm"}`) + + influxWriteRequests = metrics.NewCounter(`vmagent_http_requests_total{path="/write", protocol="influx"}`) + influxWriteErrors = metrics.NewCounter(`vmagent_http_request_errors_total{path="/write", protocol="influx"}`) + + influxQueryRequests = metrics.NewCounter(`vmagent_http_requests_total{path="/query", protocol="influx"}`) +) diff --git a/app/vmagent/opentsdb/request_handler.go b/app/vmagent/opentsdb/request_handler.go new file mode 100644 index 000000000..628676de9 --- /dev/null +++ b/app/vmagent/opentsdb/request_handler.go @@ -0,0 +1,65 @@ +package opentsdb + +import ( + "io" + + "github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/common" + "github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/remotewrite" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal" + parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentsdb" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter" + "github.com/VictoriaMetrics/metrics" +) + +var ( + rowsInserted = metrics.NewCounter(`vmagent_rows_inserted_total{type="opentsdb"}`) + rowsPerInsert = metrics.NewHistogram(`vmagent_rows_per_insert{type="opentsdb"}`) +) + +// InsertHandler processes remote write for OpenTSDB put protocol. 
+// +// See http://opentsdb.net/docs/build/html/api_telnet/put.html +func InsertHandler(r io.Reader) error { + return writeconcurrencylimiter.Do(func() error { + return parser.ParseStream(r, insertRows) + }) +} + +func insertRows(rows []parser.Row) error { + ctx := common.GetPushCtx() + defer common.PutPushCtx(ctx) + + tssDst := ctx.WriteRequest.Timeseries[:0] + labels := ctx.Labels[:0] + samples := ctx.Samples[:0] + for i := range rows { + r := &rows[i] + labelsLen := len(labels) + labels = append(labels, prompbmarshal.Label{ + Name: "__name__", + Value: r.Metric, + }) + for j := range r.Tags { + tag := &r.Tags[j] + labels = append(labels, prompbmarshal.Label{ + Name: tag.Key, + Value: tag.Value, + }) + } + samples = append(samples, prompbmarshal.Sample{ + Value: r.Value, + Timestamp: r.Timestamp, + }) + tssDst = append(tssDst, prompbmarshal.TimeSeries{ + Labels: labels[labelsLen:], + Samples: samples[len(samples)-1:], + }) + } + ctx.WriteRequest.Timeseries = tssDst + ctx.Labels = labels + ctx.Samples = samples + remotewrite.Push(&ctx.WriteRequest) + rowsInserted.Add(len(rows)) + rowsPerInsert.Update(float64(len(rows))) + return nil +} diff --git a/app/vmagent/opentsdbhttp/request_handler.go b/app/vmagent/opentsdbhttp/request_handler.go new file mode 100644 index 000000000..365cbe0aa --- /dev/null +++ b/app/vmagent/opentsdbhttp/request_handler.go @@ -0,0 +1,64 @@ +package opentsdbhttp + +import ( + "net/http" + + "github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/common" + "github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/remotewrite" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal" + parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentsdbhttp" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter" + "github.com/VictoriaMetrics/metrics" +) + +var ( + rowsInserted = metrics.NewCounter(`vmagent_rows_inserted_total{type="opentsdbhttp"}`) + rowsPerInsert = metrics.NewHistogram(`vmagent_rows_per_insert{type="opentsdbhttp"}`) +) + +// InsertHandler processes HTTP OpenTSDB put requests. 
+// See http://opentsdb.net/docs/build/html/api_http/put.html +func InsertHandler(req *http.Request) error { + return writeconcurrencylimiter.Do(func() error { + return parser.ParseStream(req, insertRows) + }) +} + +func insertRows(rows []parser.Row) error { + ctx := common.GetPushCtx() + defer common.PutPushCtx(ctx) + + tssDst := ctx.WriteRequest.Timeseries[:0] + labels := ctx.Labels[:0] + samples := ctx.Samples[:0] + for i := range rows { + r := &rows[i] + labelsLen := len(labels) + labels = append(labels, prompbmarshal.Label{ + Name: "__name__", + Value: r.Metric, + }) + for j := range r.Tags { + tag := &r.Tags[j] + labels = append(labels, prompbmarshal.Label{ + Name: tag.Key, + Value: tag.Value, + }) + } + samples = append(samples, prompbmarshal.Sample{ + Value: r.Value, + Timestamp: r.Timestamp, + }) + tssDst = append(tssDst, prompbmarshal.TimeSeries{ + Labels: labels[labelsLen:], + Samples: samples[len(samples)-1:], + }) + } + ctx.WriteRequest.Timeseries = tssDst + ctx.Labels = labels + ctx.Samples = samples + remotewrite.Push(&ctx.WriteRequest) + rowsInserted.Add(len(rows)) + rowsPerInsert.Update(float64(len(rows))) + return nil +} diff --git a/app/vmagent/promremotewrite/request_handler.go b/app/vmagent/promremotewrite/request_handler.go new file mode 100644 index 000000000..747092e1c --- /dev/null +++ b/app/vmagent/promremotewrite/request_handler.go @@ -0,0 +1,67 @@ +package promremotewrite + +import ( + "net/http" + + "github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/common" + "github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/remotewrite" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal" + parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/promremotewrite" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter" + "github.com/VictoriaMetrics/metrics" +) + +var ( + rowsInserted = metrics.NewCounter(`vmagent_rows_inserted_total{type="promremotewrite"}`) + rowsPerInsert = metrics.NewHistogram(`vmagent_rows_per_insert{type="promremotewrite"}`) +) + +// InsertHandler processes remote write for prometheus. 
+func InsertHandler(req *http.Request) error { + return writeconcurrencylimiter.Do(func() error { + return parser.ParseStream(req, insertRows) + }) +} + +func insertRows(timeseries []prompb.TimeSeries) error { + ctx := common.GetPushCtx() + defer common.PutPushCtx(ctx) + + rowsTotal := 0 + tssDst := ctx.WriteRequest.Timeseries[:0] + labels := ctx.Labels[:0] + samples := ctx.Samples[:0] + for i := range timeseries { + ts := ×eries[i] + labelsLen := len(labels) + for i := range ts.Labels { + label := &ts.Labels[i] + labels = append(labels, prompbmarshal.Label{ + Name: bytesutil.ToUnsafeString(label.Name), + Value: bytesutil.ToUnsafeString(label.Value), + }) + } + samplesLen := len(samples) + for i := range ts.Samples { + sample := &ts.Samples[i] + samples = append(samples, prompbmarshal.Sample{ + Value: sample.Value, + Timestamp: sample.Timestamp, + }) + } + tssDst = append(tssDst, prompbmarshal.TimeSeries{ + Labels: labels[labelsLen:], + Samples: samples[samplesLen:], + }) + rowsTotal += len(ts.Samples) + } + ctx.WriteRequest.Timeseries = tssDst + ctx.Labels = labels + ctx.Samples = samples + remotewrite.Push(&ctx.WriteRequest) + rowsInserted.Add(rowsTotal) + rowsPerInsert.Update(float64(rowsTotal)) + return nil +} diff --git a/app/vmagent/remotewrite/client.go b/app/vmagent/remotewrite/client.go new file mode 100644 index 000000000..b372eab58 --- /dev/null +++ b/app/vmagent/remotewrite/client.go @@ -0,0 +1,267 @@ +package remotewrite + +import ( + "crypto/tls" + "crypto/x509" + "encoding/base64" + "flag" + "fmt" + "io/ioutil" + "strings" + "sync" + "time" + + "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/persistentqueue" + "github.com/VictoriaMetrics/metrics" + "github.com/valyala/fasthttp" +) + +var ( + sendTimeout = flag.Duration("remoteWrite.sendTimeout", time.Minute, "Timeout for sending a single block of data to -remoteWrite.url") + + tlsInsecureSkipVerify = flag.Bool("remoteWrite.tlsInsecureSkipVerify", false, "Whether to skip tls verification when connecting to -remoteWrite.url") + tlsCertFile = flag.String("remoteWrite.tlsCertFile", "", "Optional path to client-side TLS certificate file to use when connecting to -remoteWrite.url") + tlsKeyFile = flag.String("remoteWrite.tlsKeyFile", "", "Optional path to client-side TLS certificate key to use when connecting to -remoteWrite.url") + tlsCAFile = flag.String("remoteWrite.tlsCAFile", "", "Optional path to TLS CA file to use for verifying connections to -remoteWrite.url. 
"+ + "By default system CA is used") + + basicAuthUsername = flag.String("remoteWrite.basicAuth.username", "", "Optional basic auth username to use for -remoteWrite.url") + basicAuthPassword = flag.String("remoteWrite.basicAuth.password", "", "Optional basic auth password to use for -remoteWrite.url") + bearerToken = flag.String("remoteWrite.bearerToken", "", "Optional bearer auth token to use for -remoteWrite.url") +) + +type client struct { + urlLabelValue string + remoteWriteURL string + host string + requestURI string + authHeader string + fq *persistentqueue.FastQueue + hc *fasthttp.HostClient + + requestDuration *metrics.Histogram + requestsOKCount *metrics.Counter + errorsCount *metrics.Counter + retriesCount *metrics.Counter + + wg sync.WaitGroup + stopCh chan struct{} +} + +func newClient(remoteWriteURL, urlLabelValue string, fq *persistentqueue.FastQueue) *client { + authHeader := "" + if len(*basicAuthUsername) > 0 || len(*basicAuthPassword) > 0 { + // See https://en.wikipedia.org/wiki/Basic_access_authentication + token := *basicAuthUsername + ":" + *basicAuthPassword + token64 := base64.StdEncoding.EncodeToString([]byte(token)) + authHeader = "Basic " + token64 + } + if len(*bearerToken) > 0 { + if authHeader != "" { + logger.Panicf("FATAL: `-remoteWrite.bearerToken`=%q cannot be set when `-remoteWrite.basicAuth.*` flags are set", *bearerToken) + } + authHeader = "Bearer " + *bearerToken + } + + readTimeout := *sendTimeout + if readTimeout <= 0 { + readTimeout = time.Minute + } + var u fasthttp.URI + u.Update(remoteWriteURL) + scheme := string(u.Scheme()) + switch scheme { + case "http", "https": + default: + logger.Panicf("FATAL: unsupported scheme in -remoteWrite.url=%q: %q. It must be http or https", remoteWriteURL, scheme) + } + host := string(u.Host()) + if len(host) == 0 { + logger.Panicf("FATAL: invalid -remoteWrite.url=%q: host cannot be empty. 
Make sure the url looks like `http://host:port/path`", remoteWriteURL) + } + requestURI := string(u.RequestURI()) + isTLS := scheme == "https" + var tlsCfg *tls.Config + if isTLS { + var err error + tlsCfg, err = getTLSConfig() + if err != nil { + logger.Panicf("FATAL: cannot initialize TLS config: %s", err) + } + } + if !strings.Contains(host, ":") { + if isTLS { + host += ":443" + } else { + host += ":80" + } + } + maxConns := 2 * *queues + hc := &fasthttp.HostClient{ + Addr: host, + Name: "vmagent", + Dial: statDial, + DialDualStack: netutil.TCP6Enabled(), + IsTLS: isTLS, + TLSConfig: tlsCfg, + MaxConns: maxConns, + MaxIdleConnDuration: 10 * readTimeout, + ReadTimeout: readTimeout, + WriteTimeout: 10 * time.Second, + MaxResponseBodySize: 1024 * 1024, + } + c := &client{ + urlLabelValue: urlLabelValue, + remoteWriteURL: remoteWriteURL, + host: host, + requestURI: requestURI, + authHeader: authHeader, + fq: fq, + hc: hc, + stopCh: make(chan struct{}), + } + c.requestDuration = metrics.NewHistogram(fmt.Sprintf(`vmagent_remotewrite_duration_seconds{url=%q}`, c.urlLabelValue)) + c.requestsOKCount = metrics.NewCounter(fmt.Sprintf(`vmagent_remotewrite_requests_total{url=%q, status_code="2XX"}`, c.urlLabelValue)) + c.errorsCount = metrics.NewCounter(fmt.Sprintf(`vmagent_remotewrite_errors_total{url=%q}`, c.urlLabelValue)) + c.retriesCount = metrics.NewCounter(fmt.Sprintf(`vmagent_remotewrite_retries_count_total{url=%q}`, c.urlLabelValue)) + for i := 0; i < *queues; i++ { + c.wg.Add(1) + go func() { + defer c.wg.Done() + c.runWorker() + }() + } + logger.Infof("initialized client for -remoteWrite.url=%q", c.remoteWriteURL) + return c +} + +func (c *client) MustStop() { + close(c.stopCh) + c.wg.Wait() + logger.Infof("stopped client for -remoteWrite.url=%q", c.remoteWriteURL) +} + +func getTLSConfig() (*tls.Config, error) { + var tlsRootCA *x509.CertPool + var tlsCertificate *tls.Certificate + if *tlsCertFile != "" || *tlsKeyFile != "" { + cert, err := tls.LoadX509KeyPair(*tlsCertFile, *tlsKeyFile) + if err != nil { + return nil, fmt.Errorf("cannot load TLS certificate for -remoteWrite.tlsCertFile=%q and -remoteWrite.tlsKeyFile=%q: %s", *tlsCertFile, *tlsKeyFile, err) + } + tlsCertificate = &cert + } + if *tlsCAFile != "" { + data, err := ioutil.ReadFile(*tlsCAFile) + if err != nil { + return nil, fmt.Errorf("cannot read -remoteWrite.tlsCAFile=%q: %s", *tlsCAFile, err) + } + tlsRootCA = x509.NewCertPool() + if !tlsRootCA.AppendCertsFromPEM(data) { + return nil, fmt.Errorf("cannot parse data -remoteWrite.tlsCAFile=%q", *tlsCAFile) + } + } + tlsCfg := &tls.Config{ + RootCAs: tlsRootCA, + ClientSessionCache: tls.NewLRUClientSessionCache(0), + } + if tlsCertificate != nil { + tlsCfg.Certificates = []tls.Certificate{*tlsCertificate} + } + tlsCfg.InsecureSkipVerify = *tlsInsecureSkipVerify + return tlsCfg, nil +} + +func (c *client) runWorker() { + var ok bool + var block []byte + ch := make(chan struct{}) + for { + block, ok = c.fq.MustReadBlock(block[:0]) + if !ok { + return + } + go func() { + c.sendBlock(block) + ch <- struct{}{} + }() + select { + case <-ch: + // The block has been sent successfully + continue + case <-c.stopCh: + // c must be stopped. Wait for a while in the hope the block will be sent. + graceDuration := 5 * time.Second + select { + case <-ch: + // The block has been sent successfully. 
+ case <-time.After(graceDuration): + logger.Errorf("couldn't send block with size %d bytes to %q in %.3f seconds during shutdown; dropping it", + len(block), c.remoteWriteURL, graceDuration.Seconds()) + } + return + } + } +} + +func (c *client) sendBlock(block []byte) { + req := fasthttp.AcquireRequest() + req.SetRequestURI(c.requestURI) + req.SetHost(c.host) + req.Header.SetMethod("POST") + req.Header.Add("Content-Type", "application/x-protobuf") + req.Header.Add("Content-Encoding", "snappy") + if c.authHeader != "" { + req.Header.Set("Authorization", c.authHeader) + } + req.SetBody(block) + + retryDuration := time.Second + resp := fasthttp.AcquireResponse() + +again: + select { + case <-c.stopCh: + fasthttp.ReleaseRequest(req) + fasthttp.ReleaseResponse(resp) + return + default: + } + + startTime := time.Now() + // There is no need to call DoTimeout, since the timeout is set in c.hc.ReadTimeout. + err := c.hc.Do(req, resp) + c.requestDuration.UpdateDuration(startTime) + if err != nil { + c.errorsCount.Inc() + retryDuration *= 2 + if retryDuration > time.Minute { + retryDuration = time.Minute + } + logger.Errorf("couldn't send a block with size %d bytes to %q: %s; re-sending the block in %.3f seconds", + len(block), c.remoteWriteURL, err, retryDuration.Seconds()) + time.Sleep(retryDuration) + c.retriesCount.Inc() + goto again + } + statusCode := resp.StatusCode() + if statusCode/100 != 2 { + metrics.GetOrCreateCounter(fmt.Sprintf(`vmagent_remotewrite_requests_total{url=%q, status_code="%d"}`, c.urlLabelValue, statusCode)).Inc() + retryDuration *= 2 + if retryDuration > time.Minute { + retryDuration = time.Minute + } + logger.Errorf("unexpected status code received after sending a block with size %d bytes to %q: %d; response body=%q; re-sending the block in %.3f seconds", + len(block), c.remoteWriteURL, statusCode, resp.Body(), retryDuration.Seconds()) + time.Sleep(retryDuration) + c.retriesCount.Inc() + goto again + } + c.requestsOKCount.Inc() + + // The block has been successfully sent to the remote storage. + fasthttp.ReleaseResponse(resp) + fasthttp.ReleaseRequest(req) +} diff --git a/app/vmagent/remotewrite/pendingseries.go b/app/vmagent/remotewrite/pendingseries.go new file mode 100644 index 000000000..b583ebffa --- /dev/null +++ b/app/vmagent/remotewrite/pendingseries.go @@ -0,0 +1,191 @@ +package remotewrite + +import ( + "flag" + "sync" + "time" + + "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/persistentqueue" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal" + "github.com/VictoriaMetrics/metrics" + "github.com/golang/snappy" +) + +var flushInterval = flag.Duration("remoteWrite.flushInterval", time.Second, "Interval for flushing the data to remote storage. "+ + "Higher value reduces network bandwidth usage at the cost of delayed push of scraped data to remote storage") + +// The maximum number of rows to send per block. 
+const maxRowsPerBlock = 10000 + +type pendingSeries struct { + mu sync.Mutex + wr writeRequest + + stopCh chan struct{} + periodicFlusherWG sync.WaitGroup +} + +func newPendingSeries(pushBlock func(block []byte)) *pendingSeries { + var ps pendingSeries + ps.wr.pushBlock = pushBlock + ps.stopCh = make(chan struct{}) + ps.periodicFlusherWG.Add(1) + go func() { + defer ps.periodicFlusherWG.Done() + ps.periodicFlusher() + }() + return &ps +} + +func (ps *pendingSeries) MustStop() { + close(ps.stopCh) + ps.periodicFlusherWG.Wait() +} + +func (ps *pendingSeries) Push(tss []prompbmarshal.TimeSeries) { + ps.mu.Lock() + ps.wr.push(tss) + ps.mu.Unlock() +} + +func (ps *pendingSeries) periodicFlusher() { + ticker := time.NewTicker(*flushInterval) + defer ticker.Stop() + mustStop := false + for !mustStop { + select { + case <-ps.stopCh: + mustStop = true + case <-ticker.C: + if time.Since(ps.wr.lastFlushTime) < *flushInterval/2 { + continue + } + } + ps.mu.Lock() + ps.wr.flush() + ps.mu.Unlock() + } +} + +type writeRequest struct { + wr prompbmarshal.WriteRequest + pushBlock func(block []byte) + lastFlushTime time.Time + + tss []prompbmarshal.TimeSeries + + labels []prompbmarshal.Label + samples []prompbmarshal.Sample + buf []byte +} + +func (wr *writeRequest) reset() { + wr.wr.Timeseries = nil + + for i := range wr.tss { + ts := &wr.tss[i] + ts.Labels = nil + ts.Samples = nil + } + wr.tss = wr.tss[:0] + + for i := range wr.labels { + label := &wr.labels[i] + label.Name = "" + label.Value = "" + } + wr.labels = wr.labels[:0] + + wr.samples = wr.samples[:0] + wr.buf = wr.buf[:0] +} + +func (wr *writeRequest) flush() { + wr.wr.Timeseries = wr.tss + wr.lastFlushTime = time.Now() + pushWriteRequest(&wr.wr, wr.pushBlock) + wr.reset() +} + +func (wr *writeRequest) push(src []prompbmarshal.TimeSeries) { + tssDst := wr.tss + for i := range src { + tssDst = append(tssDst, prompbmarshal.TimeSeries{}) + dst := &tssDst[len(tssDst)-1] + wr.copyTimeSeries(dst, &src[i]) + if len(wr.tss) >= maxRowsPerBlock { + wr.flush() + tssDst = wr.tss + } + } + wr.tss = tssDst +} + +func (wr *writeRequest) copyTimeSeries(dst, src *prompbmarshal.TimeSeries) { + labelsDst := wr.labels + labelsLen := len(wr.labels) + samplesDst := wr.samples + buf := wr.buf + for i := range src.Labels { + labelsDst = append(labelsDst, prompbmarshal.Label{}) + dstLabel := &labelsDst[len(labelsDst)-1] + srcLabel := &src.Labels[i] + + buf = append(buf, srcLabel.Name...) + dstLabel.Name = bytesutil.ToUnsafeString(buf[len(buf)-len(srcLabel.Name):]) + buf = append(buf, srcLabel.Value...) 
+ dstLabel.Value = bytesutil.ToUnsafeString(buf[len(buf)-len(srcLabel.Value):]) + } + dst.Labels = labelsDst[labelsLen:] + + samplesDst = append(samplesDst, prompbmarshal.Sample{}) + dstSample := &samplesDst[len(samplesDst)-1] + if len(src.Samples) != 1 { + logger.Panicf("BUG: unexpected number of samples in time series; got %d; want 1", len(src.Samples)) + } + *dstSample = src.Samples[0] + dst.Samples = samplesDst[len(samplesDst)-1:] + + wr.samples = samplesDst + wr.labels = labelsDst + wr.buf = buf +} + +func pushWriteRequest(wr *prompbmarshal.WriteRequest, pushBlock func(block []byte)) { + if len(wr.Timeseries) == 0 { + // Nothing to push + return + } + bb := writeRequestBufPool.Get() + bb.B = prompbmarshal.MarshalWriteRequest(bb.B[:0], wr) + zb := snappyBufPool.Get() + zb.B = snappy.Encode(zb.B[:cap(zb.B)], bb.B) + writeRequestBufPool.Put(bb) + if len(zb.B) <= persistentqueue.MaxBlockSize { + pushBlock(zb.B) + blockSizeRows.Update(float64(len(wr.Timeseries))) + blockSizeBytes.Update(float64(len(zb.B))) + snappyBufPool.Put(zb) + return + } + snappyBufPool.Put(zb) + + // Too big block. Recursively split it into smaller parts. + timeseries := wr.Timeseries + n := len(timeseries) / 2 + wr.Timeseries = timeseries[:n] + pushWriteRequest(wr, pushBlock) + wr.Timeseries = timeseries[n:] + pushWriteRequest(wr, pushBlock) + wr.Timeseries = timeseries +} + +var ( + blockSizeBytes = metrics.NewHistogram(`vmagent_remotewrite_block_size_bytes`) + blockSizeRows = metrics.NewHistogram(`vmagent_remotewrite_block_size_rows`) +) + +var writeRequestBufPool bytesutil.ByteBufferPool +var snappyBufPool bytesutil.ByteBufferPool diff --git a/app/vmagent/remotewrite/relabel.go b/app/vmagent/remotewrite/relabel.go new file mode 100644 index 000000000..b6fe3d097 --- /dev/null +++ b/app/vmagent/remotewrite/relabel.go @@ -0,0 +1,108 @@ +package remotewrite + +import ( + "flag" + "strings" + "sync" + + "github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel" +) + +var ( + extraLabelsUnparsed = flagutil.NewArray("remoteWrite.label", "Optional label in the form 'name=value' to add to all the metrics before sending them to -remoteWrite.url. "+ + "Pass multiple -remoteWrite.label flags in order to add multiple labels to metrics before sending them to remote storage") + relabelConfigPath = flag.String("remoteWrite.relabelConfig", "", "Optional path to file with relabel_config entries. These entries are applied to all the metrics "+ + "before sending them to -remoteWrite.url. See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config for details") +) + +var extraLabels []prompbmarshal.Label +var prcs []promrelabel.ParsedRelabelConfig + +// initRelabel must be called after parsing command-line flags. +func initRelabel() { + // Init extraLabels + for _, s := range *extraLabelsUnparsed { + n := strings.IndexByte(s, '=') + if n < 0 { + logger.Panicf("FATAL: missing '=' in `-remoteWrite.label`. 
It must contain label in the form `name=value`; got %q", s) + } + extraLabels = append(extraLabels, prompbmarshal.Label{ + Name: s[:n], + Value: s[n+1:], + }) + } + + // Init prcs + if len(*relabelConfigPath) > 0 { + var err error + prcs, err = promrelabel.LoadRelabelConfigs(*relabelConfigPath) + if err != nil { + logger.Panicf("FATAL: cannot load relabel configs from -remoteWrite.relabelConfig=%q: %s", *relabelConfigPath, err) + } + } +} + +func resetRelabel() { + extraLabels = nil + prcs = nil +} + +func (rctx *relabelCtx) applyRelabeling(wr *prompbmarshal.WriteRequest) { + if len(extraLabels) == 0 && len(prcs) == 0 { + // Nothing to change. + return + } + tss := wr.Timeseries + tssDst := tss[:0] + labels := rctx.labels[:0] + for i := range tss { + ts := &tss[i] + labelsLen := len(labels) + labels = append(labels, ts.Labels...) + // extraLabels must be added before applying relabeling according to https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write + for j := range extraLabels { + extraLabel := &extraLabels[j] + tmp := promrelabel.GetLabelByName(labels[labelsLen:], extraLabel.Name) + if tmp != nil { + tmp.Value = extraLabel.Value + } else { + labels = append(labels, *extraLabel) + } + } + labels = promrelabel.ApplyRelabelConfigs(labels, labelsLen, prcs, true) + if len(labels) == labelsLen { + // Drop the current time series, since relabeling removed all the labels. + continue + } + tssDst = append(tssDst, prompbmarshal.TimeSeries{ + Labels: labels[labelsLen:], + Samples: ts.Samples, + }) + } + rctx.labels = labels + wr.Timeseries = tssDst +} + +type relabelCtx struct { + // pool for labels, which are used during the relabeling. + labels []prompbmarshal.Label +} + +func (rctx *relabelCtx) reset() { + labels := rctx.labels + for i := range labels { + label := &labels[i] + label.Name = "" + label.Value = "" + } + rctx.labels = rctx.labels[:0] +} + +var relabelCtxPool = &sync.Pool{ + New: func() interface{} { + return &relabelCtx{} + }, +} diff --git a/app/vmagent/remotewrite/remotewrite.go b/app/vmagent/remotewrite/remotewrite.go new file mode 100644 index 000000000..8bb927c71 --- /dev/null +++ b/app/vmagent/remotewrite/remotewrite.go @@ -0,0 +1,127 @@ +package remotewrite + +import ( + "flag" + "fmt" + "sync/atomic" + + "github.com/VictoriaMetrics/VictoriaMetrics/lib/flagutil" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/memory" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/persistentqueue" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal" + "github.com/VictoriaMetrics/metrics" + xxhash "github.com/cespare/xxhash/v2" +) + +var ( + remoteWriteURLs = flagutil.NewArray("remoteWrite.url", "Remote storage URL to write data to. It must support Prometheus remote_write API. "+ + "It is recommended using VictoriaMetrics as remote storage. Example url: http://:8428/api/v1/write . "+ + "Pass multiple -remoteWrite.url flags in order to write data concurrently to multiple remote storage systems") + tmpDataPath = flag.String("remoteWrite.tmpDataPath", "vmagent-remotewrite-data", "Path to directory where temporary data for remote write component is stored") + queues = flag.Int("remoteWrite.queues", 1, "The number of concurrent queues to each -remoteWrite.url. 
Set more queues if a single queue "+ + "isn't enough for sending high volume of collected data to remote storage") + showRemoteWriteURL = flag.Bool("remoteWrite.showURL", false, "Whether to show -remoteWrite.url in the exported metrics. "+ + "It is hidden by default, since it can contain sensitive auth info") +) + +// Init initializes remotewrite. +// +// It must be called after flag.Parse(). +// +// Stop must be called for graceful shutdown. +func Init() { + if len(*remoteWriteURLs) == 0 { + logger.Panicf("FATAL: at least one `-remoteWrite.url` must be set") + } + + if !*showRemoteWriteURL { + // remoteWrite.url can contain authentication codes, so hide it at `/metrics` output. + httpserver.RegisterSecretFlag("remoteWrite.url") + } + initRelabel() + + maxInmemoryBlocks := memory.Allowed() / len(*remoteWriteURLs) / maxRowsPerBlock / 100 + if maxInmemoryBlocks > 200 { + // There is not much sense in keeping a higher number of blocks in memory, + // since this means that the producer outperforms the consumer and the queue + // will continue growing. It is better to store the queue in files. + maxInmemoryBlocks = 200 + } + if maxInmemoryBlocks < 2 { + maxInmemoryBlocks = 2 + } + for i, remoteWriteURL := range *remoteWriteURLs { + h := xxhash.Sum64([]byte(remoteWriteURL)) + path := fmt.Sprintf("%s/persistent-queue/%016X", *tmpDataPath, h) + fq := persistentqueue.MustOpenFastQueue(path, remoteWriteURL, maxInmemoryBlocks) + urlLabelValue := fmt.Sprintf("secret-url-%d", i+1) + if *showRemoteWriteURL { + urlLabelValue = remoteWriteURL + } + _ = metrics.NewGauge(fmt.Sprintf(`vmagent_remotewrite_pending_data_bytes{url=%q, hash="%016X"}`, urlLabelValue, h), func() float64 { + return float64(fq.GetPendingBytes()) + }) + _ = metrics.NewGauge(fmt.Sprintf(`vmagent_remotewrite_pending_inmemory_blocks{url=%q}`, urlLabelValue), func() float64 { + return float64(fq.GetInmemoryQueueLen()) + }) + c := newClient(remoteWriteURL, urlLabelValue, fq) + fqs = append(fqs, fq) + cs = append(cs, c) + } + + pss = make([]*pendingSeries, *queues) + for i := range pss { + pss[i] = newPendingSeries(pushBlockToPersistentQueues) + } +} + +// Stop stops remotewrite. +// +// It is expected that nobody calls Push during and after the call to this func. +func Stop() { + for _, ps := range pss { + ps.MustStop() + } + + // Close all the persistent queues. This should unblock clients waiting in MustReadBlock. + for _, fq := range fqs { + fq.MustClose() + } + fqs = nil + + // Stop all the clients + for _, c := range cs { + c.MustStop() + } + cs = nil + + resetRelabel() +} + +// Push sends wr to remote storage systems set via `-remoteWrite.url`. +// +// Each timeseries in wr.Timeseries must contain one sample. 
+func Push(wr *prompbmarshal.WriteRequest) { + rctx := relabelCtxPool.Get().(*relabelCtx) + rctx.applyRelabeling(wr) + + idx := atomic.AddUint64(&pssNextIdx, 1) % uint64(len(pss)) + pss[idx].Push(wr.Timeseries) + + rctx.reset() + relabelCtxPool.Put(rctx) +} + +func pushBlockToPersistentQueues(block []byte) { + for _, fq := range fqs { + fq.MustWriteBlock(block) + } +} + +var fqs []*persistentqueue.FastQueue +var cs []*client + +var pssNextIdx uint64 +var pss []*pendingSeries diff --git a/app/vmagent/remotewrite/statconn.go b/app/vmagent/remotewrite/statconn.go new file mode 100644 index 000000000..19ee1e9b5 --- /dev/null +++ b/app/vmagent/remotewrite/statconn.go @@ -0,0 +1,71 @@ +package remotewrite + +import ( + "net" + "sync/atomic" + + "github.com/VictoriaMetrics/metrics" + "github.com/valyala/fasthttp" +) + +func statDial(addr string) (net.Conn, error) { + conn, err := fasthttp.Dial(addr) + dialsTotal.Inc() + if err != nil { + dialErrors.Inc() + return nil, err + } + conns.Inc() + sc := &statConn{ + Conn: conn, + } + return sc, nil +} + +var ( + dialsTotal = metrics.NewCounter(`vmagent_remotewrite_dials_total`) + dialErrors = metrics.NewCounter(`vmagent_remotewrite_dial_errors_total`) + conns = metrics.NewCounter(`vmagent_remotewrite_conns`) +) + +type statConn struct { + closed uint64 + net.Conn +} + +func (sc *statConn) Read(p []byte) (int, error) { + n, err := sc.Conn.Read(p) + connReadsTotal.Inc() + if err != nil { + connReadErrors.Inc() + } + connBytesRead.Add(n) + return n, err +} + +func (sc *statConn) Write(p []byte) (int, error) { + n, err := sc.Conn.Write(p) + connWritesTotal.Inc() + if err != nil { + connWriteErrors.Inc() + } + connBytesWritten.Add(n) + return n, err +} + +func (sc *statConn) Close() error { + err := sc.Conn.Close() + if atomic.AddUint64(&sc.closed, 1) == 1 { + conns.Dec() + } + return err +} + +var ( + connReadsTotal = metrics.NewCounter(`vmagent_remotewrite_conn_reads_total`) + connWritesTotal = metrics.NewCounter(`vmagent_remotewrite_conn_writes_total`) + connReadErrors = metrics.NewCounter(`vmagent_remotewrite_conn_read_errors_total`) + connWriteErrors = metrics.NewCounter(`vmagent_remotewrite_conn_write_errors_total`) + connBytesRead = metrics.NewCounter(`vmagent_remotewrite_conn_bytes_read_total`) + connBytesWritten = metrics.NewCounter(`vmagent_remotewrite_conn_bytes_written_total`) +) diff --git a/app/vmagent/vmimport/request_handler.go b/app/vmagent/vmimport/request_handler.go new file mode 100644 index 000000000..852747ce6 --- /dev/null +++ b/app/vmagent/vmimport/request_handler.go @@ -0,0 +1,70 @@ +package vmimport + +import ( + "net/http" + + "github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/common" + "github.com/VictoriaMetrics/VictoriaMetrics/app/vmagent/remotewrite" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal" + parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/vmimport" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter" + "github.com/VictoriaMetrics/metrics" +) + +var ( + rowsInserted = metrics.NewCounter(`vmagent_rows_inserted_total{type="vmimport"}`) + rowsPerInsert = metrics.NewHistogram(`vmagent_rows_per_insert{type="vmimport"}`) +) + +// InsertHandler processes `/api/v1/import` request. 
+// +// See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6 +func InsertHandler(req *http.Request) error { + return writeconcurrencylimiter.Do(func() error { + return parser.ParseStream(req, insertRows) + }) +} + +func insertRows(rows []parser.Row) error { + ctx := common.GetPushCtx() + defer common.PutPushCtx(ctx) + + rowsTotal := 0 + tssDst := ctx.WriteRequest.Timeseries[:0] + labels := ctx.Labels[:0] + samples := ctx.Samples[:0] + for i := range rows { + r := &rows[i] + labelsLen := len(labels) + for j := range r.Tags { + tag := &r.Tags[j] + labels = append(labels, prompbmarshal.Label{ + Name: bytesutil.ToUnsafeString(tag.Key), + Value: bytesutil.ToUnsafeString(tag.Value), + }) + } + values := r.Values + timestamps := r.Timestamps + _ = timestamps[len(values)-1] + samplesLen := len(samples) + for j, value := range values { + samples = append(samples, prompbmarshal.Sample{ + Value: value, + Timestamp: timestamps[j], + }) + } + tssDst = append(tssDst, prompbmarshal.TimeSeries{ + Labels: labels[labelsLen:], + Samples: samples[samplesLen:], + }) + rowsTotal += len(values) + } + ctx.WriteRequest.Timeseries = tssDst + ctx.Labels = labels + ctx.Samples = samples + remotewrite.Push(&ctx.WriteRequest) + rowsInserted.Add(rowsTotal) + rowsPerInsert.Update(float64(rowsTotal)) + return nil +} diff --git a/app/vminsert/common/insert_ctx_pool.go b/app/vminsert/common/insert_ctx_pool.go new file mode 100644 index 000000000..3f6c057bd --- /dev/null +++ b/app/vminsert/common/insert_ctx_pool.go @@ -0,0 +1,36 @@ +package common + +import ( + "runtime" + "sync" +) + +// GetInsertCtx returns InsertCtx from the pool. +// +// Call PutInsertCtx for returning it to the pool. +func GetInsertCtx() *InsertCtx { + select { + case ctx := <-insertCtxPoolCh: + return ctx + default: + if v := insertCtxPool.Get(); v != nil { + return v.(*InsertCtx) + } + return &InsertCtx{} + } +} + +// PutInsertCtx returns ctx to the pool. +// +// ctx cannot be used after the call. +func PutInsertCtx(ctx *InsertCtx) { + ctx.Reset(0) + select { + case insertCtxPoolCh <- ctx: + default: + insertCtxPool.Put(ctx) + } +} + +var insertCtxPool sync.Pool +var insertCtxPoolCh = make(chan *InsertCtx, runtime.GOMAXPROCS(-1)) diff --git a/app/vminsert/graphite/request_handler.go b/app/vminsert/graphite/request_handler.go index adaac204d..fe1690e3f 100644 --- a/app/vminsert/graphite/request_handler.go +++ b/app/vminsert/graphite/request_handler.go @@ -1,162 +1,44 @@ package graphite import ( - "fmt" "io" - "net" - "runtime" - "sync" - "time" "github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/common" - "github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/concurrencylimiter" - "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil" - "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/graphite" + parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/graphite" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter" "github.com/VictoriaMetrics/metrics" ) var ( rowsInserted = metrics.NewCounter(`vm_rows_inserted_total{type="graphite"}`) - rowsPerInsert = metrics.NewSummary(`vm_rows_per_insert{type="graphite"}`) + rowsPerInsert = metrics.NewHistogram(`vm_rows_per_insert{type="graphite"}`) ) -// insertHandler processes remote write for graphite plaintext protocol. +// InsertHandler processes remote write for graphite plaintext protocol. 
// // See https://graphite.readthedocs.io/en/latest/feeding-carbon.html#the-plaintext-protocol -func insertHandler(r io.Reader) error { - return concurrencylimiter.Do(func() error { - return insertHandlerInternal(r) +func InsertHandler(r io.Reader) error { + return writeconcurrencylimiter.Do(func() error { + return parser.ParseStream(r, insertRows) }) } -func insertHandlerInternal(r io.Reader) error { - ctx := getPushCtx() - defer putPushCtx(ctx) - for ctx.Read(r) { - if err := ctx.InsertRows(); err != nil { - return err - } - } - return ctx.Error() -} +func insertRows(rows []parser.Row) error { + ctx := common.GetInsertCtx() + defer common.PutInsertCtx(ctx) -func (ctx *pushCtx) InsertRows() error { - rows := ctx.Rows.Rows - ic := &ctx.Common - ic.Reset(len(rows)) + ctx.Reset(len(rows)) for i := range rows { r := &rows[i] - ic.Labels = ic.Labels[:0] - ic.AddLabel("", r.Metric) + ctx.Labels = ctx.Labels[:0] + ctx.AddLabel("", r.Metric) for j := range r.Tags { tag := &r.Tags[j] - ic.AddLabel(tag.Key, tag.Value) + ctx.AddLabel(tag.Key, tag.Value) } - ic.WriteDataPoint(nil, ic.Labels, r.Timestamp, r.Value) + ctx.WriteDataPoint(nil, ctx.Labels, r.Timestamp, r.Value) } rowsInserted.Add(len(rows)) rowsPerInsert.Update(float64(len(rows))) - return ic.FlushBufs() + return ctx.FlushBufs() } - -const flushTimeout = 3 * time.Second - -func (ctx *pushCtx) Read(r io.Reader) bool { - readCalls.Inc() - if ctx.err != nil { - return false - } - if c, ok := r.(net.Conn); ok { - if err := c.SetReadDeadline(time.Now().Add(flushTimeout)); err != nil { - readErrors.Inc() - ctx.err = fmt.Errorf("cannot set read deadline: %s", err) - return false - } - } - ctx.reqBuf, ctx.tailBuf, ctx.err = common.ReadLinesBlock(r, ctx.reqBuf, ctx.tailBuf) - if ctx.err != nil { - if ne, ok := ctx.err.(net.Error); ok && ne.Timeout() { - // Flush the read data on timeout and try reading again. - ctx.err = nil - } else { - if ctx.err != io.EOF { - readErrors.Inc() - ctx.err = fmt.Errorf("cannot read graphite plaintext protocol data: %s", ctx.err) - } - return false - } - } - ctx.Rows.Unmarshal(bytesutil.ToUnsafeString(ctx.reqBuf)) - - // Fill missing timestamps with the current timestamp rounded to seconds. - currentTimestamp := time.Now().Unix() - rows := ctx.Rows.Rows - for i := range rows { - r := &rows[i] - if r.Timestamp == 0 { - r.Timestamp = currentTimestamp - } - } - - // Convert timestamps from seconds to milliseconds. 
- for i := range rows { - rows[i].Timestamp *= 1e3 - } - - return true -} - -type pushCtx struct { - Rows graphite.Rows - Common common.InsertCtx - - reqBuf []byte - tailBuf []byte - - err error -} - -func (ctx *pushCtx) Error() error { - if ctx.err == io.EOF { - return nil - } - return ctx.err -} - -func (ctx *pushCtx) reset() { - ctx.Rows.Reset() - ctx.Common.Reset(0) - ctx.reqBuf = ctx.reqBuf[:0] - ctx.tailBuf = ctx.tailBuf[:0] - - ctx.err = nil -} - -var ( - readCalls = metrics.NewCounter(`vm_read_calls_total{name="graphite"}`) - readErrors = metrics.NewCounter(`vm_read_errors_total{name="graphite"}`) -) - -func getPushCtx() *pushCtx { - select { - case ctx := <-pushCtxPoolCh: - return ctx - default: - if v := pushCtxPool.Get(); v != nil { - return v.(*pushCtx) - } - return &pushCtx{} - } -} - -func putPushCtx(ctx *pushCtx) { - ctx.reset() - select { - case pushCtxPoolCh <- ctx: - default: - pushCtxPool.Put(ctx) - } -} - -var pushCtxPool sync.Pool -var pushCtxPoolCh = make(chan *pushCtx, runtime.GOMAXPROCS(-1)) diff --git a/app/vminsert/influx/request_handler.go b/app/vminsert/influx/request_handler.go index ee223a8a6..43d03457e 100644 --- a/app/vminsert/influx/request_handler.go +++ b/app/vminsert/influx/request_handler.go @@ -2,18 +2,15 @@ package influx import ( "flag" - "fmt" - "io" "net/http" "runtime" "sync" - "time" "github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/common" - "github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/concurrencylimiter" "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil" - "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/influx" + parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/influx" "github.com/VictoriaMetrics/VictoriaMetrics/lib/storage" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter" "github.com/VictoriaMetrics/metrics" ) @@ -24,63 +21,22 @@ var ( var ( rowsInserted = metrics.NewCounter(`vm_rows_inserted_total{type="influx"}`) - rowsPerInsert = metrics.NewSummary(`vm_rows_per_insert{type="influx"}`) + rowsPerInsert = metrics.NewHistogram(`vm_rows_per_insert{type="influx"}`) ) // InsertHandler processes remote write for influx line protocol. 
// // See https://github.com/influxdata/influxdb/blob/4cbdc197b8117fee648d62e2e5be75c6575352f0/tsdb/README.md func InsertHandler(req *http.Request) error { - return concurrencylimiter.Do(func() error { - return insertHandlerInternal(req) + return writeconcurrencylimiter.Do(func() error { + return parser.ParseStream(req, insertRows) }) } -func insertHandlerInternal(req *http.Request) error { - readCalls.Inc() - - r := req.Body - if req.Header.Get("Content-Encoding") == "gzip" { - zr, err := common.GetGzipReader(r) - if err != nil { - return fmt.Errorf("cannot read gzipped influx line protocol data: %s", err) - } - defer common.PutGzipReader(zr) - r = zr - } - - q := req.URL.Query() - tsMultiplier := int64(1e6) - switch q.Get("precision") { - case "ns": - tsMultiplier = 1e6 - case "u": - tsMultiplier = 1e3 - case "ms": - tsMultiplier = 1 - case "s": - tsMultiplier = -1e3 - case "m": - tsMultiplier = -1e3 * 60 - case "h": - tsMultiplier = -1e3 * 3600 - } - - // Read db tag from https://docs.influxdata.com/influxdb/v1.7/tools/api/#write-http-endpoint - db := q.Get("db") - +func insertRows(db string, rows []parser.Row) error { ctx := getPushCtx() defer putPushCtx(ctx) - for ctx.Read(r, tsMultiplier) { - if err := ctx.InsertRows(db); err != nil { - return err - } - } - return ctx.Error() -} -func (ctx *pushCtx) InsertRows(db string) error { - rows := ctx.Rows.Rows rowsLen := 0 for i := range rows { rowsLen += len(rows[i].Fields) @@ -126,80 +82,16 @@ func (ctx *pushCtx) InsertRows(db string) error { return ic.FlushBufs() } -func (ctx *pushCtx) Read(r io.Reader, tsMultiplier int64) bool { - if ctx.err != nil { - return false - } - ctx.reqBuf, ctx.tailBuf, ctx.err = common.ReadLinesBlock(r, ctx.reqBuf, ctx.tailBuf) - if ctx.err != nil { - if ctx.err != io.EOF { - readErrors.Inc() - ctx.err = fmt.Errorf("cannot read influx line protocol data: %s", ctx.err) - } - return false - } - ctx.Rows.Unmarshal(bytesutil.ToUnsafeString(ctx.reqBuf)) - - // Adjust timestamps according to tsMultiplier - currentTs := time.Now().UnixNano() / 1e6 - if tsMultiplier >= 1 { - for i := range ctx.Rows.Rows { - row := &ctx.Rows.Rows[i] - if row.Timestamp == 0 { - row.Timestamp = currentTs - } else { - row.Timestamp /= tsMultiplier - } - } - } else if tsMultiplier < 0 { - tsMultiplier = -tsMultiplier - currentTs -= currentTs % tsMultiplier - for i := range ctx.Rows.Rows { - row := &ctx.Rows.Rows[i] - if row.Timestamp == 0 { - row.Timestamp = currentTs - } else { - row.Timestamp *= tsMultiplier - } - } - } - return true -} - -var ( - readCalls = metrics.NewCounter(`vm_read_calls_total{name="influx"}`) - readErrors = metrics.NewCounter(`vm_read_errors_total{name="influx"}`) -) - type pushCtx struct { - Rows influx.Rows - Common common.InsertCtx - - reqBuf []byte - tailBuf []byte + Common common.InsertCtx metricNameBuf []byte metricGroupBuf []byte - - err error -} - -func (ctx *pushCtx) Error() error { - if ctx.err == io.EOF { - return nil - } - return ctx.err } func (ctx *pushCtx) reset() { - ctx.Rows.Reset() ctx.Common.Reset(0) - - ctx.reqBuf = ctx.reqBuf[:0] - ctx.tailBuf = ctx.tailBuf[:0] ctx.metricNameBuf = ctx.metricNameBuf[:0] ctx.metricGroupBuf = ctx.metricGroupBuf[:0] - - ctx.err = nil } func getPushCtx() *pushCtx { diff --git a/app/vminsert/main.go b/app/vminsert/main.go index 9e66f5db3..79dd90d4c 100644 --- a/app/vminsert/main.go +++ b/app/vminsert/main.go @@ -6,15 +6,20 @@ import ( "net/http" "strings" - "github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/concurrencylimiter" 
"github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/graphite" "github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/influx" "github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/opentsdb" "github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/opentsdbhttp" - "github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/prometheus" + "github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/prompush" + "github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/promremotewrite" "github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/vmimport" "github.com/VictoriaMetrics/VictoriaMetrics/lib/httpserver" + graphiteserver "github.com/VictoriaMetrics/VictoriaMetrics/lib/ingestserver/graphite" + opentsdbserver "github.com/VictoriaMetrics/VictoriaMetrics/lib/ingestserver/opentsdb" + opentsdbhttpserver "github.com/VictoriaMetrics/VictoriaMetrics/lib/ingestserver/opentsdbhttp" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/promscrape" "github.com/VictoriaMetrics/VictoriaMetrics/lib/storage" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter" "github.com/VictoriaMetrics/metrics" ) @@ -28,29 +33,31 @@ var ( ) var ( - graphiteServer *graphite.Server - opentsdbServer *opentsdb.Server - opentsdbhttpServer *opentsdbhttp.Server + graphiteServer *graphiteserver.Server + opentsdbServer *opentsdbserver.Server + opentsdbhttpServer *opentsdbhttpserver.Server ) // Init initializes vminsert. func Init() { storage.SetMaxLabelsPerTimeseries(*maxLabelsPerTimeseries) - concurrencylimiter.Init() + writeconcurrencylimiter.Init() if len(*graphiteListenAddr) > 0 { - graphiteServer = graphite.MustStart(*graphiteListenAddr) + graphiteServer = graphiteserver.MustStart(*graphiteListenAddr, graphite.InsertHandler) } if len(*opentsdbListenAddr) > 0 { - opentsdbServer = opentsdb.MustStart(*opentsdbListenAddr) + opentsdbServer = opentsdbserver.MustStart(*opentsdbListenAddr, opentsdb.InsertHandler, opentsdbhttp.InsertHandler) } if len(*opentsdbHTTPListenAddr) > 0 { - opentsdbhttpServer = opentsdbhttp.MustStart(*opentsdbHTTPListenAddr) + opentsdbhttpServer = opentsdbhttpserver.MustStart(*opentsdbHTTPListenAddr, opentsdbhttp.InsertHandler) } + promscrape.Init(prompush.Push) } // Stop stops vminsert. 
func Stop() { + promscrape.Stop() if len(*graphiteListenAddr) > 0 { graphiteServer.MustStop() } @@ -68,7 +75,7 @@ func RequestHandler(w http.ResponseWriter, r *http.Request) bool { switch path { case "/api/v1/write": prometheusWriteRequests.Inc() - if err := prometheus.InsertHandler(r); err != nil { + if err := promremotewrite.InsertHandler(r); err != nil { prometheusWriteErrors.Inc() httpserver.Errorf(w, "error in %q: %s", r.URL.Path, err) return true diff --git a/app/vminsert/opentsdb/request_handler.go b/app/vminsert/opentsdb/request_handler.go index 2a0c5a7cf..d18f1d03f 100644 --- a/app/vminsert/opentsdb/request_handler.go +++ b/app/vminsert/opentsdb/request_handler.go @@ -1,161 +1,44 @@ package opentsdb import ( - "fmt" "io" - "net" - "runtime" - "sync" - "time" "github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/common" - "github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/concurrencylimiter" - "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil" - "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentsdb" + parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentsdb" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter" "github.com/VictoriaMetrics/metrics" ) var ( rowsInserted = metrics.NewCounter(`vm_rows_inserted_total{type="opentsdb"}`) - rowsPerInsert = metrics.NewSummary(`vm_rows_per_insert{type="opentsdb"}`) + rowsPerInsert = metrics.NewHistogram(`vm_rows_per_insert{type="opentsdb"}`) ) -// insertHandler processes remote write for OpenTSDB put protocol. +// InsertHandler processes remote write for OpenTSDB put protocol. // // See http://opentsdb.net/docs/build/html/api_telnet/put.html -func insertHandler(r io.Reader) error { - return concurrencylimiter.Do(func() error { - return insertHandlerInternal(r) +func InsertHandler(r io.Reader) error { + return writeconcurrencylimiter.Do(func() error { + return parser.ParseStream(r, insertRows) }) } -func insertHandlerInternal(r io.Reader) error { - ctx := getPushCtx() - defer putPushCtx(ctx) - for ctx.Read(r) { - if err := ctx.InsertRows(); err != nil { - return err - } - } - return ctx.Error() -} +func insertRows(rows []parser.Row) error { + ctx := common.GetInsertCtx() + defer common.PutInsertCtx(ctx) -func (ctx *pushCtx) InsertRows() error { - rows := ctx.Rows.Rows - ic := &ctx.Common - ic.Reset(len(rows)) + ctx.Reset(len(rows)) for i := range rows { r := &rows[i] - ic.Labels = ic.Labels[:0] - ic.AddLabel("", r.Metric) + ctx.Labels = ctx.Labels[:0] + ctx.AddLabel("", r.Metric) for j := range r.Tags { tag := &r.Tags[j] - ic.AddLabel(tag.Key, tag.Value) + ctx.AddLabel(tag.Key, tag.Value) } - ic.WriteDataPoint(nil, ic.Labels, r.Timestamp, r.Value) + ctx.WriteDataPoint(nil, ctx.Labels, r.Timestamp, r.Value) } rowsInserted.Add(len(rows)) rowsPerInsert.Update(float64(len(rows))) - return ic.FlushBufs() + return ctx.FlushBufs() } - -const flushTimeout = 3 * time.Second - -func (ctx *pushCtx) Read(r io.Reader) bool { - readCalls.Inc() - if ctx.err != nil { - return false - } - if c, ok := r.(net.Conn); ok { - if err := c.SetReadDeadline(time.Now().Add(flushTimeout)); err != nil { - readErrors.Inc() - ctx.err = fmt.Errorf("cannot set read deadline: %s", err) - return false - } - } - ctx.reqBuf, ctx.tailBuf, ctx.err = common.ReadLinesBlock(r, ctx.reqBuf, ctx.tailBuf) - if ctx.err != nil { - if ne, ok := ctx.err.(net.Error); ok && ne.Timeout() { - // Flush the read data on timeout and try reading again. 
- ctx.err = nil - } else { - if ctx.err != io.EOF { - readErrors.Inc() - ctx.err = fmt.Errorf("cannot read OpenTSDB put protocol data: %s", ctx.err) - } - return false - } - } - ctx.Rows.Unmarshal(bytesutil.ToUnsafeString(ctx.reqBuf)) - - // Fill in missing timestamps - currentTimestamp := time.Now().Unix() - rows := ctx.Rows.Rows - for i := range rows { - r := &rows[i] - if r.Timestamp == 0 { - r.Timestamp = currentTimestamp - } - } - - // Convert timestamps from seconds to milliseconds - for i := range rows { - rows[i].Timestamp *= 1e3 - } - return true -} - -type pushCtx struct { - Rows opentsdb.Rows - Common common.InsertCtx - - reqBuf []byte - tailBuf []byte - - err error -} - -func (ctx *pushCtx) Error() error { - if ctx.err == io.EOF { - return nil - } - return ctx.err -} - -func (ctx *pushCtx) reset() { - ctx.Rows.Reset() - ctx.Common.Reset(0) - ctx.reqBuf = ctx.reqBuf[:0] - ctx.tailBuf = ctx.tailBuf[:0] - - ctx.err = nil -} - -var ( - readCalls = metrics.NewCounter(`vm_read_calls_total{name="opentsdb"}`) - readErrors = metrics.NewCounter(`vm_read_errors_total{name="opentsdb"}`) -) - -func getPushCtx() *pushCtx { - select { - case ctx := <-pushCtxPoolCh: - return ctx - default: - if v := pushCtxPool.Get(); v != nil { - return v.(*pushCtx) - } - return &pushCtx{} - } -} - -func putPushCtx(ctx *pushCtx) { - ctx.reset() - select { - case pushCtxPoolCh <- ctx: - default: - pushCtxPool.Put(ctx) - } -} - -var pushCtxPool sync.Pool -var pushCtxPoolCh = make(chan *pushCtx, runtime.GOMAXPROCS(-1)) diff --git a/app/vminsert/opentsdbhttp/request_handler.go b/app/vminsert/opentsdbhttp/request_handler.go index b8385a993..0d7d5bd75 100644 --- a/app/vminsert/opentsdbhttp/request_handler.go +++ b/app/vminsert/opentsdbhttp/request_handler.go @@ -1,151 +1,50 @@ package opentsdbhttp import ( - "flag" "fmt" - "io" "net/http" - "runtime" - "sync" - "time" "github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/common" - "github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/concurrencylimiter" - "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil" - "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentsdbhttp" + parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/opentsdbhttp" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter" "github.com/VictoriaMetrics/metrics" ) -var maxInsertRequestSize = flag.Int("opentsdbhttp.maxInsertRequestSize", 32*1024*1024, "The maximum size of OpenTSDB HTTP put request") - var ( - rowsInserted = metrics.NewCounter(`vm_rows_inserted_total{type="opentsdb-http"}`) - rowsPerInsert = metrics.NewSummary(`vm_rows_per_insert{type="opentsdb-http"}`) - - readCalls = metrics.NewCounter(`vm_read_calls_total{name="opentsdb-http"}`) - readErrors = metrics.NewCounter(`vm_read_errors_total{name="opentsdb-http"}`) - unmarshalErrors = metrics.NewCounter(`vm_unmarshal_errors_total{name="opentsdb-http"}`) + rowsInserted = metrics.NewCounter(`vm_rows_inserted_total{type="opentsdbhttp"}`) + rowsPerInsert = metrics.NewHistogram(`vm_rows_per_insert{type="opentsdbhttp"}`) ) -// insertHandler processes HTTP OpenTSDB put requests. +// InsertHandler processes HTTP OpenTSDB put requests. 
// See http://opentsdb.net/docs/build/html/api_http/put.html -func insertHandler(req *http.Request) error { - return concurrencylimiter.Do(func() error { - return insertHandlerInternal(req) - }) +func InsertHandler(req *http.Request) error { + path := req.URL.Path + switch path { + case "/api/put": + return writeconcurrencylimiter.Do(func() error { + return parser.ParseStream(req, insertRows) + }) + default: + return fmt.Errorf("unexpected path requested on HTTP OpenTSDB server: %q", path) + } } -func insertHandlerInternal(req *http.Request) error { - readCalls.Inc() +func insertRows(rows []parser.Row) error { + ctx := common.GetInsertCtx() + defer common.PutInsertCtx(ctx) - r := req.Body - if req.Header.Get("Content-Encoding") == "gzip" { - zr, err := common.GetGzipReader(r) - if err != nil { - readErrors.Inc() - return fmt.Errorf("cannot read gzipped http protocol data: %s", err) - } - defer common.PutGzipReader(zr) - r = zr - } - - ctx := getPushCtx() - defer putPushCtx(ctx) - - // Read the request in ctx.reqBuf - lr := io.LimitReader(r, int64(*maxInsertRequestSize)+1) - reqLen, err := ctx.reqBuf.ReadFrom(lr) - if err != nil { - readErrors.Inc() - return fmt.Errorf("cannot read HTTP OpenTSDB request: %s", err) - } - if reqLen > int64(*maxInsertRequestSize) { - readErrors.Inc() - return fmt.Errorf("too big HTTP OpenTSDB request; mustn't exceed `-opentsdbhttp.maxInsertRequestSize=%d` bytes", *maxInsertRequestSize) - } - - // Unmarshal the request to ctx.Rows - p := opentsdbhttp.GetParser() - defer opentsdbhttp.PutParser(p) - v, err := p.ParseBytes(ctx.reqBuf.B) - if err != nil { - unmarshalErrors.Inc() - return fmt.Errorf("cannot parse HTTP OpenTSDB json: %s", err) - } - ctx.Rows.Unmarshal(v) - - // Fill in missing timestamps - currentTimestamp := time.Now().Unix() - rows := ctx.Rows.Rows + ctx.Reset(len(rows)) for i := range rows { r := &rows[i] - if r.Timestamp == 0 { - r.Timestamp = currentTimestamp - } - } - - // Convert timestamps in seconds to milliseconds if needed. - // See http://opentsdb.net/docs/javadoc/net/opentsdb/core/Const.html#SECOND_MASK - for i := range rows { - r := &rows[i] - if r.Timestamp&secondMask == 0 { - r.Timestamp *= 1e3 - } - } - - // Insert ctx.Rows to db. 
- ic := &ctx.Common - ic.Reset(len(rows)) - for i := range rows { - r := &rows[i] - ic.Labels = ic.Labels[:0] - ic.AddLabel("", r.Metric) + ctx.Labels = ctx.Labels[:0] + ctx.AddLabel("", r.Metric) for j := range r.Tags { tag := &r.Tags[j] - ic.AddLabel(tag.Key, tag.Value) + ctx.AddLabel(tag.Key, tag.Value) } - ic.WriteDataPoint(nil, ic.Labels, r.Timestamp, r.Value) + ctx.WriteDataPoint(nil, ctx.Labels, r.Timestamp, r.Value) } rowsInserted.Add(len(rows)) rowsPerInsert.Update(float64(len(rows))) - return ic.FlushBufs() + return ctx.FlushBufs() } - -const secondMask int64 = 0x7FFFFFFF00000000 - -type pushCtx struct { - Rows opentsdbhttp.Rows - Common common.InsertCtx - - reqBuf bytesutil.ByteBuffer -} - -func (ctx *pushCtx) reset() { - ctx.Rows.Reset() - ctx.Common.Reset(0) - ctx.reqBuf.Reset() -} - -func getPushCtx() *pushCtx { - select { - case ctx := <-pushCtxPoolCh: - return ctx - default: - if v := pushCtxPool.Get(); v != nil { - return v.(*pushCtx) - } - return &pushCtx{} - } -} - -func putPushCtx(ctx *pushCtx) { - ctx.reset() - select { - case pushCtxPoolCh <- ctx: - default: - pushCtxPool.Put(ctx) - } -} - -var pushCtxPool sync.Pool -var pushCtxPoolCh = make(chan *pushCtx, runtime.GOMAXPROCS(-1)) diff --git a/app/vminsert/prompush/push.go b/app/vminsert/prompush/push.go new file mode 100644 index 000000000..6235a7c9a --- /dev/null +++ b/app/vminsert/prompush/push.go @@ -0,0 +1,97 @@ +package prompush + +import ( + "runtime" + "sync" + + "github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/common" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal" + "github.com/VictoriaMetrics/metrics" +) + +var ( + rowsInserted = metrics.NewCounter(`vm_rows_inserted_total{type="promscrape"}`) + rowsPerInsert = metrics.NewHistogram(`vm_rows_per_insert{type="promscrape"}`) +) + +// Push pushes wr to to storage. 
+func Push(wr *prompbmarshal.WriteRequest) { + ctx := getPushCtx() + defer putPushCtx(ctx) + + timeseries := wr.Timeseries + rowsLen := 0 + for i := range timeseries { + rowsLen += len(timeseries[i].Samples) + } + ic := &ctx.Common + ic.Reset(rowsLen) + rowsTotal := 0 + labels := ctx.labels[:0] + for i := range timeseries { + ts := &timeseries[i] + labels = labels[:0] + for j := range ts.Labels { + label := &ts.Labels[j] + labels = append(labels, prompb.Label{ + Name: bytesutil.ToUnsafeBytes(label.Name), + Value: bytesutil.ToUnsafeBytes(label.Value), + }) + } + var metricNameRaw []byte + for i := range ts.Samples { + r := &ts.Samples[i] + metricNameRaw = ic.WriteDataPointExt(metricNameRaw, labels, r.Timestamp, r.Value) + } + rowsTotal += len(ts.Samples) + } + ctx.labels = labels + rowsInserted.Add(rowsTotal) + rowsPerInsert.Update(float64(rowsTotal)) + if err := ic.FlushBufs(); err != nil { + logger.Errorf("cannot flush promscrape data to storage: %s", err) + } +} + +type pushCtx struct { + Common common.InsertCtx + labels []prompb.Label +} + +func (ctx *pushCtx) reset() { + ctx.Common.Reset(0) + + for i := range ctx.labels { + label := &ctx.labels[i] + label.Name = nil + label.Value = nil + } + ctx.labels = ctx.labels[:0] +} + +func getPushCtx() *pushCtx { + select { + case ctx := <-pushCtxPoolCh: + return ctx + default: + if v := pushCtxPool.Get(); v != nil { + return v.(*pushCtx) + } + return &pushCtx{} + } +} + +func putPushCtx(ctx *pushCtx) { + ctx.reset() + select { + case pushCtxPoolCh <- ctx: + default: + pushCtxPool.Put(ctx) + } +} + +var pushCtxPool sync.Pool +var pushCtxPoolCh = make(chan *pushCtx, runtime.GOMAXPROCS(-1)) diff --git a/app/vminsert/promremotewrite/request_handler.go b/app/vminsert/promremotewrite/request_handler.go new file mode 100644 index 000000000..7fab4bcfb --- /dev/null +++ b/app/vminsert/promremotewrite/request_handler.go @@ -0,0 +1,47 @@ +package promremotewrite + +import ( + "net/http" + + "github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/common" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb" + parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/promremotewrite" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter" + "github.com/VictoriaMetrics/metrics" +) + +var ( + rowsInserted = metrics.NewCounter(`vm_rows_inserted_total{type="promremotewrite"}`) + rowsPerInsert = metrics.NewHistogram(`vm_rows_per_insert{type="promremotewrite"}`) +) + +// InsertHandler processes remote write for prometheus. 
+func InsertHandler(req *http.Request) error { + return writeconcurrencylimiter.Do(func() error { + return parser.ParseStream(req, insertRows) + }) +} + +func insertRows(timeseries []prompb.TimeSeries) error { + ctx := common.GetInsertCtx() + defer common.PutInsertCtx(ctx) + + rowsLen := 0 + for i := range timeseries { + rowsLen += len(timeseries[i].Samples) + } + ctx.Reset(rowsLen) + rowsTotal := 0 + for i := range timeseries { + ts := &timeseries[i] + var metricNameRaw []byte + for i := range ts.Samples { + r := &ts.Samples[i] + metricNameRaw = ctx.WriteDataPointExt(metricNameRaw, ts.Labels, r.Timestamp, r.Value) + } + rowsTotal += len(ts.Samples) + } + rowsInserted.Add(rowsTotal) + rowsPerInsert.Update(float64(rowsTotal)) + return ctx.FlushBufs() +} diff --git a/app/vminsert/vmimport/request_handler.go b/app/vminsert/vmimport/request_handler.go index 9e82d4e6c..ecaf20957 100644 --- a/app/vminsert/vmimport/request_handler.go +++ b/app/vminsert/vmimport/request_handler.go @@ -1,61 +1,35 @@ package vmimport import ( - "flag" - "fmt" - "io" "net/http" "runtime" "sync" "github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/common" - "github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/concurrencylimiter" - "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil" + parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/vmimport" "github.com/VictoriaMetrics/VictoriaMetrics/lib/storage" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/writeconcurrencylimiter" "github.com/VictoriaMetrics/metrics" ) -var maxLineLen = flag.Int("import.maxLineLen", 100*1024*1024, "The maximum length in bytes of a single line accepted by /api/v1/import") - var ( rowsInserted = metrics.NewCounter(`vm_rows_inserted_total{type="vmimport"}`) - rowsPerInsert = metrics.NewSummary(`vm_rows_per_insert{type="vmimport"}`) + rowsPerInsert = metrics.NewHistogram(`vm_rows_per_insert{type="vmimport"}`) ) // InsertHandler processes `/api/v1/import` request. 
// // See https://github.com/VictoriaMetrics/VictoriaMetrics/issues/6 func InsertHandler(req *http.Request) error { - return concurrencylimiter.Do(func() error { - return insertHandlerInternal(req) + return writeconcurrencylimiter.Do(func() error { + return parser.ParseStream(req, insertRows) }) } -func insertHandlerInternal(req *http.Request) error { - readCalls.Inc() - - r := req.Body - if req.Header.Get("Content-Encoding") == "gzip" { - zr, err := common.GetGzipReader(r) - if err != nil { - return fmt.Errorf("cannot read gzipped vmimport data: %s", err) - } - defer common.PutGzipReader(zr) - r = zr - } - +func insertRows(rows []parser.Row) error { ctx := getPushCtx() defer putPushCtx(ctx) - for ctx.Read(r) { - if err := ctx.InsertRows(); err != nil { - return err - } - } - return ctx.Error() -} -func (ctx *pushCtx) InsertRows() error { - rows := ctx.Rows.Rows rowsLen := 0 for i := range rows { rowsLen += len(rows[i].Values) @@ -85,54 +59,14 @@ func (ctx *pushCtx) InsertRows() error { return ic.FlushBufs() } -func (ctx *pushCtx) Read(r io.Reader) bool { - if ctx.err != nil { - return false - } - ctx.reqBuf, ctx.tailBuf, ctx.err = common.ReadLinesBlockExt(r, ctx.reqBuf, ctx.tailBuf, *maxLineLen) - if ctx.err != nil { - if ctx.err != io.EOF { - readErrors.Inc() - ctx.err = fmt.Errorf("cannot read vmimport data: %s", ctx.err) - } - return false - } - ctx.Rows.Unmarshal(bytesutil.ToUnsafeString(ctx.reqBuf)) - return true -} - -var ( - readCalls = metrics.NewCounter(`vm_read_calls_total{name="vmimport"}`) - readErrors = metrics.NewCounter(`vm_read_errors_total{name="vmimport"}`) -) - type pushCtx struct { - Rows Rows - Common common.InsertCtx - - reqBuf []byte - tailBuf []byte + Common common.InsertCtx metricNameBuf []byte - - err error -} - -func (ctx *pushCtx) Error() error { - if ctx.err == io.EOF { - return nil - } - return ctx.err } func (ctx *pushCtx) reset() { - ctx.Rows.Reset() ctx.Common.Reset(0) - - ctx.reqBuf = ctx.reqBuf[:0] - ctx.tailBuf = ctx.tailBuf[:0] ctx.metricNameBuf = ctx.metricNameBuf[:0] - - ctx.err = nil } func getPushCtx() *pushCtx { diff --git a/go.mod b/go.mod index d727849d9..67b364380 100644 --- a/go.mod +++ b/go.mod @@ -9,6 +9,7 @@ require ( github.com/cespare/xxhash/v2 v2.1.1 github.com/golang/snappy v0.0.1 github.com/klauspost/compress v1.10.0 + github.com/valyala/fasthttp v1.9.0 github.com/valyala/fastjson v1.5.0 github.com/valyala/fastrand v1.0.0 github.com/valyala/gozstd v1.6.4 @@ -18,6 +19,7 @@ require ( golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4 golang.org/x/tools v0.0.0-20200214225126-5916a50871fb // indirect google.golang.org/api v0.17.0 + gopkg.in/yaml.v2 v2.2.8 ) go 1.12 diff --git a/go.sum b/go.sum index a7df25ffa..d1ad32cb8 100644 --- a/go.sum +++ b/go.sum @@ -93,10 +93,12 @@ github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/X github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.10.0 h1:92XGj1AcYzA6UrVdd4qIIBrT8OroryvRvdmg/IfmC7Y= github.com/klauspost/compress v1.10.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid 
v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -114,6 +116,8 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasthttp v1.2.0/go.mod h1:4vX61m6KN+xDduDNwXrhIAVZaZaZiQ1luJk8LWSxF3s= +github.com/valyala/fasthttp v1.9.0 h1:hNpmUdy/+ZXYpGy0OBfm7K0UQTzb73W0T0U4iJIVrMw= +github.com/valyala/fasthttp v1.9.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w= github.com/valyala/fastjson v1.5.0 h1:DGrb4wEYso2HdGLyLmNoyNCQnCWfjd8yhghPv5/5YQg= github.com/valyala/fastjson v1.5.0/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY= github.com/valyala/fastrand v1.0.0 h1:LUKT9aKer2dVQNUi3waewTbKV+7H17kvWFNKs2ObdkI= @@ -141,7 +145,6 @@ golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxT golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299 h1:zQpM52jfKHG6II1ISZY1ZcpygvuSFZpLwfluuF89XOg= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd h1:zkO/Lhoka23X63N9OSzpSeROEUQ5ODw47tM3YWjygbs= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= @@ -181,6 +184,7 @@ golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -293,6 +297,8 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/lib/filestream/filestream.go b/lib/filestream/filestream.go index b5fcf8d78..0633cad95 100644 --- a/lib/filestream/filestream.go +++ b/lib/filestream/filestream.go @@ -212,7 +212,7 @@ func newWriter(f *os.File, nocache bool) *Writer { // MustClose syncs the underlying file to storage and then closes it. func (w *Writer) MustClose() { if err := w.bw.Flush(); err != nil { - logger.Panicf("FATAL: cannot flush data to file %q: %s", w.f.Name(), err) + logger.Panicf("FATAL: cannot flush buffered data to file %q: %s", w.f.Name(), err) } putBufioWriter(w.bw) w.bw = nil @@ -253,6 +253,14 @@ func (w *Writer) Write(p []byte) (int, error) { return n, nil } +// MustFlush flushes all the buffered data to file. +func (w *Writer) MustFlush() { + if err := w.bw.Flush(); err != nil { + logger.Panicf("FATAL: cannot flush buffered data to file %q: %s", w.f.Name(), err) + } + // Do not call w.f.Sync() for performance reasons. +} + type statWriter struct { *os.File } diff --git a/lib/flagutil/array.go b/lib/flagutil/array.go new file mode 100644 index 000000000..8f1d6846b --- /dev/null +++ b/lib/flagutil/array.go @@ -0,0 +1,28 @@ +package flagutil + +import ( + "flag" + "strings" +) + +// NewArray returns new Array with the given name and descprition. +func NewArray(name, description string) *Array { + var a Array + flag.Var(&a, name, description) + return &a +} + +// Array holds an array of flag values +type Array []string + +// String implements flag.Value interface +func (a *Array) String() string { + return strings.Join(*a, ",") +} + +// Set implements flag.Value interface +func (a *Array) Set(value string) error { + values := strings.Split(value, ",") + *a = append(*a, values...) + return nil +} diff --git a/lib/flagutil/array_test.go b/lib/flagutil/array_test.go new file mode 100644 index 000000000..835d9cfa5 --- /dev/null +++ b/lib/flagutil/array_test.go @@ -0,0 +1,34 @@ +package flagutil + +import ( + "flag" + "os" + "testing" +) + +var fooFlag Array + +func init() { + os.Args = append(os.Args, "--fooFlag=foo", "--fooFlag=bar") + flag.Var(&fooFlag, "fooFlag", "test") +} + +func TestMain(m *testing.M) { + flag.Parse() + os.Exit(m.Run()) +} + +func TestArray(t *testing.T) { + expected := map[string]struct{}{ + "foo": {}, + "bar": {}, + } + if len(expected) != len(fooFlag) { + t.Errorf("len array flag (%d) is not equal to %d", len(fooFlag), len(expected)) + } + for _, i := range fooFlag { + if _, ok := expected[i]; !ok { + t.Errorf("unexpected item in array %v", i) + } + } +} diff --git a/lib/httpserver/metrics.go b/lib/httpserver/metrics.go index 4dae6fb73..7fab0a134 100644 --- a/lib/httpserver/metrics.go +++ b/lib/httpserver/metrics.go @@ -23,13 +23,11 @@ func WritePrometheusMetrics(w io.Writer) { fmt.Fprintf(w, "vm_app_start_timestamp %d\n", startTime.Unix()) fmt.Fprintf(w, "vm_app_uptime_seconds %d\n", int(time.Since(startTime).Seconds())) - // TODO: export other interesting stuff. - // Export flags as metrics. flag.VisitAll(func(f *flag.Flag) { lname := strings.ToLower(f.Name) value := f.Value.String() - if strings.Contains(lname, "pass") || strings.Contains(lname, "key") || strings.Contains(lname, "secret") { + if isSecretFlag(lname) { // Do not expose passwords and keys to prometheus. 
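+			// The reported value is replaced with the `secret` placeholder below.
+			// Flags registered via RegisterSecretFlag are masked in the same way.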
value = "secret" } @@ -38,3 +36,23 @@ func WritePrometheusMetrics(w io.Writer) { } var startTime = time.Now() + +// RegisterSecretFlag registers flagName as secret. +// +// This function must be called before starting httpserver. +// It cannot be called from concurrent goroutines. +// +// Secret flags aren't exported at `/metrics` page. +func RegisterSecretFlag(flagName string) { + lname := strings.ToLower(flagName) + secretFlags[lname] = true +} + +var secretFlags = make(map[string]bool) + +func isSecretFlag(s string) bool { + if strings.Contains(s, "pass") || strings.Contains(s, "key") || strings.Contains(s, "secret") || strings.Contains(s, "token") { + return true + } + return secretFlags[s] +} diff --git a/app/vminsert/graphite/server.go b/lib/ingestserver/graphite/server.go similarity index 81% rename from app/vminsert/graphite/server.go rename to lib/ingestserver/graphite/server.go index 621e3c6e0..41955a4c0 100644 --- a/app/vminsert/graphite/server.go +++ b/lib/ingestserver/graphite/server.go @@ -1,6 +1,7 @@ package graphite import ( + "io" "net" "runtime" "strings" @@ -14,11 +15,11 @@ import ( ) var ( - writeRequestsTCP = metrics.NewCounter(`vm_graphite_requests_total{name="write", net="tcp"}`) - writeErrorsTCP = metrics.NewCounter(`vm_graphite_request_errors_total{name="write", net="tcp"}`) + writeRequestsTCP = metrics.NewCounter(`vm_ingestserver_graphite_requests_total{name="write", net="tcp"}`) + writeErrorsTCP = metrics.NewCounter(`vm_ingestserver_graphite_request_errors_total{name="write", net="tcp"}`) - writeRequestsUDP = metrics.NewCounter(`vm_graphite_requests_total{name="write", net="udp"}`) - writeErrorsUDP = metrics.NewCounter(`vm_graphite_request_errors_total{name="write", net="udp"}`) + writeRequestsUDP = metrics.NewCounter(`vm_ingestserver_graphite_requests_total{name="write", net="udp"}`) + writeErrorsUDP = metrics.NewCounter(`vm_ingestserver_graphite_request_errors_total{name="write", net="udp"}`) ) // Server accepts Graphite plaintext lines over TCP and UDP. @@ -31,8 +32,10 @@ type Server struct { // MustStart starts graphite server on the given addr. // +// The incoming connections are processed with insertHandler. +// // MustStop must be called on the returned server when it is no longer needed. 
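+//
+// A minimal wiring sketch (the handler body is illustrative only and assumes
+// a line-oriented parser such as the Graphite stream parser):
+//
+//	srv := graphite.MustStart(":2003", func(r io.Reader) error {
+//		return parser.ParseStream(r, insertRows)
+//	})
+//	// ...
+//	srv.MustStop()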
-func MustStart(addr string) *Server { +func MustStart(addr string, insertHandler func(r io.Reader) error) *Server { logger.Infof("starting TCP Graphite server at %q", addr) lnTCP, err := netutil.NewTCPListener("graphite", addr) if err != nil { @@ -53,13 +56,13 @@ func MustStart(addr string) *Server { s.wg.Add(1) go func() { defer s.wg.Done() - serveTCP(lnTCP) + serveTCP(lnTCP, insertHandler) logger.Infof("stopped TCP Graphite server at %q", addr) }() s.wg.Add(1) go func() { defer s.wg.Done() - serveUDP(lnUDP) + serveUDP(lnUDP, insertHandler) logger.Infof("stopped UDP Graphite server at %q", addr) }() return s @@ -79,7 +82,7 @@ func (s *Server) MustStop() { logger.Infof("TCP and UDP Graphite servers at %q have been stopped", s.addr) } -func serveTCP(ln net.Listener) { +func serveTCP(ln net.Listener, insertHandler func(r io.Reader) error) { for { c, err := ln.Accept() if err != nil { @@ -107,7 +110,7 @@ func serveTCP(ln net.Listener) { } } -func serveUDP(ln net.PacketConn) { +func serveUDP(ln net.PacketConn, insertHandler func(r io.Reader) error) { gomaxprocs := runtime.GOMAXPROCS(-1) var wg sync.WaitGroup for i := 0; i < gomaxprocs; i++ { diff --git a/app/vminsert/opentsdb/listener_switch.go b/lib/ingestserver/opentsdb/listener_switch.go similarity index 100% rename from app/vminsert/opentsdb/listener_switch.go rename to lib/ingestserver/opentsdb/listener_switch.go diff --git a/app/vminsert/opentsdb/server.go b/lib/ingestserver/opentsdb/server.go similarity index 89% rename from app/vminsert/opentsdb/server.go rename to lib/ingestserver/opentsdb/server.go index 5f62355c1..b38e217fe 100644 --- a/app/vminsert/opentsdb/server.go +++ b/lib/ingestserver/opentsdb/server.go @@ -1,14 +1,16 @@ package opentsdb import ( + "io" "net" + "net/http" "runtime" "strings" "sync" "time" - "github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/opentsdbhttp" "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/ingestserver/opentsdbhttp" "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger" "github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil" "github.com/VictoriaMetrics/metrics" @@ -36,7 +38,7 @@ type Server struct { // MustStart starts OpenTSDB collector on the given addr. // // MustStop must be called on the returned server when it is no longer needed. 
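+//
+// A hypothetical caller passes separate handlers for the telnet and HTTP put protocols:
+//
+//	srv := opentsdb.MustStart(":4242", telnetInsertHandler, httpInsertHandler)
+//	// ...
+//	srv.MustStop()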
-func MustStart(addr string) *Server { +func MustStart(addr string, telnetInsertHandler func(r io.Reader) error, httpInsertHandler func(req *http.Request) error) *Server { logger.Infof("starting TCP OpenTSDB collector at %q", addr) lnTCP, err := netutil.NewTCPListener("opentsdb", addr) if err != nil { @@ -45,7 +47,7 @@ func MustStart(addr string) *Server { ls := newListenerSwitch(lnTCP) lnHTTP := ls.newHTTPListener() lnTelnet := ls.newTelnetListener() - httpServer := opentsdbhttp.MustServe(lnHTTP) + httpServer := opentsdbhttp.MustServe(lnHTTP, httpInsertHandler) logger.Infof("starting UDP OpenTSDB collector at %q", addr) lnUDP, err := net.ListenPacket("udp4", addr) @@ -62,7 +64,7 @@ func MustStart(addr string) *Server { s.wg.Add(1) go func() { defer s.wg.Done() - serveTelnet(lnTelnet) + serveTelnet(lnTelnet, telnetInsertHandler) logger.Infof("stopped TCP telnet OpenTSDB server at %q", addr) }() s.wg.Add(1) @@ -74,7 +76,7 @@ func MustStart(addr string) *Server { s.wg.Add(1) go func() { defer s.wg.Done() - serveUDP(lnUDP) + serveUDP(lnUDP, telnetInsertHandler) logger.Infof("stopped UDP OpenTSDB server at %q", addr) }() return s @@ -100,7 +102,7 @@ func (s *Server) MustStop() { logger.Infof("TCP and UDP OpenTSDB servers at %q have been stopped", s.addr) } -func serveTelnet(ln net.Listener) { +func serveTelnet(ln net.Listener, insertHandler func(r io.Reader) error) { for { c, err := ln.Accept() if err != nil { @@ -128,7 +130,7 @@ func serveTelnet(ln net.Listener) { } } -func serveUDP(ln net.PacketConn) { +func serveUDP(ln net.PacketConn, insertHandler func(r io.Reader) error) { gomaxprocs := runtime.GOMAXPROCS(-1) var wg sync.WaitGroup for i := 0; i < gomaxprocs; i++ { diff --git a/app/vminsert/opentsdbhttp/server.go b/lib/ingestserver/opentsdbhttp/server.go similarity index 70% rename from app/vminsert/opentsdbhttp/server.go rename to lib/ingestserver/opentsdbhttp/server.go index 0aaaeb32c..65b06af55 100644 --- a/app/vminsert/opentsdbhttp/server.go +++ b/lib/ingestserver/opentsdbhttp/server.go @@ -14,8 +14,8 @@ import ( ) var ( - writeRequests = metrics.NewCounter(`vm_http_requests_total{path="/api/put", protocol="opentsdb-http"}`) - writeErrors = metrics.NewCounter(`vm_http_request_errors_total{path="/api/put", protocol="opentsdb-http"}`) + writeRequests = metrics.NewCounter(`vm_opentsdbhttp_requests_total{name="write", net="tcp"}`) + writeErrors = metrics.NewCounter(`vm_opentsdbhttp_request_errors_total{name="write", net="tcp"}`) ) // Server represents HTTP OpenTSDB server. @@ -28,20 +28,20 @@ type Server struct { // MustStart starts HTTP OpenTSDB server on the given addr. // // MustStop must be called on the returned server when it is no longer needed. -func MustStart(addr string) *Server { +func MustStart(addr string, insertHandler func(req *http.Request) error) *Server { logger.Infof("starting HTTP OpenTSDB server at %q", addr) lnTCP, err := netutil.NewTCPListener("opentsdbhttp", addr) if err != nil { logger.Fatalf("cannot start HTTP OpenTSDB collector at %q: %s", addr, err) } - return MustServe(lnTCP) + return MustServe(lnTCP, insertHandler) } // MustServe serves OpenTSDB HTTP put requests from ln. // // MustStop must be called on the returned server when it is no longer needed. 
-func MustServe(ln net.Listener) *Server { - h := newRequestHandler() +func MustServe(ln net.Listener, insertHandler func(req *http.Request) error) *Server { + h := newRequestHandler(insertHandler) hs := &http.Server{ Handler: h, ReadTimeout: 30 * time.Second, @@ -82,20 +82,15 @@ func (s *Server) MustStop() { logger.Infof("OpenTSDB HTTP server at %q has been stopped", s.ln.Addr()) } -func newRequestHandler() http.Handler { - rh := func(w http.ResponseWriter, r *http.Request) { - switch r.URL.Path { - case "/api/put": - writeRequests.Inc() - if err := insertHandler(r); err != nil { - writeErrors.Inc() - httpserver.Errorf(w, "error in %q: %s", r.URL.Path, err) - return - } - w.WriteHeader(http.StatusNoContent) - default: - httpserver.Errorf(w, "unexpected path requested on HTTP OpenTSDB server: %q", r.URL.Path) +func newRequestHandler(insertHandler func(req *http.Request) error) http.Handler { + rh := func(w http.ResponseWriter, req *http.Request) { + writeRequests.Inc() + if err := insertHandler(req); err != nil { + writeErrors.Inc() + httpserver.Errorf(w, "error in %q: %s", req.URL.Path, err) + return } + w.WriteHeader(http.StatusNoContent) } return http.HandlerFunc(rh) } diff --git a/lib/netutil/tcplistener.go b/lib/netutil/tcplistener.go index 02f8dda9f..5ce16d606 100644 --- a/lib/netutil/tcplistener.go +++ b/lib/netutil/tcplistener.go @@ -32,6 +32,11 @@ func NewTCPListener(name, addr string) (*TCPListener, error) { return tln, err } +// TCP6Enabled returns true if dialing and listening for IPv4 TCP is enabled. +func TCP6Enabled() bool { + return *enableTCP6 +} + func getNetwork() string { if *enableTCP6 { // Enable both tcp4 and tcp6 diff --git a/lib/persistentqueue/fastqueue.go b/lib/persistentqueue/fastqueue.go new file mode 100644 index 000000000..9cd217eff --- /dev/null +++ b/lib/persistentqueue/fastqueue.go @@ -0,0 +1,153 @@ +package persistentqueue + +import ( + "sync" + + "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger" +) + +// FastQueue is a wrapper around Queue, which prefers sending data via memory. +// +// It falls back to sending data via file when readers don't catch up with writers. +type FastQueue struct { + // my protects the state of FastQueue. + mu sync.Mutex + + // cond is used for notifying blocked readers when new data has been added + // or when MustClose is called. + cond sync.Cond + + // q is file-based queue + q *Queue + + // ch is in-memory queue + ch chan *bytesutil.ByteBuffer + + pendingInmemoryBytes uint64 + + mustStop bool +} + +// MustOpenFastQueue opens persistent queue at the given path. +// +// It holds up to maxInmemoryBlocks in memory before falling back to file-based persistence. +func MustOpenFastQueue(path, name string, maxInmemoryBlocks int) *FastQueue { + q := MustOpen(path, name) + fq := &FastQueue{ + q: q, + ch: make(chan *bytesutil.ByteBuffer, maxInmemoryBlocks), + } + fq.cond.L = &fq.mu + logger.Infof("opened fast persistent queue at %q with maxInmemoryBlocks=%d", path, maxInmemoryBlocks) + return fq +} + +// MustClose unblocks all the readers. +// +// It is expected no new writers during and after the call. 
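+//
+// Blocks still sitting in the in-memory channel are flushed to the file-based
+// queue before closing, so they survive process restarts.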
+func (fq *FastQueue) MustClose() { + fq.mu.Lock() + defer fq.mu.Unlock() + + // Unblock blocked readers + fq.mustStop = true + fq.cond.Broadcast() + + // flush blocks from fq.ch to fq.q, so they can be persisted + fq.flushInmemoryBlocksToFileLocked() + + // Close fq.q + fq.q.MustClose() + + logger.Infof("closed fast persistent queue at %q", fq.q.dir) +} + +func (fq *FastQueue) flushInmemoryBlocksToFileLocked() { + // fq.mu must be locked by the caller. + for len(fq.ch) > 0 { + bb := <-fq.ch + fq.q.MustWriteBlock(bb.B) + fq.pendingInmemoryBytes -= uint64(len(bb.B)) + blockBufPool.Put(bb) + } +} + +// GetPendingBytes returns the number of pending bytes in the fq. +func (fq *FastQueue) GetPendingBytes() uint64 { + fq.mu.Lock() + defer fq.mu.Unlock() + + n := fq.pendingInmemoryBytes + n += fq.q.GetPendingBytes() + return n +} + +// GetInmemoryQueueLen returns the length of inmemory queue. +func (fq *FastQueue) GetInmemoryQueueLen() int { + fq.mu.Lock() + defer fq.mu.Unlock() + + return len(fq.ch) +} + +// MustWriteBlock writes block to fq. +func (fq *FastQueue) MustWriteBlock(block []byte) { + fq.mu.Lock() + defer fq.mu.Unlock() + + if n := fq.q.GetPendingBytes(); n > 0 { + // The file-based queue isn't drained yet. This means that in-memory queue cannot be used yet. + // So put the block to file-based queue. + if len(fq.ch) > 0 { + logger.Panicf("BUG: the in-memory queue must be empty when the file-based queue is non-empty; it contains %d pending bytes", n) + } + fq.q.MustWriteBlock(block) + return + } + if len(fq.ch) == cap(fq.ch) { + // There is no space in the in-memory queue. Put the data to file-based queue. + fq.flushInmemoryBlocksToFileLocked() + fq.q.MustWriteBlock(block) + return + } + // There is enough space in the in-memory queue. + bb := blockBufPool.Get() + bb.B = append(bb.B[:0], block...) + fq.ch <- bb + fq.pendingInmemoryBytes += uint64(len(block)) + if len(fq.ch) == 1 { + // Notify potentially blocked reader + fq.cond.Signal() + } +} + +// MustReadBlock reads the next block from fq to dst and returns it. +func (fq *FastQueue) MustReadBlock(dst []byte) ([]byte, bool) { + fq.mu.Lock() + defer fq.mu.Unlock() + + for { + if fq.mustStop { + return dst, false + } + if len(fq.ch) > 0 { + if n := fq.q.GetPendingBytes(); n > 0 { + logger.Panicf("BUG: the file-based queue must be empty when the inmemory queue is empty; it contains %d pending bytes", n) + } + bb := <-fq.ch + fq.pendingInmemoryBytes -= uint64(len(bb.B)) + dst = append(dst, bb.B...) + blockBufPool.Put(bb) + return dst, true + } + if n := fq.q.GetPendingBytes(); n > 0 { + return fq.q.MustReadBlock(dst) + } + + // There are no blocks. Wait for new block. 
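+		// cond.Wait releases fq.mu while waiting and re-acquires it before
+		// returning, so the checks above are re-evaluated after each wakeup.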
+ fq.cond.Wait() + } +} + +var blockBufPool bytesutil.ByteBufferPool diff --git a/lib/persistentqueue/fastqueue_test.go b/lib/persistentqueue/fastqueue_test.go new file mode 100644 index 000000000..289885a8c --- /dev/null +++ b/lib/persistentqueue/fastqueue_test.go @@ -0,0 +1,274 @@ +package persistentqueue + +import ( + "fmt" + "sync" + "testing" + "time" +) + +func TestFastQueueOpenClose(t *testing.T) { + path := "fast-queue-open-close" + mustDeleteDir(path) + for i := 0; i < 10; i++ { + fq := MustOpenFastQueue(path, "foobar", 100) + fq.MustClose() + } + mustDeleteDir(path) +} + +func TestFastQueueWriteReadInmemory(t *testing.T) { + path := "fast-queue-write-read-inmemory" + mustDeleteDir(path) + + capacity := 100 + fq := MustOpenFastQueue(path, "foobar", capacity) + var blocks []string + for i := 0; i < capacity; i++ { + block := fmt.Sprintf("block %d", i) + fq.MustWriteBlock([]byte(block)) + blocks = append(blocks, block) + } + for _, block := range blocks { + buf, ok := fq.MustReadBlock(nil) + if !ok { + t.Fatalf("unexpected ok=false") + } + if string(buf) != block { + t.Fatalf("unexpected block read; got %q; want %q", buf, block) + } + } + fq.MustClose() + mustDeleteDir(path) +} + +func TestFastQueueWriteReadMixed(t *testing.T) { + path := "fast-queue-write-read-mixed" + mustDeleteDir(path) + + capacity := 100 + fq := MustOpenFastQueue(path, "foobar", capacity) + if n := fq.GetPendingBytes(); n != 0 { + t.Fatalf("the number of pending bytes must be 0; got %d", n) + } + var blocks []string + for i := 0; i < 2*capacity; i++ { + block := fmt.Sprintf("block %d", i) + fq.MustWriteBlock([]byte(block)) + blocks = append(blocks, block) + } + if n := fq.GetPendingBytes(); n == 0 { + t.Fatalf("the number of pending bytes must be greater than 0") + } + for _, block := range blocks { + buf, ok := fq.MustReadBlock(nil) + if !ok { + t.Fatalf("unexpected ok=false") + } + if string(buf) != block { + t.Fatalf("unexpected block read; got %q; want %q", buf, block) + } + } + if n := fq.GetPendingBytes(); n != 0 { + t.Fatalf("the number of pending bytes must be 0; got %d", n) + } + fq.MustClose() + mustDeleteDir(path) +} + +func TestFastQueueWriteReadWithCloses(t *testing.T) { + path := "fast-queue-write-read-with-closes" + mustDeleteDir(path) + + capacity := 100 + fq := MustOpenFastQueue(path, "foobar", capacity) + if n := fq.GetPendingBytes(); n != 0 { + t.Fatalf("the number of pending bytes must be 0; got %d", n) + } + var blocks []string + for i := 0; i < 2*capacity; i++ { + block := fmt.Sprintf("block %d", i) + fq.MustWriteBlock([]byte(block)) + blocks = append(blocks, block) + fq.MustClose() + fq = MustOpenFastQueue(path, "foobar", capacity) + } + if n := fq.GetPendingBytes(); n == 0 { + t.Fatalf("the number of pending bytes must be greater than 0") + } + for _, block := range blocks { + buf, ok := fq.MustReadBlock(nil) + if !ok { + t.Fatalf("unexpected ok=false") + } + if string(buf) != block { + t.Fatalf("unexpected block read; got %q; want %q", buf, block) + } + fq.MustClose() + fq = MustOpenFastQueue(path, "foobar", capacity) + } + if n := fq.GetPendingBytes(); n != 0 { + t.Fatalf("the number of pending bytes must be 0; got %d", n) + } + fq.MustClose() + mustDeleteDir(path) +} + +func TestFastQueueReadUnblockByClose(t *testing.T) { + path := "fast-queue-read-unblock-by-close" + mustDeleteDir(path) + + fq := MustOpenFastQueue(path, "foorbar", 123) + resultCh := make(chan error) + go func() { + data, ok := fq.MustReadBlock(nil) + if ok { + resultCh <- fmt.Errorf("unexpected ok=true") + return + } 
+ if len(data) != 0 { + resultCh <- fmt.Errorf("unexpected non-empty data=%q", data) + return + } + resultCh <- nil + }() + fq.MustClose() + select { + case err := <-resultCh: + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + case <-time.After(time.Second): + t.Fatalf("timeout") + } + mustDeleteDir(path) +} + +func TestFastQueueReadUnblockByWrite(t *testing.T) { + path := "fast-queue-read-unblock-by-write" + mustDeleteDir(path) + + fq := MustOpenFastQueue(path, "foobar", 13) + block := fmt.Sprintf("foodsafdsaf sdf") + resultCh := make(chan error) + go func() { + data, ok := fq.MustReadBlock(nil) + if !ok { + resultCh <- fmt.Errorf("unexpected ok=false") + return + } + if string(data) != block { + resultCh <- fmt.Errorf("unexpected block read; got %q; want %q", data, block) + return + } + resultCh <- nil + }() + fq.MustWriteBlock([]byte(block)) + select { + case err := <-resultCh: + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + case <-time.After(time.Second): + t.Fatalf("timeout") + } + fq.MustClose() + mustDeleteDir(path) +} + +func TestFastQueueReadWriteConcurrent(t *testing.T) { + path := "fast-queue-read-write-concurrent" + mustDeleteDir(path) + + fq := MustOpenFastQueue(path, "foobar", 5) + + var blocks []string + blocksMap := make(map[string]bool) + var blocksMapLock sync.Mutex + for i := 0; i < 1000; i++ { + block := fmt.Sprintf("block %d", i) + blocks = append(blocks, block) + blocksMap[block] = true + } + + // Start readers + var readersWG sync.WaitGroup + for i := 0; i < 10; i++ { + readersWG.Add(1) + go func() { + defer readersWG.Done() + for { + data, ok := fq.MustReadBlock(nil) + if !ok { + return + } + blocksMapLock.Lock() + if !blocksMap[string(data)] { + panic(fmt.Errorf("unexpected data read from the queue: %q", data)) + } + delete(blocksMap, string(data)) + blocksMapLock.Unlock() + } + }() + } + + // Start writers + blocksCh := make(chan string) + var writersWG sync.WaitGroup + for i := 0; i < 10; i++ { + writersWG.Add(1) + go func() { + defer writersWG.Done() + for block := range blocksCh { + fq.MustWriteBlock([]byte(block)) + } + }() + } + + // feed writers + for _, block := range blocks { + blocksCh <- block + } + close(blocksCh) + + // Wait for writers to finish + writersWG.Wait() + + // wait for a while, so readers could catch up + time.Sleep(100 * time.Millisecond) + + // Close fq + fq.MustClose() + + // Wait for readers to finish + readersWG.Wait() + + // Collect the remaining data + fq = MustOpenFastQueue(path, "foobar", 5) + resultCh := make(chan error) + go func() { + for len(blocksMap) > 0 { + data, ok := fq.MustReadBlock(nil) + if !ok { + resultCh <- fmt.Errorf("unexpected ok=false") + return + } + if !blocksMap[string(data)] { + resultCh <- fmt.Errorf("unexpected data read from fq: %q", data) + return + } + delete(blocksMap, string(data)) + } + resultCh <- nil + }() + select { + case err := <-resultCh: + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + case <-time.After(time.Second * 5): + t.Fatalf("timeout") + } + fq.MustClose() + mustDeleteDir(path) +} diff --git a/lib/persistentqueue/fastqueue_timing_test.go b/lib/persistentqueue/fastqueue_timing_test.go new file mode 100644 index 000000000..78e959479 --- /dev/null +++ b/lib/persistentqueue/fastqueue_timing_test.go @@ -0,0 +1,66 @@ +package persistentqueue + +import ( + "fmt" + "runtime" + "testing" +) + +func BenchmarkFastQueueThroughputSerial(b *testing.B) { + const iterationsCount = 10 + for _, blockSize := range []int{1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6} { + 
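+		// blockSize ranges from 1 byte up to 1e6 bytes.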
block := make([]byte, blockSize) + b.Run(fmt.Sprintf("block-size-%d", blockSize), func(b *testing.B) { + b.ReportAllocs() + b.SetBytes(int64(blockSize) * iterationsCount) + path := fmt.Sprintf("bench-fast-queue-throughput-serial-%d", blockSize) + mustDeleteDir(path) + fq := MustOpenFastQueue(path, "foobar", iterationsCount*2) + defer func() { + fq.MustClose() + mustDeleteDir(path) + }() + for i := 0; i < b.N; i++ { + writeReadIterationFastQueue(fq, block, iterationsCount) + } + }) + } +} + +func BenchmarkFastQueueThroughputConcurrent(b *testing.B) { + const iterationsCount = 10 + for _, blockSize := range []int{1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6} { + block := make([]byte, blockSize) + b.Run(fmt.Sprintf("block-size-%d", blockSize), func(b *testing.B) { + b.ReportAllocs() + b.SetBytes(int64(blockSize) * iterationsCount) + path := fmt.Sprintf("bench-fast-queue-throughput-concurrent-%d", blockSize) + mustDeleteDir(path) + fq := MustOpenFastQueue(path, "foobar", iterationsCount*runtime.GOMAXPROCS(-1)*2) + defer func() { + fq.MustClose() + mustDeleteDir(path) + }() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + writeReadIterationFastQueue(fq, block, iterationsCount) + } + }) + }) + } +} + +func writeReadIterationFastQueue(fq *FastQueue, block []byte, iterationsCount int) { + for i := 0; i < iterationsCount; i++ { + fq.MustWriteBlock(block) + } + var ok bool + bb := bbPool.Get() + for i := 0; i < iterationsCount; i++ { + bb.B, ok = fq.MustReadBlock(bb.B[:0]) + if !ok { + panic(fmt.Errorf("unexpected ok=false")) + } + } + bbPool.Put(bb) +} diff --git a/lib/persistentqueue/persistentqueue.go b/lib/persistentqueue/persistentqueue.go new file mode 100644 index 000000000..bb41f4563 --- /dev/null +++ b/lib/persistentqueue/persistentqueue.go @@ -0,0 +1,489 @@ +package persistentqueue + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "regexp" + "strconv" + "sync" + + "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/encoding" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/filestream" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/fs" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger" +) + +// MaxBlockSize is the maximum size of the block persistent queue can work with. +const MaxBlockSize = 32 * 1024 * 1024 + +const defaultChunkFileSize = (MaxBlockSize + 8) * 16 + +var chunkFileNameRegex = regexp.MustCompile("^[0-9A-F]{16}$") + +// Queue represents persistent queue. +type Queue struct { + chunkFileSize uint64 + maxBlockSize uint64 + + dir string + name string + + // mu protects all the fields below. + mu sync.Mutex + + // cond is used for notifying blocked readers when new data has been added + // or when MustClose is called. + cond sync.Cond + + reader *filestream.Reader + readerPath string + readerOffset uint64 + readerLocalOffset uint64 + + writer *filestream.Writer + writerPath string + writerOffset uint64 + writerLocalOffset uint64 + writerFlushedOffset uint64 + + mustStop bool +} + +// GetPendingBytes returns the number of pending bytes in the queue. +func (q *Queue) GetPendingBytes() uint64 { + q.mu.Lock() + n := q.writerOffset - q.readerOffset + q.mu.Unlock() + return n +} + +// MustOpen opens persistent queue from the given path. 
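+//
+// A minimal usage sketch (path and queue name are illustrative):
+//
+//	q := persistentqueue.MustOpen("/path/to/queue", "example-queue")
+//	q.MustWriteBlock([]byte("payload"))
+//	buf, ok := q.MustReadBlock(nil)
+//	q.MustClose()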
+func MustOpen(path, name string) *Queue { + return mustOpen(path, name, defaultChunkFileSize, MaxBlockSize) +} + +func mustOpen(path, name string, chunkFileSize, maxBlockSize uint64) *Queue { + if chunkFileSize < 8 || chunkFileSize-8 < maxBlockSize { + logger.Panicf("BUG: too small chunkFileSize=%d for maxBlockSize=%d; chunkFileSize must fit at least one block", chunkFileSize, maxBlockSize) + } + if maxBlockSize <= 0 { + logger.Panicf("BUG: maxBlockSize must be greater than 0; got %d", maxBlockSize) + } + q, err := tryOpeningQueue(path, name, chunkFileSize, maxBlockSize) + if err != nil { + logger.Errorf("cannot open persistent queue at %q: %s; cleaning it up and trying again", path, err) + fs.RemoveDirContents(path) + q, err = tryOpeningQueue(path, name, chunkFileSize, maxBlockSize) + if err != nil { + logger.Panicf("FATAL: %s", err) + } + } + return q +} + +func tryOpeningQueue(path, name string, chunkFileSize, maxBlockSize uint64) (*Queue, error) { + var q Queue + q.chunkFileSize = chunkFileSize + q.maxBlockSize = maxBlockSize + q.dir = path + q.name = name + q.cond.L = &q.mu + + cleanOnError := func() { + if q.reader != nil { + q.reader.MustClose() + } + if q.writer != nil { + q.writer.MustClose() + } + } + + if err := fs.MkdirAllIfNotExist(path); err != nil { + return nil, fmt.Errorf("cannot create directory %q: %s", path, err) + } + + // Read metainfo. + var mi metainfo + metainfoPath := q.metainfoPath() + if err := mi.ReadFromFile(metainfoPath); err != nil { + if !os.IsNotExist(err) { + logger.Errorf("cannot read metainfo for persistent queue from %q: %s; re-creating %q", metainfoPath, err, path) + } + + // path contents is broken or missing. Re-create it from scratch. + fs.RemoveDirContents(path) + mi.Reset() + mi.Name = q.name + if err := mi.WriteToFile(metainfoPath); err != nil { + return nil, fmt.Errorf("cannot create %q: %s", metainfoPath, err) + } + + // Create initial chunk file. + filepath := q.chunkFilePath(0) + if err := fs.WriteFileAtomically(filepath, nil); err != nil { + return nil, fmt.Errorf("cannot create %q: %s", filepath, err) + } + } + if mi.Name != q.name { + return nil, fmt.Errorf("unexpected queue name; got %q; want %q", mi.Name, q.name) + } + + // Locate reader and writer chunks in the path. 
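+	// Chunk files are named after their starting offset as 16 uppercase hex digits
+	// (see chunkFilePath); entries that don't match this scheme are skipped or removed.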
+ fis, err := ioutil.ReadDir(path) + if err != nil { + return nil, fmt.Errorf("cannot read contents of the directory %q: %s", path, err) + } + for _, fi := range fis { + fname := fi.Name() + filepath := path + "/" + fname + if fi.IsDir() { + logger.Errorf("skipping unknown directory %q", filepath) + continue + } + if fname == "metainfo.json" { + // skip metainfo file + continue + } + if !chunkFileNameRegex.MatchString(fname) { + logger.Errorf("skipping unknown file %q", filepath) + continue + } + offset, err := strconv.ParseUint(fname, 16, 64) + if err != nil { + logger.Panicf("BUG: cannot parse hex %q: %s", fname, err) + } + if offset%q.chunkFileSize != 0 { + logger.Errorf("unexpected offset for chunk file %q: %d; it must divide by %d; removing the file", filepath, offset, q.chunkFileSize) + fs.MustRemoveAll(filepath) + continue + } + if mi.ReaderOffset >= offset+q.chunkFileSize { + logger.Errorf("unexpected chunk file found from the past: %q; removing it", filepath) + fs.MustRemoveAll(filepath) + continue + } + if mi.WriterOffset < offset { + logger.Errorf("unexpected chunk file found from the future: %q; removing it", filepath) + fs.MustRemoveAll(filepath) + continue + } + if mi.ReaderOffset >= offset && mi.ReaderOffset < offset+q.chunkFileSize { + // Found the chunk for reading + if q.reader != nil { + logger.Panicf("BUG: reader is already initialized with readerPath=%q, readerOffset=%d, readerLocalOffset=%d", + q.readerPath, q.readerOffset, q.readerLocalOffset) + } + q.readerPath = filepath + q.readerOffset = mi.ReaderOffset + q.readerLocalOffset = mi.ReaderOffset % q.chunkFileSize + if fileSize := fs.MustFileSize(q.readerPath); fileSize < q.readerLocalOffset { + logger.Errorf("chunk file %q size is too small for the given reader offset; file size %d bytes; reader offset: %d bytes; removing the file", + q.readerPath, fileSize, q.readerLocalOffset) + fs.MustRemoveAll(q.readerPath) + continue + } + r, err := filestream.OpenReaderAt(q.readerPath, int64(q.readerLocalOffset), true) + if err != nil { + logger.Errorf("cannot open %q for reading at offset %d: %s; removing this file", q.readerPath, q.readerLocalOffset, err) + fs.MustRemoveAll(filepath) + continue + } + q.reader = r + } + if mi.WriterOffset >= offset && mi.WriterOffset < offset+q.chunkFileSize { + // Found the chunk file for writing + if q.writer != nil { + logger.Panicf("BUG: writer is already initialized with writerPath=%q, writerOffset=%d, writerLocalOffset=%d", + q.writerPath, q.writerOffset, q.writerLocalOffset) + } + q.writerPath = filepath + q.writerOffset = mi.WriterOffset + q.writerLocalOffset = mi.WriterOffset % q.chunkFileSize + q.writerFlushedOffset = mi.WriterOffset + if fileSize := fs.MustFileSize(q.writerPath); fileSize != q.writerLocalOffset { + logger.Errorf("chunk file %q size doesn't match writer offset; file size %d bytes; writer offset: %d bytes", + q.writerPath, fileSize, q.writerLocalOffset) + fs.MustRemoveAll(q.writerPath) + continue + } + w, err := filestream.OpenWriterAt(q.writerPath, int64(q.writerLocalOffset), false) + if err != nil { + logger.Errorf("cannot open %q for writing at offset %d: %s; removing this file", q.writerPath, q.writerLocalOffset, err) + fs.MustRemoveAll(filepath) + continue + } + q.writer = w + } + } + if q.reader == nil { + cleanOnError() + return nil, fmt.Errorf("couldn't find chunk file for reading in %q", q.dir) + } + if q.writer == nil { + cleanOnError() + return nil, fmt.Errorf("couldn't find chunk file for writing in %q", q.dir) + } + return &q, nil +} + +// MustClose 
closes q. +// +// It unblocks all the MustReadBlock calls. +// +// MustWriteBlock mustn't be called during and after the call to MustClose. +func (q *Queue) MustClose() { + q.mu.Lock() + defer q.mu.Unlock() + + // Unblock goroutines blocked on cond in MustReadBlock. + q.mustStop = true + q.cond.Broadcast() + + // Close writer. + q.writer.MustClose() + q.writer = nil + + // Close reader. + q.reader.MustClose() + q.reader = nil + + // Store metainfo + if err := q.flushMetainfo(); err != nil { + logger.Panicf("FATAL: cannot flush chunked queue metainfo: %s", err) + } +} + +func (q *Queue) chunkFilePath(offset uint64) string { + return fmt.Sprintf("%s/%016X", q.dir, offset) +} + +func (q *Queue) metainfoPath() string { + return q.dir + "/metainfo.json" +} + +// MustWriteBlock writes block to q. +// +// The block size cannot exceed MaxBlockSize. +// +// It is safe calling this function from concurrent goroutines. +func (q *Queue) MustWriteBlock(block []byte) { + if uint64(len(block)) > q.maxBlockSize { + logger.Panicf("BUG: too big block to send: %d bytes; it mustn't exceed %d bytes", len(block), q.maxBlockSize) + } + + q.mu.Lock() + defer q.mu.Unlock() + + if q.mustStop { + logger.Panicf("BUG: MustWriteBlock cannot be called after MustClose") + } + if q.readerOffset > q.writerOffset { + logger.Panicf("BUG: readerOffset=%d shouldn't exceed writerOffset=%d", q.readerOffset, q.writerOffset) + } + mustNotifyReader := q.readerOffset == q.writerOffset + if err := q.writeBlockLocked(block); err != nil { + logger.Panicf("FATAL: %s", err) + } + if mustNotifyReader { + q.cond.Signal() + } +} + +func (q *Queue) writeBlockLocked(block []byte) error { + if q.writerLocalOffset+q.maxBlockSize+8 > q.chunkFileSize { + // Finalize the current chunk and start new one. + q.writer.MustClose() + if n := q.writerOffset % q.chunkFileSize; n > 0 { + q.writerOffset += (q.chunkFileSize - n) + } + q.writerFlushedOffset = q.writerOffset + q.writerLocalOffset = 0 + q.writerPath = q.chunkFilePath(q.writerOffset) + w, err := filestream.Create(q.writerPath, false) + if err != nil { + return fmt.Errorf("cannot create chunk file %q: %s", q.writerPath, err) + } + q.writer = w + if err := q.flushMetainfo(); err != nil { + return fmt.Errorf("cannot flush metainfo: %s", err) + } + } + + // Write block len. + blockLen := uint64(len(block)) + header := headerBufPool.Get() + header.B = encoding.MarshalUint64(header.B, blockLen) + err := q.write(header.B) + headerBufPool.Put(header) + if err != nil { + return fmt.Errorf("cannot write header with size 8 bytes to %q: %s", q.writerPath, err) + } + + // Write block contents. + if err := q.write(block); err != nil { + return fmt.Errorf("cannot write block contents with size %d bytes to %q: %s", len(block), q.writerPath, err) + } + return nil +} + +// MustReadBlock appends the next block from q to dst and returns the result. +// +// false is returned after MustClose call. +// +// It is safe calling this function from concurrent goroutines. 
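+//
+// Callers typically reuse the destination buffer across calls, e.g.
+// buf, ok = q.MustReadBlock(buf[:0]), in order to reduce allocations.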
+func (q *Queue) MustReadBlock(dst []byte) ([]byte, bool) { + q.mu.Lock() + defer q.mu.Unlock() + + for { + if q.mustStop { + return dst, false + } + if q.readerOffset > q.writerOffset { + logger.Panicf("BUG: readerOffset=%d cannot exceed writerOffset=%d", q.readerOffset, q.writerOffset) + } + if q.readerOffset < q.writerOffset { + break + } + q.cond.Wait() + } + + data, err := q.readBlockLocked(dst) + if err != nil { + logger.Panicf("FATAL: %s", err) + } + return data, true +} + +func (q *Queue) readBlockLocked(dst []byte) ([]byte, error) { + if q.readerLocalOffset+q.maxBlockSize+8 > q.chunkFileSize { + // Remove the current chunk and go to the next chunk. + q.reader.MustClose() + fs.MustRemoveAll(q.readerPath) + if n := q.readerOffset % q.chunkFileSize; n > 0 { + q.readerOffset += (q.chunkFileSize - n) + } + q.readerLocalOffset = 0 + q.readerPath = q.chunkFilePath(q.readerOffset) + r, err := filestream.Open(q.readerPath, true) + if err != nil { + return dst, fmt.Errorf("cannot open chunk file %q: %s", q.readerPath, err) + } + q.reader = r + if err := q.flushMetainfo(); err != nil { + return dst, fmt.Errorf("cannot flush metainfo: %s", err) + } + } + + // Read block len. + header := headerBufPool.Get() + header.B = bytesutil.Resize(header.B, 8) + err := q.readFull(header.B) + blockLen := encoding.UnmarshalUint64(header.B) + headerBufPool.Put(header) + if err != nil { + return dst, fmt.Errorf("cannot read header with size 8 bytes from %q: %s", q.readerPath, err) + } + if blockLen > q.maxBlockSize { + return dst, fmt.Errorf("too big block size read from %q: %d bytes; cannot exceed %d bytes", q.readerPath, blockLen, q.maxBlockSize) + } + + // Read block contents. + dstLen := len(dst) + dst = bytesutil.Resize(dst, dstLen+int(blockLen)) + if err := q.readFull(dst[dstLen:]); err != nil { + return dst, fmt.Errorf("cannot read block contents with size %d bytes from %q: %s", blockLen, q.readerPath, err) + } + return dst, nil +} + +func (q *Queue) write(buf []byte) error { + bufLen := uint64(len(buf)) + n, err := q.writer.Write(buf) + if err != nil { + return err + } + if uint64(n) != bufLen { + return fmt.Errorf("unexpected number of bytes written; got %d bytes; want %d bytes", n, bufLen) + } + q.writerLocalOffset += bufLen + q.writerOffset += bufLen + return nil +} + +func (q *Queue) readFull(buf []byte) error { + bufLen := uint64(len(buf)) + if q.readerOffset+bufLen > q.writerFlushedOffset { + q.writer.MustFlush() + q.writerFlushedOffset = q.writerOffset + } + n, err := io.ReadFull(q.reader, buf) + if err != nil { + return err + } + if uint64(n) != bufLen { + return fmt.Errorf("unexpected number of bytes read; got %d bytes; want %d bytes", n, bufLen) + } + q.readerLocalOffset += bufLen + q.readerOffset += bufLen + return nil +} + +func (q *Queue) flushMetainfo() error { + mi := &metainfo{ + Name: q.name, + ReaderOffset: q.readerOffset, + WriterOffset: q.writerOffset, + } + metainfoPath := q.metainfoPath() + if err := mi.WriteToFile(metainfoPath); err != nil { + return fmt.Errorf("cannot write metainfo to %q: %s", metainfoPath, err) + } + return nil +} + +var headerBufPool bytesutil.ByteBufferPool + +type metainfo struct { + Name string + ReaderOffset uint64 + WriterOffset uint64 +} + +func (mi *metainfo) Reset() { + mi.ReaderOffset = 0 + mi.WriterOffset = 0 +} + +func (mi *metainfo) WriteToFile(path string) error { + data, err := json.Marshal(mi) + if err != nil { + return fmt.Errorf("cannot marshal persistent queue metainfo %#v: %s", mi, err) + } + if err := ioutil.WriteFile(path, data, 0600); 
err != nil { + return fmt.Errorf("cannot write persistent queue metainfo to %q: %s", path, err) + } + return nil +} + +func (mi *metainfo) ReadFromFile(path string) error { + mi.Reset() + data, err := ioutil.ReadFile(path) + if err != nil { + if os.IsNotExist(err) { + return err + } + return fmt.Errorf("cannot read %q: %s", path, err) + } + if err := json.Unmarshal(data, mi); err != nil { + return fmt.Errorf("cannot unmarshal persistent queue metainfo from %q: %s", path, err) + } + if mi.ReaderOffset > mi.WriterOffset { + return fmt.Errorf("invalid data read from %q: readerOffset=%d cannot exceed writerOffset=%d", path, mi.ReaderOffset, mi.WriterOffset) + } + return nil +} diff --git a/lib/persistentqueue/persistentqueue_test.go b/lib/persistentqueue/persistentqueue_test.go new file mode 100644 index 000000000..75e2fbc3e --- /dev/null +++ b/lib/persistentqueue/persistentqueue_test.go @@ -0,0 +1,436 @@ +package persistentqueue + +import ( + "fmt" + "io/ioutil" + "os" + "sync" + "testing" + "time" +) + +func TestQueueOpenClose(t *testing.T) { + path := "queue-open-close" + mustDeleteDir(path) + for i := 0; i < 3; i++ { + q := MustOpen(path, "foobar") + if n := q.GetPendingBytes(); n > 0 { + t.Fatalf("pending bytes must be 0; got %d", n) + } + q.MustClose() + } + mustDeleteDir(path) +} + +func TestQueueOpen(t *testing.T) { + t.Run("invalid-metainfo", func(t *testing.T) { + path := "queue-open-invalid-metainfo" + mustCreateDir(path) + mustCreateFile(path+"/metainfo.json", "foobarbaz") + q := MustOpen(path, "foobar") + q.MustClose() + mustDeleteDir(path) + }) + t.Run("junk-files-and-dirs", func(t *testing.T) { + path := "queue-open-junk-files-and-dir" + mustCreateDir(path) + mustCreateEmptyMetainfo(path, "foobar") + mustCreateFile(path+"/junk-file", "foobar") + mustCreateDir(path + "/junk-dir") + q := MustOpen(path, "foobar") + q.MustClose() + mustDeleteDir(path) + }) + t.Run("invalid-chunk-offset", func(t *testing.T) { + path := "queue-open-invalid-chunk-offset" + mustCreateDir(path) + mustCreateEmptyMetainfo(path, "foobar") + mustCreateFile(fmt.Sprintf("%s/%016X", path, 1234), "qwere") + q := MustOpen(path, "foobar") + q.MustClose() + mustDeleteDir(path) + }) + t.Run("too-new-chunk", func(t *testing.T) { + path := "queue-open-too-new-chunk" + mustCreateDir(path) + mustCreateEmptyMetainfo(path, "foobar") + mustCreateFile(fmt.Sprintf("%s/%016X", path, 100*uint64(defaultChunkFileSize)), "asdf") + q := MustOpen(path, "foobar") + q.MustClose() + mustDeleteDir(path) + }) + t.Run("too-old-chunk", func(t *testing.T) { + path := "queue-open-too-old-chunk" + mustCreateDir(path) + mi := &metainfo{ + Name: "foobar", + ReaderOffset: defaultChunkFileSize, + WriterOffset: defaultChunkFileSize, + } + if err := mi.WriteToFile(path + "/metainfo.json"); err != nil { + t.Fatalf("unexpected error: %s", err) + } + mustCreateFile(fmt.Sprintf("%s/%016X", path, 0), "adfsfd") + q := MustOpen(path, mi.Name) + q.MustClose() + mustDeleteDir(path) + }) + t.Run("too-big-reader-offset", func(t *testing.T) { + path := "queue-open-too-big-reader-offset" + mustCreateDir(path) + mi := &metainfo{ + Name: "foobar", + ReaderOffset: defaultChunkFileSize + 123, + } + if err := mi.WriteToFile(path + "/metainfo.json"); err != nil { + t.Fatalf("unexpected error: %s", err) + } + q := MustOpen(path, mi.Name) + q.MustClose() + mustDeleteDir(path) + }) + t.Run("metainfo-dir", func(t *testing.T) { + path := "queue-open-metainfo-dir" + mustCreateDir(path) + mustCreateDir(path + "/metainfo.json") + q := MustOpen(path, "foobar") + 
q.MustClose() + mustDeleteDir(path) + }) + t.Run("too-small-reader-file", func(t *testing.T) { + path := "too-small-reader-file" + mustCreateDir(path) + mi := &metainfo{ + Name: "foobar", + ReaderOffset: 123, + WriterOffset: 123, + } + if err := mi.WriteToFile(path + "/metainfo.json"); err != nil { + t.Fatalf("unexpected error: %s", err) + } + mustCreateFile(fmt.Sprintf("%s/%016X", path, 0), "sdf") + q := MustOpen(path, mi.Name) + q.MustClose() + mustDeleteDir(path) + }) + t.Run("invalid-writer-file-size", func(t *testing.T) { + path := "too-small-reader-file" + mustCreateDir(path) + mustCreateEmptyMetainfo(path, "foobar") + mustCreateFile(fmt.Sprintf("%s/%016X", path, 0), "sdfdsf") + q := MustOpen(path, "foobar") + q.MustClose() + mustDeleteDir(path) + }) + t.Run("invalid-queue-name", func(t *testing.T) { + path := "invalid-queue-name" + mustCreateDir(path) + mi := &metainfo{ + Name: "foobar", + } + if err := mi.WriteToFile(path + "/metainfo.json"); err != nil { + t.Fatalf("unexpected error: %s", err) + } + mustCreateFile(fmt.Sprintf("%s/%016X", path, 0), "sdf") + q := MustOpen(path, "baz") + q.MustClose() + mustDeleteDir(path) + }) +} + +func TestQueueWriteRead(t *testing.T) { + path := "queue-write-read" + mustDeleteDir(path) + q := MustOpen(path, "foobar") + defer func() { + q.MustClose() + mustDeleteDir(path) + }() + + for j := 0; j < 5; j++ { + var blocks [][]byte + for i := 0; i < 10; i++ { + block := []byte(fmt.Sprintf("block %d+%d", j, i)) + q.MustWriteBlock(block) + blocks = append(blocks, block) + } + if n := q.GetPendingBytes(); n <= 0 { + t.Fatalf("pending bytes must be greater than 0; got %d", n) + } + var buf []byte + var ok bool + for _, block := range blocks { + buf, ok = q.MustReadBlock(buf[:0]) + if !ok { + t.Fatalf("unexpected ok=%v returned from MustReadBlock; want true", ok) + } + if string(buf) != string(block) { + t.Fatalf("unexpected block read; got %q; want %q", buf, block) + } + } + if n := q.GetPendingBytes(); n > 0 { + t.Fatalf("pending bytes must be 0; got %d", n) + } + } +} + +func TestQueueWriteCloseRead(t *testing.T) { + path := "queue-write-close-read" + mustDeleteDir(path) + q := MustOpen(path, "foobar") + defer func() { + q.MustClose() + mustDeleteDir(path) + }() + + for j := 0; j < 5; j++ { + var blocks [][]byte + for i := 0; i < 10; i++ { + block := []byte(fmt.Sprintf("block %d+%d", j, i)) + q.MustWriteBlock(block) + blocks = append(blocks, block) + } + if n := q.GetPendingBytes(); n <= 0 { + t.Fatalf("pending bytes must be greater than 0; got %d", n) + } + q.MustClose() + q = MustOpen(path, "foobar") + if n := q.GetPendingBytes(); n <= 0 { + t.Fatalf("pending bytes must be greater than 0; got %d", n) + } + var buf []byte + var ok bool + for _, block := range blocks { + buf, ok = q.MustReadBlock(buf[:0]) + if !ok { + t.Fatalf("unexpected ok=%v returned from MustReadBlock; want true", ok) + } + if string(buf) != string(block) { + t.Fatalf("unexpected block read; got %q; want %q", buf, block) + } + } + if n := q.GetPendingBytes(); n > 0 { + t.Fatalf("pending bytes must be 0; got %d", n) + } + } +} + +func TestQueueReadEmpty(t *testing.T) { + path := "queue-read-empty" + mustDeleteDir(path) + q := MustOpen(path, "foobar") + defer mustDeleteDir(path) + + resultCh := make(chan error) + go func() { + data, ok := q.MustReadBlock(nil) + var err error + if ok { + err = fmt.Errorf("unexpected ok=%v returned from MustReadBlock; want false", ok) + } else if len(data) > 0 { + err = fmt.Errorf("unexpected non-empty data returned from MustReadBlock: %q", data) + } + 
resultCh <- err + }() + if n := q.GetPendingBytes(); n > 0 { + t.Fatalf("pending bytes must be 0; got %d", n) + } + q.MustClose() + select { + case err := <-resultCh: + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + case <-time.After(time.Second): + t.Fatalf("timeout") + } +} + +func TestQueueReadWriteConcurrent(t *testing.T) { + path := "queue-read-write-concurrent" + mustDeleteDir(path) + q := MustOpen(path, "foobar") + defer mustDeleteDir(path) + + blocksMap := make(map[string]bool, 1000) + var blocksMapLock sync.Mutex + blocks := make([]string, 1000) + for i := 0; i < 1000; i++ { + block := fmt.Sprintf("block #%d", i) + blocksMap[block] = true + blocks[i] = block + } + + // Start block readers + var readersWG sync.WaitGroup + for workerID := 0; workerID < 10; workerID++ { + readersWG.Add(1) + go func() { + defer readersWG.Done() + for { + block, ok := q.MustReadBlock(nil) + if !ok { + return + } + blocksMapLock.Lock() + if !blocksMap[string(block)] { + panic(fmt.Errorf("unexpected block read: %q", block)) + } + delete(blocksMap, string(block)) + blocksMapLock.Unlock() + } + }() + } + + // Start block writers + blocksCh := make(chan string) + var writersWG sync.WaitGroup + for workerID := 0; workerID < 10; workerID++ { + writersWG.Add(1) + go func(workerID int) { + defer writersWG.Done() + for block := range blocksCh { + q.MustWriteBlock([]byte(block)) + } + }(workerID) + } + for _, block := range blocks { + blocksCh <- block + } + close(blocksCh) + + // Wait for block writers to finish + writersWG.Wait() + + // Notify readers that the queue is closed + q.MustClose() + + // Wait for block readers to finish + readersWG.Wait() + + // Read the remaining blocks in q. + q = MustOpen(path, "foobar") + defer q.MustClose() + resultCh := make(chan error) + go func() { + for len(blocksMap) > 0 { + block, ok := q.MustReadBlock(nil) + if !ok { + resultCh <- fmt.Errorf("unexpected ok=false returned from MustReadBlock") + return + } + if !blocksMap[string(block)] { + resultCh <- fmt.Errorf("unexpected block read from the queue: %q", block) + return + } + delete(blocksMap, string(block)) + } + resultCh <- nil + }() + select { + case err := <-resultCh: + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + case <-time.After(5 * time.Second): + t.Fatalf("timeout") + } + if n := q.GetPendingBytes(); n > 0 { + t.Fatalf("pending bytes must be 0; got %d", n) + } +} + +func TestQueueChunkManagementSimple(t *testing.T) { + path := "queue-chunk-management-simple" + mustDeleteDir(path) + const chunkFileSize = 100 + const maxBlockSize = 20 + q := mustOpen(path, "foobar", chunkFileSize, maxBlockSize) + defer mustDeleteDir(path) + defer q.MustClose() + var blocks []string + for i := 0; i < 100; i++ { + block := fmt.Sprintf("block %d", i) + q.MustWriteBlock([]byte(block)) + blocks = append(blocks, block) + } + if n := q.GetPendingBytes(); n == 0 { + t.Fatalf("unexpected zero number of bytes pending") + } + for _, block := range blocks { + data, ok := q.MustReadBlock(nil) + if !ok { + t.Fatalf("unexpected ok=false") + } + if block != string(data) { + t.Fatalf("unexpected block read; got %q; want %q", data, block) + } + } + if n := q.GetPendingBytes(); n != 0 { + t.Fatalf("unexpected non-zero number of pending bytes: %d", n) + } +} + +func TestQueueChunkManagementPeriodicClose(t *testing.T) { + path := "queue-chunk-management-periodic-close" + mustDeleteDir(path) + const chunkFileSize = 100 + const maxBlockSize = 20 + q := mustOpen(path, "foobar", chunkFileSize, maxBlockSize) + defer func() { 
+ q.MustClose() + mustDeleteDir(path) + }() + var blocks []string + for i := 0; i < 100; i++ { + block := fmt.Sprintf("block %d", i) + q.MustWriteBlock([]byte(block)) + blocks = append(blocks, block) + q.MustClose() + q = mustOpen(path, "foobar", chunkFileSize, maxBlockSize) + } + if n := q.GetPendingBytes(); n == 0 { + t.Fatalf("unexpected zero number of bytes pending") + } + for _, block := range blocks { + data, ok := q.MustReadBlock(nil) + if !ok { + t.Fatalf("unexpected ok=false") + } + if block != string(data) { + t.Fatalf("unexpected block read; got %q; want %q", data, block) + } + q.MustClose() + q = mustOpen(path, "foobar", chunkFileSize, maxBlockSize) + } + if n := q.GetPendingBytes(); n != 0 { + t.Fatalf("unexpected non-zero number of pending bytes: %d", n) + } +} + +func mustCreateFile(path, contents string) { + if err := ioutil.WriteFile(path, []byte(contents), 0600); err != nil { + panic(fmt.Errorf("cannot create file %q with %d bytes contents: %s", path, len(contents), err)) + } +} + +func mustCreateDir(path string) { + mustDeleteDir(path) + if err := os.MkdirAll(path, 0700); err != nil { + panic(fmt.Errorf("cannot create dir %q: %s", path, err)) + } +} + +func mustDeleteDir(path string) { + if err := os.RemoveAll(path); err != nil { + panic(fmt.Errorf("cannot remove dir %q: %s", path, err)) + } +} + +func mustCreateEmptyMetainfo(path, name string) { + var mi metainfo + mi.Name = name + if err := mi.WriteToFile(path + "/metainfo.json"); err != nil { + panic(fmt.Errorf("cannot create metainfo: %s", err)) + } +} diff --git a/lib/persistentqueue/persistentqueue_timing_test.go b/lib/persistentqueue/persistentqueue_timing_test.go new file mode 100644 index 000000000..853ee2125 --- /dev/null +++ b/lib/persistentqueue/persistentqueue_timing_test.go @@ -0,0 +1,69 @@ +package persistentqueue + +import ( + "fmt" + "testing" + + "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil" +) + +func BenchmarkQueueThroughputSerial(b *testing.B) { + const iterationsCount = 10 + for _, blockSize := range []int{1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6} { + block := make([]byte, blockSize) + b.Run(fmt.Sprintf("block-size-%d", blockSize), func(b *testing.B) { + b.ReportAllocs() + b.SetBytes(int64(blockSize) * iterationsCount) + path := fmt.Sprintf("bench-queue-throughput-serial-%d", blockSize) + mustDeleteDir(path) + q := MustOpen(path, "foobar") + defer func() { + q.MustClose() + mustDeleteDir(path) + }() + for i := 0; i < b.N; i++ { + writeReadIteration(q, block, iterationsCount) + } + }) + } +} + +func BenchmarkQueueThroughputConcurrent(b *testing.B) { + const iterationsCount = 10 + for _, blockSize := range []int{1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6} { + block := make([]byte, blockSize) + b.Run(fmt.Sprintf("block-size-%d", blockSize), func(b *testing.B) { + b.ReportAllocs() + b.SetBytes(int64(blockSize) * iterationsCount) + path := fmt.Sprintf("bench-queue-throughput-concurrent-%d", blockSize) + mustDeleteDir(path) + q := MustOpen(path, "foobar") + defer func() { + q.MustClose() + mustDeleteDir(path) + }() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + writeReadIteration(q, block, iterationsCount) + } + }) + }) + } +} + +func writeReadIteration(q *Queue, block []byte, iterationsCount int) { + for i := 0; i < iterationsCount; i++ { + q.MustWriteBlock(block) + } + var ok bool + bb := bbPool.Get() + for i := 0; i < iterationsCount; i++ { + bb.B, ok = q.MustReadBlock(bb.B[:0]) + if !ok { + panic(fmt.Errorf("unexpected ok=false")) + } + } + bbPool.Put(bb) +} + +var bbPool 
bytesutil.ByteBufferPool diff --git a/lib/prompbmarshal/remote.pb.go b/lib/prompbmarshal/remote.pb.go new file mode 100644 index 000000000..6d8c29bd2 --- /dev/null +++ b/lib/prompbmarshal/remote.pb.go @@ -0,0 +1,79 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: remote.proto + +package prompbmarshal + +import ( + math_bits "math/bits" +) + +type WriteRequest struct { + Timeseries []TimeSeries `protobuf:"bytes,1,rep,name=timeseries,proto3" json:"timeseries"` +} + +func (m *WriteRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WriteRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WriteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Timeseries) > 0 { + for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Timeseries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRemote(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintRemote(dAtA []byte, offset int, v uint64) int { + offset -= sovRemote(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *WriteRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Timeseries) > 0 { + for _, e := range m.Timeseries { + l = e.Size() + n += 1 + l + sovRemote(uint64(l)) + } + } + return n +} + +func sovRemote(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} diff --git a/lib/prompbmarshal/remote.proto b/lib/prompbmarshal/remote.proto new file mode 100644 index 000000000..5f82182ed --- /dev/null +++ b/lib/prompbmarshal/remote.proto @@ -0,0 +1,82 @@ +// Copyright 2016 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; +package prometheus; + +option go_package = "prompbmarshal"; + +import "types.proto"; +import "gogoproto/gogo.proto"; + +message WriteRequest { + repeated prometheus.TimeSeries timeseries = 1 [(gogoproto.nullable) = false]; +} + +// ReadRequest represents a remote read request. +message ReadRequest { + repeated Query queries = 1; + + enum ResponseType { + // Server will return a single ReadResponse message with matched series that includes list of raw samples. + // It's recommended to use streamed response types instead. + // + // Response headers: + // Content-Type: "application/x-protobuf" + // Content-Encoding: "snappy" + SAMPLES = 0; + // Server will stream a delimited ChunkedReadResponse message that contains XOR encoded chunks for a single series. + // Each message is following varint size and fixed size bigendian uint32 for CRC32 Castagnoli checksum. 
+ // + // Response headers: + // Content-Type: "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse" + // Content-Encoding: "" + STREAMED_XOR_CHUNKS = 1; + } + + // accepted_response_types allows negotiating the content type of the response. + // + // Response types are taken from the list in the FIFO order. If no response type in `accepted_response_types` is + // implemented by server, error is returned. + // For request that do not contain `accepted_response_types` field the SAMPLES response type will be used. + repeated ResponseType accepted_response_types = 2; +} + +// ReadResponse is a response when response_type equals SAMPLES. +message ReadResponse { + // In same order as the request's queries. + repeated QueryResult results = 1; +} + +message Query { + int64 start_timestamp_ms = 1; + int64 end_timestamp_ms = 2; + repeated prometheus.LabelMatcher matchers = 3; + prometheus.ReadHints hints = 4; +} + +message QueryResult { + // Samples within a time series must be ordered by time. + repeated prometheus.TimeSeries timeseries = 1; +} + +// ChunkedReadResponse is a response when response_type equals STREAMED_XOR_CHUNKS. +// We strictly stream full series after series, optionally split by time. This means that a single frame can contain +// partition of the single series, but once a new series is started to be streamed it means that no more chunks will +// be sent for previous one. Series are returned sorted in the same way TSDB block are internally. +message ChunkedReadResponse { + repeated prometheus.ChunkedSeries chunked_series = 1; + + // query_index represents an index of the query from ReadRequest.queries these chunks relates to. + int64 query_index = 2; +} diff --git a/lib/prompbmarshal/types.pb.go b/lib/prompbmarshal/types.pb.go new file mode 100644 index 000000000..423535339 --- /dev/null +++ b/lib/prompbmarshal/types.pb.go @@ -0,0 +1,216 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: types.proto + +package prompbmarshal + +import ( + encoding_binary "encoding/binary" + math "math" + math_bits "math/bits" +) + +type Sample struct { + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + Timestamp int64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` +} + +// TimeSeries represents samples and labels for a single time series. 
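+//
+// Labels and Samples are stored by value rather than by pointer, matching the
+// non-nullable repeated fields declared in types.proto.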
+type TimeSeries struct { + Labels []Label `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels"` + Samples []Sample `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples"` +} + +type Label struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *Sample) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Sample) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Sample) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Timestamp != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x10 + } + if m.Value != 0 { + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *TimeSeries) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TimeSeries) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TimeSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Samples) > 0 { + for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Samples[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Labels) > 0 { + for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Labels[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Label) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Label) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Label) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { + offset -= sovTypes(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Sample) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != 0 { + n += 9 + } + if m.Timestamp != 0 { + n += 1 + sovTypes(uint64(m.Timestamp)) + } + return n +} + +func (m *TimeSeries) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Labels) > 0 { + for _, e := range m.Labels { + l = e.Size() + 
n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.Samples) > 0 { + for _, e := range m.Samples { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *Label) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func sovTypes(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} diff --git a/lib/prompbmarshal/types.proto b/lib/prompbmarshal/types.proto new file mode 100644 index 000000000..0d047b8c6 --- /dev/null +++ b/lib/prompbmarshal/types.proto @@ -0,0 +1,85 @@ +// Copyright 2017 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; +package prometheus; + +option go_package = "prompbmarshal"; + +import "gogoproto/gogo.proto"; + +message Sample { + double value = 1; + int64 timestamp = 2; +} + +// TimeSeries represents samples and labels for a single time series. +message TimeSeries { + repeated Label labels = 1 [(gogoproto.nullable) = false]; + repeated Sample samples = 2 [(gogoproto.nullable) = false]; +} + +message Label { + string name = 1; + string value = 2; +} + +message Labels { + repeated Label labels = 1 [(gogoproto.nullable) = false]; +} + +// Matcher specifies a rule, which can match or set of labels or not. +message LabelMatcher { + enum Type { + EQ = 0; + NEQ = 1; + RE = 2; + NRE = 3; + } + Type type = 1; + string name = 2; + string value = 3; +} + +message ReadHints { + int64 step_ms = 1; // Query step size in milliseconds. + string func = 2; // String representation of surrounding function or aggregation. + int64 start_ms = 3; // Start time in milliseconds. + int64 end_ms = 4; // End time in milliseconds. + repeated string grouping = 5; // List of label names used in aggregation. + bool by = 6; // Indicate whether it is without or by. + int64 range_ms = 7; // Range vector selector range in milliseconds. +} + +// Chunk represents a TSDB chunk. +// Time range [min, max] is inclusive. +message Chunk { + int64 min_time_ms = 1; + int64 max_time_ms = 2; + + // We require this to match chunkenc.Encoding. + enum Encoding { + UNKNOWN = 0; + XOR = 1; + } + Encoding type = 3; + bytes data = 4; +} + +// ChunkedSeries represents single, encoded time series. +message ChunkedSeries { + // Labels should be sorted. + repeated Label labels = 1 [(gogoproto.nullable) = false]; + // Chunks will be in start time order and may overlap. + repeated Chunk chunks = 2 [(gogoproto.nullable) = false]; +} diff --git a/lib/prompbmarshal/util.go b/lib/prompbmarshal/util.go new file mode 100644 index 000000000..72d45e954 --- /dev/null +++ b/lib/prompbmarshal/util.go @@ -0,0 +1,30 @@ +package prompbmarshal + +import ( + "fmt" +) + +// MarshalWriteRequest marshals wr to dst and returns the result. 
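+//
+// dst is grown when its capacity is too small for the marshaled request, so the
+// same buffer may be reused across calls in order to reduce allocations.
+// A minimal usage sketch (the wr contents are assumed to be populated elsewhere):
+//
+//	var wr WriteRequest
+//	buf := MarshalWriteRequest(nil, &wr)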
+func MarshalWriteRequest(dst []byte, wr *WriteRequest) []byte { + size := wr.Size() + dstLen := len(dst) + if n := size - (cap(dst) - dstLen); n > 0 { + dst = append(dst[:cap(dst)], make([]byte, n)...) + } + dst = dst[:dstLen+size] + n, err := wr.MarshalToSizedBuffer(dst[dstLen:]) + if err != nil { + panic(fmt.Errorf("BUG: unexpected error when marshaling WriteRequest: %s", err)) + } + return dst[:dstLen+n] +} + +// ResetWriteRequest resets wr. +func ResetWriteRequest(wr *WriteRequest) { + for i := range wr.Timeseries { + ts := wr.Timeseries[i] + ts.Labels = nil + ts.Samples = nil + } + wr.Timeseries = wr.Timeseries[:0] +} diff --git a/lib/promrelabel/config.go b/lib/promrelabel/config.go new file mode 100644 index 000000000..f0df2073b --- /dev/null +++ b/lib/promrelabel/config.go @@ -0,0 +1,129 @@ +package promrelabel + +import ( + "fmt" + "io/ioutil" + "regexp" + + "gopkg.in/yaml.v2" +) + +// RelabelConfig represents relabel config. +// +// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config +type RelabelConfig struct { + SourceLabels []string `yaml:"source_labels"` + Separator *string `yaml:"separator"` + TargetLabel string `yaml:"target_label"` + Regex *string `yaml:"regex"` + Modulus uint64 `yaml:"modulus"` + Replacement *string `yaml:"replacement"` + Action string `yaml:"action"` +} + +// LoadRelabelConfigs loads relabel configs from the given path. +func LoadRelabelConfigs(path string) ([]ParsedRelabelConfig, error) { + data, err := ioutil.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("cannot read `relabel_configs` from %q: %s", path, err) + } + var rcs []RelabelConfig + if err := yaml.Unmarshal(data, &rcs); err != nil { + return nil, fmt.Errorf("cannot unmarshal `relabel_configs` from %q: %s", path, err) + } + return ParseRelabelConfigs(nil, rcs) +} + +// ParseRelabelConfigs parses rcs to dst. 
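+//
+// dst may be nil; parsed configs are appended to it, and the resulting slice
+// can be passed to ApplyRelabelConfigs.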
+func ParseRelabelConfigs(dst []ParsedRelabelConfig, rcs []RelabelConfig) ([]ParsedRelabelConfig, error) { + if len(rcs) == 0 { + return dst, nil + } + for i := range rcs { + var err error + dst, err = parseRelabelConfig(dst, &rcs[i]) + if err != nil { + return dst, fmt.Errorf("error when parsing `relabel_config` #%d: %s", i+1, err) + } + } + return dst, nil +} + +var defaultRegexForRelabelConfig = regexp.MustCompile("^(.*)$") + +func parseRelabelConfig(dst []ParsedRelabelConfig, rc *RelabelConfig) ([]ParsedRelabelConfig, error) { + sourceLabels := rc.SourceLabels + separator := ";" + if rc.Separator != nil { + separator = *rc.Separator + } + targetLabel := rc.TargetLabel + regexCompiled := defaultRegexForRelabelConfig + if rc.Regex != nil { + regex := *rc.Regex + if rc.Action != "replace_all" && rc.Action != "labelmap_all" { + regex = "^" + *rc.Regex + "$" + } + re, err := regexp.Compile(regex) + if err != nil { + return dst, fmt.Errorf("cannot parse `regex` %q: %s", regex, err) + } + regexCompiled = re + } + modulus := rc.Modulus + replacement := "$1" + if rc.Replacement != nil { + replacement = *rc.Replacement + } + action := rc.Action + if action == "" { + action = "replace" + } + switch action { + case "replace": + if targetLabel == "" { + return dst, fmt.Errorf("missing `target_label` for `action=replace`") + } + case "replace_all": + if len(sourceLabels) == 0 { + return dst, fmt.Errorf("missing `source_labels` for `action=replace_all`") + } + if targetLabel == "" { + return dst, fmt.Errorf("missing `target_label` for `action=replace`") + } + case "keep": + if len(sourceLabels) == 0 { + return dst, fmt.Errorf("missing `source_labels` for `action=keep`") + } + case "drop": + if len(sourceLabels) == 0 { + return dst, fmt.Errorf("missing `source_labels` for `action=drop`") + } + case "hashmod": + if len(sourceLabels) == 0 { + return dst, fmt.Errorf("missing `source_labels` for `action=hashmod`") + } + if targetLabel == "" { + return dst, fmt.Errorf("missing `target_label` for `action=hashmod`") + } + if modulus < 1 { + return dst, fmt.Errorf("unexpected `modulus` for `action=hashmod`: %d; must be greater than 0", modulus) + } + case "labelmap": + case "labelmap_all": + case "labeldrop": + case "labelkeep": + default: + return dst, fmt.Errorf("unknown `action` %q", action) + } + dst = append(dst, ParsedRelabelConfig{ + SourceLabels: sourceLabels, + Separator: separator, + TargetLabel: targetLabel, + Regex: regexCompiled, + Modulus: modulus, + Replacement: replacement, + Action: action, + }) + return dst, nil +} diff --git a/lib/promrelabel/config_test.go b/lib/promrelabel/config_test.go new file mode 100644 index 000000000..7c38e1540 --- /dev/null +++ b/lib/promrelabel/config_test.go @@ -0,0 +1,163 @@ +package promrelabel + +import ( + "reflect" + "testing" +) + +func TestLoadRelabelConfigsSuccess(t *testing.T) { + path := "testdata/relabel_configs_valid.yml" + prcs, err := LoadRelabelConfigs(path) + if err != nil { + t.Fatalf("cannot load relabel configs from %q: %s", path, err) + } + if len(prcs) != 7 { + t.Fatalf("unexpected number of relabel configs loaded from %q; got %d; want %d", path, len(prcs), 7) + } +} + +func TestLoadRelabelConfigsFailure(t *testing.T) { + f := func(path string) { + t.Helper() + rcs, err := LoadRelabelConfigs(path) + if err == nil { + t.Fatalf("expecting non-nil error") + } + if len(rcs) != 0 { + t.Fatalf("unexpected non-empty rcs: %#v", rcs) + } + } + t.Run("non-existing-file", func(t *testing.T) { + f("testdata/non-exsiting-file") + }) + 
t.Run("invalid-file", func(t *testing.T) { + f("testdata/invalid_config.yml") + }) +} + +func TestParseRelabelConfigsSuccess(t *testing.T) { + f := func(rcs []RelabelConfig, prcsExpected []ParsedRelabelConfig) { + t.Helper() + prcs, err := ParseRelabelConfigs(nil, rcs) + if err != nil { + t.Fatalf("unexected error: %s", err) + } + if !reflect.DeepEqual(prcs, prcsExpected) { + t.Fatalf("unexpected prcs; got\n%#v\nwant\n%#v", prcs, prcsExpected) + } + } + f(nil, nil) + f([]RelabelConfig{ + { + SourceLabels: []string{"foo", "bar"}, + TargetLabel: "xxx", + }, + }, []ParsedRelabelConfig{ + { + SourceLabels: []string{"foo", "bar"}, + Separator: ";", + TargetLabel: "xxx", + Regex: defaultRegexForRelabelConfig, + Replacement: "$1", + Action: "replace", + }, + }) +} + +func TestParseRelabelConfigsFailure(t *testing.T) { + f := func(rcs []RelabelConfig) { + t.Helper() + prcs, err := ParseRelabelConfigs(nil, rcs) + if err == nil { + t.Fatalf("expecting non-nil error") + } + if len(prcs) > 0 { + t.Fatalf("unexpected non-empty prcs: %#v", prcs) + } + } + t.Run("invalid-regex", func(t *testing.T) { + f([]RelabelConfig{ + { + SourceLabels: []string{"aaa"}, + TargetLabel: "xxx", + Regex: strPtr("foo[bar"), + }, + }) + }) + t.Run("replace-missing-target-label", func(t *testing.T) { + f([]RelabelConfig{ + { + Action: "replace", + SourceLabels: []string{"foo"}, + }, + }) + }) + t.Run("replace_all-missing-source-labels", func(t *testing.T) { + f([]RelabelConfig{ + { + Action: "replace_all", + TargetLabel: "xxx", + }, + }) + }) + t.Run("replace_all-missing-target-label", func(t *testing.T) { + f([]RelabelConfig{ + { + Action: "replace_all", + SourceLabels: []string{"foo"}, + }, + }) + }) + t.Run("keep-missing-source-labels", func(t *testing.T) { + f([]RelabelConfig{ + { + Action: "keep", + }, + }) + }) + t.Run("drop-missing-source-labels", func(t *testing.T) { + f([]RelabelConfig{ + { + Action: "drop", + }, + }) + }) + t.Run("hashmod-missing-source-labels", func(t *testing.T) { + f([]RelabelConfig{ + { + Action: "hashmod", + TargetLabel: "aaa", + Modulus: 123, + }, + }) + }) + t.Run("hashmod-missing-target-label", func(t *testing.T) { + f([]RelabelConfig{ + { + Action: "hashmod", + SourceLabels: []string{"aaa"}, + Modulus: 123, + }, + }) + }) + t.Run("hashmod-missing-modulus", func(t *testing.T) { + f([]RelabelConfig{ + { + Action: "hashmod", + SourceLabels: []string{"aaa"}, + TargetLabel: "xxx", + }, + }) + }) + t.Run("invalid-action", func(t *testing.T) { + f([]RelabelConfig{ + { + Action: "invalid-action", + }, + }) + }) +} + +func strPtr(s string) *string { + return &s +} diff --git a/lib/promrelabel/relabel.go b/lib/promrelabel/relabel.go new file mode 100644 index 000000000..960eb69e7 --- /dev/null +++ b/lib/promrelabel/relabel.go @@ -0,0 +1,268 @@ +package promrelabel + +import ( + "regexp" + "strconv" + "strings" + + "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal" + xxhash "github.com/cespare/xxhash/v2" +) + +// ParsedRelabelConfig contains parsed `relabel_config`. +// +// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config +type ParsedRelabelConfig struct { + SourceLabels []string + Separator string + TargetLabel string + Regex *regexp.Regexp + Modulus uint64 + Replacement string + Action string +} + +// ApplyRelabelConfigs applies prcs to labels starting from the labelsOffset. 
+// +// If isFinalize is set, then FinalizeLabels is called on the labels[labelsOffset:]. +// +// The returned labels at labels[labelsOffset:] are sorted. +func ApplyRelabelConfigs(labels []prompbmarshal.Label, labelsOffset int, prcs []ParsedRelabelConfig, isFinalize bool) []prompbmarshal.Label { + for i := range prcs { + tmp := applyRelabelConfig(labels, labelsOffset, &prcs[i]) + if len(tmp) == labelsOffset { + // All the labels have been removed. + return tmp + } + labels = tmp + } + labels = removeEmptyLabels(labels, labelsOffset) + if isFinalize { + labels = FinalizeLabels(labels[:labelsOffset], labels[labelsOffset:]) + } + SortLabels(labels[labelsOffset:]) + return labels +} + +func removeEmptyLabels(labels []prompbmarshal.Label, labelsOffset int) []prompbmarshal.Label { + src := labels[labelsOffset:] + needsRemoval := false + for i := range src { + label := &src[i] + if label.Name == "" || label.Value == "" { + needsRemoval = true + break + } + } + if !needsRemoval { + return labels + } + dst := labels[:labelsOffset] + for i := range src { + label := &src[i] + if label.Name != "" && label.Value != "" { + dst = append(dst, *label) + } + } + return dst +} + +// FinalizeLabels finalizes labels according to relabel_config rules. +// +// It renames `__address__` to `instance` and removes labels with "__" in the beginning. +// +// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config +func FinalizeLabels(dst, src []prompbmarshal.Label) []prompbmarshal.Label { + for i := range src { + label := &src[i] + name := label.Name + if !strings.HasPrefix(name, "__") || name == "__name__" { + dst = append(dst, *label) + continue + } + if name == "__address__" { + if GetLabelByName(src, "instance") != nil { + // The `instance` label is already set. Skip `__address__` label. + continue + } + // Rename `__address__` label to `instance`. + labelCopy := *label + labelCopy.Name = "instance" + dst = append(dst, labelCopy) + } + } + return dst +} + +// applyRelabelConfig applies relabeling according to cfg. +// +// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config +func applyRelabelConfig(labels []prompbmarshal.Label, labelsOffset int, cfg *ParsedRelabelConfig) []prompbmarshal.Label { + src := labels[labelsOffset:] + switch cfg.Action { + case "replace": + bb := relabelBufPool.Get() + bb.B = concatLabelValues(bb.B[:0], src, cfg.SourceLabels, cfg.Separator) + if len(bb.B) == 0 && cfg.Regex == defaultRegexForRelabelConfig && !strings.Contains(cfg.Replacement, "$") { + // Fast path for the following rule that just sets label value: + // - target_label: foobar + // replacement: something-here + relabelBufPool.Put(bb) + return setLabelValue(labels, labelsOffset, cfg.TargetLabel, cfg.Replacement) + } + match := cfg.Regex.FindSubmatchIndex(bb.B) + if match == nil { + // Fast path - nothing to replace. + relabelBufPool.Put(bb) + return labels + } + sourceStr := bytesutil.ToUnsafeString(bb.B) + value := relabelBufPool.Get() + value.B = cfg.Regex.ExpandString(value.B[:0], cfg.Replacement, sourceStr, match) + relabelBufPool.Put(bb) + valueStr := string(value.B) + relabelBufPool.Put(value) + return setLabelValue(labels, labelsOffset, cfg.TargetLabel, valueStr) + case "replace_all": + bb := relabelBufPool.Get() + bb.B = concatLabelValues(bb.B[:0], src, cfg.SourceLabels, cfg.Separator) + if !cfg.Regex.Match(bb.B) { + // Fast path - nothing to replace. 
+ relabelBufPool.Put(bb) + return labels + } + sourceStr := string(bb.B) // Make a copy of bb, since it can be returned from ReplaceAllString + relabelBufPool.Put(bb) + valueStr := cfg.Regex.ReplaceAllString(sourceStr, cfg.Replacement) + return setLabelValue(labels, labelsOffset, cfg.TargetLabel, valueStr) + case "keep": + bb := relabelBufPool.Get() + bb.B = concatLabelValues(bb.B[:0], src, cfg.SourceLabels, cfg.Separator) + keep := cfg.Regex.Match(bb.B) + relabelBufPool.Put(bb) + if !keep { + return labels[:labelsOffset] + } + return labels + case "drop": + bb := relabelBufPool.Get() + bb.B = concatLabelValues(bb.B[:0], src, cfg.SourceLabels, cfg.Separator) + drop := cfg.Regex.Match(bb.B) + relabelBufPool.Put(bb) + if drop { + return labels[:labelsOffset] + } + return labels + case "hashmod": + bb := relabelBufPool.Get() + bb.B = concatLabelValues(bb.B[:0], src, cfg.SourceLabels, cfg.Separator) + h := xxhash.Sum64(bb.B) % cfg.Modulus + value := strconv.Itoa(int(h)) + relabelBufPool.Put(bb) + return setLabelValue(labels, labelsOffset, cfg.TargetLabel, value) + case "labelmap": + for i := range src { + label := &src[i] + match := cfg.Regex.FindStringSubmatchIndex(label.Name) + if match == nil { + continue + } + value := relabelBufPool.Get() + value.B = cfg.Regex.ExpandString(value.B[:0], cfg.Replacement, label.Name, match) + label.Name = string(value.B) + relabelBufPool.Put(value) + } + return labels + case "labelmap_all": + for i := range src { + label := &src[i] + if !cfg.Regex.MatchString(label.Name) { + continue + } + label.Name = cfg.Regex.ReplaceAllString(label.Name, cfg.Replacement) + } + return labels + case "labeldrop": + keepSrc := true + for i := range src { + if cfg.Regex.MatchString(src[i].Name) { + keepSrc = false + break + } + } + if keepSrc { + return labels + } + dst := labels[:labelsOffset] + for i := range src { + label := &src[i] + if !cfg.Regex.MatchString(label.Name) { + dst = append(dst, *label) + } + } + return dst + case "labelkeep": + keepSrc := true + for i := range src { + if !cfg.Regex.MatchString(src[i].Name) { + keepSrc = false + break + } + } + if keepSrc { + return labels + } + dst := labels[:labelsOffset] + for i := range src { + label := &src[i] + if cfg.Regex.MatchString(label.Name) { + dst = append(dst, *label) + } + } + return dst + default: + logger.Panicf("BUG: unknown `action`: %q", cfg.Action) + return labels + } +} + +var relabelBufPool bytesutil.ByteBufferPool + +func concatLabelValues(dst []byte, labels []prompbmarshal.Label, labelNames []string, separator string) []byte { + if len(labelNames) == 0 { + return dst + } + for _, labelName := range labelNames { + label := GetLabelByName(labels, labelName) + if label != nil { + dst = append(dst, label.Value...) + } + dst = append(dst, separator...) + } + return dst[:len(dst)-len(separator)] +} + +func setLabelValue(labels []prompbmarshal.Label, labelsOffset int, name, value string) []prompbmarshal.Label { + if label := GetLabelByName(labels[labelsOffset:], name); label != nil { + label.Value = value + return labels + } + labels = append(labels, prompbmarshal.Label{ + Name: name, + Value: value, + }) + return labels +} + +// GetLabelByName returns label with the given name from labels. 
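+//
+// It returns nil if labels do not contain a label with the given name.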
+func GetLabelByName(labels []prompbmarshal.Label, name string) *prompbmarshal.Label { + for i := range labels { + label := &labels[i] + if label.Name == name { + return label + } + } + return nil +} diff --git a/lib/promrelabel/relabel_test.go b/lib/promrelabel/relabel_test.go new file mode 100644 index 000000000..46037aa16 --- /dev/null +++ b/lib/promrelabel/relabel_test.go @@ -0,0 +1,630 @@ +package promrelabel + +import ( + "reflect" + "regexp" + "testing" + + "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal" +) + +func TestApplyRelabelConfigs(t *testing.T) { + f := func(prcs []ParsedRelabelConfig, labels []prompbmarshal.Label, isFinalize bool, resultExpected []prompbmarshal.Label) { + t.Helper() + result := ApplyRelabelConfigs(labels, 0, prcs, isFinalize) + if !reflect.DeepEqual(result, resultExpected) { + t.Fatalf("unexpected result; got\n%v\nwant\n%v", result, resultExpected) + } + } + t.Run("empty_replabel_configs", func(t *testing.T) { + f(nil, nil, false, nil) + f(nil, nil, true, nil) + f(nil, []prompbmarshal.Label{ + { + Name: "foo", + Value: "bar", + }, + }, false, []prompbmarshal.Label{ + { + Name: "foo", + Value: "bar", + }, + }) + f(nil, []prompbmarshal.Label{ + { + Name: "foo", + Value: "bar", + }, + { + Name: "__name__", + Value: "xxx", + }, + { + Name: "__aaa", + Value: "yyy", + }, + }, true, []prompbmarshal.Label{ + { + Name: "__name__", + Value: "xxx", + }, + { + Name: "foo", + Value: "bar", + }, + }) + }) + t.Run("replace-miss", func(t *testing.T) { + f([]ParsedRelabelConfig{ + { + Action: "replace", + TargetLabel: "bar", + Regex: defaultRegexForRelabelConfig, + Replacement: "$1", + }, + }, nil, false, []prompbmarshal.Label{}) + f([]ParsedRelabelConfig{ + { + Action: "replace", + SourceLabels: []string{"foo"}, + TargetLabel: "bar", + Regex: defaultRegexForRelabelConfig, + Replacement: "$1", + }, + }, nil, false, []prompbmarshal.Label{}) + f([]ParsedRelabelConfig{ + { + Action: "replace", + SourceLabels: []string{"foo"}, + TargetLabel: "bar", + Regex: defaultRegexForRelabelConfig, + Replacement: "$1", + }, + }, []prompbmarshal.Label{ + { + Name: "xxx", + Value: "yyy", + }, + }, false, []prompbmarshal.Label{ + { + Name: "xxx", + Value: "yyy", + }, + }) + f([]ParsedRelabelConfig{ + { + Action: "replace", + SourceLabels: []string{"foo"}, + TargetLabel: "bar", + Regex: regexp.MustCompile(".+"), + Replacement: "$1", + }, + }, []prompbmarshal.Label{ + { + Name: "xxx", + Value: "yyy", + }, + }, false, []prompbmarshal.Label{ + { + Name: "xxx", + Value: "yyy", + }, + }) + }) + t.Run("replace-hit", func(t *testing.T) { + f([]ParsedRelabelConfig{ + { + Action: "replace", + SourceLabels: []string{"xxx", "foo"}, + Separator: ";", + TargetLabel: "bar", + Regex: defaultRegexForRelabelConfig, + Replacement: "a-$1-b", + }, + }, []prompbmarshal.Label{ + { + Name: "xxx", + Value: "yyy", + }, + }, false, []prompbmarshal.Label{ + { + Name: "bar", + Value: "a-yyy;-b", + }, + { + Name: "xxx", + Value: "yyy", + }, + }) + }) + t.Run("replace_all-miss", func(t *testing.T) { + f([]ParsedRelabelConfig{ + { + Action: "replace_all", + TargetLabel: "bar", + Regex: defaultRegexForRelabelConfig, + Replacement: "$1", + }, + }, nil, false, []prompbmarshal.Label{}) + f([]ParsedRelabelConfig{ + { + Action: "replace_all", + SourceLabels: []string{"foo"}, + TargetLabel: "bar", + Regex: defaultRegexForRelabelConfig, + Replacement: "$1", + }, + }, nil, false, []prompbmarshal.Label{}) + f([]ParsedRelabelConfig{ + { + Action: "replace_all", + SourceLabels: []string{"foo"}, + TargetLabel: "bar", + 
Regex: defaultRegexForRelabelConfig, + Replacement: "$1", + }, + }, []prompbmarshal.Label{ + { + Name: "xxx", + Value: "yyy", + }, + }, false, []prompbmarshal.Label{ + { + Name: "xxx", + Value: "yyy", + }, + }) + f([]ParsedRelabelConfig{ + { + Action: "replace_all", + SourceLabels: []string{"foo"}, + TargetLabel: "bar", + Regex: regexp.MustCompile(".+"), + Replacement: "$1", + }, + }, []prompbmarshal.Label{ + { + Name: "xxx", + Value: "yyy", + }, + }, false, []prompbmarshal.Label{ + { + Name: "xxx", + Value: "yyy", + }, + }) + }) + t.Run("replace_all-hit", func(t *testing.T) { + f([]ParsedRelabelConfig{ + { + Action: "replace_all", + SourceLabels: []string{"xxx", "foo"}, + Separator: ";", + TargetLabel: "xxx", + Regex: regexp.MustCompile("(;)"), + Replacement: "-$1-", + }, + }, []prompbmarshal.Label{ + { + Name: "xxx", + Value: "y;y", + }, + }, false, []prompbmarshal.Label{ + { + Name: "xxx", + Value: "y-;-y-;-", + }, + }) + }) + t.Run("replace-add-multi-labels", func(t *testing.T) { + f([]ParsedRelabelConfig{ + { + Action: "replace", + SourceLabels: []string{"xxx"}, + TargetLabel: "bar", + Regex: defaultRegexForRelabelConfig, + Replacement: "a-$1", + }, + { + Action: "replace", + SourceLabels: []string{"bar"}, + TargetLabel: "zar", + Regex: defaultRegexForRelabelConfig, + Replacement: "b-$1", + }, + }, []prompbmarshal.Label{ + { + Name: "xxx", + Value: "yyy", + }, + { + Name: "__address__", + Value: "a.bc", + }, + }, true, []prompbmarshal.Label{ + { + Name: "bar", + Value: "a-yyy", + }, + { + Name: "instance", + Value: "a.bc", + }, + { + Name: "xxx", + Value: "yyy", + }, + { + Name: "zar", + Value: "b-a-yyy", + }, + }) + }) + t.Run("replace-self", func(t *testing.T) { + f([]ParsedRelabelConfig{ + { + Action: "replace", + SourceLabels: []string{"foo"}, + TargetLabel: "foo", + Regex: defaultRegexForRelabelConfig, + Replacement: "a-$1", + }, + }, []prompbmarshal.Label{ + { + Name: "foo", + Value: "aaxx", + }, + }, true, []prompbmarshal.Label{ + { + Name: "foo", + Value: "a-aaxx", + }, + }) + }) + t.Run("replace-missing-source", func(t *testing.T) { + f([]ParsedRelabelConfig{ + { + Action: "replace", + TargetLabel: "foo", + Regex: defaultRegexForRelabelConfig, + Replacement: "foobar", + }, + }, []prompbmarshal.Label{}, true, []prompbmarshal.Label{ + { + Name: "foo", + Value: "foobar", + }, + }) + }) + t.Run("keep-miss", func(t *testing.T) { + f([]ParsedRelabelConfig{ + { + Action: "keep", + SourceLabels: []string{"foo"}, + Regex: regexp.MustCompile(".+"), + }, + }, nil, true, nil) + f([]ParsedRelabelConfig{ + { + Action: "keep", + SourceLabels: []string{"foo"}, + Regex: regexp.MustCompile(".+"), + }, + }, []prompbmarshal.Label{ + { + Name: "xxx", + Value: "yyy", + }, + }, true, []prompbmarshal.Label{}) + }) + t.Run("keep-hit", func(t *testing.T) { + f([]ParsedRelabelConfig{ + { + Action: "keep", + SourceLabels: []string{"foo"}, + Regex: regexp.MustCompile(".+"), + }, + }, []prompbmarshal.Label{ + { + Name: "foo", + Value: "yyy", + }, + }, false, []prompbmarshal.Label{ + { + Name: "foo", + Value: "yyy", + }, + }) + }) + t.Run("drop-miss", func(t *testing.T) { + f([]ParsedRelabelConfig{ + { + Action: "drop", + SourceLabels: []string{"foo"}, + Regex: regexp.MustCompile(".+"), + }, + }, nil, false, nil) + f([]ParsedRelabelConfig{ + { + Action: "drop", + SourceLabels: []string{"foo"}, + Regex: regexp.MustCompile(".+"), + }, + }, []prompbmarshal.Label{ + { + Name: "xxx", + Value: "yyy", + }, + }, true, []prompbmarshal.Label{ + { + Name: "xxx", + Value: "yyy", + }, + }) + }) + t.Run("drop-hit", 
func(t *testing.T) { + f([]ParsedRelabelConfig{ + { + Action: "drop", + SourceLabels: []string{"foo"}, + Regex: regexp.MustCompile(".+"), + }, + }, []prompbmarshal.Label{ + { + Name: "foo", + Value: "yyy", + }, + }, true, []prompbmarshal.Label{}) + }) + t.Run("hashmod-miss", func(t *testing.T) { + f([]ParsedRelabelConfig{ + { + Action: "hashmod", + SourceLabels: []string{"foo"}, + TargetLabel: "aaa", + Modulus: 123, + }, + }, []prompbmarshal.Label{ + { + Name: "xxx", + Value: "yyy", + }, + }, false, []prompbmarshal.Label{ + { + Name: "aaa", + Value: "81", + }, + { + Name: "xxx", + Value: "yyy", + }, + }) + }) + t.Run("hashmod-hit", func(t *testing.T) { + f([]ParsedRelabelConfig{ + { + Action: "hashmod", + SourceLabels: []string{"foo"}, + TargetLabel: "aaa", + Modulus: 123, + }, + }, []prompbmarshal.Label{ + { + Name: "foo", + Value: "yyy", + }, + }, true, []prompbmarshal.Label{ + { + Name: "aaa", + Value: "73", + }, + { + Name: "foo", + Value: "yyy", + }, + }) + }) + t.Run("labelmap", func(t *testing.T) { + f([]ParsedRelabelConfig{ + { + Action: "labelmap", + Regex: regexp.MustCompile("foo(.+)"), + Replacement: "$1-x", + }, + }, []prompbmarshal.Label{ + { + Name: "foo", + Value: "yyy", + }, + { + Name: "foobar", + Value: "aaa", + }, + }, true, []prompbmarshal.Label{ + { + Name: "bar-x", + Value: "aaa", + }, + { + Name: "foo", + Value: "yyy", + }, + }) + }) + t.Run("labelmap_all", func(t *testing.T) { + f([]ParsedRelabelConfig{ + { + Action: "labelmap_all", + Regex: regexp.MustCompile(`\.`), + Replacement: "-", + }, + }, []prompbmarshal.Label{ + { + Name: "foo.bar.baz", + Value: "yyy", + }, + { + Name: "foobar", + Value: "aaa", + }, + }, true, []prompbmarshal.Label{ + { + Name: "foo-bar-baz", + Value: "yyy", + }, + { + Name: "foobar", + Value: "aaa", + }, + }) + }) + t.Run("labeldrop", func(t *testing.T) { + f([]ParsedRelabelConfig{ + { + Action: "labeldrop", + Regex: regexp.MustCompile("dropme.*"), + }, + }, []prompbmarshal.Label{ + { + Name: "aaa", + Value: "bbb", + }, + }, true, []prompbmarshal.Label{ + { + Name: "aaa", + Value: "bbb", + }, + }) + f([]ParsedRelabelConfig{ + { + Action: "labeldrop", + Regex: regexp.MustCompile("dropme.*"), + }, + }, []prompbmarshal.Label{ + { + Name: "xxx", + Value: "yyy", + }, + { + Name: "dropme-please", + Value: "aaa", + }, + { + Name: "foo", + Value: "bar", + }, + }, false, []prompbmarshal.Label{ + { + Name: "foo", + Value: "bar", + }, + { + Name: "xxx", + Value: "yyy", + }, + }) + }) + t.Run("labelkeep", func(t *testing.T) { + f([]ParsedRelabelConfig{ + { + Action: "labelkeep", + Regex: regexp.MustCompile("keepme.*"), + }, + }, []prompbmarshal.Label{ + { + Name: "keepme", + Value: "aaa", + }, + }, true, []prompbmarshal.Label{ + { + Name: "keepme", + Value: "aaa", + }, + }) + f([]ParsedRelabelConfig{ + { + Action: "labelkeep", + Regex: regexp.MustCompile("keepme.*"), + }, + }, []prompbmarshal.Label{ + { + Name: "keepme", + Value: "aaa", + }, + { + Name: "aaaa", + Value: "awef", + }, + { + Name: "keepme-aaa", + Value: "234", + }, + }, false, []prompbmarshal.Label{ + { + Name: "keepme", + Value: "aaa", + }, + { + Name: "keepme-aaa", + Value: "234", + }, + }) + }) +} + +func TestFinalizeLabels(t *testing.T) { + f := func(labels, resultExpected []prompbmarshal.Label) { + t.Helper() + result := FinalizeLabels(nil, labels) + if !reflect.DeepEqual(result, resultExpected) { + t.Fatalf("unexpected result; got\n%v\nwant\n%v", result, resultExpected) + } + } + f(nil, nil) + f([]prompbmarshal.Label{ + { + Name: "foo", + Value: "bar", + }, + { + Name: "__aaa", + 
Value: "ass", + }, + { + Name: "__address__", + Value: "foo.com", + }, + }, []prompbmarshal.Label{ + { + Name: "foo", + Value: "bar", + }, + { + Name: "instance", + Value: "foo.com", + }, + }) + f([]prompbmarshal.Label{ + { + Name: "foo", + Value: "bar", + }, + { + Name: "instance", + Value: "ass", + }, + { + Name: "__address__", + Value: "foo.com", + }, + }, []prompbmarshal.Label{ + { + Name: "foo", + Value: "bar", + }, + { + Name: "instance", + Value: "ass", + }, + }) +} diff --git a/lib/promrelabel/relabel_timing_test.go b/lib/promrelabel/relabel_timing_test.go new file mode 100644 index 000000000..c16a3a113 --- /dev/null +++ b/lib/promrelabel/relabel_timing_test.go @@ -0,0 +1,567 @@ +package promrelabel + +import ( + "fmt" + "regexp" + "testing" + + "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal" +) + +func BenchmarkApplyRelabelConfigs(b *testing.B) { + b.Run("replace-label-copy", func(b *testing.B) { + prcs := []ParsedRelabelConfig{ + { + Action: "replace", + SourceLabels: []string{"id"}, + TargetLabel: "__name__", + Regex: defaultRegexForRelabelConfig, + Replacement: "$1", + }, + } + labelsOrig := []prompbmarshal.Label{ + { + Name: "__name__", + Value: "metric", + }, + { + Name: "id", + Value: "foobar-random-string-here", + }, + } + b.ReportAllocs() + b.SetBytes(1) + b.RunParallel(func(pb *testing.PB) { + var labels []prompbmarshal.Label + for pb.Next() { + labels = append(labels[:0], labelsOrig...) + labels = ApplyRelabelConfigs(labels, 0, prcs, true) + if len(labels) != len(labelsOrig) { + panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), len(labelsOrig), labels)) + } + if labels[0].Name != "__name__" { + panic(fmt.Errorf("unexpected label name; got %q; want %q", labels[0].Name, "__name__")) + } + if labels[0].Value != "foobar-random-string-here" { + panic(fmt.Errorf("unexpected label value; got %q; want %q", labels[0].Value, "foobar-random-string-here")) + } + if labels[1].Name != "id" { + panic(fmt.Errorf("unexpected label name; got %q; want %q", labels[1].Name, "id")) + } + if labels[1].Value != "foobar-random-string-here" { + panic(fmt.Errorf("unexpected label value; got %q; want %q", labels[1].Value, "foobar-random-string-here")) + } + } + }) + }) + b.Run("replace-set-label", func(b *testing.B) { + prcs := []ParsedRelabelConfig{ + { + Action: "replace", + TargetLabel: "__name__", + Regex: defaultRegexForRelabelConfig, + Replacement: "foobar", + }, + } + labelsOrig := []prompbmarshal.Label{ + { + Name: "__name__", + Value: "metric", + }, + { + Name: "id", + Value: "foobar-random-string-here", + }, + } + b.ReportAllocs() + b.SetBytes(1) + b.RunParallel(func(pb *testing.PB) { + var labels []prompbmarshal.Label + for pb.Next() { + labels = append(labels[:0], labelsOrig...) 
+ labels = ApplyRelabelConfigs(labels, 0, prcs, true) + if len(labels) != len(labelsOrig) { + panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), len(labelsOrig), labels)) + } + if labels[0].Name != "__name__" { + panic(fmt.Errorf("unexpected label name; got %q; want %q", labels[0].Name, "__name__")) + } + if labels[0].Value != "foobar" { + panic(fmt.Errorf("unexpected label value; got %q; want %q", labels[0].Value, "foobar")) + } + if labels[1].Name != "id" { + panic(fmt.Errorf("unexpected label name; got %q; want %q", labels[1].Name, "id")) + } + if labels[1].Value != "foobar-random-string-here" { + panic(fmt.Errorf("unexpected label value; got %q; want %q", labels[1].Value, "foobar-random-string-here")) + } + } + }) + }) + b.Run("replace-add-label", func(b *testing.B) { + prcs := []ParsedRelabelConfig{ + { + Action: "replace", + TargetLabel: "aaa", + Regex: defaultRegexForRelabelConfig, + Replacement: "foobar", + }, + } + labelsOrig := []prompbmarshal.Label{ + { + Name: "__name__", + Value: "metric", + }, + } + b.ReportAllocs() + b.SetBytes(1) + b.RunParallel(func(pb *testing.PB) { + var labels []prompbmarshal.Label + for pb.Next() { + labels = append(labels[:0], labelsOrig...) + labels = ApplyRelabelConfigs(labels, 0, prcs, true) + if len(labels) != 2 { + panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), 2, labels)) + } + if labels[0].Name != "__name__" { + panic(fmt.Errorf("unexpected label name; got %q; want %q", labels[0].Name, "__name__")) + } + if labels[0].Value != "metric" { + panic(fmt.Errorf("unexpected label value; got %q; want %q", labels[0].Value, "metric")) + } + if labels[1].Name != "aaa" { + panic(fmt.Errorf("unexpected label name; got %q; want %q", labels[1].Name, "aaa")) + } + if labels[1].Value != "foobar" { + panic(fmt.Errorf("unexpected label value; got %q; want %q", labels[1].Value, "foobar")) + } + } + }) + }) + b.Run("replace-mismatch", func(b *testing.B) { + prcs := []ParsedRelabelConfig{ + { + Action: "replace", + SourceLabels: []string{"non-existing-label"}, + TargetLabel: "id", + Regex: regexp.MustCompile("(foobar)-.*"), + Replacement: "$1", + }, + } + labelsOrig := []prompbmarshal.Label{ + { + Name: "__name__", + Value: "metric", + }, + { + Name: "id", + Value: "foobar-random-string-here", + }, + } + b.ReportAllocs() + b.SetBytes(1) + b.RunParallel(func(pb *testing.PB) { + var labels []prompbmarshal.Label + for pb.Next() { + labels = append(labels[:0], labelsOrig...) 
+ labels = ApplyRelabelConfigs(labels, 0, prcs, true) + if len(labels) != len(labelsOrig) { + panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), len(labelsOrig), labels)) + } + if labels[0].Name != "__name__" { + panic(fmt.Errorf("unexpected label name; got %q; want %q", labels[0].Name, "__name__")) + } + if labels[0].Value != "metric" { + panic(fmt.Errorf("unexpected label value; got %q; want %q", labels[0].Value, "metric")) + } + if labels[1].Name != "id" { + panic(fmt.Errorf("unexpected label name; got %q; want %q", labels[1].Name, "id")) + } + if labels[1].Value != "foobar-random-string-here" { + panic(fmt.Errorf("unexpected label value; got %q; want %q", labels[1].Value, "foobar-random-string-here")) + } + } + }) + }) + b.Run("replace-match", func(b *testing.B) { + prcs := []ParsedRelabelConfig{ + { + Action: "replace", + SourceLabels: []string{"id"}, + TargetLabel: "id", + Regex: regexp.MustCompile("(foobar)-.*"), + Replacement: "$1", + }, + } + labelsOrig := []prompbmarshal.Label{ + { + Name: "__name__", + Value: "metric", + }, + { + Name: "id", + Value: "foobar-random-string-here", + }, + } + b.ReportAllocs() + b.SetBytes(1) + b.RunParallel(func(pb *testing.PB) { + var labels []prompbmarshal.Label + for pb.Next() { + labels = append(labels[:0], labelsOrig...) + labels = ApplyRelabelConfigs(labels, 0, prcs, true) + if len(labels) != len(labelsOrig) { + panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), len(labelsOrig), labels)) + } + if labels[0].Name != "__name__" { + panic(fmt.Errorf("unexpected label name; got %q; want %q", labels[0].Name, "__name__")) + } + if labels[0].Value != "metric" { + panic(fmt.Errorf("unexpected label value; got %q; want %q", labels[0].Value, "metric")) + } + if labels[1].Name != "id" { + panic(fmt.Errorf("unexpected label name; got %q; want %q", labels[1].Name, "id")) + } + if labels[1].Value != "foobar" { + panic(fmt.Errorf("unexpected label value; got %q; want %q", labels[1].Value, "foobar")) + } + } + }) + }) + b.Run("drop-mismatch", func(b *testing.B) { + prcs := []ParsedRelabelConfig{ + { + Action: "drop", + SourceLabels: []string{"non-existing-label"}, + Regex: regexp.MustCompile("(foobar)-.*"), + }, + } + labelsOrig := []prompbmarshal.Label{ + { + Name: "__name__", + Value: "metric", + }, + { + Name: "id", + Value: "foobar-random-string-here", + }, + } + b.ReportAllocs() + b.SetBytes(1) + b.RunParallel(func(pb *testing.PB) { + var labels []prompbmarshal.Label + for pb.Next() { + labels = append(labels[:0], labelsOrig...) 
+ labels = ApplyRelabelConfigs(labels, 0, prcs, true) + if len(labels) != len(labelsOrig) { + panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), len(labelsOrig), labels)) + } + if labels[0].Name != "__name__" { + panic(fmt.Errorf("unexpected label name; got %q; want %q", labels[0].Name, "__name__")) + } + if labels[0].Value != "metric" { + panic(fmt.Errorf("unexpected label value; got %q; want %q", labels[0].Value, "metric")) + } + if labels[1].Name != "id" { + panic(fmt.Errorf("unexpected label name; got %q; want %q", labels[1].Name, "id")) + } + if labels[1].Value != "foobar-random-string-here" { + panic(fmt.Errorf("unexpected label value; got %q; want %q", labels[1].Value, "foobar-random-string-here")) + } + } + }) + }) + b.Run("drop-match", func(b *testing.B) { + prcs := []ParsedRelabelConfig{ + { + Action: "drop", + SourceLabels: []string{"id"}, + Regex: regexp.MustCompile("(foobar)-.*"), + }, + } + labelsOrig := []prompbmarshal.Label{ + { + Name: "__name__", + Value: "metric", + }, + { + Name: "id", + Value: "foobar-random-string-here", + }, + } + b.ReportAllocs() + b.SetBytes(1) + b.RunParallel(func(pb *testing.PB) { + var labels []prompbmarshal.Label + for pb.Next() { + labels = append(labels[:0], labelsOrig...) + labels = ApplyRelabelConfigs(labels, 0, prcs, true) + if len(labels) != 0 { + panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), 0, labels)) + } + } + }) + }) + b.Run("keep-mismatch", func(b *testing.B) { + prcs := []ParsedRelabelConfig{ + { + Action: "keep", + SourceLabels: []string{"non-existing-label"}, + Regex: regexp.MustCompile("(foobar)-.*"), + }, + } + labelsOrig := []prompbmarshal.Label{ + { + Name: "__name__", + Value: "metric", + }, + { + Name: "id", + Value: "foobar-random-string-here", + }, + } + b.ReportAllocs() + b.SetBytes(1) + b.RunParallel(func(pb *testing.PB) { + var labels []prompbmarshal.Label + for pb.Next() { + labels = append(labels[:0], labelsOrig...) + labels = ApplyRelabelConfigs(labels, 0, prcs, true) + if len(labels) != 0 { + panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), 0, labels)) + } + } + }) + }) + b.Run("keep-match", func(b *testing.B) { + prcs := []ParsedRelabelConfig{ + { + Action: "keep", + SourceLabels: []string{"id"}, + Regex: regexp.MustCompile("(foobar)-.*"), + }, + } + labelsOrig := []prompbmarshal.Label{ + { + Name: "__name__", + Value: "metric", + }, + { + Name: "id", + Value: "foobar-random-string-here", + }, + } + b.ReportAllocs() + b.SetBytes(1) + b.RunParallel(func(pb *testing.PB) { + var labels []prompbmarshal.Label + for pb.Next() { + labels = append(labels[:0], labelsOrig...) 
+ labels = ApplyRelabelConfigs(labels, 0, prcs, true) + if len(labels) != len(labelsOrig) { + panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), len(labelsOrig), labels)) + } + if labels[0].Name != "__name__" { + panic(fmt.Errorf("unexpected label name; got %q; want %q", labels[0].Name, "__name__")) + } + if labels[0].Value != "metric" { + panic(fmt.Errorf("unexpected label value; got %q; want %q", labels[0].Value, "metric")) + } + if labels[1].Name != "id" { + panic(fmt.Errorf("unexpected label name; got %q; want %q", labels[1].Name, "id")) + } + if labels[1].Value != "foobar-random-string-here" { + panic(fmt.Errorf("unexpected label value; got %q; want %q", labels[1].Value, "foobar-random-string-here")) + } + } + }) + }) + b.Run("labeldrop-mismatch", func(b *testing.B) { + prcs := []ParsedRelabelConfig{ + { + Action: "labeldrop", + Regex: regexp.MustCompile("non-existing-label"), + }, + } + labelsOrig := []prompbmarshal.Label{ + { + Name: "__name__", + Value: "metric", + }, + { + Name: "id", + Value: "foobar-random-string-here", + }, + } + b.ReportAllocs() + b.SetBytes(1) + b.RunParallel(func(pb *testing.PB) { + var labels []prompbmarshal.Label + for pb.Next() { + labels = append(labels[:0], labelsOrig...) + labels = ApplyRelabelConfigs(labels, 0, prcs, true) + if len(labels) != len(labelsOrig) { + panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), len(labelsOrig), labels)) + } + if labels[0].Name != "__name__" { + panic(fmt.Errorf("unexpected label name; got %q; want %q", labels[0].Name, "__name__")) + } + if labels[0].Value != "metric" { + panic(fmt.Errorf("unexpected label value; got %q; want %q", labels[0].Value, "metric")) + } + if labels[1].Name != "id" { + panic(fmt.Errorf("unexpected label name; got %q; want %q", labels[1].Name, "id")) + } + if labels[1].Value != "foobar-random-string-here" { + panic(fmt.Errorf("unexpected label value; got %q; want %q", labels[1].Value, "foobar-random-string-here")) + } + } + }) + }) + b.Run("labeldrop-match", func(b *testing.B) { + prcs := []ParsedRelabelConfig{ + { + Action: "labeldrop", + Regex: regexp.MustCompile("id"), + }, + } + labelsOrig := []prompbmarshal.Label{ + { + Name: "__name__", + Value: "metric", + }, + { + Name: "id", + Value: "foobar-random-string-here", + }, + } + b.ReportAllocs() + b.SetBytes(1) + b.RunParallel(func(pb *testing.PB) { + var labels []prompbmarshal.Label + for pb.Next() { + labels = append(labels[:0], labelsOrig...) + labels = ApplyRelabelConfigs(labels, 0, prcs, true) + if len(labels) != 1 { + panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), 1, labels)) + } + if labels[0].Name != "__name__" { + panic(fmt.Errorf("unexpected label name; got %q; want %q", labels[0].Name, "__name__")) + } + if labels[0].Value != "metric" { + panic(fmt.Errorf("unexpected label value; got %q; want %q", labels[0].Value, "metric")) + } + } + }) + }) + b.Run("labelkeep-mismatch", func(b *testing.B) { + prcs := []ParsedRelabelConfig{ + { + Action: "labelkeep", + Regex: regexp.MustCompile("non-existing-label"), + }, + } + labelsOrig := []prompbmarshal.Label{ + { + Name: "__name__", + Value: "metric", + }, + { + Name: "id", + Value: "foobar-random-string-here", + }, + } + b.ReportAllocs() + b.SetBytes(1) + b.RunParallel(func(pb *testing.PB) { + var labels []prompbmarshal.Label + for pb.Next() { + labels = append(labels[:0], labelsOrig...) 
+ labels = ApplyRelabelConfigs(labels, 0, prcs, true) + if len(labels) != 0 { + panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), 0, labels)) + } + } + }) + }) + b.Run("labelkeep-match", func(b *testing.B) { + prcs := []ParsedRelabelConfig{ + { + Action: "labelkeep", + Regex: regexp.MustCompile("id"), + }, + } + labelsOrig := []prompbmarshal.Label{ + { + Name: "__name__", + Value: "metric", + }, + { + Name: "id", + Value: "foobar-random-string-here", + }, + } + b.ReportAllocs() + b.SetBytes(1) + b.RunParallel(func(pb *testing.PB) { + var labels []prompbmarshal.Label + for pb.Next() { + labels = append(labels[:0], labelsOrig...) + labels = ApplyRelabelConfigs(labels, 0, prcs, true) + if len(labels) != 1 { + panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), 1, labels)) + } + if labels[0].Name != "id" { + panic(fmt.Errorf("unexpected label name; got %q; want %q", labels[0].Name, "id")) + } + if labels[0].Value != "foobar-random-string-here" { + panic(fmt.Errorf("unexpected label value; got %q; want %q", labels[0].Value, "foobar-random-string-here")) + } + } + }) + }) + b.Run("hashmod", func(b *testing.B) { + prcs := []ParsedRelabelConfig{ + { + Action: "hashmod", + SourceLabels: []string{"id"}, + TargetLabel: "id", + Modulus: 23, + }, + } + labelsOrig := []prompbmarshal.Label{ + { + Name: "__name__", + Value: "metric", + }, + { + Name: "id", + Value: "foobar-random-string-here", + }, + } + b.ReportAllocs() + b.SetBytes(1) + b.RunParallel(func(pb *testing.PB) { + var labels []prompbmarshal.Label + for pb.Next() { + labels = append(labels[:0], labelsOrig...) + labels = ApplyRelabelConfigs(labels, 0, prcs, true) + if len(labels) != len(labelsOrig) { + panic(fmt.Errorf("unexpected number of labels; got %d; want %d; labels:\n%#v", len(labels), len(labelsOrig), labels)) + } + if labels[0].Name != "__name__" { + panic(fmt.Errorf("unexpected label name; got %q; want %q", labels[0].Name, "__name__")) + } + if labels[0].Value != "metric" { + panic(fmt.Errorf("unexpected label value; got %q; want %q", labels[0].Value, "metric")) + } + if labels[1].Name != "id" { + panic(fmt.Errorf("unexpected label name; got %q; want %q", labels[1].Name, "id")) + } + if labels[1].Value != "11" { + panic(fmt.Errorf("unexpected label value; got %q; want %q", labels[1].Value, "11")) + } + } + }) + }) +} diff --git a/lib/promrelabel/sort.go b/lib/promrelabel/sort.go new file mode 100644 index 000000000..cfe5e03e3 --- /dev/null +++ b/lib/promrelabel/sort.go @@ -0,0 +1,37 @@ +package promrelabel + +import ( + "sort" + "sync" + + "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal" +) + +// SortLabels sorts labels. 
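+//
+// Labels are sorted by name in ascending order. A pooled sorter is used in
+// order to avoid an allocation per call.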
+func SortLabels(labels []prompbmarshal.Label) { + ls := labelsSorterPool.Get().(*labelsSorter) + *ls = labels + if !sort.IsSorted(ls) { + sort.Sort(ls) + } + *ls = nil + labelsSorterPool.Put(ls) +} + +var labelsSorterPool = &sync.Pool{ + New: func() interface{} { + return &labelsSorter{} + }, +} + +type labelsSorter []prompbmarshal.Label + +func (ls *labelsSorter) Len() int { return len(*ls) } +func (ls *labelsSorter) Swap(i, j int) { + a := *ls + a[i], a[j] = a[j], a[i] +} +func (ls *labelsSorter) Less(i, j int) bool { + a := *ls + return a[i].Name < a[j].Name +} diff --git a/lib/promrelabel/sort_test.go b/lib/promrelabel/sort_test.go new file mode 100644 index 000000000..9c34e3329 --- /dev/null +++ b/lib/promrelabel/sort_test.go @@ -0,0 +1,43 @@ +package promrelabel + +import ( + "reflect" + "testing" + + "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal" +) + +func TestSortLabels(t *testing.T) { + labels := []prompbmarshal.Label{ + { + Name: "foo", + Value: "bar", + }, + { + Name: "aa", + Value: "bb", + }, + { + Name: "ba", + Value: "zz", + }, + } + labelsExpected := []prompbmarshal.Label{ + { + Name: "aa", + Value: "bb", + }, + { + Name: "ba", + Value: "zz", + }, + { + Name: "foo", + Value: "bar", + }, + } + SortLabels(labels) + if !reflect.DeepEqual(labels, labelsExpected) { + t.Fatalf("unexpected sorted labels; got\n%v\nwant\n%v", labels, labelsExpected) + } +} diff --git a/lib/promrelabel/testdata/invalid_config.yml b/lib/promrelabel/testdata/invalid_config.yml new file mode 100644 index 000000000..a37ed1da9 --- /dev/null +++ b/lib/promrelabel/testdata/invalid_config.yml @@ -0,0 +1,3 @@ +fodsofdsf +f dsfdds +fdsfsdsfd diff --git a/lib/promrelabel/testdata/relabel_configs_valid.yml b/lib/promrelabel/testdata/relabel_configs_valid.yml new file mode 100644 index 000000000..7ebe8ef3a --- /dev/null +++ b/lib/promrelabel/testdata/relabel_configs_valid.yml @@ -0,0 +1,20 @@ +- target_label: bar +- source_labels: [aa] + separator: "foobar" + regex: "foo.+bar" + target_label: aaa + replacement: "xxx" +- action: keep + source_labels: [aaa] +- action: drop + source_labels: [aaa] +- action: hashmod + source_labels: [aaa] + target_label: aaa + modulus: 234 +- action: replace_all + source_labels: [aa] + target_label: bb +- action: labelmap_all + regex: "\\." + replacement: ":" diff --git a/lib/promscrape/client.go b/lib/promscrape/client.go new file mode 100644 index 000000000..6f33f408e --- /dev/null +++ b/lib/promscrape/client.go @@ -0,0 +1,134 @@ +package promscrape + +import ( + "crypto/tls" + "flag" + "fmt" + "strings" + "time" + + "github.com/VictoriaMetrics/VictoriaMetrics/lib/netutil" + "github.com/VictoriaMetrics/metrics" + "github.com/valyala/fasthttp" +) + +var ( + maxScrapeSize = flag.Int("promscrape.maxScrapeSize", 16*1024*1024, "The maximum size of scrape response in bytes to process from Prometheus targets. "+ + "Bigger responses are rejected") + disableCompression = flag.Bool("promscrape.disableCompression", false, "Whether to disable sending 'Accept-Encoding: gzip' request headers to scrape targets. 
"+ + "This may reduce CPU usage on scrape targets at the cost of higher network bandwidth utilization") +) + +type client struct { + hc *fasthttp.HostClient + + scrapeURL string + host string + requestURI string + authHeader string +} + +func newClient(sw *ScrapeWork) *client { + var u fasthttp.URI + u.Update(sw.ScrapeURL) + host := string(u.Host()) + requestURI := string(u.RequestURI()) + isTLS := string(u.Scheme()) == "https" + var tlsCfg *tls.Config + if isTLS { + tlsCfg = getTLSConfig(sw) + } + if !strings.Contains(host, ":") { + if !isTLS { + host += ":80" + } else { + host += ":443" + } + } + hc := &fasthttp.HostClient{ + Addr: host, + Name: "vm_promscrape", + Dial: statDial, + DialDualStack: netutil.TCP6Enabled(), + IsTLS: isTLS, + TLSConfig: tlsCfg, + MaxIdleConnDuration: 2 * sw.ScrapeInterval, + ReadTimeout: sw.ScrapeTimeout, + WriteTimeout: 10 * time.Second, + MaxResponseBodySize: *maxScrapeSize, + } + return &client{ + hc: hc, + + scrapeURL: sw.ScrapeURL, + host: host, + requestURI: requestURI, + authHeader: sw.Authorization, + } +} + +func (c *client) ReadData(dst []byte) ([]byte, error) { + req := fasthttp.AcquireRequest() + req.SetRequestURI(c.requestURI) + req.SetHost(c.host) + if !*disableCompression { + req.Header.Set("Accept-Encoding", "gzip") + } + if c.authHeader != "" { + req.Header.Set("Authorization", c.authHeader) + } + resp := fasthttp.AcquireResponse() + // There is no need in calling DoTimeout, since the timeout is already set in c.hc.ReadTimeout. + err := c.hc.Do(req, resp) + fasthttp.ReleaseRequest(req) + if err != nil { + fasthttp.ReleaseResponse(resp) + if err == fasthttp.ErrTimeout { + scrapesTimedout.Inc() + return dst, fmt.Errorf("error when scraping %q with timeout %s: %s", c.scrapeURL, c.hc.ReadTimeout, err) + } + return dst, fmt.Errorf("error when scraping %q: %s", c.scrapeURL, err) + } + dstLen := len(dst) + if ce := resp.Header.Peek("Content-Encoding"); string(ce) == "gzip" { + var err error + dst, err = fasthttp.AppendGunzipBytes(dst, resp.Body()) + if err != nil { + fasthttp.ReleaseResponse(resp) + scrapesGunzipFailed.Inc() + return dst, fmt.Errorf("cannot ungzip response from %q: %s", c.scrapeURL, err) + } + scrapesGunzipped.Inc() + } else { + dst = append(dst, resp.Body()...) 
+ } + statusCode := resp.StatusCode() + if statusCode != fasthttp.StatusOK { + metrics.GetOrCreateCounter(fmt.Sprintf(`vm_promscrape_scrapes_total{status_code="%d"}`, statusCode)).Inc() + return dst, fmt.Errorf("unexpected status code returned when scraping %q: %d; expecting %d; response body: %q", + c.scrapeURL, statusCode, fasthttp.StatusOK, dst[dstLen:]) + } + scrapesOK.Inc() + fasthttp.ReleaseResponse(resp) + return dst, nil +} + +var ( + scrapesTimedout = metrics.NewCounter(`vm_promscrape_scrapes_timed_out_total`) + scrapesOK = metrics.NewCounter(`vm_promscrape_scrapes_total{status_code="200"}`) + scrapesGunzipped = metrics.NewCounter(`vm_promscrape_scrapes_gunziped_total`) + scrapesGunzipFailed = metrics.NewCounter(`vm_promscrape_scrapes_gunzip_failed_total`) +) + +func getTLSConfig(sw *ScrapeWork) *tls.Config { + tlsCfg := &tls.Config{ + RootCAs: sw.TLSRootCA, + ClientSessionCache: tls.NewLRUClientSessionCache(0), + } + if sw.TLSCertificate != nil { + tlsCfg.Certificates = []tls.Certificate{*sw.TLSCertificate} + } + tlsCfg.ServerName = sw.TLSServerName + tlsCfg.InsecureSkipVerify = sw.TLSInsecureSkipVerify + return tlsCfg +} diff --git a/lib/promscrape/config.go b/lib/promscrape/config.go new file mode 100644 index 000000000..1ea784c20 --- /dev/null +++ b/lib/promscrape/config.go @@ -0,0 +1,588 @@ +package promscrape + +import ( + "crypto/tls" + "crypto/x509" + "encoding/base64" + "fmt" + "io/ioutil" + "net/url" + "path/filepath" + "strings" + "time" + "unicode" + + "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel" + "gopkg.in/yaml.v2" +) + +// Config represents essential parts from Prometheus config defined at https://prometheus.io/docs/prometheus/latest/configuration/configuration/ +type Config struct { + Global GlobalConfig `yaml:"global"` + ScrapeConfigs []ScrapeConfig `yaml:"scrape_configs"` + + // This is set to the directory from where the config has been loaded. + baseDir string +} + +// GlobalConfig represents essential parts for `global` section of Prometheus config. +// +// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/ +type GlobalConfig struct { + ScrapeInterval time.Duration `yaml:"scrape_interval"` + ScrapeTimeout time.Duration `yaml:"scrape_timeout"` + ExternalLabels map[string]string `yaml:"external_labels"` +} + +// ScrapeConfig represents essential parts for `scrape_config` section of Prometheus config. 
+// +// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config +type ScrapeConfig struct { + JobName string `yaml:"job_name"` + ScrapeInterval time.Duration `yaml:"scrape_interval"` + ScrapeTimeout time.Duration `yaml:"scrape_timeout"` + MetricsPath string `yaml:"metrics_path"` + HonorLabels bool `yaml:"honor_labels"` + HonorTimestamps bool `yaml:"honor_timestamps"` + Scheme string `yaml:"scheme"` + Params map[string][]string `yaml:"params"` + BasicAuth *BasicAuthConfig `yaml:"basic_auth"` + BearerToken string `yaml:"bearer_token"` + BearerTokenFile string `yaml:"bearer_token_file"` + TLSConfig *TLSConfig `yaml:"tls_config"` + StaticConfigs []StaticConfig `yaml:"static_configs"` + FileSDConfigs []FileSDConfig `yaml:"file_sd_configs"` + RelabelConfigs []promrelabel.RelabelConfig `yaml:"relabel_configs"` + MetricRelabelConfigs []promrelabel.RelabelConfig `yaml:"metric_relabel_configs"` + ScrapeLimit int `yaml:"scrape_limit"` + + // This is set in loadConfig + swc *scrapeWorkConfig +} + +// FileSDConfig represents file-based service discovery config. +// +// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#file_sd_config +type FileSDConfig struct { + Files []string `yaml:"files"` + // `refresh_interval` is ignored. See `-prometheus.fileSDCheckInterval` +} + +// TLSConfig represents TLS config. +// +// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#tls_config +type TLSConfig struct { + CAFile string `yaml:"ca_file"` + CertFile string `yaml:"cert_file"` + KeyFile string `yaml:"key_file"` + ServerName string `yaml:"server_name"` + InsecureSkipVerify bool `yaml:"insecure_skip_verify"` +} + +// BasicAuthConfig represents basic auth config. +type BasicAuthConfig struct { + Username string `yaml:"username"` + Password string `yaml:"password"` + PasswordFile string `yaml:"password_file"` +} + +// StaticConfig represents essential parts for `static_config` section of Prometheus config. +// +// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#static_config +type StaticConfig struct { + Targets []string `yaml:"targets"` + Labels map[string]string `yaml:"labels"` +} + +func loadStaticConfigs(path string) ([]StaticConfig, error) { + data, err := ioutil.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("cannot read `static_configs` from %q: %s", path, err) + } + var stcs []StaticConfig + if err := yaml.Unmarshal(data, &stcs); err != nil { + return nil, fmt.Errorf("cannot unmarshal `static_configs` from %q: %s", path, err) + } + return stcs, nil +} + +// loadConfig loads Prometheus config from the given path. 
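+//
+// A minimal usage sketch (the path is illustrative; the functions are the ones defined in this file):
+//
+//	cfg, err := loadConfig("prometheus.yml")
+//	if err != nil {
+//		logger.Fatalf("cannot load config: %s", err)
+//	}
+//	sws, err := cfg.getStaticScrapeWork()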
+func loadConfig(path string) (cfg *Config, err error) {
+	data, err := ioutil.ReadFile(path)
+	if err != nil {
+		return nil, fmt.Errorf("cannot read Prometheus config from %q: %s", path, err)
+	}
+	var cfgObj Config
+	if err := cfgObj.parse(data, path); err != nil {
+		return nil, fmt.Errorf("cannot parse Prometheus config from %q: %s", path, err)
+	}
+	return &cfgObj, nil
+}
+
+func (cfg *Config) parse(data []byte, path string) error {
+	if err := yaml.Unmarshal(data, cfg); err != nil {
+		return fmt.Errorf("cannot unmarshal data: %s", err)
+	}
+	absPath, err := filepath.Abs(path)
+	if err != nil {
+		return fmt.Errorf("cannot obtain abs path for %q: %s", path, err)
+	}
+	cfg.baseDir = filepath.Dir(absPath)
+	for i := range cfg.ScrapeConfigs {
+		sc := &cfg.ScrapeConfigs[i]
+		swc, err := getScrapeWorkConfig(sc, cfg.baseDir, &cfg.Global)
+		if err != nil {
+			return fmt.Errorf("cannot parse `scrape_config` #%d: %s", i+1, err)
+		}
+		sc.swc = swc
+	}
+	return nil
+}
+
+func (cfg *Config) fileSDConfigsCount() int {
+	n := 0
+	for i := range cfg.ScrapeConfigs {
+		n += len(cfg.ScrapeConfigs[i].FileSDConfigs)
+	}
+	return n
+}
+
+// getFileSDScrapeWork returns `file_sd_configs` ScrapeWork from cfg.
+func (cfg *Config) getFileSDScrapeWork(prev []ScrapeWork) ([]ScrapeWork, error) {
+	var sws []ScrapeWork
+	for i := range cfg.ScrapeConfigs {
+		var err error
+		sws, err = cfg.ScrapeConfigs[i].appendFileSDScrapeWork(sws, prev, cfg.baseDir)
+		if err != nil {
+			return nil, fmt.Errorf("error when parsing `scrape_config` #%d: %s", i+1, err)
+		}
+	}
+	return sws, nil
+}
+
+// getStaticScrapeWork returns `static_configs` ScrapeWork from cfg.
+func (cfg *Config) getStaticScrapeWork() ([]ScrapeWork, error) {
+	var sws []ScrapeWork
+	for i := range cfg.ScrapeConfigs {
+		var err error
+		sws, err = cfg.ScrapeConfigs[i].appendStaticScrapeWork(sws)
+		if err != nil {
+			return nil, fmt.Errorf("error when parsing `scrape_config` #%d: %s", i+1, err)
+		}
+	}
+	return sws, nil
+}
+
+func (sc *ScrapeConfig) appendFileSDScrapeWork(dst, prev []ScrapeWork, baseDir string) ([]ScrapeWork, error) {
+	if len(sc.FileSDConfigs) == 0 {
+		// Fast path - no `file_sd_configs`
+		return dst, nil
+	}
+	// Create a map for the previous scrape work.
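+	// The map is keyed by the `__meta_filepath` label value, so targets from files
+	// that fail to re-load below can be re-used from the previous successful load.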
+ swPrev := make(map[string][]ScrapeWork) + for i := range prev { + sw := &prev[i] + label := promrelabel.GetLabelByName(sw.Labels, "__meta_filepath") + if label == nil { + logger.Panicf("BUG: missing `__meta_filepath` label") + } + swPrev[label.Value] = append(swPrev[label.Value], *sw) + } + for i := range sc.FileSDConfigs { + var err error + dst, err = sc.FileSDConfigs[i].appendScrapeWork(dst, swPrev, baseDir, sc.swc) + if err != nil { + return nil, fmt.Errorf("error when parsing `file_sd_config` #%d: %s", i+1, err) + } + } + return dst, nil +} + +func (sc *ScrapeConfig) appendStaticScrapeWork(dst []ScrapeWork) ([]ScrapeWork, error) { + for i := range sc.StaticConfigs { + var err error + dst, err = sc.StaticConfigs[i].appendScrapeWork(dst, sc.swc) + if err != nil { + return nil, fmt.Errorf("error when parsing `static_config` #%d: %s", i+1, err) + } + } + return dst, nil +} + +func getScrapeWorkConfig(sc *ScrapeConfig, baseDir string, globalCfg *GlobalConfig) (*scrapeWorkConfig, error) { + jobName := sc.JobName + if jobName == "" { + return nil, fmt.Errorf("missing `job_name` field in `scrape_config`") + } + scrapeInterval := sc.ScrapeInterval + if scrapeInterval <= 0 { + scrapeInterval = globalCfg.ScrapeInterval + if scrapeInterval <= 0 { + scrapeInterval = defaultScrapeInterval + } + } + scrapeTimeout := sc.ScrapeTimeout + if scrapeTimeout <= 0 { + scrapeTimeout = globalCfg.ScrapeTimeout + if scrapeTimeout <= 0 { + scrapeTimeout = defaultScrapeTimeout + } + } + honorLabels := sc.HonorLabels + honorTimestamps := sc.HonorTimestamps + metricsPath := sc.MetricsPath + if metricsPath == "" { + metricsPath = "/metrics" + } + scheme := sc.Scheme + if scheme == "" { + scheme = "http" + } + if scheme != "http" && scheme != "https" { + return nil, fmt.Errorf("unexpected `scheme` for `job_name` %q: %q; supported values: http or https", jobName, scheme) + } + params := sc.Params + var authorization string + if sc.BasicAuth != nil { + if sc.BasicAuth.Username == "" { + return nil, fmt.Errorf("missing `username` in `basic_auth` section for `job_name` %q", jobName) + } + username := sc.BasicAuth.Username + password := sc.BasicAuth.Password + if sc.BasicAuth.PasswordFile != "" { + if sc.BasicAuth.Password != "" { + return nil, fmt.Errorf("both `password`=%q and `password_file`=%q are set in `basic_auth` section for `job_name` %q", + sc.BasicAuth.Password, sc.BasicAuth.PasswordFile, jobName) + } + path := getFilepath(baseDir, sc.BasicAuth.PasswordFile) + pass, err := readPasswordFromFile(path) + if err != nil { + return nil, fmt.Errorf("cannot read password from `password_file`=%q set in `basic_auth` section for `job_name` %q: %s", + sc.BasicAuth.PasswordFile, jobName, err) + } + password = pass + } + // See https://en.wikipedia.org/wiki/Basic_access_authentication + token := username + ":" + password + token64 := base64.StdEncoding.EncodeToString([]byte(token)) + authorization = "Basic " + token64 + } + bearerToken := sc.BearerToken + if sc.BearerTokenFile != "" { + if sc.BearerToken != "" { + return nil, fmt.Errorf("both `bearer_token`=%q and `bearer_token_file`=%q are set for `job_name` %q", sc.BearerToken, sc.BearerTokenFile, jobName) + } + path := getFilepath(baseDir, sc.BearerTokenFile) + token, err := readPasswordFromFile(path) + if err != nil { + return nil, fmt.Errorf("cannot read bearer token from `bearer_token_file`=%q for `job_name` %q: %s", sc.BearerTokenFile, jobName, err) + } + bearerToken = token + } + if bearerToken != "" { + if authorization != "" { + return nil, fmt.Errorf("cannot use 
both `basic_auth` and `bearer_token` for `job_name` %q", jobName) + } + authorization = "Bearer " + bearerToken + } + var tlsRootCA *x509.CertPool + var tlsCertificate *tls.Certificate + tlsServerName := "" + tlsInsecureSkipVerify := false + if sc.TLSConfig != nil { + tlsServerName = sc.TLSConfig.ServerName + tlsInsecureSkipVerify = sc.TLSConfig.InsecureSkipVerify + if sc.TLSConfig.CertFile != "" || sc.TLSConfig.KeyFile != "" { + certPath := getFilepath(baseDir, sc.TLSConfig.CertFile) + keyPath := getFilepath(baseDir, sc.TLSConfig.KeyFile) + cert, err := tls.LoadX509KeyPair(certPath, keyPath) + if err != nil { + return nil, fmt.Errorf("cannot load TLS certificate for `job_name` %q from `cert_file`=%q, `key_file`=%q: %s", + jobName, sc.TLSConfig.CertFile, sc.TLSConfig.KeyFile, err) + } + tlsCertificate = &cert + } + if sc.TLSConfig.CAFile != "" { + path := getFilepath(baseDir, sc.TLSConfig.CAFile) + data, err := ioutil.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("cannot read `ca_file` %q for `job_name` %q: %s", sc.TLSConfig.CAFile, jobName, err) + } + tlsRootCA = x509.NewCertPool() + if !tlsRootCA.AppendCertsFromPEM(data) { + return nil, fmt.Errorf("cannot parse data from `ca_file` %q for `job_name` %q", sc.TLSConfig.CAFile, jobName) + } + } + } + var err error + var relabelConfigs []promrelabel.ParsedRelabelConfig + relabelConfigs, err = promrelabel.ParseRelabelConfigs(relabelConfigs[:0], sc.RelabelConfigs) + if err != nil { + return nil, fmt.Errorf("cannot parse `relabel_configs` for `job_name` %q: %s", jobName, err) + } + var metricRelabelConfigs []promrelabel.ParsedRelabelConfig + metricRelabelConfigs, err = promrelabel.ParseRelabelConfigs(metricRelabelConfigs[:0], sc.MetricRelabelConfigs) + if err != nil { + return nil, fmt.Errorf("cannot parse `metric_relabel_configs` for `job_name` %q: %s", jobName, err) + } + scrapeLimit := sc.ScrapeLimit + swc := &scrapeWorkConfig{ + scrapeInterval: scrapeInterval, + scrapeTimeout: scrapeTimeout, + jobName: jobName, + metricsPath: metricsPath, + scheme: scheme, + params: params, + authorization: authorization, + honorLabels: honorLabels, + honorTimestamps: honorTimestamps, + externalLabels: globalCfg.ExternalLabels, + tlsRootCA: tlsRootCA, + tlsCertificate: tlsCertificate, + tlsServerName: tlsServerName, + tlsInsecureSkipVerify: tlsInsecureSkipVerify, + relabelConfigs: relabelConfigs, + metricRelabelConfigs: metricRelabelConfigs, + scrapeLimit: scrapeLimit, + } + return swc, nil +} + +type scrapeWorkConfig struct { + scrapeInterval time.Duration + scrapeTimeout time.Duration + jobName string + metricsPath string + scheme string + params map[string][]string + authorization string + honorLabels bool + honorTimestamps bool + externalLabels map[string]string + tlsRootCA *x509.CertPool + tlsCertificate *tls.Certificate + tlsServerName string + tlsInsecureSkipVerify bool + relabelConfigs []promrelabel.ParsedRelabelConfig + metricRelabelConfigs []promrelabel.ParsedRelabelConfig + scrapeLimit int + metaLabels map[string]string +} + +func (sdc *FileSDConfig) appendScrapeWork(dst []ScrapeWork, swPrev map[string][]ScrapeWork, baseDir string, swc *scrapeWorkConfig) ([]ScrapeWork, error) { + for _, file := range sdc.Files { + pathPattern := getFilepath(baseDir, file) + paths := []string{pathPattern} + if strings.Contains(pathPattern, "*") { + var err error + paths, err = filepath.Glob(pathPattern) + if err != nil { + return nil, fmt.Errorf("invalid pattern %q in `files` section: %s", file, err) + } + } + for _, path := range paths { + stcs, err := 
loadStaticConfigs(path) + if err != nil { + // Do not return this error, since other paths may contain valid scrape configs. + if sws := swPrev[path]; sws != nil { + // Re-use the previous valid scrape work for this path. + logger.Errorf("keeping the previously loaded `static_configs` from %q because of error when re-loading the file: %s", path, err) + dst = append(dst, sws...) + } else { + logger.Errorf("skipping loading `static_configs` from %q because of error: %s", path, err) + } + continue + } + swcCopy := *swc + pathShort := path + if strings.HasPrefix(pathShort, baseDir) { + pathShort = path[len(baseDir):] + if len(pathShort) > 0 && pathShort[0] == filepath.Separator { + pathShort = pathShort[1:] + } + } + swcCopy.metaLabels = map[string]string{ + "__meta_filepath": pathShort, + } + for i := range stcs { + dst, err = stcs[i].appendScrapeWork(dst, &swcCopy) + if err != nil { + // Do not return this error, since other paths may contain valid scrape configs. + logger.Errorf("error when parsing `static_config` #%d from %q: %s", i+1, path, err) + continue + } + } + } + } + return dst, nil +} + +func (stc *StaticConfig) appendScrapeWork(dst []ScrapeWork, swc *scrapeWorkConfig) ([]ScrapeWork, error) { + for _, target := range stc.Targets { + if target == "" { + return nil, fmt.Errorf("`static_configs` target for `job_name` %q cannot be empty", swc.jobName) + } + target = addMissingPort(swc.scheme, target) + labels, err := mergeLabels(swc.jobName, swc.scheme, target, swc.metricsPath, stc.Labels, swc.externalLabels, swc.metaLabels, swc.params) + if err != nil { + return nil, fmt.Errorf("cannot merge labels for `static_configs` target for `job_name` %q: %s", swc.jobName, err) + } + labels = promrelabel.ApplyRelabelConfigs(labels, 0, swc.relabelConfigs, false) + if len(labels) == 0 { + // Drop target without labels. + continue + } + // See https://www.robustperception.io/life-of-a-label + schemeRelabeled := "" + if schemeLabel := promrelabel.GetLabelByName(labels, "__scheme__"); schemeLabel != nil { + schemeRelabeled = schemeLabel.Value + } + if schemeRelabeled == "" { + schemeRelabeled = "http" + } + addressLabel := promrelabel.GetLabelByName(labels, "__address__") + if addressLabel == nil || addressLabel.Name == "" { + // Drop target without scrape address. + continue + } + targetRelabeled := addMissingPort(schemeRelabeled, addressLabel.Value) + if strings.Contains(targetRelabeled, "/") { + // Drop target with '/' + continue + } + metricsPathRelabeled := "" + if metricsPathLabel := promrelabel.GetLabelByName(labels, "__metrics_path__"); metricsPathLabel != nil { + metricsPathRelabeled = metricsPathLabel.Value + } + if metricsPathRelabeled == "" { + metricsPathRelabeled = "/metrics" + } + paramsRelabeled := getParamsFromLabels(labels, swc.params) + optionalQuestion := "?" 
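+	// Omit the `?` separator if there are no query params or the metrics path already contains one.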
+	if len(paramsRelabeled) == 0 || strings.Contains(metricsPathRelabeled, "?") {
+		optionalQuestion = ""
+	}
+	paramsStr := url.Values(paramsRelabeled).Encode()
+	scrapeURL := fmt.Sprintf("%s://%s%s%s%s", schemeRelabeled, targetRelabeled, metricsPathRelabeled, optionalQuestion, paramsStr)
+	if _, err := url.Parse(scrapeURL); err != nil {
+		return nil, fmt.Errorf("invalid url %q for scheme=%q (%q), target=%q (%q), metrics_path=%q (%q) for `job_name` %q: %s",
+			scrapeURL, swc.scheme, schemeRelabeled, target, targetRelabeled, swc.metricsPath, metricsPathRelabeled, swc.jobName, err)
+	}
+	dst = append(dst, ScrapeWork{
+		ScrapeURL:             scrapeURL,
+		ScrapeInterval:        swc.scrapeInterval,
+		ScrapeTimeout:         swc.scrapeTimeout,
+		HonorLabels:           swc.honorLabels,
+		HonorTimestamps:       swc.honorTimestamps,
+		Labels:                labels,
+		Authorization:         swc.authorization,
+		TLSRootCA:             swc.tlsRootCA,
+		TLSCertificate:        swc.tlsCertificate,
+		TLSServerName:         swc.tlsServerName,
+		TLSInsecureSkipVerify: swc.tlsInsecureSkipVerify,
+		MetricRelabelConfigs:  swc.metricRelabelConfigs,
+		ScrapeLimit:           swc.scrapeLimit,
+	})
+	}
+	return dst, nil
+}
+
+func getParamsFromLabels(labels []prompbmarshal.Label, paramsOrig map[string][]string) map[string][]string {
+	// See https://www.robustperception.io/life-of-a-label
+	m := make(map[string][]string)
+	for i := range labels {
+		label := &labels[i]
+		if !strings.HasPrefix(label.Name, "__param_") {
+			continue
+		}
+		name := label.Name[len("__param_"):]
+		values := []string{label.Value}
+		if p := paramsOrig[name]; len(p) > 1 {
+			values = append(values, p[1:]...)
+		}
+		m[name] = values
+	}
+	return m
+}
+
+func mergeLabels(job, scheme, target, metricsPath string, labels, externalLabels, metaLabels map[string]string, params map[string][]string) ([]prompbmarshal.Label, error) {
+	// See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
+	m := map[string]string{
+		"job":              job,
+		"__address__":      target,
+		"__scheme__":       scheme,
+		"__metrics_path__": metricsPath,
+	}
+	for k, v := range externalLabels {
+		if vOrig, ok := m[k]; ok {
+			return nil, fmt.Errorf("external label `%q: %q` clashes with the previously set label with value %q", k, v, vOrig)
+		}
+		m[k] = v
+	}
+	for k, v := range metaLabels {
+		if vOrig, ok := m[k]; ok {
+			return nil, fmt.Errorf("meta label `%q: %q` clashes with the previously set label with value %q", k, v, vOrig)
+		}
+		m[k] = v
+	}
+	for k, v := range labels {
+		if vOrig, ok := m[k]; ok {
+			return nil, fmt.Errorf("label `%q: %q` clashes with the previously set label with value %q", k, v, vOrig)
+		}
+		m[k] = v
+	}
+	for k, args := range params {
+		if len(args) == 0 {
+			continue
+		}
+		k = "__param_" + k
+		v := args[0]
+		if vOrig, ok := m[k]; ok {
+			return nil, fmt.Errorf("param `%q: %q` clashes with the previously set label with value %q", k, v, vOrig)
+		}
+		m[k] = v
+	}
+	result := make([]prompbmarshal.Label, 0, len(m))
+	for k, v := range m {
+		result = append(result, prompbmarshal.Label{
+			Name:  k,
+			Value: v,
+		})
+	}
+	return result, nil
+}
+
+func getFilepath(baseDir, path string) string {
+	if filepath.IsAbs(path) {
+		return path
+	}
+	return filepath.Join(baseDir, path)
+}
+
+func readPasswordFromFile(path string) (string, error) {
+	data, err := ioutil.ReadFile(path)
+	if err != nil {
+		return "", err
+	}
+	pass := strings.TrimRightFunc(string(data), unicode.IsSpace)
+	return pass, nil
+}
+
+func addMissingPort(scheme, target string) string {
+	if strings.Contains(target, ":") {
+		return target
+	}
+	if scheme == "https" {
+		target += ":443"
+	} else {
+		target += ":80"
+	}
+	return target
+}
+
+const (
+	defaultScrapeInterval = time.Minute
+	defaultScrapeTimeout  = 10 * time.Second
+)
diff --git a/lib/promscrape/config_test.go b/lib/promscrape/config_test.go
new file mode 100644
index 000000000..791643064
--- /dev/null
+++ b/lib/promscrape/config_test.go
@@ -0,0 +1,1096 @@
+package promscrape
+
+import (
+	"crypto/tls"
+	"fmt"
+	"reflect"
+	"regexp"
+	"testing"
+	"time"
+
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal"
+	"github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel"
+)
+
+func TestLoadStaticConfigs(t *testing.T) {
+	scs, err := loadStaticConfigs("testdata/file_sd.json")
+	if err != nil {
+		t.Fatalf("unexpected error: %s", err)
+	}
+	if len(scs) == 0 {
+		t.Fatalf("expecting non-zero static configs")
+	}
+
+	// Try loading non-existing file
+	scs, err = loadStaticConfigs("testdata/non-existing-file")
+	if err == nil {
+		t.Fatalf("expecting non-nil error")
+	}
+	if scs != nil {
+		t.Fatalf("unexpected non-nil static configs: %#v", scs)
+	}
+
+	// Try loading invalid file
+	scs, err = loadStaticConfigs("testdata/prometheus.yml")
+	if err == nil {
+		t.Fatalf("expecting non-nil error")
+	}
+	if scs != nil {
+		t.Fatalf("unexpected non-nil static configs: %#v", scs)
+	}
+}
+
+func TestLoadConfig(t *testing.T) {
+	cfg, err := loadConfig("testdata/prometheus.yml")
+	if err != nil {
+		t.Fatalf("unexpected error: %s", err)
+	}
+	if n := cfg.fileSDConfigsCount(); n != 2 {
+		t.Fatalf("unexpected number of `file_sd_configs`; got %d; want %d; cfg:\n%#v", n, 2, cfg)
+	}
+
+	// Try loading non-existing file
+	cfg, err = loadConfig("testdata/non-existing-file")
+	if err == nil {
+		t.Fatalf("expecting non-nil error")
+	}
+	if cfg != nil {
+		t.Fatalf("unexpected non-nil config: %#v", cfg)
+	}
+
+	// Try loading invalid file
+	cfg, err = loadConfig("testdata/file_sd_1.yml")
+	if err == nil {
+		t.Fatalf("expecting non-nil error")
+	}
+	if cfg != nil {
+		t.Fatalf("unexpected non-nil config: %#v", cfg)
+	}
+}
+
+func TestGetFileSDScrapeWork(t *testing.T) {
+	data := `
+scrape_configs:
+- job_name: foo
+  file_sd_configs:
+  - files: [testdata/file_sd.json]
+`
+	var cfg Config
+	if err := cfg.parse([]byte(data), "sss"); err != nil {
+		t.Fatalf("cannot parse data: %s", err)
+	}
+	sws, err := cfg.getFileSDScrapeWork(nil)
+	if err != nil {
+		t.Fatalf("cannot obtain `file_sd_config`: %s", err)
+	}
+	if !equalStaticConfigForScrapeWorks(sws, sws) {
+		t.Fatalf("unexpected non-equal static configs;\nsws:\n%#v", sws)
+	}
+
+	// Load another static config
+	dataNew := `
+scrape_configs:
+- job_name: foo
+  file_sd_configs:
+  - files: [testdata/file_sd_1.yml]
+`
+	var cfgNew Config
+	if err := cfgNew.parse([]byte(dataNew), "sss"); err != nil {
+		t.Fatalf("cannot parse data: %s", err)
+	}
+	swsNew, err := cfgNew.getFileSDScrapeWork(sws)
+	if err != nil {
+		t.Fatalf("cannot obtain `file_sd_config`: %s", err)
+	}
+	if equalStaticConfigForScrapeWorks(swsNew, sws) {
+		t.Fatalf("unexpected equal static configs;\nswsNew:\n%#v\nsws:\n%#v", swsNew, sws)
+	}
+
+	// Try loading invalid static config
+	data = `
+scrape_configs:
+- job_name: foo
+  file_sd_configs:
+  - files: [testdata/prometheus.yml]
+`
+	if err := cfg.parse([]byte(data), "sss"); err != nil {
+		t.Fatalf("cannot parse data: %s", err)
+	}
+	sws, err = cfg.getFileSDScrapeWork(swsNew)
+	if err != nil {
+		t.Fatalf("unexpected error: %s", err)
+	}
+	if len(sws) != 0 {
+		t.Fatalf("unexpected non-empty sws:\n%#v", sws)
+	}
+
+	// Empty target in static config
+	data = `
+scrape_configs:
+- job_name: foo + file_sd_configs: + - files: [testdata/empty_target_file_sd.yml] +` + if err := cfg.parse([]byte(data), "sss"); err != nil { + t.Fatalf("cannot parse data: %s", err) + } + sws, err = cfg.getFileSDScrapeWork(swsNew) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if len(sws) != 0 { + t.Fatalf("unexpected non-empty sws:\n%#v", sws) + } +} + +func getFileSDScrapeWork(data []byte, path string) ([]ScrapeWork, error) { + var cfg Config + if err := cfg.parse(data, path); err != nil { + return nil, fmt.Errorf("cannot parse data: %s", err) + } + return cfg.getFileSDScrapeWork(nil) +} + +func getStaticScrapeWork(data []byte, path string) ([]ScrapeWork, error) { + var cfg Config + if err := cfg.parse(data, path); err != nil { + return nil, fmt.Errorf("cannot parse data: %s", err) + } + return cfg.getStaticScrapeWork() +} + +func TestGetStaticScrapeWorkFailure(t *testing.T) { + f := func(data string) { + t.Helper() + sws, err := getStaticScrapeWork([]byte(data), "non-existing-file") + if err == nil { + t.Fatalf("expecting non-nil error") + } + if sws != nil { + t.Fatalf("expecting nil sws") + } + } + + // incorrect yaml + f(`foo bar baz`) + + // Missing job_name + f(` +scrape_configs: +- static_configs: + - targets: ["foo"] +`) + + // Invalid scheme + f(` +scrape_configs: +- job_name: x + scheme: asdf + static_configs: + - targets: ["foo"] +`) + + // Empty target + f(` +scrape_configs: +- job_name: x + static_configs: + - targets: ["foo", ""] +`) + + // Invalid url + f(` +scrape_configs: +- job_name: x + static_configs: + - targets: ["a b"] +`) + + // Missing username in `basic_auth` + f(` +scrape_configs: +- job_name: x + basic_auth: + password: sss + static_configs: + - targets: ["a"] +`) + + // Both password and password_file set in `basic_auth` + f(` +scrape_configs: +- job_name: x + basic_auth: + username: foobar + password: sss + password_file: sdfdf + static_configs: + - targets: ["a"] +`) + + // Invalid password_file set in `basic_auth` + f(` +scrape_configs: +- job_name: x + basic_auth: + username: foobar + password_file: /non_existing_file.pass + static_configs: + - targets: ["a"] +`) + + // Both `bearer_token` and `bearer_token_file` are set + f(` +scrape_configs: +- job_name: x + bearer_token: foo + bearer_token_file: bar + static_configs: + - targets: ["a"] +`) + + // Both `basic_auth` and `bearer_token` are set + f(` +scrape_configs: +- job_name: x + bearer_token: foo + basic_auth: + username: foo + password: bar + static_configs: + - targets: ["a"] +`) + + // Invalid `bearer_token_file` + f(` +scrape_configs: +- job_name: x + bearer_token_file: non_existing_file.bearer + static_configs: + - targets: ["a"] +`) + + // Clash of external_label with job or instance + f(` +global: + external_labels: + job: foobar +scrape_configs: +- job_name: aaa + static_configs: + - targets: ["a"] +`) + + // Clash of external_label with static_configs label + f(` +global: + external_labels: + xxx: foobar +scrape_configs: +- job_name: aaa + static_configs: + - targets: ["a"] + labels: + xxx: yyy +`) + + // Clash of param with external_labels + f(` +global: + external_labels: + __param_xxx: foobar +scrape_configs: +- job_name: aaa + params: + xxx: [abcd] + static_configs: + - targets: ["a"] +`) + + // non-existing ca_file + f(` +scrape_configs: +- job_name: aa + tls_config: + ca_file: non/extising/file + static_configs: + - targets: ["s"] +`) + + // invalid ca_file + f(` +scrape_configs: +- job_name: aa + tls_config: + ca_file: testdata/prometheus.yml + static_configs: + - 
targets: ["s"] +`) + + // non-existing cert_file + f(` +scrape_configs: +- job_name: aa + tls_config: + cert_file: non/extising/file + static_configs: + - targets: ["s"] +`) + + // non-existing key_file + f(` +scrape_configs: +- job_name: aa + tls_config: + key_file: non/extising/file + static_configs: + - targets: ["s"] +`) + + // Invalid regex in relabel_configs + f(` +scrape_configs: +- job_name: aa + relabel_configs: + - regex: "(" + source_labels: [foo] + target_label: bar + static_configs: + - targets: ["s"] +`) + + // Missing target_label for action=replace in relabel_configs + f(` +scrape_configs: +- job_name: aa + relabel_configs: + - action: replace + source_labels: [foo] + static_configs: + - targets: ["s"] +`) + + // Missing source_labels for action=keep in relabel_configs + f(` +scrape_configs: +- job_name: aa + relabel_configs: + - action: keep + static_configs: + - targets: ["s"] +`) + + // Missing source_labels for action=drop in relabel_configs + f(` +scrape_configs: +- job_name: aa + relabel_configs: + - action: drop + static_configs: + - targets: ["s"] +`) + + // Missing source_labels for action=hashmod in relabel_configs + f(` +scrape_configs: +- job_name: aa + relabel_configs: + - action: hashmod + target_label: bar + modulus: 123 + static_configs: + - targets: ["s"] +`) + + // Missing target for action=hashmod in relabel_configs + f(` +scrape_configs: +- job_name: aa + relabel_configs: + - action: hashmod + source_labels: [foo] + modulus: 123 + static_configs: + - targets: ["s"] +`) + + // Missing modulus for action=hashmod in relabel_configs + f(` +scrape_configs: +- job_name: aa + relabel_configs: + - action: hashmod + source_labels: [foo] + target_label: bar + static_configs: + - targets: ["s"] +`) + + // Invalid action in relabel_configs + f(` +scrape_configs: +- job_name: aa + relabel_configs: + - action: foobar + static_configs: + - targets: ["s"] +`) +} + +func TestGetFileSDScrapeWorkSuccess(t *testing.T) { + f := func(data string, expectedSws []ScrapeWork) { + t.Helper() + sws, err := getFileSDScrapeWork([]byte(data), "non-existing-file") + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if !reflect.DeepEqual(sws, expectedSws) { + t.Fatalf("unexpected scrapeWork; got\n%v\nwant\n%v", sws, expectedSws) + } + } + f(` +scrape_configs: +- job_name: foo + static_configs: + - targets: ["xxx"] +`, nil) + f(` +scrape_configs: +- job_name: foo + metrics_path: /abc/de + file_sd_configs: + - files: ["testdata/file_sd.json", "testdata/file_sd*.yml"] +`, []ScrapeWork{ + { + ScrapeURL: "http://host1:80/abc/de", + ScrapeInterval: defaultScrapeInterval, + ScrapeTimeout: defaultScrapeTimeout, + HonorLabels: false, + HonorTimestamps: false, + Labels: []prompbmarshal.Label{ + { + Name: "__address__", + Value: "host1:80", + }, + { + Name: "__meta_filepath", + Value: "testdata/file_sd.json", + }, + { + Name: "__metrics_path__", + Value: "/abc/de", + }, + { + Name: "__scheme__", + Value: "http", + }, + { + Name: "job", + Value: "foo", + }, + { + Name: "qwe", + Value: "rty", + }, + }, + }, + { + ScrapeURL: "http://host2:80/abc/de", + ScrapeInterval: defaultScrapeInterval, + ScrapeTimeout: defaultScrapeTimeout, + HonorLabels: false, + HonorTimestamps: false, + Labels: []prompbmarshal.Label{ + { + Name: "__address__", + Value: "host2:80", + }, + { + Name: "__meta_filepath", + Value: "testdata/file_sd.json", + }, + { + Name: "__metrics_path__", + Value: "/abc/de", + }, + { + Name: "__scheme__", + Value: "http", + }, + { + Name: "job", + Value: "foo", + }, + { + Name: 
"qwe", + Value: "rty", + }, + }, + }, + { + ScrapeURL: "http://localhost:9090/abc/de", + ScrapeInterval: defaultScrapeInterval, + ScrapeTimeout: defaultScrapeTimeout, + HonorLabels: false, + HonorTimestamps: false, + Labels: []prompbmarshal.Label{ + { + Name: "__address__", + Value: "localhost:9090", + }, + { + Name: "__meta_filepath", + Value: "testdata/file_sd_1.yml", + }, + { + Name: "__metrics_path__", + Value: "/abc/de", + }, + { + Name: "__scheme__", + Value: "http", + }, + { + Name: "job", + Value: "foo", + }, + { + Name: "yml", + Value: "test", + }, + }, + }, + }) +} + +func TestGetStaticScrapeWorkSuccess(t *testing.T) { + f := func(data string, expectedSws []ScrapeWork) { + t.Helper() + sws, err := getStaticScrapeWork([]byte(data), "non-exsiting-file") + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + if !reflect.DeepEqual(sws, expectedSws) { + t.Fatalf("unexpected scrapeWork; got\n%v\nwant\n%v", sws, expectedSws) + } + } + f(``, nil) + f(` +scrape_configs: +- job_name: foo + static_configs: + - targets: ["foo.bar:1234"] +`, []ScrapeWork{ + { + ScrapeURL: "http://foo.bar:1234/metrics", + ScrapeInterval: defaultScrapeInterval, + ScrapeTimeout: defaultScrapeTimeout, + HonorLabels: false, + HonorTimestamps: false, + Labels: []prompbmarshal.Label{ + { + Name: "__address__", + Value: "foo.bar:1234", + }, + { + Name: "__metrics_path__", + Value: "/metrics", + }, + { + Name: "__scheme__", + Value: "http", + }, + { + Name: "job", + Value: "foo", + }, + }, + }, + }) + f(` +global: + external_labels: + datacenter: foobar + jobs: xxx +scrape_configs: +- job_name: foo + static_configs: + - targets: ["foo.bar:1234"] +`, []ScrapeWork{ + { + ScrapeURL: "http://foo.bar:1234/metrics", + ScrapeInterval: defaultScrapeInterval, + ScrapeTimeout: defaultScrapeTimeout, + HonorLabels: false, + HonorTimestamps: false, + Labels: []prompbmarshal.Label{ + { + Name: "__address__", + Value: "foo.bar:1234", + }, + { + Name: "__metrics_path__", + Value: "/metrics", + }, + { + Name: "__scheme__", + Value: "http", + }, + { + Name: "datacenter", + Value: "foobar", + }, + { + Name: "job", + Value: "foo", + }, + { + Name: "jobs", + Value: "xxx", + }, + }, + }, + }) + f(` +global: + scrape_interval: 8s + scrape_timeout: 34s +scrape_configs: +- job_name: foo + scrape_interval: 543s + scrape_timeout: 12s + metrics_path: /foo/bar + scheme: https + honor_labels: true + honor_timestamps: true + params: + p: ["x&y", "="] + xaa: + bearer_token: xyz + static_configs: + - targets: ["foo.bar", "aaa"] + labels: + x: y +- job_name: qwer + basic_auth: + username: user + password: pass + tls_config: + server_name: foobar + insecure_skip_verify: true + static_configs: + - targets: [1.2.3.4] +`, []ScrapeWork{ + { + ScrapeURL: "https://foo.bar:443/foo/bar?p=x%26y&p=%3D", + ScrapeInterval: 543 * time.Second, + ScrapeTimeout: 12 * time.Second, + HonorLabels: true, + HonorTimestamps: true, + Labels: []prompbmarshal.Label{ + { + Name: "__address__", + Value: "foo.bar:443", + }, + { + Name: "__metrics_path__", + Value: "/foo/bar", + }, + { + Name: "__param_p", + Value: "x&y", + }, + { + Name: "__scheme__", + Value: "https", + }, + { + Name: "job", + Value: "foo", + }, + { + Name: "x", + Value: "y", + }, + }, + Authorization: "Bearer xyz", + }, + { + ScrapeURL: "https://aaa:443/foo/bar?p=x%26y&p=%3D", + ScrapeInterval: 543 * time.Second, + ScrapeTimeout: 12 * time.Second, + HonorLabels: true, + HonorTimestamps: true, + Labels: []prompbmarshal.Label{ + { + Name: "__address__", + Value: "aaa:443", + }, + { + Name: 
"__metrics_path__", + Value: "/foo/bar", + }, + { + Name: "__param_p", + Value: "x&y", + }, + { + Name: "__scheme__", + Value: "https", + }, + { + Name: "job", + Value: "foo", + }, + { + Name: "x", + Value: "y", + }, + }, + Authorization: "Bearer xyz", + }, + { + ScrapeURL: "http://1.2.3.4:80/metrics", + ScrapeInterval: 8 * time.Second, + ScrapeTimeout: 34 * time.Second, + HonorLabels: false, + HonorTimestamps: false, + Labels: []prompbmarshal.Label{ + { + Name: "__address__", + Value: "1.2.3.4:80", + }, + { + Name: "__metrics_path__", + Value: "/metrics", + }, + { + Name: "__scheme__", + Value: "http", + }, + { + Name: "job", + Value: "qwer", + }, + }, + Authorization: "Basic dXNlcjpwYXNz", + TLSServerName: "foobar", + TLSInsecureSkipVerify: true, + }, + }) + f(` +scrape_configs: +- job_name: foo + relabel_configs: + - source_labels: [__scheme__, __address__] + separator: "://" + target_label: __tmp_url + - source_labels: [__tmp_url, __metrics_path__] + separator: "" + target_label: url + - action: labeldrop + regex: "job|__tmp_.+" + - action: drop + source_labels: [__address__] + regex: "drop-.*" + - action: keep + source_labels: [__param_x] + regex: keep_me + - action: labelkeep + regex: "__.*|url" + - action: labelmap + regex: "(url)" + replacement: "prefix:${1}" + - action: hashmod + modulus: 123 + source_labels: [__address__] + target_label: hash + - action: replace + source_labels: [__address__] + target_label: foobar + replacement: "" + params: + x: [keep_me] + static_configs: + - targets: ["foo.bar:1234", "drop-this-target"] +`, []ScrapeWork{ + { + ScrapeURL: "http://foo.bar:1234/metrics?x=keep_me", + ScrapeInterval: defaultScrapeInterval, + ScrapeTimeout: defaultScrapeTimeout, + Labels: []prompbmarshal.Label{ + { + Name: "__address__", + Value: "foo.bar:1234", + }, + { + Name: "__metrics_path__", + Value: "/metrics", + }, + { + Name: "__param_x", + Value: "keep_me", + }, + { + Name: "__scheme__", + Value: "http", + }, + { + Name: "hash", + Value: "82", + }, + { + Name: "prefix:url", + Value: "http://foo.bar:1234/metrics", + }, + }, + }, + }) + f(` +scrape_configs: +- job_name: foo + scheme: https + relabel_configs: + - action: replace + source_labels: [non-existing-label] + target_label: instance + replacement: fake.addr + - action: replace + source_labels: [__address__] + target_label: foobar + regex: "missing-regex" + replacement: aaabbb + - action: replace + source_labels: [__scheme__] + target_label: job + - action: replace + source_labels: [__scheme__] + target_label: __scheme__ + replacement: mailto + - target_label: __metrics_path__ + replacement: /abc.de + - target_label: __param_a + replacement: b + static_configs: + - targets: ["foo.bar:1234"] +`, []ScrapeWork{ + { + ScrapeURL: "mailto://foo.bar:1234/abc.de?a=b", + ScrapeInterval: defaultScrapeInterval, + ScrapeTimeout: defaultScrapeTimeout, + Labels: []prompbmarshal.Label{ + { + Name: "__address__", + Value: "foo.bar:1234", + }, + { + Name: "__metrics_path__", + Value: "/abc.de", + }, + { + Name: "__param_a", + Value: "b", + }, + { + Name: "__scheme__", + Value: "mailto", + }, + { + Name: "instance", + Value: "fake.addr", + }, + { + Name: "job", + Value: "https", + }, + }, + }, + }) + f(` +scrape_configs: +- job_name: foo + scheme: https + relabel_configs: + - action: keep + source_labels: [__address__] + regex: "foo\\.bar:.*" + - action: hashmod + source_labels: [job] + modulus: 4 + target_label: job + - action: labeldrop + regex: "non-matching-regex" + - action: labelkeep + regex: "job|__address__" + - action: 
labeldrop + regex: "" + static_configs: + - targets: ["foo.bar:1234", "xyz"] +`, []ScrapeWork{ + { + ScrapeURL: "http://foo.bar:1234/metrics", + ScrapeInterval: defaultScrapeInterval, + ScrapeTimeout: defaultScrapeTimeout, + Labels: []prompbmarshal.Label{ + { + Name: "__address__", + Value: "foo.bar:1234", + }, + { + Name: "job", + Value: "3", + }, + }, + }, + }) + f(` +scrape_configs: +- job_name: foo + metric_relabel_configs: + - source_labels: [foo] + target_label: abc + static_configs: + - targets: ["foo.bar:1234"] +`, []ScrapeWork{ + { + ScrapeURL: "http://foo.bar:1234/metrics", + ScrapeInterval: defaultScrapeInterval, + ScrapeTimeout: defaultScrapeTimeout, + Labels: []prompbmarshal.Label{ + { + Name: "__address__", + Value: "foo.bar:1234", + }, + { + Name: "__metrics_path__", + Value: "/metrics", + }, + { + Name: "__scheme__", + Value: "http", + }, + { + Name: "job", + Value: "foo", + }, + }, + MetricRelabelConfigs: []promrelabel.ParsedRelabelConfig{ + { + SourceLabels: []string{"foo"}, + Separator: ";", + TargetLabel: "abc", + Regex: defaultRegexForRelabelConfig, + Replacement: "$1", + Action: "replace", + }, + }, + }, + }) + f(` +scrape_configs: +- job_name: foo + basic_auth: + username: xyz + password_file: testdata/password.txt + static_configs: + - targets: ["foo.bar:1234"] +`, []ScrapeWork{ + { + ScrapeURL: "http://foo.bar:1234/metrics", + ScrapeInterval: defaultScrapeInterval, + ScrapeTimeout: defaultScrapeTimeout, + Labels: []prompbmarshal.Label{ + { + Name: "__address__", + Value: "foo.bar:1234", + }, + { + Name: "__metrics_path__", + Value: "/metrics", + }, + { + Name: "__scheme__", + Value: "http", + }, + { + Name: "job", + Value: "foo", + }, + }, + Authorization: "Basic eHl6OnNlY3JldC1wYXNz", + }, + }) + f(` +scrape_configs: +- job_name: foo + bearer_token_file: testdata/password.txt + static_configs: + - targets: ["foo.bar:1234"] +`, []ScrapeWork{ + { + ScrapeURL: "http://foo.bar:1234/metrics", + ScrapeInterval: defaultScrapeInterval, + ScrapeTimeout: defaultScrapeTimeout, + Labels: []prompbmarshal.Label{ + { + Name: "__address__", + Value: "foo.bar:1234", + }, + { + Name: "__metrics_path__", + Value: "/metrics", + }, + { + Name: "__scheme__", + Value: "http", + }, + { + Name: "job", + Value: "foo", + }, + }, + Authorization: "Bearer secret-pass", + }, + }) + snakeoilCert, err := tls.LoadX509KeyPair("testdata/ssl-cert-snakeoil.pem", "testdata/ssl-cert-snakeoil.key") + if err != nil { + t.Fatalf("cannot load snakeoil cert: %s", err) + } + f(` +scrape_configs: +- job_name: foo + tls_config: + cert_file: testdata/ssl-cert-snakeoil.pem + key_file: testdata/ssl-cert-snakeoil.key + static_configs: + - targets: ["foo.bar:1234"] +`, []ScrapeWork{ + { + ScrapeURL: "http://foo.bar:1234/metrics", + ScrapeInterval: defaultScrapeInterval, + ScrapeTimeout: defaultScrapeTimeout, + Labels: []prompbmarshal.Label{ + { + Name: "__address__", + Value: "foo.bar:1234", + }, + { + Name: "__metrics_path__", + Value: "/metrics", + }, + { + Name: "__scheme__", + Value: "http", + }, + { + Name: "job", + Value: "foo", + }, + }, + TLSCertificate: &snakeoilCert, + }, + }) + +} + +var defaultRegexForRelabelConfig = regexp.MustCompile("^(.*)$") diff --git a/lib/promscrape/scraper.go b/lib/promscrape/scraper.go new file mode 100644 index 000000000..cdfa61a02 --- /dev/null +++ b/lib/promscrape/scraper.go @@ -0,0 +1,243 @@ +package promscrape + +import ( + "flag" + "os" + "os/signal" + "sync" + "syscall" + "time" + + "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger" + 
"github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal" + "github.com/VictoriaMetrics/metrics" +) + +var ( + fileSDCheckInterval = flag.Duration("promscrape.fileSDCheckInterval", time.Minute, "Interval for checking for changes in 'file_sd_config'. "+ + "See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#file_sd_config") + promscrapeConfigFile = flag.String("promscrape.config", "", "Optional path to Prometheus config file with 'scrape_configs' section containing targets to scrape. "+ + "See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config for details") +) + +// Init initializes Prometheus scraper with config from the `-promscrape.config`. +// +// Scraped data is passed to pushData. +func Init(pushData func(wr *prompbmarshal.WriteRequest)) { + stopCh = make(chan struct{}) + scraperWG.Add(1) + go func() { + defer scraperWG.Done() + runScraper(*promscrapeConfigFile, pushData, stopCh) + }() +} + +// Stop stops Prometheus scraper. +func Stop() { + close(stopCh) + scraperWG.Wait() +} + +var ( + stopCh chan struct{} + scraperWG sync.WaitGroup +) + +func runScraper(configFile string, pushData func(wr *prompbmarshal.WriteRequest), globalStopCh <-chan struct{}) { + if configFile == "" { + // Nothing to scrape. + return + } + sighupCh := make(chan os.Signal, 1) + signal.Notify(sighupCh, syscall.SIGHUP) + + logger.Infof("reading Prometheus configs from %q", configFile) + cfg, err := loadConfig(configFile) + if err != nil { + logger.Fatalf("cannot read %q: %s", configFile, err) + } + swsStatic, err := cfg.getStaticScrapeWork() + if err != nil { + logger.Fatalf("cannot parse `static_configs` from %q: %s", configFile, err) + } + swsFileSD, err := cfg.getFileSDScrapeWork(nil) + if err != nil { + logger.Fatalf("cannot parse `file_sd_config` from %q: %s", configFile, err) + } + + mustStop := false + for !mustStop { + stopCh := make(chan struct{}) + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + runStaticScrapers(swsStatic, pushData, stopCh) + }() + wg.Add(1) + go func() { + defer wg.Done() + runFileSDScrapers(swsFileSD, cfg, pushData, stopCh) + }() + + waitForChans: + select { + case <-sighupCh: + logger.Infof("SIGHUP received; reloading Prometheus configs from %q", configFile) + cfgNew, err := loadConfig(configFile) + if err != nil { + logger.Errorf("cannot read %q: %s; continuing with the previous config", configFile, err) + goto waitForChans + } + swsStaticNew, err := cfg.getStaticScrapeWork() + if err != nil { + logger.Errorf("cannot parse `static_configs` from %q: %s; continuing with the previous config", configFile, err) + goto waitForChans + } + swsFileSDNew, err := cfg.getFileSDScrapeWork(swsFileSD) + if err != nil { + logger.Errorf("cannot parse `file_sd_config` from %q: %s; continuing with the previous config", configFile, err) + } + cfg = cfgNew + swsStatic = swsStaticNew + swsFileSD = swsFileSDNew + case <-globalStopCh: + mustStop = true + } + + logger.Infof("stopping Prometheus scrapers") + startTime := time.Now() + close(stopCh) + wg.Wait() + logger.Infof("stopped Prometheus scrapers in %.3f seconds", time.Since(startTime).Seconds()) + configReloads.Inc() + } +} + +var configReloads = metrics.NewCounter(`vm_promscrape_config_reloads_total`) + +func runStaticScrapers(sws []ScrapeWork, pushData func(wr *prompbmarshal.WriteRequest), stopCh <-chan struct{}) { + if len(sws) == 0 { + return + } + logger.Infof("starting %d scrapers for `static_config` targets", len(sws)) + staticTargets.Set(uint64(len(sws))) 
+ runScrapeWorkers(sws, pushData, stopCh) + staticTargets.Set(0) + logger.Infof("stopped all the %d scrapers for `static_config` targets", len(sws)) +} + +var staticTargets = metrics.NewCounter(`vm_promscrape_targets{type="static"}`) + +func runFileSDScrapers(sws []ScrapeWork, cfg *Config, pushData func(wr *prompbmarshal.WriteRequest), stopCh <-chan struct{}) { + if cfg.fileSDConfigsCount() == 0 { + return + } + ticker := time.NewTicker(*fileSDCheckInterval) + defer ticker.Stop() + mustStop := false + for !mustStop { + localStopCh := make(chan struct{}) + var wg sync.WaitGroup + wg.Add(1) + go func(sws []ScrapeWork) { + defer wg.Done() + logger.Infof("starting %d scrapers for `file_sd_config` targets", len(sws)) + fileSDTargets.Set(uint64(len(sws))) + runScrapeWorkers(sws, pushData, localStopCh) + fileSDTargets.Set(0) + logger.Infof("stopped all the %d scrapers for `file_sd_config` targets", len(sws)) + }(sws) + waitForChans: + select { + case <-ticker.C: + swsNew, err := cfg.getFileSDScrapeWork(sws) + if err != nil { + logger.Panicf("BUG: error when re-reading `file_sd_config` targets the second time: %s", err) + } + if equalStaticConfigForScrapeWorks(swsNew, sws) { + // Nothing changed, continue waiting for updated scrape work + goto waitForChans + } + logger.Infof("restarting scrapers for changed `file_sd_config` targets") + sws = swsNew + case <-stopCh: + mustStop = true + } + + close(localStopCh) + wg.Wait() + fileSDReloads.Inc() + } +} + +var ( + fileSDTargets = metrics.NewCounter(`vm_promscrape_targets{type="file_sd"}`) + fileSDReloads = metrics.NewCounter(`vm_promscrape_file_sd_reloads_total`) +) + +func equalStaticConfigForScrapeWorks(as, bs []ScrapeWork) bool { + if len(as) != len(bs) { + return false + } + for i := range as { + if !equalStaticConfigForScrapeWork(&as[i], &bs[i]) { + return false + } + } + return true +} + +func equalStaticConfigForScrapeWork(a, b *ScrapeWork) bool { + // `static_config` can change only ScrapeURL and Labels. So compare only them. + if a.ScrapeURL != b.ScrapeURL { + return false + } + if !equalLabels(a.Labels, b.Labels) { + return false + } + return true +} + +func equalLabels(as, bs []prompbmarshal.Label) bool { + if len(as) != len(bs) { + return false + } + for i := range as { + if !equalLabel(&as[i], &bs[i]) { + return false + } + } + return true +} + +func equalLabel(a, b *prompbmarshal.Label) bool { + if a.Name != b.Name { + return false + } + if a.Value != b.Value { + return false + } + return true +} + +// runScrapeWorkers runs sws. +// +// This function returns after closing stopCh. 
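+// Each ScrapeWork gets its own HTTP client and scrape goroutine.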
+func runScrapeWorkers(sws []ScrapeWork, pushData func(wr *prompbmarshal.WriteRequest), stopCh <-chan struct{}) { + var wg sync.WaitGroup + for i := range sws { + cfg := &sws[i] + c := newClient(cfg) + var sw scrapeWork + sw.Config = *cfg + sw.ReadData = c.ReadData + sw.PushData = pushData + wg.Add(1) + go func() { + defer wg.Done() + sw.run(stopCh) + }() + } + wg.Wait() +} diff --git a/lib/promscrape/scrapework.go b/lib/promscrape/scrapework.go new file mode 100644 index 000000000..eca22a702 --- /dev/null +++ b/lib/promscrape/scrapework.go @@ -0,0 +1,261 @@ +package promscrape + +import ( + "crypto/tls" + "crypto/x509" + "math/rand" + "time" + + "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/logger" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel" + parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/prometheus" + "github.com/VictoriaMetrics/metrics" +) + +// ScrapeWork represents a unit of work for scraping Prometheus metrics. +type ScrapeWork struct { + // Full URL (including query args) for the scrape. + ScrapeURL string + + // Interval for scraping the ScrapeURL. + ScrapeInterval time.Duration + + // Timeout for scraping the ScrapeURL. + ScrapeTimeout time.Duration + + // How to deal with conflicting labels. + // See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config + HonorLabels bool + + // How to deal with scraped timestamps. + // See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config + HonorTimestamps bool + + // Labels to add to the scraped metrics. + // + // The list contains at least the following labels according to https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + // + // * job + // * __address__ + // * __scheme__ + // * __metrics_path__ + // * __param_ + // * __meta_* + // * user-defined labels set via `relabel_configs` section in `scrape_config` + // + // See also https://prometheus.io/docs/concepts/jobs_instances/ + Labels []prompbmarshal.Label + + // Optional `Authorization` header. + // + // It may contain `Basic ....` or `Bearer ....` string. + Authorization string + + // Optional TLS config + TLSRootCA *x509.CertPool + TLSCertificate *tls.Certificate + TLSServerName string + TLSInsecureSkipVerify bool + + // Optional `metric_relabel_configs`. + MetricRelabelConfigs []promrelabel.ParsedRelabelConfig + + // The maximum number of metrics to scrape after relabeling. + ScrapeLimit int +} + +type scrapeWork struct { + // Config for the scrape. + Config ScrapeWork + + // ReadData is called for reading the data. + ReadData func(dst []byte) ([]byte, error) + + // PushData is called for pushing collected data. + PushData func(wr *prompbmarshal.WriteRequest) + + bodyBuf []byte + rows parser.Rows + tmpRow parser.Row + + writeRequest prompbmarshal.WriteRequest + labels []prompbmarshal.Label + samples []prompbmarshal.Sample +} + +func (sw *scrapeWork) run(stopCh <-chan struct{}) { + // Randomize start time for the first scrape in order to spread load + // when scraping many targets. 
+ randSleep := time.Duration(float64(sw.Config.ScrapeInterval) * rand.Float64()) + timer := time.NewTimer(randSleep) + var ticker *time.Ticker + select { + case <-stopCh: + timer.Stop() + return + case t := <-timer.C: + ticker = time.NewTicker(sw.Config.ScrapeInterval) + timestamp := t.UnixNano() / 1e6 + sw.scrapeAndLogError(timestamp) + } + defer ticker.Stop() + for { + startTime := time.Now() + select { + case <-stopCh: + return + case t := <-ticker.C: + // Adjust t if it is from the past (i.e. stale tick) + // This can be the case if the previous scrape took longer than the scrape interval. + if t.Sub(startTime) < 0 { + t = startTime + } + timestamp := t.UnixNano() / 1e6 + sw.scrapeAndLogError(timestamp) + } + } +} + +func (sw *scrapeWork) logError(s string) { + logger.ErrorfSkipframes(1, "error when scraping %q: %s", sw.Config.ScrapeURL, s) +} + +func (sw *scrapeWork) scrapeAndLogError(timestamp int64) { + if err := sw.scrapeInternal(timestamp); err != nil { + logger.Errorf("error when scraping %q: %s", sw.Config.ScrapeURL, err) + } +} + +var ( + scrapeDuration = metrics.NewHistogram("vm_promscrape_scrape_duration_seconds") + scrapeResponseSize = metrics.NewHistogram("vm_promscrape_scrape_response_size_bytes") + scrapedSamples = metrics.NewHistogram("vm_promscrape_scraped_samples") + scrapesSkippedByScrapeLimit = metrics.NewCounter("vm_promscrape_scrapes_skipped_by_scrape_limit_total") + scrapesFailed = metrics.NewCounter("vm_promscrape_scrapes_failed_total") + pushDataDuration = metrics.NewHistogram("vm_promscrape_push_data_duration_seconds") +) + +func (sw *scrapeWork) scrapeInternal(timestamp int64) error { + var err error + sw.bodyBuf, err = sw.ReadData(sw.bodyBuf[:0]) + endTimestamp := time.Now().UnixNano() / 1e6 + duration := float64(endTimestamp-timestamp) / 1e3 + scrapeDuration.Update(duration) + scrapeResponseSize.Update(float64(len(sw.bodyBuf))) + up := 1 + if err != nil { + up = 0 + scrapesFailed.Inc() + } else { + bodyString := bytesutil.ToUnsafeString(sw.bodyBuf) + sw.rows.UnmarshalWithErrLogger(bodyString, sw.logError) + } + srcRows := sw.rows.Rows + samplesScraped := len(srcRows) + scrapedSamples.Update(float64(samplesScraped)) + for i := range srcRows { + sw.addRowToTimeseries(&srcRows[i], timestamp) + } + sw.rows.Reset() + if sw.Config.ScrapeLimit > 0 && len(sw.writeRequest.Timeseries) > sw.Config.ScrapeLimit { + prompbmarshal.ResetWriteRequest(&sw.writeRequest) + up = 0 + scrapesSkippedByScrapeLimit.Inc() + } + samplesPostRelabeling := len(sw.writeRequest.Timeseries) + sw.addAutoTimeseries("up", float64(up), timestamp) + sw.addAutoTimeseries("scrape_duration_seconds", duration, timestamp) + sw.addAutoTimeseries("scrape_samples_scraped", float64(samplesScraped), timestamp) + sw.addAutoTimeseries("scrape_samples_post_metric_relabeling", float64(samplesPostRelabeling), timestamp) + startTime := time.Now() + sw.PushData(&sw.writeRequest) + pushDataDuration.UpdateDuration(startTime) + prompbmarshal.ResetWriteRequest(&sw.writeRequest) + sw.labels = sw.labels[:0] + sw.samples = sw.samples[:0] + tsmGlobal.Update(&sw.Config, up == 1, timestamp, int64(duration*1000), err) + return err +} + +// addAutoTimeseries adds automatically generated time series with the given name, value and timestamp. 
+// +// See https://prometheus.io/docs/concepts/jobs_instances/#automatically-generated-labels-and-time-series +func (sw *scrapeWork) addAutoTimeseries(name string, value float64, timestamp int64) { + sw.tmpRow.Metric = name + sw.tmpRow.Tags = nil + sw.tmpRow.Value = value + sw.tmpRow.Timestamp = timestamp + sw.addRowToTimeseries(&sw.tmpRow, timestamp) +} + +func (sw *scrapeWork) addRowToTimeseries(r *parser.Row, timestamp int64) { + labelsLen := len(sw.labels) + sw.labels = appendLabels(sw.labels, r.Metric, r.Tags, sw.Config.Labels, sw.Config.HonorLabels) + sw.labels = promrelabel.ApplyRelabelConfigs(sw.labels, labelsLen, sw.Config.MetricRelabelConfigs, true) + if len(sw.labels) == labelsLen { + // Skip row without labels. + return + } + labels := sw.labels[labelsLen:] + sw.samples = append(sw.samples, prompbmarshal.Sample{}) + sample := &sw.samples[len(sw.samples)-1] + sample.Value = r.Value + sample.Timestamp = r.Timestamp + if !sw.Config.HonorTimestamps || sample.Timestamp == 0 { + sample.Timestamp = timestamp + } + wr := &sw.writeRequest + wr.Timeseries = append(wr.Timeseries, prompbmarshal.TimeSeries{}) + ts := &wr.Timeseries[len(wr.Timeseries)-1] + ts.Labels = labels + ts.Samples = sw.samples[len(sw.samples)-1:] +} + +func appendLabels(dst []prompbmarshal.Label, metric string, src []parser.Tag, extraLabels []prompbmarshal.Label, honorLabels bool) []prompbmarshal.Label { + dstLen := len(dst) + dst = append(dst, prompbmarshal.Label{ + Name: "__name__", + Value: metric, + }) + for i := range src { + tag := &src[i] + dst = append(dst, prompbmarshal.Label{ + Name: tag.Key, + Value: tag.Value, + }) + } + dst = append(dst, extraLabels...) + labels := dst[dstLen:] + if len(labels) <= 1 { + // Fast path - only a single label. + return dst + } + + // de-duplicate labels + dstLabels := labels[:0] + for i := range labels { + label := &labels[i] + prevLabel := promrelabel.GetLabelByName(dstLabels, label.Name) + if prevLabel == nil { + dstLabels = append(dstLabels, *label) + continue + } + if honorLabels { + // Skip the extra label with the same name. + continue + } + // Rename the prevLabel to "exported_" + label.Name. + // See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config + exportedName := "exported_" + label.Name + if promrelabel.GetLabelByName(dstLabels, exportedName) != nil { + // Override duplicate with the current label. 
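+				// An `exported_`-prefixed copy already exists, so override the previous label
+				// instead of renaming it again.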
+ *prevLabel = *label + continue + } + prevLabel.Name = exportedName + dstLabels = append(dstLabels, *label) + } + return dst[:dstLen+len(dstLabels)] +} diff --git a/lib/promscrape/scrapework_test.go b/lib/promscrape/scrapework_test.go new file mode 100644 index 000000000..3788ea2e7 --- /dev/null +++ b/lib/promscrape/scrapework_test.go @@ -0,0 +1,348 @@ +package promscrape + +import ( + "fmt" + "regexp" + "strings" + "testing" + + "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel" + parser "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/prometheus" +) + +func TestScrapeWorkScrapeInternalFailure(t *testing.T) { + dataExpected := ` + up 0 123 + scrape_samples_scraped 0 123 + scrape_duration_seconds 0 123 + scrape_samples_post_metric_relabeling 0 123 +` + timeseriesExpected := parseData(dataExpected) + + var sw scrapeWork + + readDataCalls := 0 + sw.ReadData = func(dst []byte) ([]byte, error) { + readDataCalls++ + return dst, fmt.Errorf("error when reading data") + } + + pushDataCalls := 0 + var pushDataErr error + sw.PushData = func(wr *prompbmarshal.WriteRequest) { + if err := expectEqualTimeseries(wr.Timeseries, timeseriesExpected); err != nil { + pushDataErr = fmt.Errorf("unexpected data pushed: %s\ngot\n%#v\nwant\n%#v", err, wr.Timeseries, timeseriesExpected) + } + pushDataCalls++ + } + + timestamp := int64(123) + if err := sw.scrapeInternal(timestamp); err == nil { + t.Fatalf("expecting non-nil error") + } + if pushDataErr != nil { + t.Fatalf("unexpected error: %s", pushDataErr) + } + if readDataCalls != 1 { + t.Fatalf("unexpected number of readData calls; got %d; want %d", readDataCalls, 1) + } + if pushDataCalls != 1 { + t.Fatalf("unexpected number of pushData calls; got %d; want %d", pushDataCalls, 1) + } +} + +func TestScrapeWorkScrapeInternalSuccess(t *testing.T) { + f := func(data string, cfg *ScrapeWork, dataExpected string) { + t.Helper() + + timeseriesExpected := parseData(dataExpected) + + var sw scrapeWork + sw.Config = *cfg + + readDataCalls := 0 + sw.ReadData = func(dst []byte) ([]byte, error) { + readDataCalls++ + dst = append(dst, data...) 
+ return dst, nil + } + + pushDataCalls := 0 + var pushDataErr error + sw.PushData = func(wr *prompbmarshal.WriteRequest) { + if err := expectEqualTimeseries(wr.Timeseries, timeseriesExpected); err != nil { + pushDataErr = fmt.Errorf("unexpected data pushed: %s\ngot\n%#v\nwant\n%#v", err, wr.Timeseries, timeseriesExpected) + } + pushDataCalls++ + } + + timestamp := int64(123) + if err := sw.scrapeInternal(timestamp); err != nil { + t.Fatalf("unexpected error: %s", err) + } + if pushDataErr != nil { + t.Fatalf("unexpected error: %s", pushDataErr) + } + if readDataCalls != 1 { + t.Fatalf("unexpected number of readData calls; got %d; want %d", readDataCalls, 1) + } + if pushDataCalls != 1 { + t.Fatalf("unexpected number of pushData calls; got %d; want %d", pushDataCalls, 1) + } + } + + f(``, &ScrapeWork{}, ` + up 1 123 + scrape_samples_scraped 0 123 + scrape_duration_seconds 0 123 + scrape_samples_post_metric_relabeling 0 123 + `) + f(` + foo{bar="baz"} 34.45 3 + abc -2 + `, &ScrapeWork{}, ` + foo{bar="baz"} 34.45 123 + abc -2 123 + up 1 123 + scrape_samples_scraped 2 123 + scrape_duration_seconds 0 123 + scrape_samples_post_metric_relabeling 2 123 + `) + f(` + foo{bar="baz"} 34.45 3 + abc -2 + `, &ScrapeWork{ + HonorTimestamps: true, + Labels: []prompbmarshal.Label{ + { + Name: "foo", + Value: "x", + }, + }, + }, ` + foo{bar="baz",foo="x"} 34.45 3 + abc{foo="x"} -2 123 + up{foo="x"} 1 123 + scrape_samples_scraped{foo="x"} 2 123 + scrape_duration_seconds{foo="x"} 0 123 + scrape_samples_post_metric_relabeling{foo="x"} 2 123 + `) + f(` + foo{job="orig",bar="baz"} 34.45 + bar{y="2",job="aa",a="b",job="bb",x="1"} -3e4 2345 + `, &ScrapeWork{ + HonorLabels: false, + Labels: []prompbmarshal.Label{ + { + Name: "job", + Value: "override", + }, + }, + }, ` + foo{exported_job="orig",job="override",bar="baz"} 34.45 123 + bar{exported_job="aa",job="override",x="1",a="b",y="2"} -3e4 123 + up{job="override"} 1 123 + scrape_samples_scraped{job="override"} 2 123 + scrape_duration_seconds{job="override"} 0 123 + scrape_samples_post_metric_relabeling{job="override"} 2 123 + `) + f(` + foo{job="orig",bar="baz"} 34.45 + bar{job="aa",a="b",job="bb"} -3e4 2345 + `, &ScrapeWork{ + HonorLabels: true, + Labels: []prompbmarshal.Label{ + { + Name: "job", + Value: "override", + }, + }, + }, ` + foo{job="orig",bar="baz"} 34.45 123 + bar{job="aa",a="b"} -3e4 123 + up{job="override"} 1 123 + scrape_samples_scraped{job="override"} 2 123 + scrape_duration_seconds{job="override"} 0 123 + scrape_samples_post_metric_relabeling{job="override"} 2 123 + `) + f(` + foo{bar="baz"} 34.44 + bar{a="b",c="d"} -3e4 + `, &ScrapeWork{ + HonorLabels: true, + Labels: []prompbmarshal.Label{ + { + Name: "job", + Value: "xx", + }, + { + Name: "__address__", + Value: "foo.com", + }, + }, + MetricRelabelConfigs: []promrelabel.ParsedRelabelConfig{ + { + SourceLabels: []string{"__address__", "job"}, + Separator: "/", + TargetLabel: "instance", + Regex: defaultRegexForRelabelConfig, + Replacement: "$1", + Action: "replace", + }, + { + Action: "labeldrop", + Regex: regexp.MustCompile("^c$"), + }, + }, + }, ` + foo{bar="baz",job="xx",instance="foo.com/xx"} 34.44 123 + bar{a="b",job="xx",instance="foo.com/xx"} -3e4 123 + up{job="xx",instance="foo.com/xx"} 1 123 + scrape_samples_scraped{job="xx",instance="foo.com/xx"} 2 123 + scrape_duration_seconds{job="xx",instance="foo.com/xx"} 0 123 + scrape_samples_post_metric_relabeling{job="xx",instance="foo.com/xx"} 2 123 + `) + f(` + foo{bar="baz"} 34.44 + bar{a="b",c="d"} -3e4 + dropme{foo="bar"} 334 + 
dropme{xxx="yy",ss="dsf"} 843 + `, &ScrapeWork{ + HonorLabels: true, + Labels: []prompbmarshal.Label{ + { + Name: "job", + Value: "xx", + }, + { + Name: "__address__", + Value: "foo.com", + }, + }, + MetricRelabelConfigs: []promrelabel.ParsedRelabelConfig{ + { + Action: "drop", + SourceLabels: []string{"a", "c"}, + Regex: regexp.MustCompile("^bd$"), + }, + { + Action: "drop", + SourceLabels: []string{"__name__"}, + Regex: regexp.MustCompile("^(dropme|up)$"), + }, + }, + }, ` + foo{bar="baz",job="xx",instance="foo.com"} 34.44 123 + scrape_samples_scraped{job="xx",instance="foo.com"} 4 123 + scrape_duration_seconds{job="xx",instance="foo.com"} 0 123 + scrape_samples_post_metric_relabeling{job="xx",instance="foo.com"} 1 123 + `) + f(` + foo{bar="baz"} 34.44 + bar{a="b",c="d"} -3e4 + `, &ScrapeWork{ + HonorLabels: true, + ScrapeLimit: 1, + }, ` + up 0 123 + scrape_samples_scraped 2 123 + scrape_duration_seconds 0 123 + scrape_samples_post_metric_relabeling 0 123 + `) +} + +func parseData(data string) []prompbmarshal.TimeSeries { + var rows parser.Rows + errLogger := func(s string) { + panic(fmt.Errorf("unexpected error when unmarshaling Prometheus rows: %s", s)) + } + rows.UnmarshalWithErrLogger(data, errLogger) + var tss []prompbmarshal.TimeSeries + for _, r := range rows.Rows { + labels := []prompbmarshal.Label{ + { + Name: "__name__", + Value: r.Metric, + }, + } + for _, tag := range r.Tags { + labels = append(labels, prompbmarshal.Label{ + Name: tag.Key, + Value: tag.Value, + }) + } + var ts prompbmarshal.TimeSeries + ts.Labels = labels + ts.Samples = []prompbmarshal.Sample{ + { + Value: r.Value, + Timestamp: r.Timestamp, + }, + } + tss = append(tss, ts) + } + return tss +} + +func expectEqualTimeseries(tss, tssExpected []prompbmarshal.TimeSeries) error { + m, err := timeseriesToMap(tss) + if err != nil { + return fmt.Errorf("invalid generated timeseries: %s", err) + } + mExpected, err := timeseriesToMap(tssExpected) + if err != nil { + return fmt.Errorf("invalid expected timeseries: %s", err) + } + if len(m) != len(mExpected) { + return fmt.Errorf("unexpected time series len; got %d; want %d", len(m), len(mExpected)) + } + for k, tsExpected := range mExpected { + ts := m[k] + if ts != tsExpected { + return fmt.Errorf("unexpected timeseries %q; got\n%s\nwant\n%s", k, ts, tsExpected) + } + } + return nil +} + +func timeseriesToMap(tss []prompbmarshal.TimeSeries) (map[string]string, error) { + m := make(map[string]string, len(tss)) + for i := range tss { + ts := &tss[i] + if len(ts.Labels) == 0 { + return nil, fmt.Errorf("unexpected empty labels for timeseries #%d; timeseries: %#v", i, ts) + } + if len(ts.Samples) != 1 { + return nil, fmt.Errorf("unexpected number of samples for timeseries #%d; got %d; want %d", i, len(ts.Samples), 1) + } + if ts.Labels[0].Name != "__name__" { + return nil, fmt.Errorf("unexpected first name for timeseries #%d; got %q; want %q", i, ts.Labels[0].Name, "__name__") + } + if ts.Labels[0].Value == "scrape_duration_seconds" { + // Reset scrape_duration_seconds value to 0, since it is non-deterministic + ts.Samples[0].Value = 0 + } + m[ts.Labels[0].Value] = timeseriesToString(ts) + } + return m, nil +} + +func timeseriesToString(ts *prompbmarshal.TimeSeries) string { + promrelabel.SortLabels(ts.Labels) + var sb strings.Builder + fmt.Fprintf(&sb, "{") + for i, label := range ts.Labels { + fmt.Fprintf(&sb, "%s=%q", label.Name, label.Value) + if i+1 < len(ts.Labels) { + fmt.Fprintf(&sb, ",") + } + } + fmt.Fprintf(&sb, "} ") + if len(ts.Samples) != 1 { + 
panic(fmt.Errorf("expecting a single sample; got %d samples", len(ts.Samples))) + } + s := ts.Samples[0] + fmt.Fprintf(&sb, "%g %d", s.Value, s.Timestamp) + return sb.String() +} diff --git a/lib/promscrape/scrapework_timing_test.go b/lib/promscrape/scrapework_timing_test.go new file mode 100644 index 000000000..f8d76eab6 --- /dev/null +++ b/lib/promscrape/scrapework_timing_test.go @@ -0,0 +1,50 @@ +package promscrape + +import ( + "fmt" + "testing" + + "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompbmarshal" +) + +func BenchmarkScrapeWorkScrapeInternal(b *testing.B) { + data := ` +vm_tcplistener_accepts_total{name="http", addr=":80"} 1443 +vm_tcplistener_accepts_total{name="https", addr=":443"} 12801 +vm_tcplistener_conns{name="http", addr=":80"} 0 +vm_tcplistener_conns{name="https", addr=":443"} 2 +vm_tcplistener_errors_total{name="http", addr=":80", type="accept"} 0 +vm_tcplistener_errors_total{name="http", addr=":80", type="close"} 0 +vm_tcplistener_errors_total{name="http", addr=":80", type="read"} 97 +vm_tcplistener_errors_total{name="http", addr=":80", type="write"} 2 +vm_tcplistener_errors_total{name="https", addr=":443", type="accept"} 0 +vm_tcplistener_errors_total{name="https", addr=":443", type="close"} 0 +vm_tcplistener_errors_total{name="https", addr=":443", type="read"} 243 +vm_tcplistener_errors_total{name="https", addr=":443", type="write"} 285 +vm_tcplistener_read_bytes_total{name="http", addr=":80"} 879339 +vm_tcplistener_read_bytes_total{name="https", addr=":443"} 19453340 +vm_tcplistener_read_calls_total{name="http", addr=":80"} 7780 +vm_tcplistener_read_calls_total{name="https", addr=":443"} 70323 +vm_tcplistener_read_timeouts_total{name="http", addr=":80"} 673 +vm_tcplistener_read_timeouts_total{name="https", addr=":443"} 12353 +vm_tcplistener_write_calls_total{name="http", addr=":80"} 3996 +vm_tcplistener_write_calls_total{name="https", addr=":443"} 132356 +` + readDataFunc := func(dst []byte) ([]byte, error) { + return append(dst, data...), nil + } + b.ReportAllocs() + b.SetBytes(int64(len(data))) + b.RunParallel(func(pb *testing.PB) { + var sw scrapeWork + sw.ReadData = readDataFunc + sw.PushData = func(wr *prompbmarshal.WriteRequest) {} + timestamp := int64(0) + for pb.Next() { + if err := sw.scrapeInternal(timestamp); err != nil { + panic(fmt.Errorf("unexpected error: %s", err)) + } + timestamp++ + } + }) +} diff --git a/lib/promscrape/statconn.go b/lib/promscrape/statconn.go new file mode 100644 index 000000000..7909899c0 --- /dev/null +++ b/lib/promscrape/statconn.go @@ -0,0 +1,71 @@ +package promscrape + +import ( + "net" + "sync/atomic" + + "github.com/VictoriaMetrics/metrics" + "github.com/valyala/fasthttp" +) + +func statDial(addr string) (net.Conn, error) { + conn, err := fasthttp.Dial(addr) + dialsTotal.Inc() + if err != nil { + dialErrors.Inc() + return nil, err + } + conns.Inc() + sc := &statConn{ + Conn: conn, + } + return sc, nil +} + +var ( + dialsTotal = metrics.NewCounter(`vm_promscrape_dials_total`) + dialErrors = metrics.NewCounter(`vm_promscrape_dial_errors_total`) + conns = metrics.NewCounter(`vm_promscrape_conns`) +) + +type statConn struct { + closed uint64 + net.Conn +} + +func (sc *statConn) Read(p []byte) (int, error) { + n, err := sc.Conn.Read(p) + connReadsTotal.Inc() + if err != nil { + connReadErrors.Inc() + } + connBytesRead.Add(n) + return n, err +} + +func (sc *statConn) Write(p []byte) (int, error) { + n, err := sc.Conn.Write(p) + connWritesTotal.Inc() + if err != nil { + connWriteErrors.Inc() + } + connBytesWritten.Add(n) + 
return n, err +} + +func (sc *statConn) Close() error { + err := sc.Conn.Close() + if atomic.AddUint64(&sc.closed, 1) == 1 { + conns.Dec() + } + return err +} + +var ( + connReadsTotal = metrics.NewCounter(`vm_promscrape_conn_reads_total`) + connWritesTotal = metrics.NewCounter(`vm_promscrape_conn_writes_total`) + connReadErrors = metrics.NewCounter(`vm_promscrape_conn_read_errors_total`) + connWriteErrors = metrics.NewCounter(`vm_promscrape_conn_write_errors_total`) + connBytesRead = metrics.NewCounter(`vm_promscrape_conn_bytes_read_total`) + connBytesWritten = metrics.NewCounter(`vm_promscrape_conn_bytes_written_total`) +) diff --git a/lib/promscrape/targetstatus.go b/lib/promscrape/targetstatus.go new file mode 100644 index 000000000..74e382633 --- /dev/null +++ b/lib/promscrape/targetstatus.go @@ -0,0 +1,127 @@ +package promscrape + +import ( + "fmt" + "io" + "sort" + "strings" + "sync" + "time" + + "github.com/VictoriaMetrics/VictoriaMetrics/lib/promrelabel" +) + +var tsmGlobal = newTargetStatusMap() + +// WriteHumanReadableTargetsStatus writes human-readable status for all the scrape targets to w. +func WriteHumanReadableTargetsStatus(w io.Writer) { + tsmGlobal.WriteHumanReadable(w) +} + +type targetStatusMap struct { + mu sync.Mutex + m map[string]targetStatus +} + +func newTargetStatusMap() *targetStatusMap { + return &targetStatusMap{ + m: make(map[string]targetStatus), + } +} + +func (tsm *targetStatusMap) Reset() { + tsm.mu.Lock() + tsm.m = make(map[string]targetStatus) + tsm.mu.Unlock() +} + +func (tsm *targetStatusMap) Update(sw *ScrapeWork, up bool, scrapeTime, scrapeDuration int64, err error) { + tsm.mu.Lock() + tsm.m[sw.ScrapeURL] = targetStatus{ + sw: sw, + up: up, + scrapeTime: scrapeTime, + scrapeDuration: scrapeDuration, + err: err, + } + tsm.mu.Unlock() +} + +func (tsm *targetStatusMap) WriteHumanReadable(w io.Writer) { + byJob := make(map[string][]targetStatus) + tsm.mu.Lock() + for k, st := range tsm.m { + if st.getDurationFromLastScrape() > 10*st.sw.ScrapeInterval { + // Remove obsolete targets + delete(tsm.m, k) + continue + } + job := "" + label := promrelabel.GetLabelByName(st.sw.Labels, "job") + if label != nil { + job = label.Value + } + byJob[job] = append(byJob[job], st) + } + tsm.mu.Unlock() + + var jss []jobStatus + for job, statuses := range byJob { + jss = append(jss, jobStatus{ + job: job, + statuses: statuses, + }) + } + sort.Slice(jss, func(i, j int) bool { + return jss[i].job < jss[j].job + }) + + for _, js := range jss { + sts := js.statuses + sort.Slice(sts, func(i, j int) bool { + return sts[i].sw.ScrapeURL < sts[j].sw.ScrapeURL + }) + ups := 0 + for _, st := range sts { + if st.up { + ups++ + } + } + fmt.Fprintf(w, "job=%q (%d/%d up)\n", js.job, ups, len(sts)) + for _, st := range sts { + state := "up" + if !st.up { + state = "down" + } + var labels []string + for _, label := range promrelabel.FinalizeLabels(nil, st.sw.Labels) { + labels = append(labels, fmt.Sprintf("%s=%q", label.Name, label.Value)) + } + labelsStr := "{" + strings.Join(labels, ", ") + "}" + lastScrape := st.getDurationFromLastScrape() + errMsg := "" + if st.err != nil { + errMsg = st.err.Error() + } + fmt.Fprintf(w, "\tstate=%s, endpoint=%s, labels=%s, last_scrape=%.3fs ago, scrape_duration=%.3fs, error=%q\n", + state, st.sw.ScrapeURL, labelsStr, lastScrape.Seconds(), float64(st.scrapeDuration)/1000, errMsg) + } + } +} + +type jobStatus struct { + job string + statuses []targetStatus +} + +type targetStatus struct { + sw *ScrapeWork + up bool + scrapeTime int64 + scrapeDuration 
int64 + err error +} + +func (st *targetStatus) getDurationFromLastScrape() time.Duration { + return time.Since(time.Unix(st.scrapeTime/1000, (st.scrapeTime%1000)*1e6)) +} diff --git a/lib/promscrape/testdata/empty_target_file_sd.yml b/lib/promscrape/testdata/empty_target_file_sd.yml new file mode 100644 index 000000000..7dbbf189a --- /dev/null +++ b/lib/promscrape/testdata/empty_target_file_sd.yml @@ -0,0 +1 @@ +- targets: ["foo", ""] diff --git a/lib/promscrape/testdata/file_sd.json b/lib/promscrape/testdata/file_sd.json new file mode 100644 index 000000000..cfc35b46a --- /dev/null +++ b/lib/promscrape/testdata/file_sd.json @@ -0,0 +1,8 @@ +[ + { + "targets": ["host1", "host2"], + "labels": { + "qwe": "rty" + } + } +] diff --git a/lib/promscrape/testdata/file_sd_1.yml b/lib/promscrape/testdata/file_sd_1.yml new file mode 100644 index 000000000..a2b83e249 --- /dev/null +++ b/lib/promscrape/testdata/file_sd_1.yml @@ -0,0 +1,3 @@ +- targets: [localhost:9090] + labels: + yml: test diff --git a/lib/promscrape/testdata/password.txt b/lib/promscrape/testdata/password.txt new file mode 100644 index 000000000..1b741e65c --- /dev/null +++ b/lib/promscrape/testdata/password.txt @@ -0,0 +1 @@ +secret-pass diff --git a/lib/promscrape/testdata/prometheus.yml b/lib/promscrape/testdata/prometheus.yml new file mode 100644 index 000000000..2dc99625c --- /dev/null +++ b/lib/promscrape/testdata/prometheus.yml @@ -0,0 +1,6 @@ +scrape_configs: +- job_name: foo + file_sd_configs: + - files: ["file_sd_*.yml"] + refresh_interval: 10s + - fules: ["file_sd.json"] diff --git a/lib/promscrape/testdata/ssl-cert-snakeoil.key b/lib/promscrape/testdata/ssl-cert-snakeoil.key new file mode 100644 index 000000000..00a79a3b5 --- /dev/null +++ b/lib/promscrape/testdata/ssl-cert-snakeoil.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQD4IQusAs8PJdnG +3mURt/AXtgC+ceqLOatJ49JJE1VPTkMAy+oE1f1XvkMrYsHqmDf6GWVzgVXryL4U +wq2/nJSm56ddhN55nI8oSN3dtywUB8/ShelEN73nlN77PeD9tl6NksPwWaKrqxq0 +FlabRPZSQCfmgZbhDV8Sa8mfCkFU0G0lit6kLGceCKMvmW+9Bz7ebsYmVdmVMxmf +IJStFD44lWFTdUc65WISKEdW2ELcUefb0zOLw+0PCbXFGJH5x5ktksW8+BBk2Hkg +GeQRL/qPCccthbScO0VgNj3zJ3ZZL0ObSDAbvNDG85joeNjDNq5DT/BAZ0bOSbEF +sh+f9BAzAgMBAAECggEBAJWv2cq7Jw6MVwSRxYca38xuD6TUNBopgBvjREixURW2 +sNUaLuMb9Omp7fuOaE2N5rcJ+xnjPGIxh/oeN5MQctz9gwn3zf6vY+15h97pUb4D +uGvYPRDaT8YVGS+X9NMZ4ZCmqW2lpWzKnCFoGHcy8yZLbcaxBsRdvKzwOYGoPiFb +K2QuhXZ/1UPmqK9i2DFKtj40X6vBszTNboFxOVpXrPu0FJwLVSDf2hSZ4fMM0DH3 +YqwKcYf5te+hxGKgrqRA3tn0NCWii0in6QIwXMC+kMw1ebg/tZKqyDLMNptAK8J+ +DVw9m5X1seUHS5ehU/g2jrQrtK5WYn7MrFK4lBzlRwECgYEA/d1TeANYECDWRRDk +B0aaRZs87Rwl/J9PsvbsKvtU/bX+OfSOUjOa9iQBqn0LmU8GqusEET/QVUfocVwV +Bggf/5qDLxz100Rj0ags/yE/kNr0Bb31kkkKHFMnCT06YasR7qKllwrAlPJvQv9x +IzBKq+T/Dx08Wep9bCRSFhzRCnsCgYEA+jdeZXTDr/Vz+D2B3nAw1frqYFfGnEVY +wqmoK3VXMDkGuxsloO2rN+SyiUo3JNiQNPDub/t7175GH5pmKtZOlftePANsUjBj +wZ1D0rI5Bxu/71ibIUYIRVmXsTEQkh/ozoh3jXCZ9+bLgYiYx7789IUZZSokFQ3D +FICUT9KJ36kCgYAGoq9Y1rWJjmIrYfqj2guUQC+CfxbbGIrrwZqAsRsSmpwvhZ3m +tiSZxG0quKQB+NfSxdvQW5ulbwC7Xc3K35F+i9pb8+TVBdeaFkw+yu6vaZmxQLrX +fQM/pEjD7A7HmMIaO7QaU5SfEAsqdCTP56Y8AftMuNXn/8IRfo2KuGwaWwKBgFpU +ILzJoVdlad9E/Rw7LjYhZfkv1uBVXIyxyKcfrkEXZSmozDXDdxsvcZCEfVHM6Ipk +K/+7LuMcqp4AFEAEq8wTOdq6daFaHLkpt/FZK6M4TlruhtpFOPkoNc3e45eM83OT +6mziKINJC1CQ6m65sQHpBtjxlKMRG8rL/D6wx9s5AoGBAMRlqNPMwglT3hvDmsAt +9Lf9pdmhERUlHhD8bj8mDaBj2Aqv7f6VRJaYZqP403pKKQexuqcn80mtjkSAPFkN +Cj7BVt/RXm5uoxDTnfi26RF9F6yNDEJ7UU9+peBr99aazF/fTgW/1GcMkQnum8uV +c257YgaWmjK9uB0Y2r2VxS0G +-----END PRIVATE KEY----- diff --git 
a/lib/promscrape/testdata/ssl-cert-snakeoil.pem b/lib/promscrape/testdata/ssl-cert-snakeoil.pem new file mode 100644 index 000000000..93e77cd95 --- /dev/null +++ b/lib/promscrape/testdata/ssl-cert-snakeoil.pem @@ -0,0 +1,17 @@ +-----BEGIN CERTIFICATE----- +MIICujCCAaKgAwIBAgIJAMbXnKZ/cikUMA0GCSqGSIb3DQEBCwUAMBUxEzARBgNV +BAMTCnVidW50dS5uYW4wHhcNMTUwMjA0MDgwMTM5WhcNMjUwMjAxMDgwMTM5WjAV +MRMwEQYDVQQDEwp1YnVudHUubmFuMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEA+CELrALPDyXZxt5lEbfwF7YAvnHqizmrSePSSRNVT05DAMvqBNX9V75D +K2LB6pg3+hllc4FV68i+FMKtv5yUpuenXYTeeZyPKEjd3bcsFAfP0oXpRDe955Te ++z3g/bZejZLD8Fmiq6satBZWm0T2UkAn5oGW4Q1fEmvJnwpBVNBtJYrepCxnHgij +L5lvvQc+3m7GJlXZlTMZnyCUrRQ+OJVhU3VHOuViEihHVthC3FHn29Mzi8PtDwm1 +xRiR+ceZLZLFvPgQZNh5IBnkES/6jwnHLYW0nDtFYDY98yd2WS9Dm0gwG7zQxvOY +6HjYwzauQ0/wQGdGzkmxBbIfn/QQMwIDAQABow0wCzAJBgNVHRMEAjAAMA0GCSqG +SIb3DQEBCwUAA4IBAQBQjKm/4KN/iTgXbLTL3i7zaxYXFLXsnT1tF+ay4VA8aj98 +L3JwRTciZ3A5iy/W4VSCt3eASwOaPWHKqDBB5RTtL73LoAqsWmO3APOGQAbixcQ2 +45GXi05OKeyiYRi1Nvq7Unv9jUkRDHUYVPZVSAjCpsXzPhFkmZoTRxmx5l0ZF7Li +K91lI5h+eFq0dwZwrmlPambyh1vQUi70VHv8DNToVU29kel7YLbxGbuqETfhrcy6 +X+Mha6RYITkAn5FqsZcKMsc9eYGEF4l3XV+oS7q6xfTxktYJMFTI18J0lQ2Lv/CI +whdMnYGntDQBE/iFCrJEGNsKGc38796GBOb5j+zd +-----END CERTIFICATE----- diff --git a/app/vminsert/common/gzip_reader.go b/lib/protoparser/common/gzip_reader.go similarity index 100% rename from app/vminsert/common/gzip_reader.go rename to lib/protoparser/common/gzip_reader.go diff --git a/app/vminsert/common/lines_reader.go b/lib/protoparser/common/lines_reader.go similarity index 100% rename from app/vminsert/common/lines_reader.go rename to lib/protoparser/common/lines_reader.go diff --git a/app/vminsert/common/lines_reader_test.go b/lib/protoparser/common/lines_reader_test.go similarity index 100% rename from app/vminsert/common/lines_reader_test.go rename to lib/protoparser/common/lines_reader_test.go diff --git a/lib/protoparser/graphite/streamparser.go b/lib/protoparser/graphite/streamparser.go new file mode 100644 index 000000000..a79d5a20e --- /dev/null +++ b/lib/protoparser/graphite/streamparser.go @@ -0,0 +1,130 @@ +package graphite + +import ( + "fmt" + "io" + "net" + "runtime" + "sync" + "time" + + "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common" + "github.com/VictoriaMetrics/metrics" +) + +// ParseStream parses Graphite lines from r and calls callback for the parsed rows. +// +// The callback can be called multiple times for streamed data from r. +// +// callback shouldn't hold rows after returning. +func ParseStream(r io.Reader, callback func(rows []Row) error) error { + ctx := getStreamContext() + defer putStreamContext(ctx) + + for ctx.Read(r) { + if err := callback(ctx.Rows.Rows); err != nil { + return err + } + } + return ctx.Error() +} + +const flushTimeout = 3 * time.Second + +func (ctx *streamContext) Read(r io.Reader) bool { + readCalls.Inc() + if ctx.err != nil { + return false + } + if c, ok := r.(net.Conn); ok { + if err := c.SetReadDeadline(time.Now().Add(flushTimeout)); err != nil { + readErrors.Inc() + ctx.err = fmt.Errorf("cannot set read deadline: %s", err) + return false + } + } + ctx.reqBuf, ctx.tailBuf, ctx.err = common.ReadLinesBlock(r, ctx.reqBuf, ctx.tailBuf) + if ctx.err != nil { + if ne, ok := ctx.err.(net.Error); ok && ne.Timeout() { + // Flush the read data on timeout and try reading again. 
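+ // The complete lines read so far are parsed below, while any incomplete
+ // trailing line stays in ctx.tailBuf and is finished by the next Read call.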
+ ctx.err = nil + } else { + if ctx.err != io.EOF { + readErrors.Inc() + ctx.err = fmt.Errorf("cannot read graphite plaintext protocol data: %s", ctx.err) + } + return false + } + } + ctx.Rows.Unmarshal(bytesutil.ToUnsafeString(ctx.reqBuf)) + rowsRead.Add(len(ctx.Rows.Rows)) + + // Fill missing timestamps with the current timestamp rounded to seconds. + currentTimestamp := time.Now().Unix() + rows := ctx.Rows.Rows + for i := range rows { + r := &rows[i] + if r.Timestamp == 0 { + r.Timestamp = currentTimestamp + } + } + + // Convert timestamps from seconds to milliseconds. + for i := range rows { + rows[i].Timestamp *= 1e3 + } + + return true +} + +type streamContext struct { + Rows Rows + reqBuf []byte + tailBuf []byte + err error +} + +func (ctx *streamContext) Error() error { + if ctx.err == io.EOF { + return nil + } + return ctx.err +} + +func (ctx *streamContext) reset() { + ctx.Rows.Reset() + ctx.reqBuf = ctx.reqBuf[:0] + ctx.tailBuf = ctx.tailBuf[:0] + ctx.err = nil +} + +var ( + readCalls = metrics.NewCounter(`vm_protoparser_graphite_read_calls_total`) + readErrors = metrics.NewCounter(`vm_protoparser_graphite_read_errors_total`) + rowsRead = metrics.NewCounter(`vm_protoparser_graphite_rows_read_total`) +) + +func getStreamContext() *streamContext { + select { + case ctx := <-streamContextPoolCh: + return ctx + default: + if v := streamContextPool.Get(); v != nil { + return v.(*streamContext) + } + return &streamContext{} + } +} + +func putStreamContext(ctx *streamContext) { + ctx.reset() + select { + case streamContextPoolCh <- ctx: + default: + streamContextPool.Put(ctx) + } +} + +var streamContextPool sync.Pool +var streamContextPoolCh = make(chan *streamContext, runtime.GOMAXPROCS(-1)) diff --git a/lib/protoparser/influx/streamparser.go b/lib/protoparser/influx/streamparser.go new file mode 100644 index 000000000..eade73eac --- /dev/null +++ b/lib/protoparser/influx/streamparser.go @@ -0,0 +1,153 @@ +package influx + +import ( + "fmt" + "io" + "net/http" + "runtime" + "sync" + "time" + + "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common" + "github.com/VictoriaMetrics/metrics" +) + +// ParseStream parses req and calls callback for the parsed rows. +// +// The callback can be called multiple times for streamed data from req. +// +// callback shouldn't hold rows after returning. 
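+//
+// Timestamps are normalized to milliseconds according to the `precision`
+// query arg (ns, u, ms, s, m, h); rows with a zero timestamp are assigned
+// the current time.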
+func ParseStream(req *http.Request, callback func(db string, rows []Row) error) error { + readCalls.Inc() + r := req.Body + if req.Header.Get("Content-Encoding") == "gzip" { + zr, err := common.GetGzipReader(r) + if err != nil { + return fmt.Errorf("cannot read gzipped influx line protocol data: %s", err) + } + defer common.PutGzipReader(zr) + r = zr + } + + q := req.URL.Query() + tsMultiplier := int64(1e6) + switch q.Get("precision") { + case "ns": + tsMultiplier = 1e6 + case "u": + tsMultiplier = 1e3 + case "ms": + tsMultiplier = 1 + case "s": + tsMultiplier = -1e3 + case "m": + tsMultiplier = -1e3 * 60 + case "h": + tsMultiplier = -1e3 * 3600 + } + + // Read db tag from https://docs.influxdata.com/influxdb/v1.7/tools/api/#write-http-endpoint + db := q.Get("db") + + ctx := getStreamContext() + defer putStreamContext(ctx) + for ctx.Read(r, tsMultiplier) { + if err := callback(db, ctx.Rows.Rows); err != nil { + return err + } + } + return ctx.Error() +} + +func (ctx *streamContext) Read(r io.Reader, tsMultiplier int64) bool { + if ctx.err != nil { + return false + } + ctx.reqBuf, ctx.tailBuf, ctx.err = common.ReadLinesBlock(r, ctx.reqBuf, ctx.tailBuf) + if ctx.err != nil { + if ctx.err != io.EOF { + readErrors.Inc() + ctx.err = fmt.Errorf("cannot read influx line protocol data: %s", ctx.err) + } + return false + } + ctx.Rows.Unmarshal(bytesutil.ToUnsafeString(ctx.reqBuf)) + rowsRead.Add(len(ctx.Rows.Rows)) + + // Adjust timestamps according to tsMultiplier + currentTs := time.Now().UnixNano() / 1e6 + if tsMultiplier >= 1 { + for i := range ctx.Rows.Rows { + row := &ctx.Rows.Rows[i] + if row.Timestamp == 0 { + row.Timestamp = currentTs + } else { + row.Timestamp /= tsMultiplier + } + } + } else if tsMultiplier < 0 { + tsMultiplier = -tsMultiplier + currentTs -= currentTs % tsMultiplier + for i := range ctx.Rows.Rows { + row := &ctx.Rows.Rows[i] + if row.Timestamp == 0 { + row.Timestamp = currentTs + } else { + row.Timestamp *= tsMultiplier + } + } + } + return true +} + +var ( + readCalls = metrics.NewCounter(`vm_protoparser_influx_read_calls_total`) + readErrors = metrics.NewCounter(`vm_protoparser_influx_read_errors_total`) + rowsRead = metrics.NewCounter(`vm_protoparser_influx_rows_read_total`) +) + +type streamContext struct { + Rows Rows + reqBuf []byte + tailBuf []byte + err error +} + +func (ctx *streamContext) Error() error { + if ctx.err == io.EOF { + return nil + } + return ctx.err +} + +func (ctx *streamContext) reset() { + ctx.Rows.Reset() + ctx.reqBuf = ctx.reqBuf[:0] + ctx.tailBuf = ctx.tailBuf[:0] + ctx.err = nil +} + +func getStreamContext() *streamContext { + select { + case ctx := <-streamContextPoolCh: + return ctx + default: + if v := streamContextPool.Get(); v != nil { + return v.(*streamContext) + } + return &streamContext{} + } +} + +func putStreamContext(ctx *streamContext) { + ctx.reset() + select { + case streamContextPoolCh <- ctx: + default: + streamContextPool.Put(ctx) + } +} + +var streamContextPool sync.Pool +var streamContextPoolCh = make(chan *streamContext, runtime.GOMAXPROCS(-1)) diff --git a/lib/protoparser/opentsdb/streamparser.go b/lib/protoparser/opentsdb/streamparser.go new file mode 100644 index 000000000..0b356147f --- /dev/null +++ b/lib/protoparser/opentsdb/streamparser.go @@ -0,0 +1,128 @@ +package opentsdb + +import ( + "fmt" + "io" + "net" + "runtime" + "sync" + "time" + + "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common" + "github.com/VictoriaMetrics/metrics" +) + 
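+// The parser accepts OpenTSDB telnet-style put lines of the form
+// `put <metric> <timestamp-in-seconds> <value> <tagK>=<tagV> ...`;
+// timestamps are converted to milliseconds below.
+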
+// ParseStream parses OpenTSDB lines from r and calls callback for the parsed rows. +// +// The callback can be called multiple times for streamed data from r. +// +// callback shouldn't hold rows after returning. +func ParseStream(r io.Reader, callback func(rows []Row) error) error { + ctx := getStreamContext() + defer putStreamContext(ctx) + for ctx.Read(r) { + if err := callback(ctx.Rows.Rows); err != nil { + return err + } + } + return ctx.Error() +} + +const flushTimeout = 3 * time.Second + +func (ctx *streamContext) Read(r io.Reader) bool { + readCalls.Inc() + if ctx.err != nil { + return false + } + if c, ok := r.(net.Conn); ok { + if err := c.SetReadDeadline(time.Now().Add(flushTimeout)); err != nil { + readErrors.Inc() + ctx.err = fmt.Errorf("cannot set read deadline: %s", err) + return false + } + } + ctx.reqBuf, ctx.tailBuf, ctx.err = common.ReadLinesBlock(r, ctx.reqBuf, ctx.tailBuf) + if ctx.err != nil { + if ne, ok := ctx.err.(net.Error); ok && ne.Timeout() { + // Flush the read data on timeout and try reading again. + ctx.err = nil + } else { + if ctx.err != io.EOF { + readErrors.Inc() + ctx.err = fmt.Errorf("cannot read OpenTSDB put protocol data: %s", ctx.err) + } + return false + } + } + ctx.Rows.Unmarshal(bytesutil.ToUnsafeString(ctx.reqBuf)) + rowsRead.Add(len(ctx.Rows.Rows)) + + // Fill in missing timestamps + currentTimestamp := time.Now().Unix() + rows := ctx.Rows.Rows + for i := range rows { + r := &rows[i] + if r.Timestamp == 0 { + r.Timestamp = currentTimestamp + } + } + + // Convert timestamps from seconds to milliseconds + for i := range rows { + rows[i].Timestamp *= 1e3 + } + return true +} + +type streamContext struct { + Rows Rows + reqBuf []byte + tailBuf []byte + err error +} + +func (ctx *streamContext) Error() error { + if ctx.err == io.EOF { + return nil + } + return ctx.err +} + +func (ctx *streamContext) reset() { + ctx.Rows.Reset() + ctx.reqBuf = ctx.reqBuf[:0] + ctx.tailBuf = ctx.tailBuf[:0] + ctx.err = nil +} + +var ( + readCalls = metrics.NewCounter(`vm_protoparser_opentsdb_read_calls_total`) + readErrors = metrics.NewCounter(`vm_protoparser_opentsdb_read_errors_total`) + rowsRead = metrics.NewCounter(`vm_protoparser_opentsdb_rows_read_total`) +) + +func getStreamContext() *streamContext { + select { + case ctx := <-streamContextPoolCh: + return ctx + default: + if v := streamContextPool.Get(); v != nil { + return v.(*streamContext) + } + return &streamContext{} + } +} + +func putStreamContext(ctx *streamContext) { + ctx.reset() + select { + case streamContextPoolCh <- ctx: + default: + streamContextPool.Put(ctx) + } +} + +var streamContextPool sync.Pool +var streamContextPoolCh = make(chan *streamContext, runtime.GOMAXPROCS(-1)) diff --git a/lib/protoparser/opentsdbhttp/parser.go b/lib/protoparser/opentsdbhttp/parser.go index 3edabab50..9f3a1d17f 100644 --- a/lib/protoparser/opentsdbhttp/parser.go +++ b/lib/protoparser/opentsdbhttp/parser.go @@ -156,7 +156,7 @@ func unmarshalRow(dst []Row, o *fastjson.Value, tagsPool []Tag) ([]Row, []Tag) { return dst, tagsPool } -var invalidLines = metrics.NewCounter(`vm_rows_invalid_total{type="opentsdb-http"}`) +var invalidLines = metrics.NewCounter(`vm_rows_invalid_total{type="opentsdbhttp"}`) func unmarshalTags(dst []Tag, o *fastjson.Object) ([]Tag, error) { var err error diff --git a/lib/protoparser/opentsdbhttp/streamparser.go b/lib/protoparser/opentsdbhttp/streamparser.go new file mode 100644 index 000000000..61f942ca3 --- /dev/null +++ b/lib/protoparser/opentsdbhttp/streamparser.go @@ -0,0 +1,127 @@ 
+package opentsdbhttp + +import ( + "flag" + "fmt" + "io" + "net/http" + "runtime" + "sync" + "time" + + "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common" + "github.com/VictoriaMetrics/metrics" +) + +var maxInsertRequestSize = flag.Int("opentsdbhttp.maxInsertRequestSize", 32*1024*1024, "The maximum size of OpenTSDB HTTP put request") + +// ParseStream parses OpenTSDB http lines from req and calls callback for the parsed rows. +// +// The callback can be called multiple times for streamed data from req. +// +// callback shouldn't hold rows after returning. +func ParseStream(req *http.Request, callback func(rows []Row) error) error { + readCalls.Inc() + r := req.Body + if req.Header.Get("Content-Encoding") == "gzip" { + zr, err := common.GetGzipReader(r) + if err != nil { + readErrors.Inc() + return fmt.Errorf("cannot read gzipped http protocol data: %s", err) + } + defer common.PutGzipReader(zr) + r = zr + } + + ctx := getStreamContext() + defer putStreamContext(ctx) + + // Read the request in ctx.reqBuf + lr := io.LimitReader(r, int64(*maxInsertRequestSize)+1) + reqLen, err := ctx.reqBuf.ReadFrom(lr) + if err != nil { + readErrors.Inc() + return fmt.Errorf("cannot read HTTP OpenTSDB request: %s", err) + } + if reqLen > int64(*maxInsertRequestSize) { + readErrors.Inc() + return fmt.Errorf("too big HTTP OpenTSDB request; mustn't exceed `-opentsdbhttp.maxInsertRequestSize=%d` bytes", *maxInsertRequestSize) + } + + // Unmarshal the request to ctx.Rows + p := GetParser() + defer PutParser(p) + v, err := p.ParseBytes(ctx.reqBuf.B) + if err != nil { + unmarshalErrors.Inc() + return fmt.Errorf("cannot parse HTTP OpenTSDB json: %s", err) + } + ctx.Rows.Unmarshal(v) + rowsRead.Add(len(ctx.Rows.Rows)) + + // Fill in missing timestamps + currentTimestamp := time.Now().Unix() + rows := ctx.Rows.Rows + for i := range rows { + r := &rows[i] + if r.Timestamp == 0 { + r.Timestamp = currentTimestamp + } + } + + // Convert timestamps in seconds to milliseconds if needed. + // See http://opentsdb.net/docs/javadoc/net/opentsdb/core/Const.html#SECOND_MASK + for i := range rows { + r := &rows[i] + if r.Timestamp&secondMask == 0 { + r.Timestamp *= 1e3 + } + } + + // Insert ctx.Rows to db. 
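+ // Unlike the line-based parsers, the callback is invoked at most once per
+ // request here, since the whole JSON body is parsed in a single pass.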
+ return callback(ctx.Rows.Rows) +} + +const secondMask int64 = 0x7FFFFFFF00000000 + +type streamContext struct { + Rows Rows + reqBuf bytesutil.ByteBuffer +} + +func (ctx *streamContext) reset() { + ctx.Rows.Reset() + ctx.reqBuf.Reset() +} + +var ( + readCalls = metrics.NewCounter(`vm_protoparser_opentsdbhttp_read_calls_total`) + readErrors = metrics.NewCounter(`vm_protoparser_opentsdbhttp_read_errors_total`) + rowsRead = metrics.NewCounter(`vm_protoparser_opentsdbhttp_rows_read_total`) + unmarshalErrors = metrics.NewCounter(`vm_protoparser_opentsdbhttp_unmarshal_errors_total`) +) + +func getStreamContext() *streamContext { + select { + case ctx := <-streamContextPoolCh: + return ctx + default: + if v := streamContextPool.Get(); v != nil { + return v.(*streamContext) + } + return &streamContext{} + } +} + +func putStreamContext(ctx *streamContext) { + ctx.reset() + select { + case streamContextPoolCh <- ctx: + default: + streamContextPool.Put(ctx) + } +} + +var streamContextPool sync.Pool +var streamContextPoolCh = make(chan *streamContext, runtime.GOMAXPROCS(-1)) diff --git a/lib/protoparser/prometheus/parser.go b/lib/protoparser/prometheus/parser.go index bacccffa0..f1f97b9ca 100644 --- a/lib/protoparser/prometheus/parser.go +++ b/lib/protoparser/prometheus/parser.go @@ -38,8 +38,19 @@ func (rs *Rows) Reset() { // // s must be unchanged until rs is in use. func (rs *Rows) Unmarshal(s string) { + rs.UnmarshalWithErrLogger(s, stdErrLogger) +} + +func stdErrLogger(s string) { + logger.ErrorfSkipframes(1, "%s", s) +} + +// UnmarshalWithErrLogger unmarshal Prometheus exposition text rows from s. +// +// It calls errLogger for logging parsing errors. +func (rs *Rows) UnmarshalWithErrLogger(s string, errLogger func(s string)) { noEscapes := strings.IndexByte(s, '\\') < 0 - rs.Rows, rs.tagsPool = unmarshalRows(rs.Rows[:0], s, rs.tagsPool[:0], noEscapes) + rs.Rows, rs.tagsPool = unmarshalRows(rs.Rows[:0], s, rs.tagsPool[:0], noEscapes, errLogger) } // Row is a single Prometheus row. @@ -136,20 +147,20 @@ func (r *Row) unmarshal(s string, tagsPool []Tag, noEscapes bool) ([]Tag, error) return tagsPool, nil } -func unmarshalRows(dst []Row, s string, tagsPool []Tag, noEscapes bool) ([]Row, []Tag) { +func unmarshalRows(dst []Row, s string, tagsPool []Tag, noEscapes bool, errLogger func(s string)) ([]Row, []Tag) { for len(s) > 0 { n := strings.IndexByte(s, '\n') if n < 0 { // The last line. 
- return unmarshalRow(dst, s, tagsPool, noEscapes) + return unmarshalRow(dst, s, tagsPool, noEscapes, errLogger) } - dst, tagsPool = unmarshalRow(dst, s[:n], tagsPool, noEscapes) + dst, tagsPool = unmarshalRow(dst, s[:n], tagsPool, noEscapes, errLogger) s = s[n+1:] } return dst, tagsPool } -func unmarshalRow(dst []Row, s string, tagsPool []Tag, noEscapes bool) ([]Row, []Tag) { +func unmarshalRow(dst []Row, s string, tagsPool []Tag, noEscapes bool, errLogger func(s string)) ([]Row, []Tag) { if len(s) > 0 && s[len(s)-1] == '\r' { s = s[:len(s)-1] } @@ -172,7 +183,8 @@ func unmarshalRow(dst []Row, s string, tagsPool []Tag, noEscapes bool) ([]Row, [ tagsPool, err = r.unmarshal(s, tagsPool, noEscapes) if err != nil { dst = dst[:len(dst)-1] - logger.Errorf("cannot unmarshal Prometheus line %q: %s", s, err) + msg := fmt.Sprintf("cannot unmarshal Prometheus line %q: %s", s, err) + errLogger(msg) invalidLines.Inc() } return dst, tagsPool diff --git a/app/vminsert/prometheus/request_handler.go b/lib/protoparser/promremotewrite/streamparser.go similarity index 59% rename from app/vminsert/prometheus/request_handler.go rename to lib/protoparser/promremotewrite/streamparser.go index d98ffe737..986528bfb 100644 --- a/app/vminsert/prometheus/request_handler.go +++ b/lib/protoparser/promremotewrite/streamparser.go @@ -1,4 +1,4 @@ -package prometheus +package promremotewrite import ( "flag" @@ -8,8 +8,6 @@ import ( "runtime" "sync" - "github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/common" - "github.com/VictoriaMetrics/VictoriaMetrics/app/vminsert/concurrencylimiter" "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil" "github.com/VictoriaMetrics/VictoriaMetrics/lib/prompb" "github.com/VictoriaMetrics/metrics" @@ -18,79 +16,56 @@ import ( var maxInsertRequestSize = flag.Int("maxInsertRequestSize", 32*1024*1024, "The maximum size in bytes of a single Prometheus remote_write API request") -var ( - rowsInserted = metrics.NewCounter(`vm_rows_inserted_total{type="prometheus"}`) - rowsPerInsert = metrics.NewSummary(`vm_rows_per_insert{type="prometheus"}`) -) - -// InsertHandler processes remote write for prometheus. -func InsertHandler(r *http.Request) error { - return concurrencylimiter.Do(func() error { - return insertHandlerInternal(r) - }) -} - -func insertHandlerInternal(r *http.Request) error { +// ParseStream parses Prometheus remote_write message req and calls callback for the parsed timeseries. +// +// callback shouldn't hold timeseries after returning. 
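+//
+// The request body must be a Snappy-compressed prompb.WriteRequest protobuf
+// message, as produced by the Prometheus remote_write protocol.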
+func ParseStream(req *http.Request, callback func(timeseries []prompb.TimeSeries) error) error { ctx := getPushCtx() defer putPushCtx(ctx) - if err := ctx.Read(r); err != nil { + if err := ctx.Read(req); err != nil { return err } - timeseries := ctx.req.Timeseries - rowsLen := 0 - for i := range timeseries { - rowsLen += len(timeseries[i].Samples) - } - ic := &ctx.Common - ic.Reset(rowsLen) - rowsTotal := 0 - for i := range timeseries { - ts := ×eries[i] - var metricNameRaw []byte - for i := range ts.Samples { - r := &ts.Samples[i] - metricNameRaw = ic.WriteDataPointExt(metricNameRaw, ts.Labels, r.Timestamp, r.Value) - } - rowsTotal += len(ts.Samples) - } - rowsInserted.Add(rowsTotal) - rowsPerInsert.Update(float64(rowsTotal)) - return ic.FlushBufs() + return callback(ctx.wr.Timeseries) } type pushCtx struct { - Common common.InsertCtx - - req prompb.WriteRequest + wr prompb.WriteRequest reqBuf []byte } func (ctx *pushCtx) reset() { - ctx.Common.Reset(0) - ctx.req.Reset() + ctx.wr.Reset() ctx.reqBuf = ctx.reqBuf[:0] } func (ctx *pushCtx) Read(r *http.Request) error { - prometheusReadCalls.Inc() - + readCalls.Inc() var err error ctx.reqBuf, err = readSnappy(ctx.reqBuf[:0], r.Body) if err != nil { - prometheusReadErrors.Inc() + readErrors.Inc() return fmt.Errorf("cannot read prompb.WriteRequest: %s", err) } - if err = ctx.req.Unmarshal(ctx.reqBuf); err != nil { - prometheusUnmarshalErrors.Inc() + if err = ctx.wr.Unmarshal(ctx.reqBuf); err != nil { + unmarshalErrors.Inc() return fmt.Errorf("cannot unmarshal prompb.WriteRequest with size %d bytes: %s", len(ctx.reqBuf), err) } + + rows := 0 + tss := ctx.wr.Timeseries + for i := range tss { + rows += len(tss[i].Samples) + } + rowsRead.Add(rows) + return nil } var ( - prometheusReadCalls = metrics.NewCounter(`vm_read_calls_total{name="prometheus"}`) - prometheusReadErrors = metrics.NewCounter(`vm_read_errors_total{name="prometheus"}`) - prometheusUnmarshalErrors = metrics.NewCounter(`vm_unmarshal_errors_total{name="prometheus"}`) + readCalls = metrics.NewCounter(`vm_protoparser_promremotewrite_read_calls_total`) + readErrors = metrics.NewCounter(`vm_protoparser_promremotewrite_read_errors_total`) + rowsRead = metrics.NewCounter(`vm_protoparser_promremotewrite_rows_read_total`) + unmarshalErrors = metrics.NewCounter(`vm_protoparser_promremotewrite_unmarshal_errors`) ) func getPushCtx() *pushCtx { diff --git a/app/vminsert/vmimport/parser.go b/lib/protoparser/vmimport/parser.go similarity index 100% rename from app/vminsert/vmimport/parser.go rename to lib/protoparser/vmimport/parser.go diff --git a/app/vminsert/vmimport/parser_test.go b/lib/protoparser/vmimport/parser_test.go similarity index 100% rename from app/vminsert/vmimport/parser_test.go rename to lib/protoparser/vmimport/parser_test.go diff --git a/app/vminsert/vmimport/parser_timing_test.go b/lib/protoparser/vmimport/parser_timing_test.go similarity index 100% rename from app/vminsert/vmimport/parser_timing_test.go rename to lib/protoparser/vmimport/parser_timing_test.go diff --git a/lib/protoparser/vmimport/streamparser.go b/lib/protoparser/vmimport/streamparser.go new file mode 100644 index 000000000..aef23624e --- /dev/null +++ b/lib/protoparser/vmimport/streamparser.go @@ -0,0 +1,111 @@ +package vmimport + +import ( + "flag" + "fmt" + "io" + "net/http" + "runtime" + "sync" + + "github.com/VictoriaMetrics/VictoriaMetrics/lib/bytesutil" + "github.com/VictoriaMetrics/VictoriaMetrics/lib/protoparser/common" + "github.com/VictoriaMetrics/metrics" +) + +var maxLineLen = 
flag.Int("import.maxLineLen", 100*1024*1024, "The maximum length in bytes of a single line accepted by /api/v1/import") + +// ParseStream parses /api/v1/import lines from req and calls callback for the parsed rows. +// +// The callback can be called multiple times for streamed data from req. +// +// callback shouldn't hold rows after returning. +func ParseStream(req *http.Request, callback func(rows []Row) error) error { + readCalls.Inc() + r := req.Body + if req.Header.Get("Content-Encoding") == "gzip" { + zr, err := common.GetGzipReader(r) + if err != nil { + return fmt.Errorf("cannot read gzipped vmimport data: %s", err) + } + defer common.PutGzipReader(zr) + r = zr + } + + ctx := getStreamContext() + defer putStreamContext(ctx) + for ctx.Read(r) { + if err := callback(ctx.Rows.Rows); err != nil { + return err + } + } + return ctx.Error() +} + +func (ctx *streamContext) Read(r io.Reader) bool { + if ctx.err != nil { + return false + } + ctx.reqBuf, ctx.tailBuf, ctx.err = common.ReadLinesBlockExt(r, ctx.reqBuf, ctx.tailBuf, *maxLineLen) + if ctx.err != nil { + if ctx.err != io.EOF { + readErrors.Inc() + ctx.err = fmt.Errorf("cannot read vmimport data: %s", ctx.err) + } + return false + } + ctx.Rows.Unmarshal(bytesutil.ToUnsafeString(ctx.reqBuf)) + rowsRead.Add(len(ctx.Rows.Rows)) + return true +} + +var ( + readCalls = metrics.NewCounter(`vm_protoparser_vmimport_read_calls_total`) + readErrors = metrics.NewCounter(`vm_protoparser_vmimport_read_errors_total`) + rowsRead = metrics.NewCounter(`vm_protoparser_vmimport_rows_read_total`) +) + +type streamContext struct { + Rows Rows + reqBuf []byte + tailBuf []byte + err error +} + +func (ctx *streamContext) Error() error { + if ctx.err == io.EOF { + return nil + } + return ctx.err +} + +func (ctx *streamContext) reset() { + ctx.Rows.Reset() + ctx.reqBuf = ctx.reqBuf[:0] + ctx.tailBuf = ctx.tailBuf[:0] + ctx.err = nil +} + +func getStreamContext() *streamContext { + select { + case ctx := <-streamContextPoolCh: + return ctx + default: + if v := streamContextPool.Get(); v != nil { + return v.(*streamContext) + } + return &streamContext{} + } +} + +func putStreamContext(ctx *streamContext) { + ctx.reset() + select { + case streamContextPoolCh <- ctx: + default: + streamContextPool.Put(ctx) + } +} + +var streamContextPool sync.Pool +var streamContextPoolCh = make(chan *streamContext, runtime.GOMAXPROCS(-1)) diff --git a/app/vminsert/concurrencylimiter/concurrencylimiter.go b/lib/writeconcurrencylimiter/concurrencylimiter.go similarity index 92% rename from app/vminsert/concurrencylimiter/concurrencylimiter.go rename to lib/writeconcurrencylimiter/concurrencylimiter.go index 079bf03f4..1e2c94bfe 100644 --- a/app/vminsert/concurrencylimiter/concurrencylimiter.go +++ b/lib/writeconcurrencylimiter/concurrencylimiter.go @@ -1,4 +1,4 @@ -package concurrencylimiter +package writeconcurrencylimiter import ( "flag" @@ -54,8 +54,7 @@ func Do(f func() error) error { concurrencyLimitTimeout.Inc() return &httpserver.ErrorWithStatusCode{ Err: fmt.Errorf("cannot handle more than %d concurrent inserts during %s; possible solutions: "+ - "increase `-insert.maxQueueDuration`, increase `-maxConcurrentInserts`, "+ - "decrease `-search.maxConcurrentRequests`, increase server capacity", *maxConcurrentInserts, *maxQueueDuration), + "increase `-insert.maxQueueDuration`, increase `-maxConcurrentInserts`, increase server capacity", *maxConcurrentInserts, *maxQueueDuration), StatusCode: http.StatusServiceUnavailable, } } diff --git 
a/vendor/github.com/klauspost/compress/zlib/reader.go b/vendor/github.com/klauspost/compress/zlib/reader.go new file mode 100644 index 000000000..d9091e831 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zlib/reader.go @@ -0,0 +1,183 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package zlib implements reading and writing of zlib format compressed data, +as specified in RFC 1950. + +The implementation provides filters that uncompress during reading +and compress during writing. For example, to write compressed data +to a buffer: + + var b bytes.Buffer + w := zlib.NewWriter(&b) + w.Write([]byte("hello, world\n")) + w.Close() + +and to read that data back: + + r, err := zlib.NewReader(&b) + io.Copy(os.Stdout, r) + r.Close() +*/ +package zlib + +import ( + "bufio" + "errors" + "hash" + "hash/adler32" + "io" + + "github.com/klauspost/compress/flate" +) + +const zlibDeflate = 8 + +var ( + // ErrChecksum is returned when reading ZLIB data that has an invalid checksum. + ErrChecksum = errors.New("zlib: invalid checksum") + // ErrDictionary is returned when reading ZLIB data that has an invalid dictionary. + ErrDictionary = errors.New("zlib: invalid dictionary") + // ErrHeader is returned when reading ZLIB data that has an invalid header. + ErrHeader = errors.New("zlib: invalid header") +) + +type reader struct { + r flate.Reader + decompressor io.ReadCloser + digest hash.Hash32 + err error + scratch [4]byte +} + +// Resetter resets a ReadCloser returned by NewReader or NewReaderDict to +// to switch to a new underlying Reader. This permits reusing a ReadCloser +// instead of allocating a new one. +type Resetter interface { + // Reset discards any buffered data and resets the Resetter as if it was + // newly initialized with the given reader. + Reset(r io.Reader, dict []byte) error +} + +// NewReader creates a new ReadCloser. +// Reads from the returned ReadCloser read and decompress data from r. +// If r does not implement io.ByteReader, the decompressor may read more +// data than necessary from r. +// It is the caller's responsibility to call Close on the ReadCloser when done. +// +// The ReadCloser returned by NewReader also implements Resetter. +func NewReader(r io.Reader) (io.ReadCloser, error) { + return NewReaderDict(r, nil) +} + +// NewReaderDict is like NewReader but uses a preset dictionary. +// NewReaderDict ignores the dictionary if the compressed data does not refer to it. +// If the compressed data refers to a different dictionary, NewReaderDict returns ErrDictionary. +// +// The ReadCloser returned by NewReaderDict also implements Resetter. +func NewReaderDict(r io.Reader, dict []byte) (io.ReadCloser, error) { + z := new(reader) + err := z.Reset(r, dict) + if err != nil { + return nil, err + } + return z, nil +} + +func (z *reader) Read(p []byte) (int, error) { + if z.err != nil { + return 0, z.err + } + + var n int + n, z.err = z.decompressor.Read(p) + z.digest.Write(p[0:n]) + if z.err != io.EOF { + // In the normal case we return here. + return n, z.err + } + + // Finished file; check checksum. + if _, err := io.ReadFull(z.r, z.scratch[0:4]); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + z.err = err + return n, z.err + } + // ZLIB (RFC 1950) is big-endian, unlike GZIP (RFC 1952). 
+ checksum := uint32(z.scratch[0])<<24 | uint32(z.scratch[1])<<16 | uint32(z.scratch[2])<<8 | uint32(z.scratch[3]) + if checksum != z.digest.Sum32() { + z.err = ErrChecksum + return n, z.err + } + return n, io.EOF +} + +// Calling Close does not close the wrapped io.Reader originally passed to NewReader. +// In order for the ZLIB checksum to be verified, the reader must be +// fully consumed until the io.EOF. +func (z *reader) Close() error { + if z.err != nil && z.err != io.EOF { + return z.err + } + z.err = z.decompressor.Close() + return z.err +} + +func (z *reader) Reset(r io.Reader, dict []byte) error { + *z = reader{decompressor: z.decompressor, digest: z.digest} + if fr, ok := r.(flate.Reader); ok { + z.r = fr + } else { + z.r = bufio.NewReader(r) + } + + // Read the header (RFC 1950 section 2.2.). + _, z.err = io.ReadFull(z.r, z.scratch[0:2]) + if z.err != nil { + if z.err == io.EOF { + z.err = io.ErrUnexpectedEOF + } + return z.err + } + h := uint(z.scratch[0])<<8 | uint(z.scratch[1]) + if (z.scratch[0]&0x0f != zlibDeflate) || (h%31 != 0) { + z.err = ErrHeader + return z.err + } + haveDict := z.scratch[1]&0x20 != 0 + if haveDict { + _, z.err = io.ReadFull(z.r, z.scratch[0:4]) + if z.err != nil { + if z.err == io.EOF { + z.err = io.ErrUnexpectedEOF + } + return z.err + } + checksum := uint32(z.scratch[0])<<24 | uint32(z.scratch[1])<<16 | uint32(z.scratch[2])<<8 | uint32(z.scratch[3]) + if checksum != adler32.Checksum(dict) { + z.err = ErrDictionary + return z.err + } + } + + if z.decompressor == nil { + if haveDict { + z.decompressor = flate.NewReaderDict(z.r, dict) + } else { + z.decompressor = flate.NewReader(z.r) + } + } else { + z.decompressor.(flate.Resetter).Reset(z.r, dict) + } + + if z.digest != nil { + z.digest.Reset() + } else { + z.digest = adler32.New() + } + return nil +} diff --git a/vendor/github.com/klauspost/compress/zlib/writer.go b/vendor/github.com/klauspost/compress/zlib/writer.go new file mode 100644 index 000000000..605816ba4 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zlib/writer.go @@ -0,0 +1,201 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package zlib + +import ( + "fmt" + "hash" + "hash/adler32" + "io" + + "github.com/klauspost/compress/flate" +) + +// These constants are copied from the flate package, so that code that imports +// "compress/zlib" does not also have to import "compress/flate". +const ( + NoCompression = flate.NoCompression + BestSpeed = flate.BestSpeed + BestCompression = flate.BestCompression + DefaultCompression = flate.DefaultCompression + ConstantCompression = flate.ConstantCompression + HuffmanOnly = flate.HuffmanOnly +) + +// A Writer takes data written to it and writes the compressed +// form of that data to an underlying writer (see NewWriter). +type Writer struct { + w io.Writer + level int + dict []byte + compressor *flate.Writer + digest hash.Hash32 + err error + scratch [4]byte + wroteHeader bool +} + +// NewWriter creates a new Writer. +// Writes to the returned Writer are compressed and written to w. +// +// It is the caller's responsibility to call Close on the WriteCloser when done. +// Writes may be buffered and not flushed until Close. +func NewWriter(w io.Writer) *Writer { + z, _ := NewWriterLevelDict(w, DefaultCompression, nil) + return z +} + +// NewWriterLevel is like NewWriter but specifies the compression level instead +// of assuming DefaultCompression. 
+// +// The compression level can be DefaultCompression, NoCompression, HuffmanOnly +// or any integer value between BestSpeed and BestCompression inclusive. +// The error returned will be nil if the level is valid. +func NewWriterLevel(w io.Writer, level int) (*Writer, error) { + return NewWriterLevelDict(w, level, nil) +} + +// NewWriterLevelDict is like NewWriterLevel but specifies a dictionary to +// compress with. +// +// The dictionary may be nil. If not, its contents should not be modified until +// the Writer is closed. +func NewWriterLevelDict(w io.Writer, level int, dict []byte) (*Writer, error) { + if level < HuffmanOnly || level > BestCompression { + return nil, fmt.Errorf("zlib: invalid compression level: %d", level) + } + return &Writer{ + w: w, + level: level, + dict: dict, + }, nil +} + +// Reset clears the state of the Writer z such that it is equivalent to its +// initial state from NewWriterLevel or NewWriterLevelDict, but instead writing +// to w. +func (z *Writer) Reset(w io.Writer) { + z.w = w + // z.level and z.dict left unchanged. + if z.compressor != nil { + z.compressor.Reset(w) + } + if z.digest != nil { + z.digest.Reset() + } + z.err = nil + z.scratch = [4]byte{} + z.wroteHeader = false +} + +// writeHeader writes the ZLIB header. +func (z *Writer) writeHeader() (err error) { + z.wroteHeader = true + // ZLIB has a two-byte header (as documented in RFC 1950). + // The first four bits is the CINFO (compression info), which is 7 for the default deflate window size. + // The next four bits is the CM (compression method), which is 8 for deflate. + z.scratch[0] = 0x78 + // The next two bits is the FLEVEL (compression level). The four values are: + // 0=fastest, 1=fast, 2=default, 3=best. + // The next bit, FDICT, is set if a dictionary is given. + // The final five FCHECK bits form a mod-31 checksum. + switch z.level { + case -2, 0, 1: + z.scratch[1] = 0 << 6 + case 2, 3, 4, 5: + z.scratch[1] = 1 << 6 + case 6, -1: + z.scratch[1] = 2 << 6 + case 7, 8, 9: + z.scratch[1] = 3 << 6 + default: + panic("unreachable") + } + if z.dict != nil { + z.scratch[1] |= 1 << 5 + } + z.scratch[1] += uint8(31 - (uint16(z.scratch[0])<<8+uint16(z.scratch[1]))%31) + if _, err = z.w.Write(z.scratch[0:2]); err != nil { + return err + } + if z.dict != nil { + // The next four bytes are the Adler-32 checksum of the dictionary. + checksum := adler32.Checksum(z.dict) + z.scratch[0] = uint8(checksum >> 24) + z.scratch[1] = uint8(checksum >> 16) + z.scratch[2] = uint8(checksum >> 8) + z.scratch[3] = uint8(checksum >> 0) + if _, err = z.w.Write(z.scratch[0:4]); err != nil { + return err + } + } + if z.compressor == nil { + // Initialize deflater unless the Writer is being reused + // after a Reset call. + z.compressor, err = flate.NewWriterDict(z.w, z.level, z.dict) + if err != nil { + return err + } + z.digest = adler32.New() + } + return nil +} + +// Write writes a compressed form of p to the underlying io.Writer. The +// compressed bytes are not necessarily flushed until the Writer is closed or +// explicitly flushed. +func (z *Writer) Write(p []byte) (n int, err error) { + if !z.wroteHeader { + z.err = z.writeHeader() + } + if z.err != nil { + return 0, z.err + } + if len(p) == 0 { + return 0, nil + } + n, err = z.compressor.Write(p) + if err != nil { + z.err = err + return + } + z.digest.Write(p) + return +} + +// Flush flushes the Writer to its underlying io.Writer. 
+func (z *Writer) Flush() error { + if !z.wroteHeader { + z.err = z.writeHeader() + } + if z.err != nil { + return z.err + } + z.err = z.compressor.Flush() + return z.err +} + +// Close closes the Writer, flushing any unwritten data to the underlying +// io.Writer, but does not close the underlying io.Writer. +func (z *Writer) Close() error { + if !z.wroteHeader { + z.err = z.writeHeader() + } + if z.err != nil { + return z.err + } + z.err = z.compressor.Close() + if z.err != nil { + return z.err + } + checksum := z.digest.Sum32() + // ZLIB (RFC 1950) is big-endian, unlike GZIP (RFC 1952). + z.scratch[0] = uint8(checksum >> 24) + z.scratch[1] = uint8(checksum >> 16) + z.scratch[2] = uint8(checksum >> 8) + z.scratch[3] = uint8(checksum >> 0) + _, z.err = z.w.Write(z.scratch[0:4]) + return z.err +} diff --git a/vendor/github.com/valyala/fasthttp/.gitignore b/vendor/github.com/valyala/fasthttp/.gitignore new file mode 100644 index 000000000..7b58ce45b --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/.gitignore @@ -0,0 +1,3 @@ +tags +*.pprof +*.fasthttp.gz diff --git a/vendor/github.com/valyala/fasthttp/.travis.yml b/vendor/github.com/valyala/fasthttp/.travis.yml new file mode 100644 index 000000000..b262c6941 --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/.travis.yml @@ -0,0 +1,55 @@ +language: go + +# Docker is required for fuzzit regression tests +services: + - docker + +dist: bionic + +os: + - linux + - osx +go: + - tip + - 1.13.x + - 1.12.x + - 1.11.x + - 1.10.x + - 1.9.x + +matrix: + allow_failures: + - tip + fast_finish: true + +env: + global: + secure: "v/F0oI9zE9mcpEp4AVdHzSSHbe5ZFtH6B0i/BiUXKdQRQ10+JMPDOFRJQti7yxjMwltyd/QSFmR50Fl108sQYpo4xdlEXMHp2Y6OAN6crrp6PuHbLYgDWu3df/cH7/BqDyIq1uX8KZEeQssnygYN8hN4tpJCUg+NIb40Lm57Zsodt8DVjjyDWQQFDL7soNyAwGwQIqEyJsn+NUieXWEB1Qnt0xUtPIReuLlrwXR8wC1nLEjG9yz4ftDHHQdhVbO2b+xGWyaJ7QB5ixztaQP8Jnny6kSW9j6zEhJVuzdZ6d3xz23ibCbzSXBHdIUEI9u6ifQj8BYXr8fFS0FB3++IxgAYSs3ybZ+qEwuAxSBBm6YNW+3FrfDknVwTQscjKqnXPisjUqaRC9b31hke0tXzBq1488hE+wxMXeDM4LwWT5IMEO2gz0WGQXxmdVit72DIjCZxJkf1TvZZ0YH7Y//6wJTYYP9xulsy4gqu8CuFdWiF3fiGc3p5DTIS75nJ/Yy76Sa1pRPASKCujfLxtHE6Mt0XKvSolIXklYIzBkjN6vn80N6JIrqtqlimBGPW/Ec6+dwbmRe2AcOKRl4y7pZsGYhJhqdue1mucUYO/e2QeBZJGkqqG+zF5AW0v8x29BHvMwViAonc8o9eelkJ8khYzc/Qeq05pZnR/N/Pqfc+68k=" + +before_install: + - go get -t -v ./... + +jobs: + include: + - stage: test + script: + # build test for supported platforms + - GOOS=linux go build + - GOOS=darwin go build + - GOOS=freebsd go build + - GOOS=windows go build + - GOARCH=386 go build + + # run tests on a standard platform + - go test -v ./... + + # run tests with the race detector as well + - go test -race -v ./... 
+ - stage: fuzzit.dev + os: + - linux + go: + - 1.13 + script: + - if [ "$TRAVIS_PULL_REQUEST" = "false" ]; then ./fuzzit.sh fuzzing; fi + - if [ "$TRAVIS_PULL_REQUEST" != "false" ]; then ./fuzzit.sh local-regression; fi diff --git a/vendor/github.com/valyala/fasthttp/LICENSE b/vendor/github.com/valyala/fasthttp/LICENSE new file mode 100644 index 000000000..24a50469e --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/LICENSE @@ -0,0 +1,9 @@ +The MIT License (MIT) + +Copyright (c) 2015-present Aliaksandr Valialkin, VertaMedia, Kirill Danshin, Erik Dubbelboer, FastHTTP Authors + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/valyala/fasthttp/README.md b/vendor/github.com/valyala/fasthttp/README.md new file mode 100644 index 000000000..95270a3c1 --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/README.md @@ -0,0 +1,585 @@ +# fasthttp [![Build Status](https://travis-ci.org/valyala/fasthttp.svg?branch=master)](https://travis-ci.org/valyala/fasthttp?branch=master) [![GoDoc](https://godoc.org/github.com/valyala/fasthttp?status.svg)](http://godoc.org/github.com/valyala/fasthttp) [![fuzzit](https://app.fuzzit.dev/badge?org_id=fasthttp&branch=master)](https://fuzzit.dev) [![Go Report](https://goreportcard.com/badge/github.com/valyala/fasthttp)](https://goreportcard.com/report/github.com/valyala/fasthttp) [![Sourcegraph](https://sourcegraph.com/github.com/valyala/fasthttp/-/badge.svg)](https://sourcegraph.com/github.com/valyala/fasthttp?badge) + +![FastHTTP – Fastest and reliable HTTP implementation in Go](https://github.com/fasthttp/docs-assets/raw/master/banner@0.5.png) + +Fast HTTP implementation for Go. + +Currently fasthttp is successfully used by [VertaMedia](https://vertamedia.com/) +in a production serving up to 200K rps from more than 1.5M concurrent keep-alive +connections per physical server. 
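+
+For orientation, a minimal usage sketch is shown below (the handler body and port are illustrative only; fuller, authoritative examples appear in the sections that follow):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/valyala/fasthttp"
+)
+
+func main() {
+	// A fasthttp.RequestHandler receives a single *fasthttp.RequestCtx that
+	// carries both the request and the response.
+	handler := func(ctx *fasthttp.RequestCtx) {
+		// RequestCtx implements io.Writer, so the response body can be
+		// written with fmt.Fprintf.
+		fmt.Fprintf(ctx, "Hello from fasthttp! Requested path is %q", ctx.Path())
+	}
+	// ListenAndServe blocks, serving the handler on the given address.
+	if err := fasthttp.ListenAndServe(":8080", handler); err != nil {
+		panic(err)
+	}
+}
+```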
+ +[TechEmpower Benchmark round 18 results](https://www.techempower.com/benchmarks/#section=data-r18&hw=ph&test=plaintext) + +[Server Benchmarks](#http-server-performance-comparison-with-nethttp) + +[Client Benchmarks](#http-client-comparison-with-nethttp) + +[Install](#install) + +[Documentation](https://godoc.org/github.com/valyala/fasthttp) + +[Examples from docs](https://godoc.org/github.com/valyala/fasthttp#pkg-examples) + +[Code examples](examples) + +[Awesome fasthttp tools](https://github.com/fasthttp) + +[Switching from net/http to fasthttp](#switching-from-nethttp-to-fasthttp) + +[Fasthttp best practices](#fasthttp-best-practices) + +[Tricks with byte buffers](#tricks-with-byte-buffers) + +[Related projects](#related-projects) + +[FAQ](#faq) + +# HTTP server performance comparison with [net/http](https://golang.org/pkg/net/http/) + +In short, fasthttp server is up to 10 times faster than net/http. +Below are benchmark results. + +*GOMAXPROCS=1* + +net/http server: +``` +$ GOMAXPROCS=1 go test -bench=NetHTTPServerGet -benchmem -benchtime=10s +BenchmarkNetHTTPServerGet1ReqPerConn 1000000 12052 ns/op 2297 B/op 29 allocs/op +BenchmarkNetHTTPServerGet2ReqPerConn 1000000 12278 ns/op 2327 B/op 24 allocs/op +BenchmarkNetHTTPServerGet10ReqPerConn 2000000 8903 ns/op 2112 B/op 19 allocs/op +BenchmarkNetHTTPServerGet10KReqPerConn 2000000 8451 ns/op 2058 B/op 18 allocs/op +BenchmarkNetHTTPServerGet1ReqPerConn10KClients 500000 26733 ns/op 3229 B/op 29 allocs/op +BenchmarkNetHTTPServerGet2ReqPerConn10KClients 1000000 23351 ns/op 3211 B/op 24 allocs/op +BenchmarkNetHTTPServerGet10ReqPerConn10KClients 1000000 13390 ns/op 2483 B/op 19 allocs/op +BenchmarkNetHTTPServerGet100ReqPerConn10KClients 1000000 13484 ns/op 2171 B/op 18 allocs/op +``` + +fasthttp server: +``` +$ GOMAXPROCS=1 go test -bench=kServerGet -benchmem -benchtime=10s +BenchmarkServerGet1ReqPerConn 10000000 1559 ns/op 0 B/op 0 allocs/op +BenchmarkServerGet2ReqPerConn 10000000 1248 ns/op 0 B/op 0 allocs/op +BenchmarkServerGet10ReqPerConn 20000000 797 ns/op 0 B/op 0 allocs/op +BenchmarkServerGet10KReqPerConn 20000000 716 ns/op 0 B/op 0 allocs/op +BenchmarkServerGet1ReqPerConn10KClients 10000000 1974 ns/op 0 B/op 0 allocs/op +BenchmarkServerGet2ReqPerConn10KClients 10000000 1352 ns/op 0 B/op 0 allocs/op +BenchmarkServerGet10ReqPerConn10KClients 20000000 789 ns/op 2 B/op 0 allocs/op +BenchmarkServerGet100ReqPerConn10KClients 20000000 604 ns/op 0 B/op 0 allocs/op +``` + +*GOMAXPROCS=4* + +net/http server: +``` +$ GOMAXPROCS=4 go test -bench=NetHTTPServerGet -benchmem -benchtime=10s +BenchmarkNetHTTPServerGet1ReqPerConn-4 3000000 4529 ns/op 2389 B/op 29 allocs/op +BenchmarkNetHTTPServerGet2ReqPerConn-4 5000000 3896 ns/op 2418 B/op 24 allocs/op +BenchmarkNetHTTPServerGet10ReqPerConn-4 5000000 3145 ns/op 2160 B/op 19 allocs/op +BenchmarkNetHTTPServerGet10KReqPerConn-4 5000000 3054 ns/op 2065 B/op 18 allocs/op +BenchmarkNetHTTPServerGet1ReqPerConn10KClients-4 1000000 10321 ns/op 3710 B/op 30 allocs/op +BenchmarkNetHTTPServerGet2ReqPerConn10KClients-4 2000000 7556 ns/op 3296 B/op 24 allocs/op +BenchmarkNetHTTPServerGet10ReqPerConn10KClients-4 5000000 3905 ns/op 2349 B/op 19 allocs/op +BenchmarkNetHTTPServerGet100ReqPerConn10KClients-4 5000000 3435 ns/op 2130 B/op 18 allocs/op +``` + +fasthttp server: +``` +$ GOMAXPROCS=4 go test -bench=kServerGet -benchmem -benchtime=10s +BenchmarkServerGet1ReqPerConn-4 10000000 1141 ns/op 0 B/op 0 allocs/op +BenchmarkServerGet2ReqPerConn-4 20000000 707 ns/op 0 B/op 0 allocs/op +BenchmarkServerGet10ReqPerConn-4 
30000000 341 ns/op 0 B/op 0 allocs/op +BenchmarkServerGet10KReqPerConn-4 50000000 310 ns/op 0 B/op 0 allocs/op +BenchmarkServerGet1ReqPerConn10KClients-4 10000000 1119 ns/op 0 B/op 0 allocs/op +BenchmarkServerGet2ReqPerConn10KClients-4 20000000 644 ns/op 0 B/op 0 allocs/op +BenchmarkServerGet10ReqPerConn10KClients-4 30000000 346 ns/op 0 B/op 0 allocs/op +BenchmarkServerGet100ReqPerConn10KClients-4 50000000 282 ns/op 0 B/op 0 allocs/op +``` + +# HTTP client comparison with net/http + +In short, fasthttp client is up to 10 times faster than net/http. +Below are benchmark results. + +*GOMAXPROCS=1* + +net/http client: +``` +$ GOMAXPROCS=1 go test -bench='HTTPClient(Do|GetEndToEnd)' -benchmem -benchtime=10s +BenchmarkNetHTTPClientDoFastServer 1000000 12567 ns/op 2616 B/op 35 allocs/op +BenchmarkNetHTTPClientGetEndToEnd1TCP 200000 67030 ns/op 5028 B/op 56 allocs/op +BenchmarkNetHTTPClientGetEndToEnd10TCP 300000 51098 ns/op 5031 B/op 56 allocs/op +BenchmarkNetHTTPClientGetEndToEnd100TCP 300000 45096 ns/op 5026 B/op 55 allocs/op +BenchmarkNetHTTPClientGetEndToEnd1Inmemory 500000 24779 ns/op 5035 B/op 57 allocs/op +BenchmarkNetHTTPClientGetEndToEnd10Inmemory 1000000 26425 ns/op 5035 B/op 57 allocs/op +BenchmarkNetHTTPClientGetEndToEnd100Inmemory 500000 28515 ns/op 5045 B/op 57 allocs/op +BenchmarkNetHTTPClientGetEndToEnd1000Inmemory 500000 39511 ns/op 5096 B/op 56 allocs/op +``` + +fasthttp client: +``` +$ GOMAXPROCS=1 go test -bench='kClient(Do|GetEndToEnd)' -benchmem -benchtime=10s +BenchmarkClientDoFastServer 20000000 865 ns/op 0 B/op 0 allocs/op +BenchmarkClientGetEndToEnd1TCP 1000000 18711 ns/op 0 B/op 0 allocs/op +BenchmarkClientGetEndToEnd10TCP 1000000 14664 ns/op 0 B/op 0 allocs/op +BenchmarkClientGetEndToEnd100TCP 1000000 14043 ns/op 1 B/op 0 allocs/op +BenchmarkClientGetEndToEnd1Inmemory 5000000 3965 ns/op 0 B/op 0 allocs/op +BenchmarkClientGetEndToEnd10Inmemory 3000000 4060 ns/op 0 B/op 0 allocs/op +BenchmarkClientGetEndToEnd100Inmemory 5000000 3396 ns/op 0 B/op 0 allocs/op +BenchmarkClientGetEndToEnd1000Inmemory 5000000 3306 ns/op 2 B/op 0 allocs/op +``` + +*GOMAXPROCS=4* + +net/http client: +``` +$ GOMAXPROCS=4 go test -bench='HTTPClient(Do|GetEndToEnd)' -benchmem -benchtime=10s +BenchmarkNetHTTPClientDoFastServer-4 2000000 8774 ns/op 2619 B/op 35 allocs/op +BenchmarkNetHTTPClientGetEndToEnd1TCP-4 500000 22951 ns/op 5047 B/op 56 allocs/op +BenchmarkNetHTTPClientGetEndToEnd10TCP-4 1000000 19182 ns/op 5037 B/op 55 allocs/op +BenchmarkNetHTTPClientGetEndToEnd100TCP-4 1000000 16535 ns/op 5031 B/op 55 allocs/op +BenchmarkNetHTTPClientGetEndToEnd1Inmemory-4 1000000 14495 ns/op 5038 B/op 56 allocs/op +BenchmarkNetHTTPClientGetEndToEnd10Inmemory-4 1000000 10237 ns/op 5034 B/op 56 allocs/op +BenchmarkNetHTTPClientGetEndToEnd100Inmemory-4 1000000 10125 ns/op 5045 B/op 56 allocs/op +BenchmarkNetHTTPClientGetEndToEnd1000Inmemory-4 1000000 11132 ns/op 5136 B/op 56 allocs/op +``` + +fasthttp client: +``` +$ GOMAXPROCS=4 go test -bench='kClient(Do|GetEndToEnd)' -benchmem -benchtime=10s +BenchmarkClientDoFastServer-4 50000000 397 ns/op 0 B/op 0 allocs/op +BenchmarkClientGetEndToEnd1TCP-4 2000000 7388 ns/op 0 B/op 0 allocs/op +BenchmarkClientGetEndToEnd10TCP-4 2000000 6689 ns/op 0 B/op 0 allocs/op +BenchmarkClientGetEndToEnd100TCP-4 3000000 4927 ns/op 1 B/op 0 allocs/op +BenchmarkClientGetEndToEnd1Inmemory-4 10000000 1604 ns/op 0 B/op 0 allocs/op +BenchmarkClientGetEndToEnd10Inmemory-4 10000000 1458 ns/op 0 B/op 0 allocs/op +BenchmarkClientGetEndToEnd100Inmemory-4 10000000 1329 ns/op 0 B/op 0 
allocs/op +BenchmarkClientGetEndToEnd1000Inmemory-4 10000000 1316 ns/op 5 B/op 0 allocs/op +``` + + +# Install + +``` +go get -u github.com/valyala/fasthttp +``` + + +# Switching from net/http to fasthttp + +Unfortunately, fasthttp doesn't provide API identical to net/http. +See the [FAQ](#faq) for details. +There is [net/http -> fasthttp handler converter](https://godoc.org/github.com/valyala/fasthttp/fasthttpadaptor), +but it is better to write fasthttp request handlers by hand in order to use +all of the fasthttp advantages (especially high performance :) ). + +Important points: + +* Fasthttp works with [RequestHandler functions](https://godoc.org/github.com/valyala/fasthttp#RequestHandler) +instead of objects implementing [Handler interface](https://golang.org/pkg/net/http/#Handler). +Fortunately, it is easy to pass bound struct methods to fasthttp: + + ```go + type MyHandler struct { + foobar string + } + + // request handler in net/http style, i.e. method bound to MyHandler struct. + func (h *MyHandler) HandleFastHTTP(ctx *fasthttp.RequestCtx) { + // notice that we may access MyHandler properties here - see h.foobar. + fmt.Fprintf(ctx, "Hello, world! Requested path is %q. Foobar is %q", + ctx.Path(), h.foobar) + } + + // request handler in fasthttp style, i.e. just plain function. + func fastHTTPHandler(ctx *fasthttp.RequestCtx) { + fmt.Fprintf(ctx, "Hi there! RequestURI is %q", ctx.RequestURI()) + } + + // pass bound struct method to fasthttp + myHandler := &MyHandler{ + foobar: "foobar", + } + fasthttp.ListenAndServe(":8080", myHandler.HandleFastHTTP) + + // pass plain function to fasthttp + fasthttp.ListenAndServe(":8081", fastHTTPHandler) + ``` + +* The [RequestHandler](https://godoc.org/github.com/valyala/fasthttp#RequestHandler) +accepts only one argument - [RequestCtx](https://godoc.org/github.com/valyala/fasthttp#RequestCtx). +It contains all the functionality required for http request processing +and response writing. Below is an example of a simple request handler conversion +from net/http to fasthttp. + + ```go + // net/http request handler + requestHandler := func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case "/foo": + fooHandler(w, r) + case "/bar": + barHandler(w, r) + default: + http.Error(w, "Unsupported path", http.StatusNotFound) + } + } + ``` + + ```go + // the corresponding fasthttp request handler + requestHandler := func(ctx *fasthttp.RequestCtx) { + switch string(ctx.Path()) { + case "/foo": + fooHandler(ctx) + case "/bar": + barHandler(ctx) + default: + ctx.Error("Unsupported path", fasthttp.StatusNotFound) + } + } + ``` + +* Fasthttp allows setting response headers and writing response body +in an arbitrary order. There is no 'headers first, then body' restriction +like in net/http. The following code is valid for fasthttp: + + ```go + requestHandler := func(ctx *fasthttp.RequestCtx) { + // set some headers and status code first + ctx.SetContentType("foo/bar") + ctx.SetStatusCode(fasthttp.StatusOK) + + // then write the first part of body + fmt.Fprintf(ctx, "this is the first part of body\n") + + // then set more headers + ctx.Response.Header.Set("Foo-Bar", "baz") + + // then write more body + fmt.Fprintf(ctx, "this is the second part of body\n") + + // then override already written body + ctx.SetBody([]byte("this is completely new body contents")) + + // then update status code + ctx.SetStatusCode(fasthttp.StatusNotFound) + + // basically, anything may be updated many times before + // returning from RequestHandler. 
+ // + // Unlike net/http fasthttp doesn't put response to the wire until + // returning from RequestHandler. + } + ``` + +* Fasthttp doesn't provide [ServeMux](https://golang.org/pkg/net/http/#ServeMux), +but there are more powerful third-party routers and web frameworks +with fasthttp support: + + * [fasthttp-routing](https://github.com/qiangxue/fasthttp-routing) + * [fasthttprouter](https://github.com/buaazp/fasthttprouter) + * [lu](https://github.com/vincentLiuxiang/lu) + * [atreugo](https://github.com/savsgio/atreugo) + + Net/http code with simple ServeMux is trivially converted to fasthttp code: + + ```go + // net/http code + + m := &http.ServeMux{} + m.HandleFunc("/foo", fooHandlerFunc) + m.HandleFunc("/bar", barHandlerFunc) + m.Handle("/baz", bazHandler) + + http.ListenAndServe(":80", m) + ``` + + ```go + // the corresponding fasthttp code + m := func(ctx *fasthttp.RequestCtx) { + switch string(ctx.Path()) { + case "/foo": + fooHandlerFunc(ctx) + case "/bar": + barHandlerFunc(ctx) + case "/baz": + bazHandler.HandlerFunc(ctx) + default: + ctx.Error("not found", fasthttp.StatusNotFound) + } + } + + fasthttp.ListenAndServe(":80", m) + ``` + +* net/http -> fasthttp conversion table: + + * All the pseudocode below assumes w, r and ctx have these types: + ```go + var ( + w http.ResponseWriter + r *http.Request + ctx *fasthttp.RequestCtx + ) + ``` + * r.Body -> [ctx.PostBody()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.PostBody) + * r.URL.Path -> [ctx.Path()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.Path) + * r.URL -> [ctx.URI()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.URI) + * r.Method -> [ctx.Method()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.Method) + * r.Header -> [ctx.Request.Header](https://godoc.org/github.com/valyala/fasthttp#RequestHeader) + * r.Header.Get() -> [ctx.Request.Header.Peek()](https://godoc.org/github.com/valyala/fasthttp#RequestHeader.Peek) + * r.Host -> [ctx.Host()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.Host) + * r.Form -> [ctx.QueryArgs()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.QueryArgs) + + [ctx.PostArgs()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.PostArgs) + * r.PostForm -> [ctx.PostArgs()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.PostArgs) + * r.FormValue() -> [ctx.FormValue()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.FormValue) + * r.FormFile() -> [ctx.FormFile()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.FormFile) + * r.MultipartForm -> [ctx.MultipartForm()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.MultipartForm) + * r.RemoteAddr -> [ctx.RemoteAddr()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.RemoteAddr) + * r.RequestURI -> [ctx.RequestURI()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.RequestURI) + * r.TLS -> [ctx.IsTLS()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.IsTLS) + * r.Cookie() -> [ctx.Request.Header.Cookie()](https://godoc.org/github.com/valyala/fasthttp#RequestHeader.Cookie) + * r.Referer() -> [ctx.Referer()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.Referer) + * r.UserAgent() -> [ctx.UserAgent()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.UserAgent) + * w.Header() -> [ctx.Response.Header](https://godoc.org/github.com/valyala/fasthttp#ResponseHeader) + * w.Header().Set() -> [ctx.Response.Header.Set()](https://godoc.org/github.com/valyala/fasthttp#ResponseHeader.Set) + * w.Header().Set("Content-Type") 
-> [ctx.SetContentType()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.SetContentType) + * w.Header().Set("Set-Cookie") -> [ctx.Response.Header.SetCookie()](https://godoc.org/github.com/valyala/fasthttp#ResponseHeader.SetCookie) + * w.Write() -> [ctx.Write()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.Write), + [ctx.SetBody()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.SetBody), + [ctx.SetBodyStream()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.SetBodyStream), + [ctx.SetBodyStreamWriter()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.SetBodyStreamWriter) + * w.WriteHeader() -> [ctx.SetStatusCode()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.SetStatusCode) + * w.(http.Hijacker).Hijack() -> [ctx.Hijack()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.Hijack) + * http.Error() -> [ctx.Error()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.Error) + * http.FileServer() -> [fasthttp.FSHandler()](https://godoc.org/github.com/valyala/fasthttp#FSHandler), + [fasthttp.FS](https://godoc.org/github.com/valyala/fasthttp#FS) + * http.ServeFile() -> [fasthttp.ServeFile()](https://godoc.org/github.com/valyala/fasthttp#ServeFile) + * http.Redirect() -> [ctx.Redirect()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.Redirect) + * http.NotFound() -> [ctx.NotFound()](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.NotFound) + * http.StripPrefix() -> [fasthttp.PathRewriteFunc](https://godoc.org/github.com/valyala/fasthttp#PathRewriteFunc) + +* *VERY IMPORTANT!* Fasthttp disallows holding references +to [RequestCtx](https://godoc.org/github.com/valyala/fasthttp#RequestCtx) or to its' +members after returning from [RequestHandler](https://godoc.org/github.com/valyala/fasthttp#RequestHandler). +Otherwise [data races](http://blog.golang.org/race-detector) are inevitable. +Carefully inspect all the net/http request handlers converted to fasthttp whether +they retain references to RequestCtx or to its' members after returning. +RequestCtx provides the following _band aids_ for this case: + + * Wrap RequestHandler into [TimeoutHandler](https://godoc.org/github.com/valyala/fasthttp#TimeoutHandler). + * Call [TimeoutError](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.TimeoutError) + before returning from RequestHandler if there are references to RequestCtx or to its' members. + See [the example](https://godoc.org/github.com/valyala/fasthttp#example-RequestCtx-TimeoutError) + for more details. + +Use this brilliant tool - [race detector](http://blog.golang.org/race-detector) - +for detecting and eliminating data races in your program. If you detected +data race related to fasthttp in your program, then there is high probability +you forgot calling [TimeoutError](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.TimeoutError) +before returning from [RequestHandler](https://godoc.org/github.com/valyala/fasthttp#RequestHandler). + +* Blind switching from net/http to fasthttp won't give you performance boost. +While fasthttp is optimized for speed, its' performance may be easily saturated +by slow [RequestHandler](https://godoc.org/github.com/valyala/fasthttp#RequestHandler). +So [profile](http://blog.golang.org/profiling-go-programs) and optimize your +code after switching to fasthttp. For instance, use [quicktemplate](https://github.com/valyala/quicktemplate) +instead of [html/template](https://golang.org/pkg/html/template/). 
+ +* See also [fasthttputil](https://godoc.org/github.com/valyala/fasthttp/fasthttputil), +[fasthttpadaptor](https://godoc.org/github.com/valyala/fasthttp/fasthttpadaptor) and +[expvarhandler](https://godoc.org/github.com/valyala/fasthttp/expvarhandler). + + +# Performance optimization tips for multi-core systems + +* Use [reuseport](https://godoc.org/github.com/valyala/fasthttp/reuseport) listener. +* Run a separate server instance per CPU core with GOMAXPROCS=1. +* Pin each server instance to a separate CPU core using [taskset](http://linux.die.net/man/1/taskset). +* Ensure the interrupts of multiqueue network card are evenly distributed between CPU cores. + See [this article](https://blog.cloudflare.com/how-to-achieve-low-latency/) for details. +* Use Go 1.13 as it provides some considerable performance improvements. + + +# Fasthttp best practices + +* Do not allocate objects and `[]byte` buffers - just reuse them as much + as possible. Fasthttp API design encourages this. +* [sync.Pool](https://golang.org/pkg/sync/#Pool) is your best friend. +* [Profile your program](http://blog.golang.org/profiling-go-programs) + in production. + `go tool pprof --alloc_objects your-program mem.pprof` usually gives better + insights for optimization opportunities than `go tool pprof your-program cpu.pprof`. +* Write [tests and benchmarks](https://golang.org/pkg/testing/) for hot paths. +* Avoid conversion between `[]byte` and `string`, since this may result in memory + allocation+copy. Fasthttp API provides functions for both `[]byte` and `string` - + use these functions instead of converting manually between `[]byte` and `string`. + There are some exceptions - see [this wiki page](https://github.com/golang/go/wiki/CompilerOptimizations#string-and-byte) + for more details. +* Verify your tests and production code under + [race detector](https://golang.org/doc/articles/race_detector.html) on a regular basis. +* Prefer [quicktemplate](https://github.com/valyala/quicktemplate) instead of + [html/template](https://golang.org/pkg/html/template/) in your webserver. + + +# Tricks with `[]byte` buffers + +The following tricks are used by fasthttp. Use them in your code too. + +* Standard Go functions accept nil buffers +```go +var ( + // both buffers are uninitialized + dst []byte + src []byte +) +dst = append(dst, src...) // is legal if dst is nil and/or src is nil +copy(dst, src) // is legal if dst is nil and/or src is nil +(string(src) == "") // is true if src is nil +(len(src) == 0) // is true if src is nil +src = src[:0] // works like a charm with nil src + +// this for loop doesn't panic if src is nil +for i, ch := range src { + doSomething(i, ch) +} +``` + +So throw away nil checks for `[]byte` buffers from you code. For example, +```go +srcLen := 0 +if src != nil { + srcLen = len(src) +} +``` + +becomes + +```go +srcLen := len(src) +``` + +* String may be appended to `[]byte` buffer with `append` +```go +dst = append(dst, "foobar"...) +``` + +* `[]byte` buffer may be extended to its' capacity. +```go +buf := make([]byte, 100) +a := buf[:10] // len(a) == 10, cap(a) == 100. +b := a[:100] // is valid, since cap(a) == 100. +``` + +* All fasthttp functions accept nil `[]byte` buffer +```go +statusCode, body, err := fasthttp.Get(nil, "http://google.com/") +uintBuf := fasthttp.AppendUint(nil, 1234) +``` + +# Related projects + + * [fasthttp](https://github.com/fasthttp) - various useful + helpers for projects based on fasthttp. 
+ * [fasthttp-routing](https://github.com/qiangxue/fasthttp-routing) - fast and + powerful routing package for fasthttp servers. + * [fasthttprouter](https://github.com/buaazp/fasthttprouter) - a high + performance fasthttp request router that scales well. + * [gramework](https://github.com/gramework/gramework) - a web framework made by one of fasthttp maintainers + * [lu](https://github.com/vincentLiuxiang/lu) - a high performance + go middleware web framework which is based on fasthttp. + * [websocket](https://github.com/fasthttp/websocket) - Gorilla-based + websocket implementation for fasthttp. + * [fasthttpsession](https://github.com/phachon/fasthttpsession) - a fast and powerful session package for fasthttp servers. + * [atreugo](https://github.com/savsgio/atreugo) - High performance and extensible micro web framework with zero memory allocations in hot paths. + * [kratgo](https://github.com/savsgio/kratgo) - Simple, lightweight and ultra-fast HTTP Cache to speed up your websites. + * [kit-plugins](https://github.com/wencan/kit-plugins/tree/master/transport/fasthttp) - go-kit transport implementation for fasthttp. + + +# FAQ + +* *Why creating yet another http package instead of optimizing net/http?* + + Because net/http API limits many optimization opportunities. + For example: + * net/http Request object lifetime isn't limited by request handler execution + time. So the server must create a new request object per each request instead + of reusing existing objects like fasthttp does. + * net/http headers are stored in a `map[string][]string`. So the server + must parse all the headers, convert them from `[]byte` to `string` and put + them into the map before calling user-provided request handler. + This all requires unnecessary memory allocations avoided by fasthttp. + * net/http client API requires creating a new response object per each request. + +* *Why fasthttp API is incompatible with net/http?* + + Because net/http API limits many optimization opportunities. See the answer + above for more details. Also certain net/http API parts are suboptimal + for use: + * Compare [net/http connection hijacking](https://golang.org/pkg/net/http/#Hijacker) + to [fasthttp connection hijacking](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.Hijack). + * Compare [net/http Request.Body reading](https://golang.org/pkg/net/http/#Request) + to [fasthttp request body reading](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.PostBody). + +* *Why fasthttp doesn't support HTTP/2.0 and WebSockets?* + + [HTTP/2.0 support](https://github.com/fasthttp/http2) is in progress. [WebSockets](https://github.com/fasthttp/websockets) has been done already. + Third parties also may use [RequestCtx.Hijack](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.Hijack) + for implementing these goodies. + +* *Are there known net/http advantages comparing to fasthttp?* + + Yes: + * net/http supports [HTTP/2.0 starting from go1.6](https://http2.golang.org/). + * net/http API is stable, while fasthttp API constantly evolves. + * net/http handles more HTTP corner cases. + * net/http should contain less bugs, since it is used and tested by much + wider audience. + * net/http works on Go older than 1.5. + +* *Why fasthttp API prefers returning `[]byte` instead of `string`?* + + Because `[]byte` to `string` conversion isn't free - it requires memory + allocation and copy. Feel free wrapping returned `[]byte` result into + `string()` if you prefer working with strings instead of byte slices. 
+ But be aware that this has non-zero overhead. + +* *Which GO versions are supported by fasthttp?* + + Go1.5+. Older versions won't be supported, since their standard package + [miss useful functions](https://github.com/valyala/fasthttp/issues/5). + + **NOTE**: Go 1.9.7 is the oldest tested version. We recommend you to update as soon as you can. As of 1.11.3 we will drop 1.9.x support. + +* *Please provide real benchmark data and server information* + + See [this issue](https://github.com/valyala/fasthttp/issues/4). + +* *Are there plans to add request routing to fasthttp?* + + There are no plans to add request routing into fasthttp. + Use third-party routers and web frameworks with fasthttp support: + + * [fasthttp-routing](https://github.com/qiangxue/fasthttp-routing) + * [fasthttprouter](https://github.com/buaazp/fasthttprouter) + * [gramework](https://github.com/gramework/gramework) + * [lu](https://github.com/vincentLiuxiang/lu) + * [atreugo](https://github.com/savsgio/atreugo) + + See also [this issue](https://github.com/valyala/fasthttp/issues/9) for more info. + +* *I detected data race in fasthttp!* + + Cool! [File a bug](https://github.com/valyala/fasthttp/issues/new). But before + doing this check the following in your code: + + * Make sure there are no references to [RequestCtx](https://godoc.org/github.com/valyala/fasthttp#RequestCtx) + or to its' members after returning from [RequestHandler](https://godoc.org/github.com/valyala/fasthttp#RequestHandler). + * Make sure you call [TimeoutError](https://godoc.org/github.com/valyala/fasthttp#RequestCtx.TimeoutError) + before returning from [RequestHandler](https://godoc.org/github.com/valyala/fasthttp#RequestHandler) + if there are references to [RequestCtx](https://godoc.org/github.com/valyala/fasthttp#RequestCtx) + or to its' members, which may be accessed by other goroutines. + +* *I didn't find an answer for my question here* + + Try exploring [these questions](https://github.com/valyala/fasthttp/issues?q=label%3Aquestion). diff --git a/vendor/github.com/valyala/fasthttp/SECURITY.md b/vendor/github.com/valyala/fasthttp/SECURITY.md new file mode 100644 index 000000000..2beab1714 --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/SECURITY.md @@ -0,0 +1,115 @@ +### TL;DR + +We use a simplified version of [Golang Security Policy](https://golang.org/security). +For example, for now we skip CVE assignment. + +### Reporting a Security Bug + +Please report to us any issues you find. This document explains how to do that and what to expect in return. + +All security bugs in our releases should be reported by email to oss-security@highload.solutions. +This mail is delivered to a small security team. +Your email will be acknowledged within 24 hours, and you'll receive a more detailed response +to your email within 72 hours indicating the next steps in handling your report. +For critical problems, you can encrypt your report using our PGP key (listed below). + +Please use a descriptive subject line for your report email. +After the initial reply to your report, the security team will +endeavor to keep you informed of the progress being made towards a fix and full announcement. +These updates will be sent at least every five days. +In reality, this is more likely to be every 24-48 hours. 
+ +If you have not received a reply to your email within 48 hours or you have not heard from the security +team for the past five days please contact us by email to developers@highload.solutions or by Telegram message +to [our support](https://t.me/highload_support). +Please note that developers@highload.solutions list includes all developers, who may be outside our opensource security team. +When escalating on this list, please do not disclose the details of the issue. +Simply state that you're trying to reach a member of the security team. + +### Flagging Existing Issues as Security-related + +If you believe that an existing issue is security-related, we ask that you send an email to oss-security@highload.solutions. +The email should include the issue ID and a short description of why it should be handled according to this security policy. + +### Disclosure Process + +Our project uses the following disclosure process: + +- Once the security report is received it is assigned a primary handler. This person coordinates the fix and release process. +- The issue is confirmed and a list of affected software is determined. +- Code is audited to find any potential similar problems. +- Fixes are prepared for the two most recent major releases and the head/master revision. These fixes are not yet committed to the public repository. +- To notify users, a new issue without security details is submitted to our GitHub repository. +- Three working days following this notification, the fixes are applied to the public repository and a new release is issued. +- On the date that the fixes are applied, announcement is published in the issue. + +This process can take some time, especially when coordination is required with maintainers of other projects. +Every effort will be made to handle the bug in as timely a manner as possible, however it's important that we follow +the process described above to ensure that disclosures are handled consistently. + +### Receiving Security Updates +The best way to receive security announcements is to subscribe ("Watch") to our repository. +Any GitHub issues pertaining to a security issue will be prefixed with [security]. + +### Comments on This Policy +If you have any suggestions to improve this policy, please send an email to oss-security@highload.solutions for discussion. + +### PGP Key for oss-security@highload.solutions + +We accept PGP-encrypted email, but the majority of the security team are not regular PGP users +so it's somewhat inconvenient. Please only use PGP for critical security reports. 
+ +``` +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQINBFzdjYUBEACa3YN+QVSlnXofUjxr+YrmIaF+da0IUq+TRM4aqUXALsemEdGh +yIl7Z6qOOy1d2kPe6t//H9l/92lJ1X7i6aEBK4n/pnPZkwbpy9gGpebgvTZFvcbe +mFhF6k1FM35D8TxneJSjizPyGhJPqcr5qccqf8R64TlQx5Ud1JqT2l8P1C5N7gNS +lEYXq1h4zBCvTWk1wdeLRRPx7Bn6xrgmyu/k61dLoJDvpvWNATVFDA67oTrPgzTW +xtLbbk/xm0mK4a8zMzIpNyz1WkaJW9+4HFXaL+yKlsx7iHe2O7VlGoqS0kdeQup4 +1HIw/P7yc0jBlNMLUzpuA6ElYUwESWsnCI71YY1x4rKgI+GqH1mWwgn7tteuXQtb +Zj0vEdjK3IKIOSbzbzAvSbDt8F1+o7EMtdy1eUysjKSQgFkDlT6JRmYvEup5/IoG +iknh/InQq9RmGFKii6pXWWoltC0ebfCwYOXvymyDdr/hYDqJeHS9Tenpy86Doaaf +HGf5nIFAMB2G5ctNpBwzNXR2MAWkeHQgdr5a1xmog0hS125usjnUTet3QeCyo4kd +gVouoOroMcqFFUXdYaMH4c3KWz0afhTmIaAsFFOv/eMdadVA4QyExTJf3TAoQ+kH +lKDlbOAIxEZWRPDFxMRixaVPQC+VxhBcaQ+yNoaUkM0V2m8u8sDBpzi1OQARAQAB +tDxPU1MgU2VjdXJpdHksIEhpZ2hsb2FkIExURCA8b3NzLXNlY3VyaXR5QGhpZ2hs +b2FkLnNvbHV0aW9ucz6JAlQEEwEIAD4WIQRljYp380uKq2g8TeqsQcvu+Qp2TAUC +XN2NhQIbAwUJB4YfgAULCQgHAgYVCgkICwIEFgIDAQIeAQIXgAAKCRCsQcvu+Qp2 +TKmED/96YoQoOjD28blFFrigvAsiNcNNZoX9I0dX1lNpD83fBJf+/9i+x4jqUnI5 +5XK/DFTDbhpw8kQBpxS9eEuIYnuo0RdLLp1ctNWTlpwfyHn92mGddl/uBdYHUuUk +cjhIQcFaCcWRY+EpamDlv1wmZ83IwBr8Hu5FS+/Msyw1TBvtTRVKW1KoGYMYoXLk +BzIglRPwn821B6s4BvK/RJnZkrmHMBZBfYMf+iSMSYd2yPmfT8wbcAjgjLfQa28U +gbt4u9xslgKjuM83IqwFfEXBnm7su3OouGWqc+62mQTsbnK65zRFnx6GXRXC1BAi +6m9Tm1PU0IiINz66ainquspkXYeHjd9hTwfR3BdFnzBTRRM01cKMFabWbLj8j0p8 +fF4g9cxEdiLrzEF7Yz4WY0mI4Cpw4eJZfsHMc07Jn7QxfJhIoq+rqBOtEmTjnxMh +aWeykoXMHlZN4K0ZrAytozVH1D4bugWA9Zuzi9U3F9hrVVABm11yyhd2iSqI6/FR +GcCFOCBW1kEJbzoEguub+BV8LDi8ldljHalvur5k/VFhoDBxniYNsKmiCLVCmDWs +/nF84hCReAOJt0vDGwqHe3E2BFFPbKwdJLRNkjxBY0c/pvaV+JxbWQmaxDZNeIFV +hFcVGp48HNY3qLWZdsQIfT9m1masJFLVuq8Wx7bYs8Et5eFnH7kCDQRc3Y2FARAA +2DJWAxABydyIdCxgFNdqnYyWS46vh2DmLmRMqgasNlD0ozG4S9bszBsgnUI2Xs06 +J76kFRh8MMHcu9I4lUKCQzfrA4uHkiOK5wvNCaWP+C6JUYNHsqPwk/ILO3gtQ/Ws +LLf/PW3rJZVOZB+WY8iaYc20l5vukTaVw4qbEi9dtLkJvVpNHt//+jayXU6s3ew1 +2X5xdwyAZxaxlnzFaY/Xo/qR+bZhVFC0T9pAECnHv9TVhFGp0JE9ipPGnro5xTIS +LttdAkzv4AuSVTIgWgTkh8nN8t7STJqfPEv0I12nmmYHMUyTYOurkfskF3jY2x6x +8l02NQ4d5KdC3ReV1j51swrGcZCwsWNp51jnEXKwo+B0NM5OmoRrNJgF2iDgLehs +hP00ljU7cB8/1/7kdHZStYaUHICFOFqHzg415FlYm+jpY0nJp/b9BAO0d0/WYnEe +Xjihw8EVBAqzEt4kay1BQonZAypeYnGBJr7vNvdiP+mnRwly5qZSGiInxGvtZZFt +zL1E3osiF+muQxFcM63BeGdJeYXy+MoczkWa4WNggfcHlGAZkMYiv28zpr4PfrK9 +mvj4Nu8s71PE9pPpBoZcNDf9v1sHuu96jDSITsPx5YMvvKZWhzJXFKzk6YgAsNH/ +MF0G+/qmKJZpCdvtHKpYM1uHX85H81CwWJFfBPthyD8AEQEAAYkCPAQYAQgAJhYh +BGWNinfzS4qraDxN6qxBy+75CnZMBQJc3Y2FAhsMBQkHhh+AAAoJEKxBy+75CnZM +Rn8P/RyL1bhU4Q4WpvmlkepCAwNA0G3QvnKcSZNHEPE5h7H3IyrA/qy16A9eOsgm +sthsHYlo5A5lRIy4wPHkFCClMrMHdKuoS72//qgw+oOrBcwb7Te+Nas+ewhaJ7N9 +vAX06vDH9bLl52CPbtats5+eBpePgP3HDPxd7CWHxq9bzJTbzqsTkN7JvoovR2dP +itPJDij7QYLYVEM1t7QxUVpVwAjDi/kCtC9ts5L+V0snF2n3bHZvu04EXdpvxOQI +pG/7Q+/WoI8NU6Bb/FA3tJGYIhSwI3SY+5XV/TAZttZaYSh2SD8vhc+eo+gW9sAN +xa+VESBQCht9+tKIwEwHs1efoRgFdbwwJ2c+33+XydQ6yjdXoX1mn2uyCr82jorZ +xTzbkY04zr7oZ+0fLpouOFg/mrSL4w2bWEhdHuyoVthLBjnRme0wXCaS3g3mYdLG +nSUkogOGOOvvvBtoq/vfx0Eu79piUtw5D8yQSrxLDuz8GxCrVRZ0tYIHb26aTE9G +cDsW/Lg5PjcY/LgVNEWOxDQDFVurlImnlVJFb3q+NrWvPbgeIEWwJDCay/z25SEH +k3bSOXLp8YGRnlkWUmoeL4g/CCK52iAAlfscZNoKMILhBnbCoD657jpa5GQKJj/U +Q8kjgr7kwV/RSosNV9HCPj30mVyiCQ1xg+ZLzMKXVCuBWd+G +=lnt2 +-----END PGP PUBLIC KEY BLOCK----- +``` diff --git a/vendor/github.com/valyala/fasthttp/TODO b/vendor/github.com/valyala/fasthttp/TODO new file mode 100644 index 000000000..ce7505f1c --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/TODO @@ -0,0 +1,4 @@ +- SessionClient with referer and cookies support. +- ProxyHandler similar to FSHandler. 
+- WebSockets. See https://tools.ietf.org/html/rfc6455 . +- HTTP/2.0. See https://tools.ietf.org/html/rfc7540 . diff --git a/vendor/github.com/valyala/fasthttp/args.go b/vendor/github.com/valyala/fasthttp/args.go new file mode 100644 index 000000000..b3ad7e087 --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/args.go @@ -0,0 +1,588 @@ +package fasthttp + +import ( + "bytes" + "errors" + "io" + "sort" + "sync" + + "github.com/valyala/bytebufferpool" +) + +const ( + argsNoValue = true + argsHasValue = false +) + +// AcquireArgs returns an empty Args object from the pool. +// +// The returned Args may be returned to the pool with ReleaseArgs +// when no longer needed. This allows reducing GC load. +func AcquireArgs() *Args { + return argsPool.Get().(*Args) +} + +// ReleaseArgs returns the object acquired via AcquireArgs to the pool. +// +// Do not access the released Args object, otherwise data races may occur. +func ReleaseArgs(a *Args) { + a.Reset() + argsPool.Put(a) +} + +var argsPool = &sync.Pool{ + New: func() interface{} { + return &Args{} + }, +} + +// Args represents query arguments. +// +// It is forbidden copying Args instances. Create new instances instead +// and use CopyTo(). +// +// Args instance MUST NOT be used from concurrently running goroutines. +type Args struct { + noCopy noCopy //nolint:unused,structcheck + + args []argsKV + buf []byte +} + +type argsKV struct { + key []byte + value []byte + noValue bool +} + +// Reset clears query args. +func (a *Args) Reset() { + a.args = a.args[:0] +} + +// CopyTo copies all args to dst. +func (a *Args) CopyTo(dst *Args) { + dst.Reset() + dst.args = copyArgs(dst.args, a.args) +} + +// VisitAll calls f for each existing arg. +// +// f must not retain references to key and value after returning. +// Make key and/or value copies if you need storing them after returning. +func (a *Args) VisitAll(f func(key, value []byte)) { + visitArgs(a.args, f) +} + +// Len returns the number of query args. +func (a *Args) Len() int { + return len(a.args) +} + +// Parse parses the given string containing query args. +func (a *Args) Parse(s string) { + a.buf = append(a.buf[:0], s...) + a.ParseBytes(a.buf) +} + +// ParseBytes parses the given b containing query args. +func (a *Args) ParseBytes(b []byte) { + a.Reset() + + var s argsScanner + s.b = b + + var kv *argsKV + a.args, kv = allocArg(a.args) + for s.next(kv) { + if len(kv.key) > 0 || len(kv.value) > 0 { + a.args, kv = allocArg(a.args) + } + } + a.args = releaseArg(a.args) +} + +// String returns string representation of query args. +func (a *Args) String() string { + return string(a.QueryString()) +} + +// QueryString returns query string for the args. +// +// The returned value is valid until the next call to Args methods. +func (a *Args) QueryString() []byte { + a.buf = a.AppendBytes(a.buf[:0]) + return a.buf +} + +// Sort sorts Args by key and then value using 'f' as comparison function. +// +// For example args.Sort(bytes.Compare) +func (a *Args) Sort(f func(x, y []byte) int) { + sort.SliceStable(a.args, func(i, j int) bool { + n := f(a.args[i].key, a.args[j].key) + if n == 0 { + return f(a.args[i].value, a.args[j].value) == -1 + } + return n == -1 + }) +} + +// AppendBytes appends query string to dst and returns the extended dst. 
+func (a *Args) AppendBytes(dst []byte) []byte { + for i, n := 0, len(a.args); i < n; i++ { + kv := &a.args[i] + dst = AppendQuotedArg(dst, kv.key) + if !kv.noValue { + dst = append(dst, '=') + if len(kv.value) > 0 { + dst = AppendQuotedArg(dst, kv.value) + } + } + if i+1 < n { + dst = append(dst, '&') + } + } + return dst +} + +// WriteTo writes query string to w. +// +// WriteTo implements io.WriterTo interface. +func (a *Args) WriteTo(w io.Writer) (int64, error) { + n, err := w.Write(a.QueryString()) + return int64(n), err +} + +// Del deletes argument with the given key from query args. +func (a *Args) Del(key string) { + a.args = delAllArgs(a.args, key) +} + +// DelBytes deletes argument with the given key from query args. +func (a *Args) DelBytes(key []byte) { + a.args = delAllArgs(a.args, b2s(key)) +} + +// Add adds 'key=value' argument. +// +// Multiple values for the same key may be added. +func (a *Args) Add(key, value string) { + a.args = appendArg(a.args, key, value, argsHasValue) +} + +// AddBytesK adds 'key=value' argument. +// +// Multiple values for the same key may be added. +func (a *Args) AddBytesK(key []byte, value string) { + a.args = appendArg(a.args, b2s(key), value, argsHasValue) +} + +// AddBytesV adds 'key=value' argument. +// +// Multiple values for the same key may be added. +func (a *Args) AddBytesV(key string, value []byte) { + a.args = appendArg(a.args, key, b2s(value), argsHasValue) +} + +// AddBytesKV adds 'key=value' argument. +// +// Multiple values for the same key may be added. +func (a *Args) AddBytesKV(key, value []byte) { + a.args = appendArg(a.args, b2s(key), b2s(value), argsHasValue) +} + +// AddNoValue adds only 'key' as argument without the '='. +// +// Multiple values for the same key may be added. +func (a *Args) AddNoValue(key string) { + a.args = appendArg(a.args, key, "", argsNoValue) +} + +// AddBytesKNoValue adds only 'key' as argument without the '='. +// +// Multiple values for the same key may be added. +func (a *Args) AddBytesKNoValue(key []byte) { + a.args = appendArg(a.args, b2s(key), "", argsNoValue) +} + +// Set sets 'key=value' argument. +func (a *Args) Set(key, value string) { + a.args = setArg(a.args, key, value, argsHasValue) +} + +// SetBytesK sets 'key=value' argument. +func (a *Args) SetBytesK(key []byte, value string) { + a.args = setArg(a.args, b2s(key), value, argsHasValue) +} + +// SetBytesV sets 'key=value' argument. +func (a *Args) SetBytesV(key string, value []byte) { + a.args = setArg(a.args, key, b2s(value), argsHasValue) +} + +// SetBytesKV sets 'key=value' argument. +func (a *Args) SetBytesKV(key, value []byte) { + a.args = setArgBytes(a.args, key, value, argsHasValue) +} + +// SetNoValue sets only 'key' as argument without the '='. +// +// Only key in argumemt, like key1&key2 +func (a *Args) SetNoValue(key string) { + a.args = setArg(a.args, key, "", argsNoValue) +} + +// SetBytesKNoValue sets 'key' argument. +func (a *Args) SetBytesKNoValue(key []byte) { + a.args = setArg(a.args, b2s(key), "", argsNoValue) +} + +// Peek returns query arg value for the given key. +// +// Returned value is valid until the next Args call. +func (a *Args) Peek(key string) []byte { + return peekArgStr(a.args, key) +} + +// PeekBytes returns query arg value for the given key. +// +// Returned value is valid until the next Args call. +func (a *Args) PeekBytes(key []byte) []byte { + return peekArgBytes(a.args, key) +} + +// PeekMulti returns all the arg values for the given key. 
+func (a *Args) PeekMulti(key string) [][]byte { + var values [][]byte + a.VisitAll(func(k, v []byte) { + if string(k) == key { + values = append(values, v) + } + }) + return values +} + +// PeekMultiBytes returns all the arg values for the given key. +func (a *Args) PeekMultiBytes(key []byte) [][]byte { + return a.PeekMulti(b2s(key)) +} + +// Has returns true if the given key exists in Args. +func (a *Args) Has(key string) bool { + return hasArg(a.args, key) +} + +// HasBytes returns true if the given key exists in Args. +func (a *Args) HasBytes(key []byte) bool { + return hasArg(a.args, b2s(key)) +} + +// ErrNoArgValue is returned when Args value with the given key is missing. +var ErrNoArgValue = errors.New("no Args value for the given key") + +// GetUint returns uint value for the given key. +func (a *Args) GetUint(key string) (int, error) { + value := a.Peek(key) + if len(value) == 0 { + return -1, ErrNoArgValue + } + return ParseUint(value) +} + +// SetUint sets uint value for the given key. +func (a *Args) SetUint(key string, value int) { + bb := bytebufferpool.Get() + bb.B = AppendUint(bb.B[:0], value) + a.SetBytesV(key, bb.B) + bytebufferpool.Put(bb) +} + +// SetUintBytes sets uint value for the given key. +func (a *Args) SetUintBytes(key []byte, value int) { + a.SetUint(b2s(key), value) +} + +// GetUintOrZero returns uint value for the given key. +// +// Zero (0) is returned on error. +func (a *Args) GetUintOrZero(key string) int { + n, err := a.GetUint(key) + if err != nil { + n = 0 + } + return n +} + +// GetUfloat returns ufloat value for the given key. +func (a *Args) GetUfloat(key string) (float64, error) { + value := a.Peek(key) + if len(value) == 0 { + return -1, ErrNoArgValue + } + return ParseUfloat(value) +} + +// GetUfloatOrZero returns ufloat value for the given key. +// +// Zero (0) is returned on error. +func (a *Args) GetUfloatOrZero(key string) float64 { + f, err := a.GetUfloat(key) + if err != nil { + f = 0 + } + return f +} + +// GetBool returns boolean value for the given key. +// +// true is returned for "1", "t", "T", "true", "TRUE", "True", "y", "yes", "Y", "YES", "Yes", +// otherwise false is returned. +func (a *Args) GetBool(key string) bool { + switch b2s(a.Peek(key)) { + // Support the same true cases as strconv.ParseBool + // See: https://github.com/golang/go/blob/4e1b11e2c9bdb0ddea1141eed487be1a626ff5be/src/strconv/atob.go#L12 + // and Y and Yes versions. + case "1", "t", "T", "true", "TRUE", "True", "y", "yes", "Y", "YES", "Yes": + return true + default: + return false + } +} + +func visitArgs(args []argsKV, f func(k, v []byte)) { + for i, n := 0, len(args); i < n; i++ { + kv := &args[i] + f(kv.key, kv.value) + } +} + +func copyArgs(dst, src []argsKV) []argsKV { + if cap(dst) < len(src) { + tmp := make([]argsKV, len(src)) + copy(tmp, dst) + dst = tmp + } + n := len(src) + dst = dst[:n] + for i := 0; i < n; i++ { + dstKV := &dst[i] + srcKV := &src[i] + dstKV.key = append(dstKV.key[:0], srcKV.key...) + if srcKV.noValue { + dstKV.value = dstKV.value[:0] + } else { + dstKV.value = append(dstKV.value[:0], srcKV.value...) 
+ } + dstKV.noValue = srcKV.noValue + } + return dst +} + +func delAllArgsBytes(args []argsKV, key []byte) []argsKV { + return delAllArgs(args, b2s(key)) +} + +func delAllArgs(args []argsKV, key string) []argsKV { + for i, n := 0, len(args); i < n; i++ { + kv := &args[i] + if key == string(kv.key) { + tmp := *kv + copy(args[i:], args[i+1:]) + n-- + args[n] = tmp + args = args[:n] + } + } + return args +} + +func setArgBytes(h []argsKV, key, value []byte, noValue bool) []argsKV { + return setArg(h, b2s(key), b2s(value), noValue) +} + +func setArg(h []argsKV, key, value string, noValue bool) []argsKV { + n := len(h) + for i := 0; i < n; i++ { + kv := &h[i] + if key == string(kv.key) { + if noValue { + kv.value = kv.value[:0] + } else { + kv.value = append(kv.value[:0], value...) + } + kv.noValue = noValue + return h + } + } + return appendArg(h, key, value, noValue) +} + +func appendArgBytes(h []argsKV, key, value []byte, noValue bool) []argsKV { + return appendArg(h, b2s(key), b2s(value), noValue) +} + +func appendArg(args []argsKV, key, value string, noValue bool) []argsKV { + var kv *argsKV + args, kv = allocArg(args) + kv.key = append(kv.key[:0], key...) + if noValue { + kv.value = kv.value[:0] + } else { + kv.value = append(kv.value[:0], value...) + } + kv.noValue = noValue + return args +} + +func allocArg(h []argsKV) ([]argsKV, *argsKV) { + n := len(h) + if cap(h) > n { + h = h[:n+1] + } else { + h = append(h, argsKV{}) + } + return h, &h[n] +} + +func releaseArg(h []argsKV) []argsKV { + return h[:len(h)-1] +} + +func hasArg(h []argsKV, key string) bool { + for i, n := 0, len(h); i < n; i++ { + kv := &h[i] + if key == string(kv.key) { + return true + } + } + return false +} + +func peekArgBytes(h []argsKV, k []byte) []byte { + for i, n := 0, len(h); i < n; i++ { + kv := &h[i] + if bytes.Equal(kv.key, k) { + return kv.value + } + } + return nil +} + +func peekArgStr(h []argsKV, k string) []byte { + for i, n := 0, len(h); i < n; i++ { + kv := &h[i] + if string(kv.key) == k { + return kv.value + } + } + return nil +} + +type argsScanner struct { + b []byte +} + +func (s *argsScanner) next(kv *argsKV) bool { + if len(s.b) == 0 { + return false + } + kv.noValue = argsHasValue + + isKey := true + k := 0 + for i, c := range s.b { + switch c { + case '=': + if isKey { + isKey = false + kv.key = decodeArgAppend(kv.key[:0], s.b[:i]) + k = i + 1 + } + case '&': + if isKey { + kv.key = decodeArgAppend(kv.key[:0], s.b[:i]) + kv.value = kv.value[:0] + kv.noValue = argsNoValue + } else { + kv.value = decodeArgAppend(kv.value[:0], s.b[k:i]) + } + s.b = s.b[i+1:] + return true + } + } + + if isKey { + kv.key = decodeArgAppend(kv.key[:0], s.b) + kv.value = kv.value[:0] + kv.noValue = argsNoValue + } else { + kv.value = decodeArgAppend(kv.value[:0], s.b[k:]) + } + s.b = s.b[len(s.b):] + return true +} + +func decodeArgAppend(dst, src []byte) []byte { + if bytes.IndexByte(src, '%') < 0 && bytes.IndexByte(src, '+') < 0 { + // fast path: src doesn't contain encoded chars + return append(dst, src...) + } + + // slow path + for i := 0; i < len(src); i++ { + c := src[i] + if c == '%' { + if i+2 >= len(src) { + return append(dst, src[i:]...) 
+ } + x2 := hex2intTable[src[i+2]] + x1 := hex2intTable[src[i+1]] + if x1 == 16 || x2 == 16 { + dst = append(dst, '%') + } else { + dst = append(dst, x1<<4|x2) + i += 2 + } + } else if c == '+' { + dst = append(dst, ' ') + } else { + dst = append(dst, c) + } + } + return dst +} + +// decodeArgAppendNoPlus is almost identical to decodeArgAppend, but it doesn't +// substitute '+' with ' '. +// +// The function is copy-pasted from decodeArgAppend due to the performance +// reasons only. +func decodeArgAppendNoPlus(dst, src []byte) []byte { + if bytes.IndexByte(src, '%') < 0 { + // fast path: src doesn't contain encoded chars + return append(dst, src...) + } + + // slow path + for i := 0; i < len(src); i++ { + c := src[i] + if c == '%' { + if i+2 >= len(src) { + return append(dst, src[i:]...) + } + x2 := hex2intTable[src[i+2]] + x1 := hex2intTable[src[i+1]] + if x1 == 16 || x2 == 16 { + dst = append(dst, '%') + } else { + dst = append(dst, x1<<4|x2) + i += 2 + } + } else { + dst = append(dst, c) + } + } + return dst +} diff --git a/vendor/github.com/valyala/fasthttp/bytesconv.go b/vendor/github.com/valyala/fasthttp/bytesconv.go new file mode 100644 index 000000000..e8fbabbb4 --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/bytesconv.go @@ -0,0 +1,385 @@ +//go:generate go run bytesconv_table_gen.go + +package fasthttp + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "math" + "net" + "reflect" + "strings" + "sync" + "time" + "unsafe" +) + +// AppendHTMLEscape appends html-escaped s to dst and returns the extended dst. +func AppendHTMLEscape(dst []byte, s string) []byte { + if strings.IndexByte(s, '<') < 0 && + strings.IndexByte(s, '>') < 0 && + strings.IndexByte(s, '"') < 0 && + strings.IndexByte(s, '\'') < 0 { + + // fast path - nothing to escape + return append(dst, s...) + } + + // slow path + var prev int + var sub string + for i, n := 0, len(s); i < n; i++ { + sub = "" + switch s[i] { + case '<': + sub = "<" + case '>': + sub = ">" + case '"': + sub = """ + case '\'': + sub = "'" + } + if len(sub) > 0 { + dst = append(dst, s[prev:i]...) + dst = append(dst, sub...) + prev = i + 1 + } + } + return append(dst, s[prev:]...) +} + +// AppendHTMLEscapeBytes appends html-escaped s to dst and returns +// the extended dst. +func AppendHTMLEscapeBytes(dst, s []byte) []byte { + return AppendHTMLEscape(dst, b2s(s)) +} + +// AppendIPv4 appends string representation of the given ip v4 to dst +// and returns the extended dst. +func AppendIPv4(dst []byte, ip net.IP) []byte { + ip = ip.To4() + if ip == nil { + return append(dst, "non-v4 ip passed to AppendIPv4"...) + } + + dst = AppendUint(dst, int(ip[0])) + for i := 1; i < 4; i++ { + dst = append(dst, '.') + dst = AppendUint(dst, int(ip[i])) + } + return dst +} + +var errEmptyIPStr = errors.New("empty ip address string") + +// ParseIPv4 parses ip address from ipStr into dst and returns the extended dst. 
+func ParseIPv4(dst net.IP, ipStr []byte) (net.IP, error) { + if len(ipStr) == 0 { + return dst, errEmptyIPStr + } + if len(dst) < net.IPv4len { + dst = make([]byte, net.IPv4len) + } + copy(dst, net.IPv4zero) + dst = dst.To4() + if dst == nil { + panic("BUG: dst must not be nil") + } + + b := ipStr + for i := 0; i < 3; i++ { + n := bytes.IndexByte(b, '.') + if n < 0 { + return dst, fmt.Errorf("cannot find dot in ipStr %q", ipStr) + } + v, err := ParseUint(b[:n]) + if err != nil { + return dst, fmt.Errorf("cannot parse ipStr %q: %s", ipStr, err) + } + if v > 255 { + return dst, fmt.Errorf("cannot parse ipStr %q: ip part cannot exceed 255: parsed %d", ipStr, v) + } + dst[i] = byte(v) + b = b[n+1:] + } + v, err := ParseUint(b) + if err != nil { + return dst, fmt.Errorf("cannot parse ipStr %q: %s", ipStr, err) + } + if v > 255 { + return dst, fmt.Errorf("cannot parse ipStr %q: ip part cannot exceed 255: parsed %d", ipStr, v) + } + dst[3] = byte(v) + + return dst, nil +} + +// AppendHTTPDate appends HTTP-compliant (RFC1123) representation of date +// to dst and returns the extended dst. +func AppendHTTPDate(dst []byte, date time.Time) []byte { + dst = date.In(time.UTC).AppendFormat(dst, time.RFC1123) + copy(dst[len(dst)-3:], strGMT) + return dst +} + +// ParseHTTPDate parses HTTP-compliant (RFC1123) date. +func ParseHTTPDate(date []byte) (time.Time, error) { + return time.Parse(time.RFC1123, b2s(date)) +} + +// AppendUint appends n to dst and returns the extended dst. +func AppendUint(dst []byte, n int) []byte { + if n < 0 { + panic("BUG: int must be positive") + } + + var b [20]byte + buf := b[:] + i := len(buf) + var q int + for n >= 10 { + i-- + q = n / 10 + buf[i] = '0' + byte(n-q*10) + n = q + } + i-- + buf[i] = '0' + byte(n) + + dst = append(dst, buf[i:]...) + return dst +} + +// ParseUint parses uint from buf. +func ParseUint(buf []byte) (int, error) { + v, n, err := parseUintBuf(buf) + if n != len(buf) { + return -1, errUnexpectedTrailingChar + } + return v, err +} + +var ( + errEmptyInt = errors.New("empty integer") + errUnexpectedFirstChar = errors.New("unexpected first char found. Expecting 0-9") + errUnexpectedTrailingChar = errors.New("unexpected trailing char found. Expecting 0-9") + errTooLongInt = errors.New("too long int") +) + +func parseUintBuf(b []byte) (int, int, error) { + n := len(b) + if n == 0 { + return -1, 0, errEmptyInt + } + v := 0 + for i := 0; i < n; i++ { + c := b[i] + k := c - '0' + if k > 9 { + if i == 0 { + return -1, i, errUnexpectedFirstChar + } + return v, i, nil + } + // Test for overflow. + if v*10 < v { + return -1, i, errTooLongInt + } + v = 10*v + int(k) + } + return v, n, nil +} + +var ( + errEmptyFloat = errors.New("empty float number") + errDuplicateFloatPoint = errors.New("duplicate point found in float number") + errUnexpectedFloatEnd = errors.New("unexpected end of float number") + errInvalidFloatExponent = errors.New("invalid float number exponent") + errUnexpectedFloatChar = errors.New("unexpected char found in float number") +) + +// ParseUfloat parses unsigned float from buf. +func ParseUfloat(buf []byte) (float64, error) { + if len(buf) == 0 { + return -1, errEmptyFloat + } + b := buf + var v uint64 + var offset = 1.0 + var pointFound bool + for i, c := range b { + if c < '0' || c > '9' { + if c == '.' 
{ + if pointFound { + return -1, errDuplicateFloatPoint + } + pointFound = true + continue + } + if c == 'e' || c == 'E' { + if i+1 >= len(b) { + return -1, errUnexpectedFloatEnd + } + b = b[i+1:] + minus := -1 + switch b[0] { + case '+': + b = b[1:] + minus = 1 + case '-': + b = b[1:] + default: + minus = 1 + } + vv, err := ParseUint(b) + if err != nil { + return -1, errInvalidFloatExponent + } + return float64(v) * offset * math.Pow10(minus*int(vv)), nil + } + return -1, errUnexpectedFloatChar + } + v = 10*v + uint64(c-'0') + if pointFound { + offset /= 10 + } + } + return float64(v) * offset, nil +} + +var ( + errEmptyHexNum = errors.New("empty hex number") + errTooLargeHexNum = errors.New("too large hex number") +) + +func readHexInt(r *bufio.Reader) (int, error) { + n := 0 + i := 0 + var k int + for { + c, err := r.ReadByte() + if err != nil { + if err == io.EOF && i > 0 { + return n, nil + } + return -1, err + } + k = int(hex2intTable[c]) + if k == 16 { + if i == 0 { + return -1, errEmptyHexNum + } + if err := r.UnreadByte(); err != nil { + return -1, err + } + return n, nil + } + if i >= maxHexIntChars { + return -1, errTooLargeHexNum + } + n = (n << 4) | k + i++ + } +} + +var hexIntBufPool sync.Pool + +func writeHexInt(w *bufio.Writer, n int) error { + if n < 0 { + panic("BUG: int must be positive") + } + + v := hexIntBufPool.Get() + if v == nil { + v = make([]byte, maxHexIntChars+1) + } + buf := v.([]byte) + i := len(buf) - 1 + for { + buf[i] = lowerhex[n&0xf] + n >>= 4 + if n == 0 { + break + } + i-- + } + _, err := w.Write(buf[i:]) + hexIntBufPool.Put(v) + return err +} + +const ( + upperhex = "0123456789ABCDEF" + lowerhex = "0123456789abcdef" +) + +func lowercaseBytes(b []byte) { + for i := 0; i < len(b); i++ { + p := &b[i] + *p = toLowerTable[*p] + } +} + +// b2s converts byte slice to a string without memory allocation. +// See https://groups.google.com/forum/#!msg/Golang-Nuts/ENgbUzYvCuU/90yGx7GUAgAJ . +// +// Note it may break if string and/or slice header will change +// in the future go versions. +func b2s(b []byte) string { + return *(*string)(unsafe.Pointer(&b)) +} + +// s2b converts string to a byte slice without memory allocation. +// +// Note it may break if string and/or slice header will change +// in the future go versions. +func s2b(s string) (b []byte) { + bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + sh := *(*reflect.StringHeader)(unsafe.Pointer(&s)) + bh.Data = sh.Data + bh.Len = sh.Len + bh.Cap = sh.Len + return b +} + +// AppendUnquotedArg appends url-decoded src to dst and returns appended dst. +// +// dst may point to src. In this case src will be overwritten. +func AppendUnquotedArg(dst, src []byte) []byte { + return decodeArgAppend(dst, src) +} + +// AppendQuotedArg appends url-encoded src to dst and returns appended dst. 
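// Usage sketch (annotation, not part of the vendored bytesconv.go): percent-encoding
// a query argument with AppendQuotedArg and decoding it back with AppendUnquotedArg,
// as seen from a caller's package. Assumes `import "github.com/valyala/fasthttp"`;
// the names and sample value are illustrative.
func exampleQuotedArgRoundTrip() bool {
	src := []byte("a b+c")
	quoted := fasthttp.AppendQuotedArg(nil, src)        // spaces become '+', reserved chars become %XX
	unquoted := fasthttp.AppendUnquotedArg(nil, quoted) // url-decode it back
	return string(unquoted) == string(src)              // expected to hold for this input
}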
+func AppendQuotedArg(dst, src []byte) []byte { + for _, c := range src { + switch { + case c == ' ': + dst = append(dst, '+') + case quotedArgShouldEscapeTable[int(c)] != 0: + dst = append(dst, '%', upperhex[c>>4], upperhex[c&0xf]) + default: + dst = append(dst, c) + } + } + return dst +} + +func appendQuotedPath(dst, src []byte) []byte { + // Fix issue in https://github.com/golang/go/issues/11202 + if len(src) == 1 && src[0] == '*' { + return append(dst, '*') + } + + for _, c := range src { + if quotedPathShouldEscapeTable[int(c)] != 0 { + dst = append(dst, '%', upperhex[c>>4], upperhex[c&15]) + } else { + dst = append(dst, c) + } + } + return dst +} diff --git a/vendor/github.com/valyala/fasthttp/bytesconv_32.go b/vendor/github.com/valyala/fasthttp/bytesconv_32.go new file mode 100644 index 000000000..7fd6f5f12 --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/bytesconv_32.go @@ -0,0 +1,7 @@ +// +build !amd64,!arm64,!ppc64 + +package fasthttp + +const ( + maxHexIntChars = 7 +) diff --git a/vendor/github.com/valyala/fasthttp/bytesconv_64.go b/vendor/github.com/valyala/fasthttp/bytesconv_64.go new file mode 100644 index 000000000..edf7309c2 --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/bytesconv_64.go @@ -0,0 +1,7 @@ +// +build amd64 arm64 ppc64 + +package fasthttp + +const ( + maxHexIntChars = 15 +) diff --git a/vendor/github.com/valyala/fasthttp/bytesconv_table.go b/vendor/github.com/valyala/fasthttp/bytesconv_table.go new file mode 100644 index 000000000..78a12a30b --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/bytesconv_table.go @@ -0,0 +1,10 @@ +package fasthttp + +// Code generated by go run bytesconv_table_gen.go; DO NOT EDIT. +// See bytesconv_table_gen.go for more information about these tables. + +const hex2intTable = "\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x00\x01\x02\x03\x04\x05\x06\a\b\t\x10\x10\x10\x10\x10\x10\x10\n\v\f\r\x0e\x0f\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\n\v\f\r\x0e\x0f\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10\x10" +const toLowerTable = "\x00\x01\x02\x03\x04\x05\x06\a\b\t\n\v\f\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f !\"#$%&'()*+,-./0123456789:;<=>?@abcdefghijklmnopqrstuvwxyz[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\u007f\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff" +const 
toUpperTable = "\x00\x01\x02\x03\x04\x05\x06\a\b\t\n\v\f\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f !\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`ABCDEFGHIJKLMNOPQRSTUVWXYZ{|}~\u007f\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff" +const quotedArgShouldEscapeTable = "\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x01\x01\x01\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x01\x01\x01\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x01\x01\x00\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01" +const quotedPathShouldEscapeTable = "\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x00\x01\x00\x01\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x01\x01\x01\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x01\x01\x00\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01" diff --git a/vendor/github.com/valyala/fasthttp/client.go b/vendor/github.com/valyala/fasthttp/client.go new file mode 100644 index 000000000..961d6c051 --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/client.go @@ -0,0 +1,2370 @@ +package fasthttp + +import ( + "bufio" + "bytes" + "crypto/tls" + "errors" + "fmt" + "io" + "net" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" +) + +// Do performs the given http request and fills the given http response. +// +// Request must contain at least non-zero RequestURI with full url (including +// scheme and host) or non-zero Host header + RequestURI. 
+// +// Client determines the server to be requested in the following order: +// +// - from RequestURI if it contains full url with scheme and host; +// - from Host header otherwise. +// +// The function doesn't follow redirects. Use Get* for following redirects. +// +// Response is ignored if resp is nil. +// +// ErrNoFreeConns is returned if all DefaultMaxConnsPerHost connections +// to the requested host are busy. +// +// It is recommended obtaining req and resp via AcquireRequest +// and AcquireResponse in performance-critical code. +func Do(req *Request, resp *Response) error { + return defaultClient.Do(req, resp) +} + +// DoTimeout performs the given request and waits for response during +// the given timeout duration. +// +// Request must contain at least non-zero RequestURI with full url (including +// scheme and host) or non-zero Host header + RequestURI. +// +// Client determines the server to be requested in the following order: +// +// - from RequestURI if it contains full url with scheme and host; +// - from Host header otherwise. +// +// The function doesn't follow redirects. Use Get* for following redirects. +// +// Response is ignored if resp is nil. +// +// ErrTimeout is returned if the response wasn't returned during +// the given timeout. +// +// ErrNoFreeConns is returned if all DefaultMaxConnsPerHost connections +// to the requested host are busy. +// +// It is recommended obtaining req and resp via AcquireRequest +// and AcquireResponse in performance-critical code. +// +// Warning: DoTimeout does not terminate the request itself. The request will +// continue in the background and the response will be discarded. +// If requests take too long and the connection pool gets filled up please +// try using a Client and setting a ReadTimeout. +func DoTimeout(req *Request, resp *Response, timeout time.Duration) error { + return defaultClient.DoTimeout(req, resp, timeout) +} + +// DoDeadline performs the given request and waits for response until +// the given deadline. +// +// Request must contain at least non-zero RequestURI with full url (including +// scheme and host) or non-zero Host header + RequestURI. +// +// Client determines the server to be requested in the following order: +// +// - from RequestURI if it contains full url with scheme and host; +// - from Host header otherwise. +// +// The function doesn't follow redirects. Use Get* for following redirects. +// +// Response is ignored if resp is nil. +// +// ErrTimeout is returned if the response wasn't returned until +// the given deadline. +// +// ErrNoFreeConns is returned if all DefaultMaxConnsPerHost connections +// to the requested host are busy. +// +// It is recommended obtaining req and resp via AcquireRequest +// and AcquireResponse in performance-critical code. +func DoDeadline(req *Request, resp *Response, deadline time.Time) error { + return defaultClient.DoDeadline(req, resp, deadline) +} + +// Get returns the status code and body of url. +// +// The contents of dst will be replaced by the body and returned, if the dst +// is too small a new slice will be allocated. +// +// The function follows redirects. Use Do* for manually handling redirects. +func Get(dst []byte, url string) (statusCode int, body []byte, err error) { + return defaultClient.Get(dst, url) +} + +// GetTimeout returns the status code and body of url. +// +// The contents of dst will be replaced by the body and returned, if the dst +// is too small a new slice will be allocated. +// +// The function follows redirects. 
Use Do* for manually handling redirects. +// +// ErrTimeout error is returned if url contents couldn't be fetched +// during the given timeout. +func GetTimeout(dst []byte, url string, timeout time.Duration) (statusCode int, body []byte, err error) { + return defaultClient.GetTimeout(dst, url, timeout) +} + +// GetDeadline returns the status code and body of url. +// +// The contents of dst will be replaced by the body and returned, if the dst +// is too small a new slice will be allocated. +// +// The function follows redirects. Use Do* for manually handling redirects. +// +// ErrTimeout error is returned if url contents couldn't be fetched +// until the given deadline. +func GetDeadline(dst []byte, url string, deadline time.Time) (statusCode int, body []byte, err error) { + return defaultClient.GetDeadline(dst, url, deadline) +} + +// Post sends POST request to the given url with the given POST arguments. +// +// The contents of dst will be replaced by the body and returned, if the dst +// is too small a new slice will be allocated. +// +// The function follows redirects. Use Do* for manually handling redirects. +// +// Empty POST body is sent if postArgs is nil. +func Post(dst []byte, url string, postArgs *Args) (statusCode int, body []byte, err error) { + return defaultClient.Post(dst, url, postArgs) +} + +var defaultClient Client + +// Client implements http client. +// +// Copying Client by value is prohibited. Create new instance instead. +// +// It is safe calling Client methods from concurrently running goroutines. +type Client struct { + noCopy noCopy //nolint:unused,structcheck + + // Client name. Used in User-Agent request header. + // + // Default client name is used if not set. + Name string + + // NoDefaultUserAgentHeader when set to true, causes the default + // User-Agent header to be excluded from the Request. + NoDefaultUserAgentHeader bool + + // Callback for establishing new connections to hosts. + // + // Default Dial is used if not set. + Dial DialFunc + + // Attempt to connect to both ipv4 and ipv6 addresses if set to true. + // + // This option is used only if default TCP dialer is used, + // i.e. if Dial is blank. + // + // By default client connects only to ipv4 addresses, + // since unfortunately ipv6 remains broken in many networks worldwide :) + DialDualStack bool + + // TLS config for https connections. + // + // Default TLS config is used if not set. + TLSConfig *tls.Config + + // Maximum number of connections per each host which may be established. + // + // DefaultMaxConnsPerHost is used if not set. + MaxConnsPerHost int + + // Idle keep-alive connections are closed after this duration. + // + // By default idle connections are closed + // after DefaultMaxIdleConnDuration. + MaxIdleConnDuration time.Duration + + // Keep-alive connections are closed after this duration. + // + // By default connection duration is unlimited. + MaxConnDuration time.Duration + + // Maximum number of attempts for idempotent calls + // + // DefaultMaxIdemponentCallAttempts is used if not set. + MaxIdemponentCallAttempts int + + // Per-connection buffer size for responses' reading. + // This also limits the maximum header size. + // + // Default buffer size is used if 0. + ReadBufferSize int + + // Per-connection buffer size for requests' writing. + // + // Default buffer size is used if 0. + WriteBufferSize int + + // Maximum duration for full response reading (including body). + // + // By default response read timeout is unlimited. 
+ ReadTimeout time.Duration + + // Maximum duration for full request writing (including body). + // + // By default request write timeout is unlimited. + WriteTimeout time.Duration + + // Maximum response body size. + // + // The client returns ErrBodyTooLarge if this limit is greater than 0 + // and response body is greater than the limit. + // + // By default response body size is unlimited. + MaxResponseBodySize int + + // Header names are passed as-is without normalization + // if this option is set. + // + // Disabled header names' normalization may be useful only for proxying + // responses to other clients expecting case-sensitive + // header names. See https://github.com/valyala/fasthttp/issues/57 + // for details. + // + // By default request and response header names are normalized, i.e. + // The first letter and the first letters following dashes + // are uppercased, while all the other letters are lowercased. + // Examples: + // + // * HOST -> Host + // * content-type -> Content-Type + // * cONTENT-lenGTH -> Content-Length + DisableHeaderNamesNormalizing bool + + // Path values are sent as-is without normalization + // + // Disabled path normalization may be useful for proxying incoming requests + // to servers that are expecting paths to be forwarded as-is. + // + // By default path values are normalized, i.e. + // extra slashes are removed, special characters are encoded. + DisablePathNormalizing bool + + mLock sync.Mutex + m map[string]*HostClient + ms map[string]*HostClient +} + +// Get returns the status code and body of url. +// +// The contents of dst will be replaced by the body and returned, if the dst +// is too small a new slice will be allocated. +// +// The function follows redirects. Use Do* for manually handling redirects. +func (c *Client) Get(dst []byte, url string) (statusCode int, body []byte, err error) { + return clientGetURL(dst, url, c) +} + +// GetTimeout returns the status code and body of url. +// +// The contents of dst will be replaced by the body and returned, if the dst +// is too small a new slice will be allocated. +// +// The function follows redirects. Use Do* for manually handling redirects. +// +// ErrTimeout error is returned if url contents couldn't be fetched +// during the given timeout. +func (c *Client) GetTimeout(dst []byte, url string, timeout time.Duration) (statusCode int, body []byte, err error) { + return clientGetURLTimeout(dst, url, timeout, c) +} + +// GetDeadline returns the status code and body of url. +// +// The contents of dst will be replaced by the body and returned, if the dst +// is too small a new slice will be allocated. +// +// The function follows redirects. Use Do* for manually handling redirects. +// +// ErrTimeout error is returned if url contents couldn't be fetched +// until the given deadline. +func (c *Client) GetDeadline(dst []byte, url string, deadline time.Time) (statusCode int, body []byte, err error) { + return clientGetURLDeadline(dst, url, deadline, c) +} + +// Post sends POST request to the given url with the given POST arguments. +// +// The contents of dst will be replaced by the body and returned, if the dst +// is too small a new slice will be allocated. +// +// The function follows redirects. Use Do* for manually handling redirects. +// +// Empty POST body is sent if postArgs is nil. 
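// Usage sketch (annotation, not part of the vendored client.go): a Client tuned via
// a few of the fields documented above, fetching a URL with GetTimeout. Assumes
// `import ("time"; "github.com/valyala/fasthttp")`; the field values and URL are
// placeholders.
func exampleClientGetTimeout() ([]byte, error) {
	c := &fasthttp.Client{
		Name:            "my-app",         // reported in the User-Agent request header
		MaxConnsPerHost: 64,               // overrides DefaultMaxConnsPerHost
		ReadTimeout:     10 * time.Second, // limits reading of the full response
	}
	statusCode, body, err := c.GetTimeout(nil, "http://example.com/", 5*time.Second)
	if err != nil {
		return nil, err
	}
	_ = statusCode // e.g. fasthttp.StatusOK
	return body, nil
}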
+func (c *Client) Post(dst []byte, url string, postArgs *Args) (statusCode int, body []byte, err error) { + return clientPostURL(dst, url, postArgs, c) +} + +// DoTimeout performs the given request and waits for response during +// the given timeout duration. +// +// Request must contain at least non-zero RequestURI with full url (including +// scheme and host) or non-zero Host header + RequestURI. +// +// Client determines the server to be requested in the following order: +// +// - from RequestURI if it contains full url with scheme and host; +// - from Host header otherwise. +// +// The function doesn't follow redirects. Use Get* for following redirects. +// +// Response is ignored if resp is nil. +// +// ErrTimeout is returned if the response wasn't returned during +// the given timeout. +// +// ErrNoFreeConns is returned if all Client.MaxConnsPerHost connections +// to the requested host are busy. +// +// It is recommended obtaining req and resp via AcquireRequest +// and AcquireResponse in performance-critical code. +// +// Warning: DoTimeout does not terminate the request itself. The request will +// continue in the background and the response will be discarded. +// If requests take too long and the connection pool gets filled up please +// try setting a ReadTimeout. +func (c *Client) DoTimeout(req *Request, resp *Response, timeout time.Duration) error { + return clientDoTimeout(req, resp, timeout, c) +} + +// DoDeadline performs the given request and waits for response until +// the given deadline. +// +// Request must contain at least non-zero RequestURI with full url (including +// scheme and host) or non-zero Host header + RequestURI. +// +// Client determines the server to be requested in the following order: +// +// - from RequestURI if it contains full url with scheme and host; +// - from Host header otherwise. +// +// The function doesn't follow redirects. Use Get* for following redirects. +// +// Response is ignored if resp is nil. +// +// ErrTimeout is returned if the response wasn't returned until +// the given deadline. +// +// ErrNoFreeConns is returned if all Client.MaxConnsPerHost connections +// to the requested host are busy. +// +// It is recommended obtaining req and resp via AcquireRequest +// and AcquireResponse in performance-critical code. +func (c *Client) DoDeadline(req *Request, resp *Response, deadline time.Time) error { + return clientDoDeadline(req, resp, deadline, c) +} + +// Do performs the given http request and fills the given http response. +// +// Request must contain at least non-zero RequestURI with full url (including +// scheme and host) or non-zero Host header + RequestURI. +// +// Client determines the server to be requested in the following order: +// +// - from RequestURI if it contains full url with scheme and host; +// - from Host header otherwise. +// +// Response is ignored if resp is nil. +// +// The function doesn't follow redirects. Use Get* for following redirects. +// +// ErrNoFreeConns is returned if all Client.MaxConnsPerHost connections +// to the requested host are busy. +// +// It is recommended obtaining req and resp via AcquireRequest +// and AcquireResponse in performance-critical code. +func (c *Client) Do(req *Request, resp *Response) error { + uri := req.URI() + host := uri.Host() + + isTLS := false + scheme := uri.Scheme() + if bytes.Equal(scheme, strHTTPS) { + isTLS = true + } else if !bytes.Equal(scheme, strHTTP) { + return fmt.Errorf("unsupported protocol %q. 
http and https are supported", scheme) + } + + startCleaner := false + + c.mLock.Lock() + m := c.m + if isTLS { + m = c.ms + } + if m == nil { + m = make(map[string]*HostClient) + if isTLS { + c.ms = m + } else { + c.m = m + } + } + hc := m[string(host)] + if hc == nil { + hc = &HostClient{ + Addr: addMissingPort(string(host), isTLS), + Name: c.Name, + NoDefaultUserAgentHeader: c.NoDefaultUserAgentHeader, + Dial: c.Dial, + DialDualStack: c.DialDualStack, + IsTLS: isTLS, + TLSConfig: c.TLSConfig, + MaxConns: c.MaxConnsPerHost, + MaxIdleConnDuration: c.MaxIdleConnDuration, + MaxConnDuration: c.MaxConnDuration, + MaxIdemponentCallAttempts: c.MaxIdemponentCallAttempts, + ReadBufferSize: c.ReadBufferSize, + WriteBufferSize: c.WriteBufferSize, + ReadTimeout: c.ReadTimeout, + WriteTimeout: c.WriteTimeout, + MaxResponseBodySize: c.MaxResponseBodySize, + DisableHeaderNamesNormalizing: c.DisableHeaderNamesNormalizing, + DisablePathNormalizing: c.DisablePathNormalizing, + } + m[string(host)] = hc + if len(m) == 1 { + startCleaner = true + } + } + c.mLock.Unlock() + + if startCleaner { + go c.mCleaner(m) + } + + return hc.Do(req, resp) +} + +func (c *Client) mCleaner(m map[string]*HostClient) { + mustStop := false + + for { + c.mLock.Lock() + for k, v := range m { + v.connsLock.Lock() + shouldRemove := v.connsCount == 0 + v.connsLock.Unlock() + + if shouldRemove { + delete(m, k) + } + } + if len(m) == 0 { + mustStop = true + } + c.mLock.Unlock() + + if mustStop { + break + } + time.Sleep(10 * time.Second) + } +} + +// DefaultMaxConnsPerHost is the maximum number of concurrent connections +// http client may establish per host by default (i.e. if +// Client.MaxConnsPerHost isn't set). +const DefaultMaxConnsPerHost = 512 + +// DefaultMaxIdleConnDuration is the default duration before idle keep-alive +// connection is closed. +const DefaultMaxIdleConnDuration = 10 * time.Second + +// DefaultMaxIdemponentCallAttempts is the default idempotent calls attempts count. +const DefaultMaxIdemponentCallAttempts = 5 + +// DialFunc must establish connection to addr. +// +// There is no need in establishing TLS (SSL) connection for https. +// The client automatically converts connection to TLS +// if HostClient.IsTLS is set. +// +// TCP address passed to DialFunc always contains host and port. +// Example TCP addr values: +// +// - foobar.com:80 +// - foobar.com:443 +// - foobar.com:8080 +type DialFunc func(addr string) (net.Conn, error) + +// HostClient balances http requests among hosts listed in Addr. +// +// HostClient may be used for balancing load among multiple upstream hosts. +// While multiple addresses passed to HostClient.Addr may be used for balancing +// load among them, it would be better using LBClient instead, since HostClient +// may unevenly balance load among upstream hosts. +// +// It is forbidden copying HostClient instances. Create new instances instead. +// +// It is safe calling HostClient methods from concurrently running goroutines. +type HostClient struct { + noCopy noCopy //nolint:unused,structcheck + + // Comma-separated list of upstream HTTP server host addresses, + // which are passed to Dial in a round-robin manner. + // + // Each address may contain port if default dialer is used. + // For example, + // + // - foobar.com:80 + // - foobar.com:443 + // - foobar.com:8080 + Addr string + + // Client name. Used in User-Agent request header. + Name string + + // NoDefaultUserAgentHeader when set to true, causes the default + // User-Agent header to be excluded from the Request. 
+ NoDefaultUserAgentHeader bool + + // Callback for establishing new connection to the host. + // + // Default Dial is used if not set. + Dial DialFunc + + // Attempt to connect to both ipv4 and ipv6 host addresses + // if set to true. + // + // This option is used only if default TCP dialer is used, + // i.e. if Dial is blank. + // + // By default client connects only to ipv4 addresses, + // since unfortunately ipv6 remains broken in many networks worldwide :) + DialDualStack bool + + // Whether to use TLS (aka SSL or HTTPS) for host connections. + IsTLS bool + + // Optional TLS config. + TLSConfig *tls.Config + + // Maximum number of connections which may be established to all hosts + // listed in Addr. + // + // You can change this value while the HostClient is being used + // using HostClient.SetMaxConns(value) + // + // DefaultMaxConnsPerHost is used if not set. + MaxConns int + + // Keep-alive connections are closed after this duration. + // + // By default connection duration is unlimited. + MaxConnDuration time.Duration + + // Idle keep-alive connections are closed after this duration. + // + // By default idle connections are closed + // after DefaultMaxIdleConnDuration. + MaxIdleConnDuration time.Duration + + // Maximum number of attempts for idempotent calls + // + // DefaultMaxIdemponentCallAttempts is used if not set. + MaxIdemponentCallAttempts int + + // Per-connection buffer size for responses' reading. + // This also limits the maximum header size. + // + // Default buffer size is used if 0. + ReadBufferSize int + + // Per-connection buffer size for requests' writing. + // + // Default buffer size is used if 0. + WriteBufferSize int + + // Maximum duration for full response reading (including body). + // + // By default response read timeout is unlimited. + ReadTimeout time.Duration + + // Maximum duration for full request writing (including body). + // + // By default request write timeout is unlimited. + WriteTimeout time.Duration + + // Maximum response body size. + // + // The client returns ErrBodyTooLarge if this limit is greater than 0 + // and response body is greater than the limit. + // + // By default response body size is unlimited. + MaxResponseBodySize int + + // Header names are passed as-is without normalization + // if this option is set. + // + // Disabled header names' normalization may be useful only for proxying + // responses to other clients expecting case-sensitive + // header names. See https://github.com/valyala/fasthttp/issues/57 + // for details. + // + // By default request and response header names are normalized, i.e. + // The first letter and the first letters following dashes + // are uppercased, while all the other letters are lowercased. + // Examples: + // + // * HOST -> Host + // * content-type -> Content-Type + // * cONTENT-lenGTH -> Content-Length + DisableHeaderNamesNormalizing bool + + // Path values are sent as-is without normalization + // + // Disabled path normalization may be useful for proxying incoming requests + // to servers that are expecting paths to be forwarded as-is. + // + // By default path values are normalized, i.e. + // extra slashes are removed, special characters are encoded. 
+ DisablePathNormalizing bool + + clientName atomic.Value + lastUseTime uint32 + + connsLock sync.Mutex + connsCount int + conns []*clientConn + + addrsLock sync.Mutex + addrs []string + addrIdx uint32 + + tlsConfigMap map[string]*tls.Config + tlsConfigMapLock sync.Mutex + + readerPool sync.Pool + writerPool sync.Pool + + pendingRequests int32 + + connsCleanerRun bool +} + +type clientConn struct { + c net.Conn + + createdTime time.Time + lastUseTime time.Time +} + +var startTimeUnix = time.Now().Unix() + +// LastUseTime returns time the client was last used +func (c *HostClient) LastUseTime() time.Time { + n := atomic.LoadUint32(&c.lastUseTime) + return time.Unix(startTimeUnix+int64(n), 0) +} + +// Get returns the status code and body of url. +// +// The contents of dst will be replaced by the body and returned, if the dst +// is too small a new slice will be allocated. +// +// The function follows redirects. Use Do* for manually handling redirects. +func (c *HostClient) Get(dst []byte, url string) (statusCode int, body []byte, err error) { + return clientGetURL(dst, url, c) +} + +// GetTimeout returns the status code and body of url. +// +// The contents of dst will be replaced by the body and returned, if the dst +// is too small a new slice will be allocated. +// +// The function follows redirects. Use Do* for manually handling redirects. +// +// ErrTimeout error is returned if url contents couldn't be fetched +// during the given timeout. +func (c *HostClient) GetTimeout(dst []byte, url string, timeout time.Duration) (statusCode int, body []byte, err error) { + return clientGetURLTimeout(dst, url, timeout, c) +} + +// GetDeadline returns the status code and body of url. +// +// The contents of dst will be replaced by the body and returned, if the dst +// is too small a new slice will be allocated. +// +// The function follows redirects. Use Do* for manually handling redirects. +// +// ErrTimeout error is returned if url contents couldn't be fetched +// until the given deadline. +func (c *HostClient) GetDeadline(dst []byte, url string, deadline time.Time) (statusCode int, body []byte, err error) { + return clientGetURLDeadline(dst, url, deadline, c) +} + +// Post sends POST request to the given url with the given POST arguments. +// +// The contents of dst will be replaced by the body and returned, if the dst +// is too small a new slice will be allocated. +// +// The function follows redirects. Use Do* for manually handling redirects. +// +// Empty POST body is sent if postArgs is nil. 
+func (c *HostClient) Post(dst []byte, url string, postArgs *Args) (statusCode int, body []byte, err error) { + return clientPostURL(dst, url, postArgs, c) +} + +type clientDoer interface { + Do(req *Request, resp *Response) error +} + +func clientGetURL(dst []byte, url string, c clientDoer) (statusCode int, body []byte, err error) { + req := AcquireRequest() + + statusCode, body, err = doRequestFollowRedirects(req, dst, url, c) + + ReleaseRequest(req) + return statusCode, body, err +} + +func clientGetURLTimeout(dst []byte, url string, timeout time.Duration, c clientDoer) (statusCode int, body []byte, err error) { + deadline := time.Now().Add(timeout) + return clientGetURLDeadline(dst, url, deadline, c) +} + +type clientURLResponse struct { + statusCode int + body []byte + err error +} + +func clientGetURLDeadline(dst []byte, url string, deadline time.Time, c clientDoer) (statusCode int, body []byte, err error) { + timeout := -time.Since(deadline) + if timeout <= 0 { + return 0, dst, ErrTimeout + } + + var ch chan clientURLResponse + chv := clientURLResponseChPool.Get() + if chv == nil { + chv = make(chan clientURLResponse, 1) + } + ch = chv.(chan clientURLResponse) + + req := AcquireRequest() + + // Note that the request continues execution on ErrTimeout until + // client-specific ReadTimeout exceeds. This helps limiting load + // on slow hosts by MaxConns* concurrent requests. + // + // Without this 'hack' the load on slow host could exceed MaxConns* + // concurrent requests, since timed out requests on client side + // usually continue execution on the host. + go func() { + statusCodeCopy, bodyCopy, errCopy := doRequestFollowRedirects(req, dst, url, c) + ch <- clientURLResponse{ + statusCode: statusCodeCopy, + body: bodyCopy, + err: errCopy, + } + }() + + tc := AcquireTimer(timeout) + select { + case resp := <-ch: + ReleaseRequest(req) + clientURLResponseChPool.Put(chv) + statusCode = resp.statusCode + body = resp.body + err = resp.err + case <-tc.C: + body = dst + err = ErrTimeout + } + ReleaseTimer(tc) + + return statusCode, body, err +} + +var clientURLResponseChPool sync.Pool + +func clientPostURL(dst []byte, url string, postArgs *Args, c clientDoer) (statusCode int, body []byte, err error) { + req := AcquireRequest() + req.Header.SetMethodBytes(strPost) + req.Header.SetContentTypeBytes(strPostArgsContentType) + if postArgs != nil { + if _, err := postArgs.WriteTo(req.BodyWriter()); err != nil { + return 0, nil, err + } + } + + statusCode, body, err = doRequestFollowRedirects(req, dst, url, c) + + ReleaseRequest(req) + return statusCode, body, err +} + +var ( + errMissingLocation = errors.New("missing Location header for http redirect") + errTooManyRedirects = errors.New("too many redirects detected when doing the request") +) + +const maxRedirectsCount = 16 + +func doRequestFollowRedirects(req *Request, dst []byte, url string, c clientDoer) (statusCode int, body []byte, err error) { + resp := AcquireResponse() + bodyBuf := resp.bodyBuffer() + resp.keepBodyBuffer = true + oldBody := bodyBuf.B + bodyBuf.B = dst + scheme := req.uri.Scheme() + req.schemaUpdate = false + + redirectsCount := 0 + for { + // In case redirect to different scheme + if redirectsCount > 0 && !bytes.Equal(scheme, req.uri.Scheme()) { + if strings.HasPrefix(url, string(strHTTPS)) { + req.isTLS = true + req.uri.SetSchemeBytes(strHTTPS) + } else { + req.isTLS = false + req.uri.SetSchemeBytes(strHTTP) + } + scheme = req.uri.Scheme() + req.schemaUpdate = true + } + + req.parsedURI = false + req.Header.host = 
req.Header.host[:0] + req.SetRequestURI(url) + + if err = c.Do(req, resp); err != nil { + break + } + statusCode = resp.Header.StatusCode() + if !StatusCodeIsRedirect(statusCode) { + break + } + + redirectsCount++ + if redirectsCount > maxRedirectsCount { + err = errTooManyRedirects + break + } + location := resp.Header.peek(strLocation) + if len(location) == 0 { + err = errMissingLocation + break + } + url = getRedirectURL(url, location) + } + + body = bodyBuf.B + bodyBuf.B = oldBody + resp.keepBodyBuffer = false + ReleaseResponse(resp) + + return statusCode, body, err +} + +func getRedirectURL(baseURL string, location []byte) string { + u := AcquireURI() + u.Update(baseURL) + u.UpdateBytes(location) + redirectURL := u.String() + ReleaseURI(u) + return redirectURL +} + +// StatusCodeIsRedirect returns true if the status code indicates a redirect. +func StatusCodeIsRedirect(statusCode int) bool { + return statusCode == StatusMovedPermanently || + statusCode == StatusFound || + statusCode == StatusSeeOther || + statusCode == StatusTemporaryRedirect || + statusCode == StatusPermanentRedirect +} + +var ( + requestPool sync.Pool + responsePool sync.Pool +) + +// AcquireRequest returns an empty Request instance from request pool. +// +// The returned Request instance may be passed to ReleaseRequest when it is +// no longer needed. This allows Request recycling, reduces GC pressure +// and usually improves performance. +func AcquireRequest() *Request { + v := requestPool.Get() + if v == nil { + return &Request{} + } + return v.(*Request) +} + +// ReleaseRequest returns req acquired via AcquireRequest to request pool. +// +// It is forbidden accessing req and/or its' members after returning +// it to request pool. +func ReleaseRequest(req *Request) { + req.Reset() + requestPool.Put(req) +} + +// AcquireResponse returns an empty Response instance from response pool. +// +// The returned Response instance may be passed to ReleaseResponse when it is +// no longer needed. This allows Response recycling, reduces GC pressure +// and usually improves performance. +func AcquireResponse() *Response { + v := responsePool.Get() + if v == nil { + return &Response{} + } + return v.(*Response) +} + +// ReleaseResponse return resp acquired via AcquireResponse to response pool. +// +// It is forbidden accessing resp and/or its' members after returning +// it to response pool. +func ReleaseResponse(resp *Response) { + resp.Reset() + responsePool.Put(resp) +} + +// DoTimeout performs the given request and waits for response during +// the given timeout duration. +// +// Request must contain at least non-zero RequestURI with full url (including +// scheme and host) or non-zero Host header + RequestURI. +// +// The function doesn't follow redirects. Use Get* for following redirects. +// +// Response is ignored if resp is nil. +// +// ErrTimeout is returned if the response wasn't returned during +// the given timeout. +// +// ErrNoFreeConns is returned if all HostClient.MaxConns connections +// to the host are busy. +// +// It is recommended obtaining req and resp via AcquireRequest +// and AcquireResponse in performance-critical code. +// +// Warning: DoTimeout does not terminate the request itself. The request will +// continue in the background and the response will be discarded. +// If requests take too long and the connection pool gets filled up please +// try setting a ReadTimeout. 
+func (c *HostClient) DoTimeout(req *Request, resp *Response, timeout time.Duration) error { + return clientDoTimeout(req, resp, timeout, c) +} + +// DoDeadline performs the given request and waits for response until +// the given deadline. +// +// Request must contain at least non-zero RequestURI with full url (including +// scheme and host) or non-zero Host header + RequestURI. +// +// The function doesn't follow redirects. Use Get* for following redirects. +// +// Response is ignored if resp is nil. +// +// ErrTimeout is returned if the response wasn't returned until +// the given deadline. +// +// ErrNoFreeConns is returned if all HostClient.MaxConns connections +// to the host are busy. +// +// It is recommended obtaining req and resp via AcquireRequest +// and AcquireResponse in performance-critical code. +func (c *HostClient) DoDeadline(req *Request, resp *Response, deadline time.Time) error { + return clientDoDeadline(req, resp, deadline, c) +} + +func clientDoTimeout(req *Request, resp *Response, timeout time.Duration, c clientDoer) error { + deadline := time.Now().Add(timeout) + return clientDoDeadline(req, resp, deadline, c) +} + +func clientDoDeadline(req *Request, resp *Response, deadline time.Time, c clientDoer) error { + timeout := -time.Since(deadline) + if timeout <= 0 { + return ErrTimeout + } + + var ch chan error + chv := errorChPool.Get() + if chv == nil { + chv = make(chan error, 1) + } + ch = chv.(chan error) + + // Make req and resp copies, since on timeout they no longer + // may be accessed. + reqCopy := AcquireRequest() + req.copyToSkipBody(reqCopy) + swapRequestBody(req, reqCopy) + respCopy := AcquireResponse() + if resp != nil { + // Not calling resp.copyToSkipBody(respCopy) here to avoid + // unexpected messing with headers + respCopy.SkipBody = resp.SkipBody + } + + // Note that the request continues execution on ErrTimeout until + // client-specific ReadTimeout exceeds. This helps limiting load + // on slow hosts by MaxConns* concurrent requests. + // + // Without this 'hack' the load on slow host could exceed MaxConns* + // concurrent requests, since timed out requests on client side + // usually continue execution on the host. + + var mu sync.Mutex + var timedout bool + + go func() { + errDo := c.Do(reqCopy, respCopy) + mu.Lock() + { + if !timedout { + if resp != nil { + respCopy.copyToSkipBody(resp) + swapResponseBody(resp, respCopy) + } + swapRequestBody(reqCopy, req) + ch <- errDo + } + } + mu.Unlock() + + ReleaseResponse(respCopy) + ReleaseRequest(reqCopy) + }() + + tc := AcquireTimer(timeout) + var err error + select { + case err = <-ch: + case <-tc.C: + mu.Lock() + { + timedout = true + err = ErrTimeout + } + mu.Unlock() + } + ReleaseTimer(tc) + + select { + case <-ch: + default: + } + errorChPool.Put(chv) + + return err +} + +var errorChPool sync.Pool + +// Do performs the given http request and sets the corresponding response. +// +// Request must contain at least non-zero RequestURI with full url (including +// scheme and host) or non-zero Host header + RequestURI. +// +// The function doesn't follow redirects. Use Get* for following redirects. +// +// Response is ignored if resp is nil. +// +// ErrNoFreeConns is returned if all HostClient.MaxConns connections +// to the host are busy. +// +// It is recommended obtaining req and resp via AcquireRequest +// and AcquireResponse in performance-critical code. 
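// Usage sketch (annotation, not part of the vendored client.go): the AcquireRequest /
// AcquireResponse pattern recommended above, combined with HostClient.DoTimeout.
// Assumes `import ("time"; "github.com/valyala/fasthttp")`; the host and URI are
// placeholders.
func exampleHostClientDoTimeout() (int, error) {
	hc := &fasthttp.HostClient{
		Addr: "example.com:80", // may be a comma-separated list for round-robin dialing
	}

	req := fasthttp.AcquireRequest()
	resp := fasthttp.AcquireResponse()
	defer fasthttp.ReleaseRequest(req)   // return req to the pool once done
	defer fasthttp.ReleaseResponse(resp) // return resp to the pool once done

	req.SetRequestURI("http://example.com/foo/bar")
	if err := hc.DoTimeout(req, resp, 5*time.Second); err != nil {
		return 0, err
	}
	return resp.Header.StatusCode(), nil
}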
+func (c *HostClient) Do(req *Request, resp *Response) error { + var err error + var retry bool + maxAttempts := c.MaxIdemponentCallAttempts + if maxAttempts <= 0 { + maxAttempts = DefaultMaxIdemponentCallAttempts + } + attempts := 0 + + atomic.AddInt32(&c.pendingRequests, 1) + for { + retry, err = c.do(req, resp) + if err == nil || !retry { + break + } + + if !isIdempotent(req) { + // Retry non-idempotent requests if the server closes + // the connection before sending the response. + // + // This case is possible if the server closes the idle + // keep-alive connection on timeout. + // + // Apache and nginx usually do this. + if err != io.EOF { + break + } + } + attempts++ + if attempts >= maxAttempts { + break + } + } + atomic.AddInt32(&c.pendingRequests, -1) + + if err == io.EOF { + err = ErrConnectionClosed + } + return err +} + +// PendingRequests returns the current number of requests the client +// is executing. +// +// This function may be used for balancing load among multiple HostClient +// instances. +func (c *HostClient) PendingRequests() int { + return int(atomic.LoadInt32(&c.pendingRequests)) +} + +func isIdempotent(req *Request) bool { + return req.Header.IsGet() || req.Header.IsHead() || req.Header.IsPut() +} + +func (c *HostClient) do(req *Request, resp *Response) (bool, error) { + nilResp := false + if resp == nil { + nilResp = true + resp = AcquireResponse() + } + + ok, err := c.doNonNilReqResp(req, resp) + + if nilResp { + ReleaseResponse(resp) + } + + return ok, err +} + +func (c *HostClient) doNonNilReqResp(req *Request, resp *Response) (bool, error) { + if req == nil { + panic("BUG: req cannot be nil") + } + if resp == nil { + panic("BUG: resp cannot be nil") + } + + atomic.StoreUint32(&c.lastUseTime, uint32(time.Now().Unix()-startTimeUnix)) + + // Free up resources occupied by response before sending the request, + // so the GC may reclaim these resources (e.g. response body). 
+ + // backing up SkipBody in case it was set explicitly + customSkipBody := resp.SkipBody + resp.Reset() + resp.SkipBody = customSkipBody + + if c.DisablePathNormalizing { + req.URI().DisablePathNormalizing = true + } + + // If we detected a redirect to another schema + if req.schemaUpdate { + c.IsTLS = bytes.Equal(req.URI().Scheme(), strHTTPS) + c.Addr = addMissingPort(string(req.Host()), c.IsTLS) + c.addrIdx = 0 + c.addrs = nil + req.schemaUpdate = false + req.SetConnectionClose() + } + + cc, err := c.acquireConn() + if err != nil { + return false, err + } + conn := cc.c + + resp.parseNetConn(conn) + + if c.WriteTimeout > 0 { + // Set Deadline every time, since golang has fixed the performance issue + // See https://github.com/golang/go/issues/15133#issuecomment-271571395 for details + currentTime := time.Now() + if err = conn.SetWriteDeadline(currentTime.Add(c.WriteTimeout)); err != nil { + c.closeConn(cc) + return true, err + } + } + + resetConnection := false + if c.MaxConnDuration > 0 && time.Since(cc.createdTime) > c.MaxConnDuration && !req.ConnectionClose() { + req.SetConnectionClose() + resetConnection = true + } + + userAgentOld := req.Header.UserAgent() + if len(userAgentOld) == 0 { + req.Header.userAgent = c.getClientName() + } + bw := c.acquireWriter(conn) + err = req.Write(bw) + + if resetConnection { + req.Header.ResetConnectionClose() + } + + if err == nil { + err = bw.Flush() + } + if err != nil { + c.releaseWriter(bw) + c.closeConn(cc) + return true, err + } + c.releaseWriter(bw) + + if c.ReadTimeout > 0 { + // Set Deadline every time, since golang has fixed the performance issue + // See https://github.com/golang/go/issues/15133#issuecomment-271571395 for details + currentTime := time.Now() + if err = conn.SetReadDeadline(currentTime.Add(c.ReadTimeout)); err != nil { + c.closeConn(cc) + return true, err + } + } + + if customSkipBody || !req.Header.IsGet() && req.Header.IsHead() { + resp.SkipBody = true + } + if c.DisableHeaderNamesNormalizing { + resp.Header.DisableNormalizing() + } + + br := c.acquireReader(conn) + if err = resp.ReadLimitBody(br, c.MaxResponseBodySize); err != nil { + c.releaseReader(br) + c.closeConn(cc) + // Don't retry in case of ErrBodyTooLarge since we will just get the same again. + retry := err != ErrBodyTooLarge + return retry, err + } + c.releaseReader(br) + + if resetConnection || req.ConnectionClose() || resp.ConnectionClose() { + c.closeConn(cc) + } else { + c.releaseConn(cc) + } + + return false, err +} + +var ( + // ErrNoFreeConns is returned when no free connections available + // to the given host. + // + // Increase the allowed number of connections per host if you + // see this error. + ErrNoFreeConns = errors.New("no free connections available to host") + + // ErrConnectionClosed may be returned from client methods if the server + // closes connection before returning the first response byte. + // + // If you see this error, then either fix the server by returning + // 'Connection: close' response header before closing the connection + // or add 'Connection: close' request header before sending requests + // to broken server. + ErrConnectionClosed = errors.New("the server closed connection before returning the first response byte. " + + "Make sure the server returns 'Connection: close' response header before closing the connection") +) + +type timeoutError struct { +} + +func (e *timeoutError) Error() string { + return "timeout" +} + +// Only implement the Timeout() function of the net.Error interface. 
+// This allows for checks like: +// +// if x, ok := err.(interface{ Timeout() bool }); ok && x.Timeout() { +func (e *timeoutError) Timeout() bool { + return true +} + +var ( + // ErrTimeout is returned from timed out calls. + ErrTimeout = &timeoutError{} +) + +// SetMaxConns sets up the maximum number of connections which may be established to all hosts listed in Addr. +func (c *HostClient) SetMaxConns(newMaxConns int) { + c.connsLock.Lock() + c.MaxConns = newMaxConns + c.connsLock.Unlock() +} + +func (c *HostClient) acquireConn() (*clientConn, error) { + var cc *clientConn + createConn := false + startCleaner := false + + var n int + c.connsLock.Lock() + n = len(c.conns) + if n == 0 { + maxConns := c.MaxConns + if maxConns <= 0 { + maxConns = DefaultMaxConnsPerHost + } + if c.connsCount < maxConns { + c.connsCount++ + createConn = true + if !c.connsCleanerRun { + startCleaner = true + c.connsCleanerRun = true + } + } + } else { + n-- + cc = c.conns[n] + c.conns[n] = nil + c.conns = c.conns[:n] + } + c.connsLock.Unlock() + + if cc != nil { + return cc, nil + } + if !createConn { + return nil, ErrNoFreeConns + } + + if startCleaner { + go c.connsCleaner() + } + + conn, err := c.dialHostHard() + if err != nil { + c.decConnsCount() + return nil, err + } + cc = acquireClientConn(conn) + + return cc, nil +} + +func (c *HostClient) connsCleaner() { + var ( + scratch []*clientConn + maxIdleConnDuration = c.MaxIdleConnDuration + ) + if maxIdleConnDuration <= 0 { + maxIdleConnDuration = DefaultMaxIdleConnDuration + } + for { + currentTime := time.Now() + + // Determine idle connections to be closed. + c.connsLock.Lock() + conns := c.conns + n := len(conns) + i := 0 + for i < n && currentTime.Sub(conns[i].lastUseTime) > maxIdleConnDuration { + i++ + } + sleepFor := maxIdleConnDuration + if i < n { + // + 1 so we actually sleep past the expiration time and not up to it. + // Otherwise the > check above would still fail. + sleepFor = maxIdleConnDuration - currentTime.Sub(conns[i].lastUseTime) + 1 + } + scratch = append(scratch[:0], conns[:i]...) + if i > 0 { + m := copy(conns, conns[i:]) + for i = m; i < n; i++ { + conns[i] = nil + } + c.conns = conns[:m] + } + c.connsLock.Unlock() + + // Close idle connections. + for i, cc := range scratch { + c.closeConn(cc) + scratch[i] = nil + } + + // Determine whether to stop the connsCleaner. + c.connsLock.Lock() + mustStop := c.connsCount == 0 + if mustStop { + c.connsCleanerRun = false + } + c.connsLock.Unlock() + if mustStop { + break + } + + time.Sleep(sleepFor) + } +} + +func (c *HostClient) closeConn(cc *clientConn) { + c.decConnsCount() + cc.c.Close() + releaseClientConn(cc) +} + +func (c *HostClient) decConnsCount() { + c.connsLock.Lock() + c.connsCount-- + c.connsLock.Unlock() +} + +func acquireClientConn(conn net.Conn) *clientConn { + v := clientConnPool.Get() + if v == nil { + v = &clientConn{} + } + cc := v.(*clientConn) + cc.c = conn + cc.createdTime = time.Now() + return cc +} + +func releaseClientConn(cc *clientConn) { + // Reset all fields. 
+ *cc = clientConn{} + clientConnPool.Put(cc) +} + +var clientConnPool sync.Pool + +func (c *HostClient) releaseConn(cc *clientConn) { + cc.lastUseTime = time.Now() + c.connsLock.Lock() + c.conns = append(c.conns, cc) + c.connsLock.Unlock() +} + +func (c *HostClient) acquireWriter(conn net.Conn) *bufio.Writer { + v := c.writerPool.Get() + if v == nil { + n := c.WriteBufferSize + if n <= 0 { + n = defaultWriteBufferSize + } + return bufio.NewWriterSize(conn, n) + } + bw := v.(*bufio.Writer) + bw.Reset(conn) + return bw +} + +func (c *HostClient) releaseWriter(bw *bufio.Writer) { + c.writerPool.Put(bw) +} + +func (c *HostClient) acquireReader(conn net.Conn) *bufio.Reader { + v := c.readerPool.Get() + if v == nil { + n := c.ReadBufferSize + if n <= 0 { + n = defaultReadBufferSize + } + return bufio.NewReaderSize(conn, n) + } + br := v.(*bufio.Reader) + br.Reset(conn) + return br +} + +func (c *HostClient) releaseReader(br *bufio.Reader) { + c.readerPool.Put(br) +} + +func newClientTLSConfig(c *tls.Config, addr string) *tls.Config { + if c == nil { + c = &tls.Config{} + } else { + // TODO: substitute this with c.Clone() after go1.8 becomes mainstream :) + c = &tls.Config{ + Rand: c.Rand, + Time: c.Time, + Certificates: c.Certificates, + NameToCertificate: c.NameToCertificate, + GetCertificate: c.GetCertificate, + RootCAs: c.RootCAs, + NextProtos: c.NextProtos, + ServerName: c.ServerName, + + // Do not copy ClientAuth, since it is server-related stuff + // Do not copy ClientCAs, since it is server-related stuff + + InsecureSkipVerify: c.InsecureSkipVerify, + CipherSuites: c.CipherSuites, + + // Do not copy PreferServerCipherSuites - this is server stuff + + SessionTicketsDisabled: c.SessionTicketsDisabled, + + // Do not copy SessionTicketKey - this is server stuff + + ClientSessionCache: c.ClientSessionCache, + MinVersion: c.MinVersion, + MaxVersion: c.MaxVersion, + CurvePreferences: c.CurvePreferences, + } + } + + if c.ClientSessionCache == nil { + c.ClientSessionCache = tls.NewLRUClientSessionCache(0) + } + + if len(c.ServerName) == 0 { + serverName := tlsServerName(addr) + if serverName == "*" { + c.InsecureSkipVerify = true + } else { + c.ServerName = serverName + } + } + return c +} + +func tlsServerName(addr string) string { + if !strings.Contains(addr, ":") { + return addr + } + host, _, err := net.SplitHostPort(addr) + if err != nil { + return "*" + } + return host +} + +func (c *HostClient) nextAddr() string { + c.addrsLock.Lock() + if c.addrs == nil { + c.addrs = strings.Split(c.Addr, ",") + } + addr := c.addrs[0] + if len(c.addrs) > 1 { + addr = c.addrs[c.addrIdx%uint32(len(c.addrs))] + c.addrIdx++ + } + c.addrsLock.Unlock() + return addr +} + +func (c *HostClient) dialHostHard() (conn net.Conn, err error) { + // attempt to dial all the available hosts before giving up. + + c.addrsLock.Lock() + n := len(c.addrs) + c.addrsLock.Unlock() + + if n == 0 { + // It looks like c.addrs isn't initialized yet. 
+ n = 1 + } + + timeout := c.ReadTimeout + c.WriteTimeout + if timeout <= 0 { + timeout = DefaultDialTimeout + } + deadline := time.Now().Add(timeout) + for n > 0 { + addr := c.nextAddr() + tlsConfig := c.cachedTLSConfig(addr) + conn, err = dialAddr(addr, c.Dial, c.DialDualStack, c.IsTLS, tlsConfig, c.WriteTimeout) + if err == nil { + return conn, nil + } + if time.Since(deadline) >= 0 { + break + } + n-- + } + return nil, err +} + +func (c *HostClient) cachedTLSConfig(addr string) *tls.Config { + if !c.IsTLS { + return nil + } + + c.tlsConfigMapLock.Lock() + if c.tlsConfigMap == nil { + c.tlsConfigMap = make(map[string]*tls.Config) + } + cfg := c.tlsConfigMap[addr] + if cfg == nil { + cfg = newClientTLSConfig(c.TLSConfig, addr) + c.tlsConfigMap[addr] = cfg + } + c.tlsConfigMapLock.Unlock() + + return cfg +} + +// ErrTLSHandshakeTimeout indicates there is a timeout from tls handshake. +var ErrTLSHandshakeTimeout = errors.New("tls handshake timed out") + +var timeoutErrorChPool sync.Pool + +func tlsClientHandshake(rawConn net.Conn, tlsConfig *tls.Config, timeout time.Duration) (net.Conn, error) { + tc := AcquireTimer(timeout) + defer ReleaseTimer(tc) + + var ch chan error + chv := timeoutErrorChPool.Get() + if chv == nil { + chv = make(chan error) + } + ch = chv.(chan error) + defer timeoutErrorChPool.Put(chv) + + conn := tls.Client(rawConn, tlsConfig) + + go func() { + ch <- conn.Handshake() + }() + + select { + case <-tc.C: + rawConn.Close() + <-ch + return nil, ErrTLSHandshakeTimeout + case err := <-ch: + if err != nil { + rawConn.Close() + return nil, err + } + return conn, nil + } +} + +func dialAddr(addr string, dial DialFunc, dialDualStack, isTLS bool, tlsConfig *tls.Config, timeout time.Duration) (net.Conn, error) { + if dial == nil { + if dialDualStack { + dial = DialDualStack + } else { + dial = Dial + } + addr = addMissingPort(addr, isTLS) + } + conn, err := dial(addr) + if err != nil { + return nil, err + } + if conn == nil { + panic("BUG: DialFunc returned (nil, nil)") + } + if isTLS { + if timeout == 0 { + return tls.Client(conn, tlsConfig), nil + } + return tlsClientHandshake(conn, tlsConfig, timeout) + } + return conn, nil +} + +func (c *HostClient) getClientName() []byte { + v := c.clientName.Load() + var clientName []byte + if v == nil { + clientName = []byte(c.Name) + if len(clientName) == 0 && !c.NoDefaultUserAgentHeader { + clientName = defaultUserAgent + } + c.clientName.Store(clientName) + } else { + clientName = v.([]byte) + } + return clientName +} + +func addMissingPort(addr string, isTLS bool) string { + n := strings.Index(addr, ":") + if n >= 0 { + return addr + } + port := 80 + if isTLS { + port = 443 + } + return net.JoinHostPort(addr, strconv.Itoa(port)) +} + +// PipelineClient pipelines requests over a limited set of concurrent +// connections to the given Addr. +// +// This client may be used in highly loaded HTTP-based RPC systems for reducing +// context switches and network level overhead. +// See https://en.wikipedia.org/wiki/HTTP_pipelining for details. +// +// It is forbidden copying PipelineClient instances. Create new instances +// instead. +// +// It is safe calling PipelineClient methods from concurrently running +// goroutines. +type PipelineClient struct { + noCopy noCopy //nolint:unused,structcheck + + // Address of the host to connect to. + Addr string + + // The maximum number of concurrent connections to the Addr. + // + // A single connection is used by default. 
+ MaxConns int + + // The maximum number of pending pipelined requests over + // a single connection to Addr. + // + // DefaultMaxPendingRequests is used by default. + MaxPendingRequests int + + // The maximum delay before sending pipelined requests as a batch + // to the server. + // + // By default requests are sent immediately to the server. + MaxBatchDelay time.Duration + + // Callback for connection establishing to the host. + // + // Default Dial is used if not set. + Dial DialFunc + + // Attempt to connect to both ipv4 and ipv6 host addresses + // if set to true. + // + // This option is used only if default TCP dialer is used, + // i.e. if Dial is blank. + // + // By default client connects only to ipv4 addresses, + // since unfortunately ipv6 remains broken in many networks worldwide :) + DialDualStack bool + + // Whether to use TLS (aka SSL or HTTPS) for host connections. + IsTLS bool + + // Optional TLS config. + TLSConfig *tls.Config + + // Idle connection to the host is closed after this duration. + // + // By default idle connection is closed after + // DefaultMaxIdleConnDuration. + MaxIdleConnDuration time.Duration + + // Buffer size for responses' reading. + // This also limits the maximum header size. + // + // Default buffer size is used if 0. + ReadBufferSize int + + // Buffer size for requests' writing. + // + // Default buffer size is used if 0. + WriteBufferSize int + + // Maximum duration for full response reading (including body). + // + // By default response read timeout is unlimited. + ReadTimeout time.Duration + + // Maximum duration for full request writing (including body). + // + // By default request write timeout is unlimited. + WriteTimeout time.Duration + + // Logger for logging client errors. + // + // By default standard logger from log package is used. + Logger Logger + + connClients []*pipelineConnClient + connClientsLock sync.Mutex +} + +type pipelineConnClient struct { + noCopy noCopy //nolint:unused,structcheck + + Addr string + MaxPendingRequests int + MaxBatchDelay time.Duration + Dial DialFunc + DialDualStack bool + IsTLS bool + TLSConfig *tls.Config + MaxIdleConnDuration time.Duration + ReadBufferSize int + WriteBufferSize int + ReadTimeout time.Duration + WriteTimeout time.Duration + Logger Logger + + workPool sync.Pool + + chLock sync.Mutex + chW chan *pipelineWork + chR chan *pipelineWork + + tlsConfigLock sync.Mutex + tlsConfig *tls.Config +} + +type pipelineWork struct { + reqCopy Request + respCopy Response + req *Request + resp *Response + t *time.Timer + deadline time.Time + err error + done chan struct{} +} + +// DoTimeout performs the given request and waits for response during +// the given timeout duration. +// +// Request must contain at least non-zero RequestURI with full url (including +// scheme and host) or non-zero Host header + RequestURI. +// +// The function doesn't follow redirects. +// +// Response is ignored if resp is nil. +// +// ErrTimeout is returned if the response wasn't returned during +// the given timeout. +// +// It is recommended obtaining req and resp via AcquireRequest +// and AcquireResponse in performance-critical code. +// +// Warning: DoTimeout does not terminate the request itself. The request will +// continue in the background and the response will be discarded. +// If requests take too long and the connection pool gets filled up please +// try setting a ReadTimeout. 
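[Editor's note: a minimal usage sketch of the PipelineClient API vendored above, for reviewer context only and not part of the patch. The import path is the upstream fasthttp module; the target address and URI are illustrative assumptions.]

package main

import (
	"log"
	"time"

	"github.com/valyala/fasthttp"
)

func main() {
	// Pipeline up to 16 pending requests over two connections to one host.
	// Addr is an assumed example address.
	pc := &fasthttp.PipelineClient{
		Addr:               "example.com:80",
		MaxConns:           2,
		MaxPendingRequests: 16,
	}

	req := fasthttp.AcquireRequest()
	resp := fasthttp.AcquireResponse()
	defer fasthttp.ReleaseRequest(req)
	defer fasthttp.ReleaseResponse(resp)

	req.SetRequestURI("http://example.com/ping")

	// DoTimeout returns ErrTimeout if no response arrives in time; per the
	// warning in the doc comment, the request itself keeps running in the
	// background, so a ReadTimeout should also be considered.
	if err := pc.DoTimeout(req, resp, time.Second); err != nil {
		log.Fatalf("request failed: %s", err)
	}
	log.Printf("status: %d", resp.StatusCode())
}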
+func (c *PipelineClient) DoTimeout(req *Request, resp *Response, timeout time.Duration) error { + return c.DoDeadline(req, resp, time.Now().Add(timeout)) +} + +// DoDeadline performs the given request and waits for response until +// the given deadline. +// +// Request must contain at least non-zero RequestURI with full url (including +// scheme and host) or non-zero Host header + RequestURI. +// +// The function doesn't follow redirects. +// +// Response is ignored if resp is nil. +// +// ErrTimeout is returned if the response wasn't returned until +// the given deadline. +// +// It is recommended obtaining req and resp via AcquireRequest +// and AcquireResponse in performance-critical code. +func (c *PipelineClient) DoDeadline(req *Request, resp *Response, deadline time.Time) error { + return c.getConnClient().DoDeadline(req, resp, deadline) +} + +func (c *pipelineConnClient) DoDeadline(req *Request, resp *Response, deadline time.Time) error { + c.init() + + timeout := -time.Since(deadline) + if timeout < 0 { + return ErrTimeout + } + + w := acquirePipelineWork(&c.workPool, timeout) + w.req = &w.reqCopy + w.resp = &w.respCopy + + // Make a copy of the request in order to avoid data races on timeouts + req.copyToSkipBody(&w.reqCopy) + swapRequestBody(req, &w.reqCopy) + + // Put the request to outgoing queue + select { + case c.chW <- w: + // Fast path: len(c.ch) < cap(c.ch) + default: + // Slow path + select { + case c.chW <- w: + case <-w.t.C: + releasePipelineWork(&c.workPool, w) + return ErrTimeout + } + } + + // Wait for the response + var err error + select { + case <-w.done: + if resp != nil { + w.respCopy.copyToSkipBody(resp) + swapResponseBody(resp, &w.respCopy) + } + err = w.err + releasePipelineWork(&c.workPool, w) + case <-w.t.C: + err = ErrTimeout + } + + return err +} + +// Do performs the given http request and sets the corresponding response. +// +// Request must contain at least non-zero RequestURI with full url (including +// scheme and host) or non-zero Host header + RequestURI. +// +// The function doesn't follow redirects. Use Get* for following redirects. +// +// Response is ignored if resp is nil. +// +// It is recommended obtaining req and resp via AcquireRequest +// and AcquireResponse in performance-critical code. +func (c *PipelineClient) Do(req *Request, resp *Response) error { + return c.getConnClient().Do(req, resp) +} + +func (c *pipelineConnClient) Do(req *Request, resp *Response) error { + c.init() + + w := acquirePipelineWork(&c.workPool, 0) + w.req = req + if resp != nil { + w.resp = resp + } else { + w.resp = &w.respCopy + } + + // Put the request to outgoing queue + select { + case c.chW <- w: + default: + // Try substituting the oldest w with the current one. + select { + case wOld := <-c.chW: + wOld.err = ErrPipelineOverflow + wOld.done <- struct{}{} + default: + } + select { + case c.chW <- w: + default: + releasePipelineWork(&c.workPool, w) + return ErrPipelineOverflow + } + } + + // Wait for the response + <-w.done + err := w.err + + releasePipelineWork(&c.workPool, w) + + return err +} + +func (c *PipelineClient) getConnClient() *pipelineConnClient { + c.connClientsLock.Lock() + cc := c.getConnClientUnlocked() + c.connClientsLock.Unlock() + return cc +} + +func (c *PipelineClient) getConnClientUnlocked() *pipelineConnClient { + if len(c.connClients) == 0 { + return c.newConnClient() + } + + // Return the client with the minimum number of pending requests. 
+ minCC := c.connClients[0] + minReqs := minCC.PendingRequests() + if minReqs == 0 { + return minCC + } + for i := 1; i < len(c.connClients); i++ { + cc := c.connClients[i] + reqs := cc.PendingRequests() + if reqs == 0 { + return cc + } + if reqs < minReqs { + minCC = cc + minReqs = reqs + } + } + + maxConns := c.MaxConns + if maxConns <= 0 { + maxConns = 1 + } + if len(c.connClients) < maxConns { + return c.newConnClient() + } + return minCC +} + +func (c *PipelineClient) newConnClient() *pipelineConnClient { + cc := &pipelineConnClient{ + Addr: c.Addr, + MaxPendingRequests: c.MaxPendingRequests, + MaxBatchDelay: c.MaxBatchDelay, + Dial: c.Dial, + DialDualStack: c.DialDualStack, + IsTLS: c.IsTLS, + TLSConfig: c.TLSConfig, + MaxIdleConnDuration: c.MaxIdleConnDuration, + ReadBufferSize: c.ReadBufferSize, + WriteBufferSize: c.WriteBufferSize, + ReadTimeout: c.ReadTimeout, + WriteTimeout: c.WriteTimeout, + Logger: c.Logger, + } + c.connClients = append(c.connClients, cc) + return cc +} + +// ErrPipelineOverflow may be returned from PipelineClient.Do* +// if the requests' queue is overflown. +var ErrPipelineOverflow = errors.New("pipelined requests' queue has been overflown. Increase MaxConns and/or MaxPendingRequests") + +// DefaultMaxPendingRequests is the default value +// for PipelineClient.MaxPendingRequests. +const DefaultMaxPendingRequests = 1024 + +func (c *pipelineConnClient) init() { + c.chLock.Lock() + if c.chR == nil { + maxPendingRequests := c.MaxPendingRequests + if maxPendingRequests <= 0 { + maxPendingRequests = DefaultMaxPendingRequests + } + c.chR = make(chan *pipelineWork, maxPendingRequests) + if c.chW == nil { + c.chW = make(chan *pipelineWork, maxPendingRequests) + } + go func() { + if err := c.worker(); err != nil { + c.logger().Printf("error in PipelineClient(%q): %s", c.Addr, err) + if netErr, ok := err.(net.Error); ok && netErr.Temporary() { + // Throttle client reconnections on temporary errors + time.Sleep(time.Second) + } + } + + c.chLock.Lock() + // Do not reset c.chW to nil, since it may contain + // pending requests, which could be served on the next + // connection to the host. 
+ c.chR = nil + c.chLock.Unlock() + }() + } + c.chLock.Unlock() +} + +func (c *pipelineConnClient) worker() error { + tlsConfig := c.cachedTLSConfig() + conn, err := dialAddr(c.Addr, c.Dial, c.DialDualStack, c.IsTLS, tlsConfig, c.WriteTimeout) + if err != nil { + return err + } + + // Start reader and writer + stopW := make(chan struct{}) + doneW := make(chan error) + go func() { + doneW <- c.writer(conn, stopW) + }() + stopR := make(chan struct{}) + doneR := make(chan error) + go func() { + doneR <- c.reader(conn, stopR) + }() + + // Wait until reader and writer are stopped + select { + case err = <-doneW: + conn.Close() + close(stopR) + <-doneR + case err = <-doneR: + conn.Close() + close(stopW) + <-doneW + } + + // Notify pending readers + for len(c.chR) > 0 { + w := <-c.chR + w.err = errPipelineConnStopped + w.done <- struct{}{} + } + + return err +} + +func (c *pipelineConnClient) cachedTLSConfig() *tls.Config { + if !c.IsTLS { + return nil + } + + c.tlsConfigLock.Lock() + cfg := c.tlsConfig + if cfg == nil { + cfg = newClientTLSConfig(c.TLSConfig, c.Addr) + c.tlsConfig = cfg + } + c.tlsConfigLock.Unlock() + + return cfg +} + +func (c *pipelineConnClient) writer(conn net.Conn, stopCh <-chan struct{}) error { + writeBufferSize := c.WriteBufferSize + if writeBufferSize <= 0 { + writeBufferSize = defaultWriteBufferSize + } + bw := bufio.NewWriterSize(conn, writeBufferSize) + defer bw.Flush() + chR := c.chR + chW := c.chW + writeTimeout := c.WriteTimeout + + maxIdleConnDuration := c.MaxIdleConnDuration + if maxIdleConnDuration <= 0 { + maxIdleConnDuration = DefaultMaxIdleConnDuration + } + maxBatchDelay := c.MaxBatchDelay + + var ( + stopTimer = time.NewTimer(time.Hour) + flushTimer = time.NewTimer(time.Hour) + flushTimerCh <-chan time.Time + instantTimerCh = make(chan time.Time) + + w *pipelineWork + err error + ) + close(instantTimerCh) + for { + againChW: + select { + case w = <-chW: + // Fast path: len(chW) > 0 + default: + // Slow path + stopTimer.Reset(maxIdleConnDuration) + select { + case w = <-chW: + case <-stopTimer.C: + return nil + case <-stopCh: + return nil + case <-flushTimerCh: + if err = bw.Flush(); err != nil { + return err + } + flushTimerCh = nil + goto againChW + } + } + + if !w.deadline.IsZero() && time.Since(w.deadline) >= 0 { + w.err = ErrTimeout + w.done <- struct{}{} + continue + } + + w.resp.parseNetConn(conn) + + if writeTimeout > 0 { + // Set Deadline every time, since golang has fixed the performance issue + // See https://github.com/golang/go/issues/15133#issuecomment-271571395 for details + currentTime := time.Now() + if err = conn.SetWriteDeadline(currentTime.Add(writeTimeout)); err != nil { + w.err = err + w.done <- struct{}{} + return err + } + } + if err = w.req.Write(bw); err != nil { + w.err = err + w.done <- struct{}{} + return err + } + if flushTimerCh == nil && (len(chW) == 0 || len(chR) == cap(chR)) { + if maxBatchDelay > 0 { + flushTimer.Reset(maxBatchDelay) + flushTimerCh = flushTimer.C + } else { + flushTimerCh = instantTimerCh + } + } + + againChR: + select { + case chR <- w: + // Fast path: len(chR) < cap(chR) + default: + // Slow path + select { + case chR <- w: + case <-stopCh: + w.err = errPipelineConnStopped + w.done <- struct{}{} + return nil + case <-flushTimerCh: + if err = bw.Flush(); err != nil { + w.err = err + w.done <- struct{}{} + return err + } + flushTimerCh = nil + goto againChR + } + } + } +} + +func (c *pipelineConnClient) reader(conn net.Conn, stopCh <-chan struct{}) error { + readBufferSize := c.ReadBufferSize + if 
readBufferSize <= 0 { + readBufferSize = defaultReadBufferSize + } + br := bufio.NewReaderSize(conn, readBufferSize) + chR := c.chR + readTimeout := c.ReadTimeout + + var ( + w *pipelineWork + err error + ) + for { + select { + case w = <-chR: + // Fast path: len(chR) > 0 + default: + // Slow path + select { + case w = <-chR: + case <-stopCh: + return nil + } + } + + if readTimeout > 0 { + // Set Deadline every time, since golang has fixed the performance issue + // See https://github.com/golang/go/issues/15133#issuecomment-271571395 for details + currentTime := time.Now() + if err = conn.SetReadDeadline(currentTime.Add(readTimeout)); err != nil { + w.err = err + w.done <- struct{}{} + return err + } + } + if err = w.resp.Read(br); err != nil { + w.err = err + w.done <- struct{}{} + return err + } + + w.done <- struct{}{} + } +} + +func (c *pipelineConnClient) logger() Logger { + if c.Logger != nil { + return c.Logger + } + return defaultLogger +} + +// PendingRequests returns the current number of pending requests pipelined +// to the server. +// +// This number may exceed MaxPendingRequests*MaxConns by up to two times, since +// each connection to the server may keep up to MaxPendingRequests requests +// in the queue before sending them to the server. +// +// This function may be used for balancing load among multiple PipelineClient +// instances. +func (c *PipelineClient) PendingRequests() int { + c.connClientsLock.Lock() + n := 0 + for _, cc := range c.connClients { + n += cc.PendingRequests() + } + c.connClientsLock.Unlock() + return n +} + +func (c *pipelineConnClient) PendingRequests() int { + c.init() + + c.chLock.Lock() + n := len(c.chR) + len(c.chW) + c.chLock.Unlock() + return n +} + +var errPipelineConnStopped = errors.New("pipeline connection has been stopped") + +func acquirePipelineWork(pool *sync.Pool, timeout time.Duration) *pipelineWork { + v := pool.Get() + if v == nil { + v = &pipelineWork{ + done: make(chan struct{}, 1), + } + } + w := v.(*pipelineWork) + if timeout > 0 { + if w.t == nil { + w.t = time.NewTimer(timeout) + } else { + w.t.Reset(timeout) + } + w.deadline = time.Now().Add(timeout) + } else { + w.deadline = zeroTime + } + return w +} + +func releasePipelineWork(pool *sync.Pool, w *pipelineWork) { + if w.t != nil { + w.t.Stop() + } + w.reqCopy.Reset() + w.respCopy.Reset() + w.req = nil + w.resp = nil + w.err = nil + pool.Put(w) +} diff --git a/vendor/github.com/valyala/fasthttp/coarseTime.go b/vendor/github.com/valyala/fasthttp/coarseTime.go new file mode 100644 index 000000000..4679df689 --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/coarseTime.go @@ -0,0 +1,13 @@ +package fasthttp + +import ( + "time" +) + +// CoarseTimeNow returns the current time truncated to the nearest second. +// +// Deprecated: This is slower than calling time.Now() directly. +// This is now time.Now().Truncate(time.Second) shortcut. +func CoarseTimeNow() time.Time { + return time.Now().Truncate(time.Second) +} diff --git a/vendor/github.com/valyala/fasthttp/compress.go b/vendor/github.com/valyala/fasthttp/compress.go new file mode 100644 index 000000000..6550c9e1f --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/compress.go @@ -0,0 +1,438 @@ +package fasthttp + +import ( + "bytes" + "fmt" + "io" + "os" + "sync" + + "github.com/klauspost/compress/flate" + "github.com/klauspost/compress/gzip" + "github.com/klauspost/compress/zlib" + "github.com/valyala/bytebufferpool" + "github.com/valyala/fasthttp/stackless" +) + +// Supported compression levels. 
+const ( + CompressNoCompression = flate.NoCompression + CompressBestSpeed = flate.BestSpeed + CompressBestCompression = flate.BestCompression + CompressDefaultCompression = 6 // flate.DefaultCompression + CompressHuffmanOnly = -2 // flate.HuffmanOnly +) + +func acquireGzipReader(r io.Reader) (*gzip.Reader, error) { + v := gzipReaderPool.Get() + if v == nil { + return gzip.NewReader(r) + } + zr := v.(*gzip.Reader) + if err := zr.Reset(r); err != nil { + return nil, err + } + return zr, nil +} + +func releaseGzipReader(zr *gzip.Reader) { + zr.Close() + gzipReaderPool.Put(zr) +} + +var gzipReaderPool sync.Pool + +func acquireFlateReader(r io.Reader) (io.ReadCloser, error) { + v := flateReaderPool.Get() + if v == nil { + zr, err := zlib.NewReader(r) + if err != nil { + return nil, err + } + return zr, nil + } + zr := v.(io.ReadCloser) + if err := resetFlateReader(zr, r); err != nil { + return nil, err + } + return zr, nil +} + +func releaseFlateReader(zr io.ReadCloser) { + zr.Close() + flateReaderPool.Put(zr) +} + +func resetFlateReader(zr io.ReadCloser, r io.Reader) error { + zrr, ok := zr.(zlib.Resetter) + if !ok { + panic("BUG: zlib.Reader doesn't implement zlib.Resetter???") + } + return zrr.Reset(r, nil) +} + +var flateReaderPool sync.Pool + +func acquireStacklessGzipWriter(w io.Writer, level int) stackless.Writer { + nLevel := normalizeCompressLevel(level) + p := stacklessGzipWriterPoolMap[nLevel] + v := p.Get() + if v == nil { + return stackless.NewWriter(w, func(w io.Writer) stackless.Writer { + return acquireRealGzipWriter(w, level) + }) + } + sw := v.(stackless.Writer) + sw.Reset(w) + return sw +} + +func releaseStacklessGzipWriter(sw stackless.Writer, level int) { + sw.Close() + nLevel := normalizeCompressLevel(level) + p := stacklessGzipWriterPoolMap[nLevel] + p.Put(sw) +} + +func acquireRealGzipWriter(w io.Writer, level int) *gzip.Writer { + nLevel := normalizeCompressLevel(level) + p := realGzipWriterPoolMap[nLevel] + v := p.Get() + if v == nil { + zw, err := gzip.NewWriterLevel(w, level) + if err != nil { + panic(fmt.Sprintf("BUG: unexpected error from gzip.NewWriterLevel(%d): %s", level, err)) + } + return zw + } + zw := v.(*gzip.Writer) + zw.Reset(w) + return zw +} + +func releaseRealGzipWriter(zw *gzip.Writer, level int) { + zw.Close() + nLevel := normalizeCompressLevel(level) + p := realGzipWriterPoolMap[nLevel] + p.Put(zw) +} + +var ( + stacklessGzipWriterPoolMap = newCompressWriterPoolMap() + realGzipWriterPoolMap = newCompressWriterPoolMap() +) + +// AppendGzipBytesLevel appends gzipped src to dst using the given +// compression level and returns the resulting dst. +// +// Supported compression levels are: +// +// * CompressNoCompression +// * CompressBestSpeed +// * CompressBestCompression +// * CompressDefaultCompression +// * CompressHuffmanOnly +func AppendGzipBytesLevel(dst, src []byte, level int) []byte { + w := &byteSliceWriter{dst} + WriteGzipLevel(w, src, level) //nolint:errcheck + return w.b +} + +// WriteGzipLevel writes gzipped p to w using the given compression level +// and returns the number of compressed bytes written to w. 
+// +// Supported compression levels are: +// +// * CompressNoCompression +// * CompressBestSpeed +// * CompressBestCompression +// * CompressDefaultCompression +// * CompressHuffmanOnly +func WriteGzipLevel(w io.Writer, p []byte, level int) (int, error) { + switch w.(type) { + case *byteSliceWriter, + *bytes.Buffer, + *bytebufferpool.ByteBuffer: + // These writers don't block, so we can just use stacklessWriteGzip + ctx := &compressCtx{ + w: w, + p: p, + level: level, + } + stacklessWriteGzip(ctx) + return len(p), nil + default: + zw := acquireStacklessGzipWriter(w, level) + n, err := zw.Write(p) + releaseStacklessGzipWriter(zw, level) + return n, err + } +} + +var stacklessWriteGzip = stackless.NewFunc(nonblockingWriteGzip) + +func nonblockingWriteGzip(ctxv interface{}) { + ctx := ctxv.(*compressCtx) + zw := acquireRealGzipWriter(ctx.w, ctx.level) + + _, err := zw.Write(ctx.p) + if err != nil { + panic(fmt.Sprintf("BUG: gzip.Writer.Write for len(p)=%d returned unexpected error: %s", len(ctx.p), err)) + } + + releaseRealGzipWriter(zw, ctx.level) +} + +// WriteGzip writes gzipped p to w and returns the number of compressed +// bytes written to w. +func WriteGzip(w io.Writer, p []byte) (int, error) { + return WriteGzipLevel(w, p, CompressDefaultCompression) +} + +// AppendGzipBytes appends gzipped src to dst and returns the resulting dst. +func AppendGzipBytes(dst, src []byte) []byte { + return AppendGzipBytesLevel(dst, src, CompressDefaultCompression) +} + +// WriteGunzip writes ungzipped p to w and returns the number of uncompressed +// bytes written to w. +func WriteGunzip(w io.Writer, p []byte) (int, error) { + r := &byteSliceReader{p} + zr, err := acquireGzipReader(r) + if err != nil { + return 0, err + } + n, err := copyZeroAlloc(w, zr) + releaseGzipReader(zr) + nn := int(n) + if int64(nn) != n { + return 0, fmt.Errorf("too much data gunzipped: %d", n) + } + return nn, err +} + +// AppendGunzipBytes appends gunzipped src to dst and returns the resulting dst. +func AppendGunzipBytes(dst, src []byte) ([]byte, error) { + w := &byteSliceWriter{dst} + _, err := WriteGunzip(w, src) + return w.b, err +} + +// AppendDeflateBytesLevel appends deflated src to dst using the given +// compression level and returns the resulting dst. +// +// Supported compression levels are: +// +// * CompressNoCompression +// * CompressBestSpeed +// * CompressBestCompression +// * CompressDefaultCompression +// * CompressHuffmanOnly +func AppendDeflateBytesLevel(dst, src []byte, level int) []byte { + w := &byteSliceWriter{dst} + WriteDeflateLevel(w, src, level) //nolint:errcheck + return w.b +} + +// WriteDeflateLevel writes deflated p to w using the given compression level +// and returns the number of compressed bytes written to w. 
+// +// Supported compression levels are: +// +// * CompressNoCompression +// * CompressBestSpeed +// * CompressBestCompression +// * CompressDefaultCompression +// * CompressHuffmanOnly +func WriteDeflateLevel(w io.Writer, p []byte, level int) (int, error) { + switch w.(type) { + case *byteSliceWriter, + *bytes.Buffer, + *bytebufferpool.ByteBuffer: + // These writers don't block, so we can just use stacklessWriteDeflate + ctx := &compressCtx{ + w: w, + p: p, + level: level, + } + stacklessWriteDeflate(ctx) + return len(p), nil + default: + zw := acquireStacklessDeflateWriter(w, level) + n, err := zw.Write(p) + releaseStacklessDeflateWriter(zw, level) + return n, err + } +} + +var stacklessWriteDeflate = stackless.NewFunc(nonblockingWriteDeflate) + +func nonblockingWriteDeflate(ctxv interface{}) { + ctx := ctxv.(*compressCtx) + zw := acquireRealDeflateWriter(ctx.w, ctx.level) + + _, err := zw.Write(ctx.p) + if err != nil { + panic(fmt.Sprintf("BUG: zlib.Writer.Write for len(p)=%d returned unexpected error: %s", len(ctx.p), err)) + } + + releaseRealDeflateWriter(zw, ctx.level) +} + +type compressCtx struct { + w io.Writer + p []byte + level int +} + +// WriteDeflate writes deflated p to w and returns the number of compressed +// bytes written to w. +func WriteDeflate(w io.Writer, p []byte) (int, error) { + return WriteDeflateLevel(w, p, CompressDefaultCompression) +} + +// AppendDeflateBytes appends deflated src to dst and returns the resulting dst. +func AppendDeflateBytes(dst, src []byte) []byte { + return AppendDeflateBytesLevel(dst, src, CompressDefaultCompression) +} + +// WriteInflate writes inflated p to w and returns the number of uncompressed +// bytes written to w. +func WriteInflate(w io.Writer, p []byte) (int, error) { + r := &byteSliceReader{p} + zr, err := acquireFlateReader(r) + if err != nil { + return 0, err + } + n, err := copyZeroAlloc(w, zr) + releaseFlateReader(zr) + nn := int(n) + if int64(nn) != n { + return 0, fmt.Errorf("too much data inflated: %d", n) + } + return nn, err +} + +// AppendInflateBytes appends inflated src to dst and returns the resulting dst. +func AppendInflateBytes(dst, src []byte) ([]byte, error) { + w := &byteSliceWriter{dst} + _, err := WriteInflate(w, src) + return w.b, err +} + +type byteSliceWriter struct { + b []byte +} + +func (w *byteSliceWriter) Write(p []byte) (int, error) { + w.b = append(w.b, p...) 
+ return len(p), nil +} + +type byteSliceReader struct { + b []byte +} + +func (r *byteSliceReader) Read(p []byte) (int, error) { + if len(r.b) == 0 { + return 0, io.EOF + } + n := copy(p, r.b) + r.b = r.b[n:] + return n, nil +} + +func acquireStacklessDeflateWriter(w io.Writer, level int) stackless.Writer { + nLevel := normalizeCompressLevel(level) + p := stacklessDeflateWriterPoolMap[nLevel] + v := p.Get() + if v == nil { + return stackless.NewWriter(w, func(w io.Writer) stackless.Writer { + return acquireRealDeflateWriter(w, level) + }) + } + sw := v.(stackless.Writer) + sw.Reset(w) + return sw +} + +func releaseStacklessDeflateWriter(sw stackless.Writer, level int) { + sw.Close() + nLevel := normalizeCompressLevel(level) + p := stacklessDeflateWriterPoolMap[nLevel] + p.Put(sw) +} + +func acquireRealDeflateWriter(w io.Writer, level int) *zlib.Writer { + nLevel := normalizeCompressLevel(level) + p := realDeflateWriterPoolMap[nLevel] + v := p.Get() + if v == nil { + zw, err := zlib.NewWriterLevel(w, level) + if err != nil { + panic(fmt.Sprintf("BUG: unexpected error from zlib.NewWriterLevel(%d): %s", level, err)) + } + return zw + } + zw := v.(*zlib.Writer) + zw.Reset(w) + return zw +} + +func releaseRealDeflateWriter(zw *zlib.Writer, level int) { + zw.Close() + nLevel := normalizeCompressLevel(level) + p := realDeflateWriterPoolMap[nLevel] + p.Put(zw) +} + +var ( + stacklessDeflateWriterPoolMap = newCompressWriterPoolMap() + realDeflateWriterPoolMap = newCompressWriterPoolMap() +) + +func newCompressWriterPoolMap() []*sync.Pool { + // Initialize pools for all the compression levels defined + // in https://golang.org/pkg/compress/flate/#pkg-constants . + // Compression levels are normalized with normalizeCompressLevel, + // so the fit [0..11]. + var m []*sync.Pool + for i := 0; i < 12; i++ { + m = append(m, &sync.Pool{}) + } + return m +} + +func isFileCompressible(f *os.File, minCompressRatio float64) bool { + // Try compressing the first 4kb of of the file + // and see if it can be compressed by more than + // the given minCompressRatio. + b := bytebufferpool.Get() + zw := acquireStacklessGzipWriter(b, CompressDefaultCompression) + lr := &io.LimitedReader{ + R: f, + N: 4096, + } + _, err := copyZeroAlloc(zw, lr) + releaseStacklessGzipWriter(zw, CompressDefaultCompression) + f.Seek(0, 0) //nolint:errcheck + if err != nil { + return false + } + + n := 4096 - lr.N + zn := len(b.B) + bytebufferpool.Put(b) + return float64(zn) < float64(n)*minCompressRatio +} + +// normalizes compression level into [0..11], so it could be used as an index +// in *PoolMap. +func normalizeCompressLevel(level int) int { + // -2 is the lowest compression level - CompressHuffmanOnly + // 9 is the highest compression level - CompressBestCompression + if level < -2 || level > 9 { + level = CompressDefaultCompression + } + return level + 2 +} diff --git a/vendor/github.com/valyala/fasthttp/cookie.go b/vendor/github.com/valyala/fasthttp/cookie.go new file mode 100644 index 000000000..9e9bd8717 --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/cookie.go @@ -0,0 +1,550 @@ +package fasthttp + +import ( + "bytes" + "errors" + "io" + "sync" + "time" +) + +var zeroTime time.Time + +var ( + // CookieExpireDelete may be set on Cookie.Expire for expiring the given cookie. + CookieExpireDelete = time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC) + + // CookieExpireUnlimited indicates that the cookie doesn't expire. 
+ CookieExpireUnlimited = zeroTime +) + +// CookieSameSite is an enum for the mode in which the SameSite flag should be set for the given cookie. +// See https://tools.ietf.org/html/draft-ietf-httpbis-cookie-same-site-00 for details. +type CookieSameSite int + +const ( + // CookieSameSiteDisabled removes the SameSite flag + CookieSameSiteDisabled CookieSameSite = iota + // CookieSameSiteDefaultMode sets the SameSite flag + CookieSameSiteDefaultMode + // CookieSameSiteLaxMode sets the SameSite flag with the "Lax" parameter + CookieSameSiteLaxMode + // CookieSameSiteStrictMode sets the SameSite flag with the "Strict" parameter + CookieSameSiteStrictMode + // CookieSameSiteNoneMode sets the SameSite flag with the "None" parameter + // see https://tools.ietf.org/html/draft-west-cookie-incrementalism-00 + CookieSameSiteNoneMode +) + +// AcquireCookie returns an empty Cookie object from the pool. +// +// The returned object may be returned back to the pool with ReleaseCookie. +// This allows reducing GC load. +func AcquireCookie() *Cookie { + return cookiePool.Get().(*Cookie) +} + +// ReleaseCookie returns the Cookie object acquired with AcquireCookie back +// to the pool. +// +// Do not access released Cookie object, otherwise data races may occur. +func ReleaseCookie(c *Cookie) { + c.Reset() + cookiePool.Put(c) +} + +var cookiePool = &sync.Pool{ + New: func() interface{} { + return &Cookie{} + }, +} + +// Cookie represents HTTP response cookie. +// +// Do not copy Cookie objects. Create new object and use CopyTo instead. +// +// Cookie instance MUST NOT be used from concurrently running goroutines. +type Cookie struct { + noCopy noCopy //nolint:unused,structcheck + + key []byte + value []byte + expire time.Time + maxAge int + domain []byte + path []byte + + httpOnly bool + secure bool + sameSite CookieSameSite + + bufKV argsKV + buf []byte +} + +// CopyTo copies src cookie to c. +func (c *Cookie) CopyTo(src *Cookie) { + c.Reset() + c.key = append(c.key[:0], src.key...) + c.value = append(c.value[:0], src.value...) + c.expire = src.expire + c.maxAge = src.maxAge + c.domain = append(c.domain[:0], src.domain...) + c.path = append(c.path[:0], src.path...) + c.httpOnly = src.httpOnly + c.secure = src.secure + c.sameSite = src.sameSite +} + +// HTTPOnly returns true if the cookie is http only. +func (c *Cookie) HTTPOnly() bool { + return c.httpOnly +} + +// SetHTTPOnly sets cookie's httpOnly flag to the given value. +func (c *Cookie) SetHTTPOnly(httpOnly bool) { + c.httpOnly = httpOnly +} + +// Secure returns true if the cookie is secure. +func (c *Cookie) Secure() bool { + return c.secure +} + +// SetSecure sets cookie's secure flag to the given value. +func (c *Cookie) SetSecure(secure bool) { + c.secure = secure +} + +// SameSite returns the SameSite mode. +func (c *Cookie) SameSite() CookieSameSite { + return c.sameSite +} + +// SetSameSite sets the cookie's SameSite flag to the given value. +// set value CookieSameSiteNoneMode will set Secure to true also to avoid browser rejection +func (c *Cookie) SetSameSite(mode CookieSameSite) { + c.sameSite = mode + if mode == CookieSameSiteNoneMode { + c.SetSecure(true) + } +} + +// Path returns cookie path. +func (c *Cookie) Path() []byte { + return c.path +} + +// SetPath sets cookie path. +func (c *Cookie) SetPath(path string) { + c.buf = append(c.buf[:0], path...) + c.path = normalizePath(c.path, c.buf) +} + +// SetPathBytes sets cookie path. +func (c *Cookie) SetPathBytes(path []byte) { + c.buf = append(c.buf[:0], path...) 
+ c.path = normalizePath(c.path, c.buf) +} + +// Domain returns cookie domain. +// +// The returned domain is valid until the next Cookie modification method call. +func (c *Cookie) Domain() []byte { + return c.domain +} + +// SetDomain sets cookie domain. +func (c *Cookie) SetDomain(domain string) { + c.domain = append(c.domain[:0], domain...) +} + +// SetDomainBytes sets cookie domain. +func (c *Cookie) SetDomainBytes(domain []byte) { + c.domain = append(c.domain[:0], domain...) +} + +// MaxAge returns the seconds until the cookie is meant to expire or 0 +// if no max age. +func (c *Cookie) MaxAge() int { + return c.maxAge +} + +// SetMaxAge sets cookie expiration time based on seconds. This takes precedence +// over any absolute expiry set on the cookie +// +// Set max age to 0 to unset +func (c *Cookie) SetMaxAge(seconds int) { + c.maxAge = seconds +} + +// Expire returns cookie expiration time. +// +// CookieExpireUnlimited is returned if cookie doesn't expire +func (c *Cookie) Expire() time.Time { + expire := c.expire + if expire.IsZero() { + expire = CookieExpireUnlimited + } + return expire +} + +// SetExpire sets cookie expiration time. +// +// Set expiration time to CookieExpireDelete for expiring (deleting) +// the cookie on the client. +// +// By default cookie lifetime is limited by browser session. +func (c *Cookie) SetExpire(expire time.Time) { + c.expire = expire +} + +// Value returns cookie value. +// +// The returned value is valid until the next Cookie modification method call. +func (c *Cookie) Value() []byte { + return c.value +} + +// SetValue sets cookie value. +func (c *Cookie) SetValue(value string) { + c.value = append(c.value[:0], value...) +} + +// SetValueBytes sets cookie value. +func (c *Cookie) SetValueBytes(value []byte) { + c.value = append(c.value[:0], value...) +} + +// Key returns cookie name. +// +// The returned value is valid until the next Cookie modification method call. +func (c *Cookie) Key() []byte { + return c.key +} + +// SetKey sets cookie name. +func (c *Cookie) SetKey(key string) { + c.key = append(c.key[:0], key...) +} + +// SetKeyBytes sets cookie name. +func (c *Cookie) SetKeyBytes(key []byte) { + c.key = append(c.key[:0], key...) +} + +// Reset clears the cookie. +func (c *Cookie) Reset() { + c.key = c.key[:0] + c.value = c.value[:0] + c.expire = zeroTime + c.maxAge = 0 + c.domain = c.domain[:0] + c.path = c.path[:0] + c.httpOnly = false + c.secure = false + c.sameSite = CookieSameSiteDisabled +} + +// AppendBytes appends cookie representation to dst and returns +// the extended dst. +func (c *Cookie) AppendBytes(dst []byte) []byte { + if len(c.key) > 0 { + dst = append(dst, c.key...) + dst = append(dst, '=') + } + dst = append(dst, c.value...) + + if c.maxAge > 0 { + dst = append(dst, ';', ' ') + dst = append(dst, strCookieMaxAge...) + dst = append(dst, '=') + dst = AppendUint(dst, c.maxAge) + } else if !c.expire.IsZero() { + c.bufKV.value = AppendHTTPDate(c.bufKV.value[:0], c.expire) + dst = append(dst, ';', ' ') + dst = append(dst, strCookieExpires...) + dst = append(dst, '=') + dst = append(dst, c.bufKV.value...) + } + if len(c.domain) > 0 { + dst = appendCookiePart(dst, strCookieDomain, c.domain) + } + if len(c.path) > 0 { + dst = appendCookiePart(dst, strCookiePath, c.path) + } + if c.httpOnly { + dst = append(dst, ';', ' ') + dst = append(dst, strCookieHTTPOnly...) + } + if c.secure { + dst = append(dst, ';', ' ') + dst = append(dst, strCookieSecure...) 
+ } + switch c.sameSite { + case CookieSameSiteDefaultMode: + dst = append(dst, ';', ' ') + dst = append(dst, strCookieSameSite...) + case CookieSameSiteLaxMode: + dst = append(dst, ';', ' ') + dst = append(dst, strCookieSameSite...) + dst = append(dst, '=') + dst = append(dst, strCookieSameSiteLax...) + case CookieSameSiteStrictMode: + dst = append(dst, ';', ' ') + dst = append(dst, strCookieSameSite...) + dst = append(dst, '=') + dst = append(dst, strCookieSameSiteStrict...) + case CookieSameSiteNoneMode: + dst = append(dst, ';', ' ') + dst = append(dst, strCookieSameSite...) + dst = append(dst, '=') + dst = append(dst, strCookieSameSiteNone...) + } + return dst +} + +// Cookie returns cookie representation. +// +// The returned value is valid until the next call to Cookie methods. +func (c *Cookie) Cookie() []byte { + c.buf = c.AppendBytes(c.buf[:0]) + return c.buf +} + +// String returns cookie representation. +func (c *Cookie) String() string { + return string(c.Cookie()) +} + +// WriteTo writes cookie representation to w. +// +// WriteTo implements io.WriterTo interface. +func (c *Cookie) WriteTo(w io.Writer) (int64, error) { + n, err := w.Write(c.Cookie()) + return int64(n), err +} + +var errNoCookies = errors.New("no cookies found") + +// Parse parses Set-Cookie header. +func (c *Cookie) Parse(src string) error { + c.buf = append(c.buf[:0], src...) + return c.ParseBytes(c.buf) +} + +// ParseBytes parses Set-Cookie header. +func (c *Cookie) ParseBytes(src []byte) error { + c.Reset() + + var s cookieScanner + s.b = src + + kv := &c.bufKV + if !s.next(kv) { + return errNoCookies + } + + c.key = append(c.key[:0], kv.key...) + c.value = append(c.value[:0], kv.value...) + + for s.next(kv) { + if len(kv.key) != 0 { + // Case insensitive switch on first char + switch kv.key[0] | 0x20 { + case 'm': + if caseInsensitiveCompare(strCookieMaxAge, kv.key) { + maxAge, err := ParseUint(kv.value) + if err != nil { + return err + } + c.maxAge = maxAge + } + + case 'e': // "expires" + if caseInsensitiveCompare(strCookieExpires, kv.key) { + v := b2s(kv.value) + // Try the same two formats as net/http + // See: https://github.com/golang/go/blob/00379be17e63a5b75b3237819392d2dc3b313a27/src/net/http/cookie.go#L133-L135 + exptime, err := time.ParseInLocation(time.RFC1123, v, time.UTC) + if err != nil { + exptime, err = time.Parse("Mon, 02-Jan-2006 15:04:05 MST", v) + if err != nil { + return err + } + } + c.expire = exptime + } + + case 'd': // "domain" + if caseInsensitiveCompare(strCookieDomain, kv.key) { + c.domain = append(c.domain[:0], kv.value...) + } + + case 'p': // "path" + if caseInsensitiveCompare(strCookiePath, kv.key) { + c.path = append(c.path[:0], kv.value...) 
+ } + + case 's': // "samesite" + if caseInsensitiveCompare(strCookieSameSite, kv.key) { + // Case insensitive switch on first char + switch kv.value[0] | 0x20 { + case 'l': // "lax" + if caseInsensitiveCompare(strCookieSameSiteLax, kv.value) { + c.sameSite = CookieSameSiteLaxMode + } + case 's': // "strict" + if caseInsensitiveCompare(strCookieSameSiteStrict, kv.value) { + c.sameSite = CookieSameSiteStrictMode + } + case 'n': // "none" + if caseInsensitiveCompare(strCookieSameSiteNone, kv.value) { + c.sameSite = CookieSameSiteNoneMode + } + } + } + } + + } else if len(kv.value) != 0 { + // Case insensitive switch on first char + switch kv.value[0] | 0x20 { + case 'h': // "httponly" + if caseInsensitiveCompare(strCookieHTTPOnly, kv.value) { + c.httpOnly = true + } + + case 's': // "secure" + if caseInsensitiveCompare(strCookieSecure, kv.value) { + c.secure = true + } else if caseInsensitiveCompare(strCookieSameSite, kv.value) { + c.sameSite = CookieSameSiteDefaultMode + } + } + } // else empty or no match + } + return nil +} + +func appendCookiePart(dst, key, value []byte) []byte { + dst = append(dst, ';', ' ') + dst = append(dst, key...) + dst = append(dst, '=') + return append(dst, value...) +} + +func getCookieKey(dst, src []byte) []byte { + n := bytes.IndexByte(src, '=') + if n >= 0 { + src = src[:n] + } + return decodeCookieArg(dst, src, false) +} + +func appendRequestCookieBytes(dst []byte, cookies []argsKV) []byte { + for i, n := 0, len(cookies); i < n; i++ { + kv := &cookies[i] + if len(kv.key) > 0 { + dst = append(dst, kv.key...) + dst = append(dst, '=') + } + dst = append(dst, kv.value...) + if i+1 < n { + dst = append(dst, ';', ' ') + } + } + return dst +} + +// For Response we can not use the above function as response cookies +// already contain the key= in the value. +func appendResponseCookieBytes(dst []byte, cookies []argsKV) []byte { + for i, n := 0, len(cookies); i < n; i++ { + kv := &cookies[i] + dst = append(dst, kv.value...) + if i+1 < n { + dst = append(dst, ';', ' ') + } + } + return dst +} + +func parseRequestCookies(cookies []argsKV, src []byte) []argsKV { + var s cookieScanner + s.b = src + var kv *argsKV + cookies, kv = allocArg(cookies) + for s.next(kv) { + if len(kv.key) > 0 || len(kv.value) > 0 { + cookies, kv = allocArg(cookies) + } + } + return releaseArg(cookies) +} + +type cookieScanner struct { + b []byte +} + +func (s *cookieScanner) next(kv *argsKV) bool { + b := s.b + if len(b) == 0 { + return false + } + + isKey := true + k := 0 + for i, c := range b { + switch c { + case '=': + if isKey { + isKey = false + kv.key = decodeCookieArg(kv.key, b[:i], false) + k = i + 1 + } + case ';': + if isKey { + kv.key = kv.key[:0] + } + kv.value = decodeCookieArg(kv.value, b[k:i], true) + s.b = b[i+1:] + return true + } + } + + if isKey { + kv.key = kv.key[:0] + } + kv.value = decodeCookieArg(kv.value, b[k:], true) + s.b = b[len(b):] + return true +} + +func decodeCookieArg(dst, src []byte, skipQuotes bool) []byte { + for len(src) > 0 && src[0] == ' ' { + src = src[1:] + } + for len(src) > 0 && src[len(src)-1] == ' ' { + src = src[:len(src)-1] + } + if skipQuotes { + if len(src) > 1 && src[0] == '"' && src[len(src)-1] == '"' { + src = src[1 : len(src)-1] + } + } + return append(dst[:0], src...) +} + +// caseInsensitiveCompare does a case insensitive equality comparison of +// two []byte. Assumes only letters need to be matched. 
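[Editor's note: a hedged sketch of how the Cookie type and its Parse/AppendBytes helpers above are typically used; the header values are invented for illustration and are not part of the vendored code.]

package main

import (
	"log"

	"github.com/valyala/fasthttp"
)

func main() {
	// Build a Set-Cookie header value from scratch.
	c := fasthttp.AcquireCookie()
	defer fasthttp.ReleaseCookie(c)
	c.SetKey("session")
	c.SetValue("abc123")
	c.SetPath("/")
	c.SetHTTPOnly(true)
	c.SetSameSite(fasthttp.CookieSameSiteLaxMode)
	log.Printf("Set-Cookie: %s", c.String())

	// Parse an existing Set-Cookie header (value is illustrative).
	var parsed fasthttp.Cookie
	if err := parsed.Parse("id=42; Path=/admin; Secure; SameSite=Strict"); err != nil {
		log.Fatalf("cannot parse cookie: %s", err)
	}
	log.Printf("key=%q value=%q secure=%v", parsed.Key(), parsed.Value(), parsed.Secure())
}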
+func caseInsensitiveCompare(a, b []byte) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if a[i]|0x20 != b[i]|0x20 { + return false + } + } + return true +} diff --git a/vendor/github.com/valyala/fasthttp/doc.go b/vendor/github.com/valyala/fasthttp/doc.go new file mode 100644 index 000000000..efcd4a033 --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/doc.go @@ -0,0 +1,37 @@ +/* +Package fasthttp provides fast HTTP server and client API. + +Fasthttp provides the following features: + + * Optimized for speed. Easily handles more than 100K qps and more than 1M + concurrent keep-alive connections on modern hardware. + * Optimized for low memory usage. + * Easy 'Connection: Upgrade' support via RequestCtx.Hijack. + * Server provides the following anti-DoS limits: + + * The number of concurrent connections. + * The number of concurrent connections per client IP. + * The number of requests per connection. + * Request read timeout. + * Response write timeout. + * Maximum request header size. + * Maximum request body size. + * Maximum request execution time. + * Maximum keep-alive connection lifetime. + * Early filtering out non-GET requests. + + * A lot of additional useful info is exposed to request handler: + + * Server and client address. + * Per-request logger. + * Unique request id. + * Request start time. + * Connection start time. + * Request sequence number for the current connection. + + * Client supports automatic retry on idempotent requests' failure. + * Fasthttp API is designed with the ability to extend existing client + and server implementations or to write custom client and server + implementations from scratch. +*/ +package fasthttp diff --git a/vendor/github.com/valyala/fasthttp/fasthttputil/doc.go b/vendor/github.com/valyala/fasthttp/fasthttputil/doc.go new file mode 100644 index 000000000..9cf69e710 --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/fasthttputil/doc.go @@ -0,0 +1,2 @@ +// Package fasthttputil provides utility functions for fasthttp. 
+package fasthttputil diff --git a/vendor/github.com/valyala/fasthttp/fasthttputil/ecdsa.key b/vendor/github.com/valyala/fasthttp/fasthttputil/ecdsa.key new file mode 100644 index 000000000..7e201fc42 --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/fasthttputil/ecdsa.key @@ -0,0 +1,5 @@ +-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIBpQbZ6a5jL1Yh4wdP6yZk4MKjYWArD/QOLENFw8vbELoAoGCCqGSM49 +AwEHoUQDQgAEKQCZWgE2IBhb47ot8MIs1D4KSisHYlZ41IWyeutpjb0fjwwIhimh +pl1Qld1/d2j3Z3vVyfa5yD+ncV7qCFZuSg== +-----END EC PRIVATE KEY----- diff --git a/vendor/github.com/valyala/fasthttp/fasthttputil/ecdsa.pem b/vendor/github.com/valyala/fasthttp/fasthttputil/ecdsa.pem new file mode 100644 index 000000000..ca1a7f2e9 --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/fasthttputil/ecdsa.pem @@ -0,0 +1,10 @@ +-----BEGIN CERTIFICATE----- +MIIBbTCCAROgAwIBAgIQPo718S+K+G7hc1SgTEU4QDAKBggqhkjOPQQDAjASMRAw +DgYDVQQKEwdBY21lIENvMB4XDTE3MDQyMDIxMDExNFoXDTE4MDQyMDIxMDExNFow +EjEQMA4GA1UEChMHQWNtZSBDbzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABCkA +mVoBNiAYW+O6LfDCLNQ+CkorB2JWeNSFsnrraY29H48MCIYpoaZdUJXdf3do92d7 +1cn2ucg/p3Fe6ghWbkqjSzBJMA4GA1UdDwEB/wQEAwIFoDATBgNVHSUEDDAKBggr +BgEFBQcDATAMBgNVHRMBAf8EAjAAMBQGA1UdEQQNMAuCCWxvY2FsaG9zdDAKBggq +hkjOPQQDAgNIADBFAiEAoLAIQkvSuIcHUqyWroA6yWYw2fznlRH/uO9/hMCxUCEC +IClRYb/5O9eD/Eq/ozPnwNpsQHOeYefEhadJ/P82y0lG +-----END CERTIFICATE----- diff --git a/vendor/github.com/valyala/fasthttp/fasthttputil/inmemory_listener.go b/vendor/github.com/valyala/fasthttp/fasthttputil/inmemory_listener.go new file mode 100644 index 000000000..9997d1cc4 --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/fasthttputil/inmemory_listener.go @@ -0,0 +1,97 @@ +package fasthttputil + +import ( + "errors" + "net" + "sync" +) + +// ErrInmemoryListenerClosed indicates that the InmemoryListener is already closed. +var ErrInmemoryListenerClosed = errors.New("InmemoryListener is already closed: use of closed network connection") + +// InmemoryListener provides in-memory dialer<->net.Listener implementation. +// +// It may be used either for fast in-process client<->server communications +// without network stack overhead or for client<->server tests. +type InmemoryListener struct { + lock sync.Mutex + closed bool + conns chan acceptConn +} + +type acceptConn struct { + conn net.Conn + accepted chan struct{} +} + +// NewInmemoryListener returns new in-memory dialer<->net.Listener. +func NewInmemoryListener() *InmemoryListener { + return &InmemoryListener{ + conns: make(chan acceptConn, 1024), + } +} + +// Accept implements net.Listener's Accept. +// +// It is safe calling Accept from concurrently running goroutines. +// +// Accept returns new connection per each Dial call. +func (ln *InmemoryListener) Accept() (net.Conn, error) { + c, ok := <-ln.conns + if !ok { + return nil, ErrInmemoryListenerClosed + } + close(c.accepted) + return c.conn, nil +} + +// Close implements net.Listener's Close. +func (ln *InmemoryListener) Close() error { + var err error + + ln.lock.Lock() + if !ln.closed { + close(ln.conns) + ln.closed = true + } else { + err = ErrInmemoryListenerClosed + } + ln.lock.Unlock() + return err +} + +// Addr implements net.Listener's Addr. +func (ln *InmemoryListener) Addr() net.Addr { + return &net.UnixAddr{ + Name: "InmemoryListener", + Net: "memory", + } +} + +// Dial creates new client<->server connection. +// Just like a real Dial it only returns once the server +// has accepted the connection. +// +// It is safe calling Dial from concurrently running goroutines. 
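[Editor's note: a small sketch of how the in-memory listener above is usually wired together for in-process tests; the handler, addresses and request URI are illustrative assumptions, not part of the patch.]

package main

import (
	"log"
	"net"
	"time"

	"github.com/valyala/fasthttp"
	"github.com/valyala/fasthttp/fasthttputil"
)

func main() {
	ln := fasthttputil.NewInmemoryListener()

	// Serve fasthttp over the in-memory listener instead of a TCP socket.
	go func() {
		err := fasthttp.Serve(ln, func(ctx *fasthttp.RequestCtx) {
			ctx.WriteString("hello") //nolint:errcheck
		})
		if err != nil {
			log.Printf("server stopped: %s", err)
		}
	}()

	// The client dials the same listener; the Addr value is ignored by
	// the custom Dial below and is only used for illustration.
	c := &fasthttp.HostClient{
		Addr: "in-memory",
		Dial: func(addr string) (net.Conn, error) { return ln.Dial() },
	}

	req := fasthttp.AcquireRequest()
	resp := fasthttp.AcquireResponse()
	req.SetRequestURI("http://in-memory/")
	if err := c.DoTimeout(req, resp, time.Second); err != nil {
		log.Fatalf("request failed: %s", err)
	}
	log.Printf("body: %s", resp.Body())
}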
+func (ln *InmemoryListener) Dial() (net.Conn, error) { + pc := NewPipeConns() + cConn := pc.Conn1() + sConn := pc.Conn2() + ln.lock.Lock() + accepted := make(chan struct{}) + if !ln.closed { + ln.conns <- acceptConn{sConn, accepted} + // Wait until the connection has been accepted. + <-accepted + } else { + sConn.Close() + cConn.Close() + cConn = nil + } + ln.lock.Unlock() + + if cConn == nil { + return nil, ErrInmemoryListenerClosed + } + return cConn, nil +} diff --git a/vendor/github.com/valyala/fasthttp/fasthttputil/pipeconns.go b/vendor/github.com/valyala/fasthttp/fasthttputil/pipeconns.go new file mode 100644 index 000000000..c992da30c --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/fasthttputil/pipeconns.go @@ -0,0 +1,313 @@ +package fasthttputil + +import ( + "errors" + "io" + "net" + "sync" + "time" +) + +// NewPipeConns returns new bi-directional connection pipe. +// +// PipeConns is NOT safe for concurrent use by multiple goroutines! +func NewPipeConns() *PipeConns { + ch1 := make(chan *byteBuffer, 4) + ch2 := make(chan *byteBuffer, 4) + + pc := &PipeConns{ + stopCh: make(chan struct{}), + } + pc.c1.rCh = ch1 + pc.c1.wCh = ch2 + pc.c2.rCh = ch2 + pc.c2.wCh = ch1 + pc.c1.pc = pc + pc.c2.pc = pc + return pc +} + +// PipeConns provides bi-directional connection pipe, +// which use in-process memory as a transport. +// +// PipeConns must be created by calling NewPipeConns. +// +// PipeConns has the following additional features comparing to connections +// returned from net.Pipe(): +// +// * It is faster. +// * It buffers Write calls, so there is no need to have concurrent goroutine +// calling Read in order to unblock each Write call. +// * It supports read and write deadlines. +// +// PipeConns is NOT safe for concurrent use by multiple goroutines! +type PipeConns struct { + c1 pipeConn + c2 pipeConn + stopCh chan struct{} + stopChLock sync.Mutex +} + +// Conn1 returns the first end of bi-directional pipe. +// +// Data written to Conn1 may be read from Conn2. +// Data written to Conn2 may be read from Conn1. +func (pc *PipeConns) Conn1() net.Conn { + return &pc.c1 +} + +// Conn2 returns the second end of bi-directional pipe. +// +// Data written to Conn2 may be read from Conn1. +// Data written to Conn1 may be read from Conn2. +func (pc *PipeConns) Conn2() net.Conn { + return &pc.c2 +} + +// Close closes pipe connections. +func (pc *PipeConns) Close() error { + pc.stopChLock.Lock() + select { + case <-pc.stopCh: + default: + close(pc.stopCh) + } + pc.stopChLock.Unlock() + + return nil +} + +type pipeConn struct { + b *byteBuffer + bb []byte + + rCh chan *byteBuffer + wCh chan *byteBuffer + pc *PipeConns + + readDeadlineTimer *time.Timer + writeDeadlineTimer *time.Timer + + readDeadlineCh <-chan time.Time + writeDeadlineCh <-chan time.Time + + readDeadlineChLock sync.Mutex +} + +func (c *pipeConn) Write(p []byte) (int, error) { + b := acquireByteBuffer() + b.b = append(b.b[:0], p...) 
+ + select { + case <-c.pc.stopCh: + releaseByteBuffer(b) + return 0, errConnectionClosed + default: + } + + select { + case c.wCh <- b: + default: + select { + case c.wCh <- b: + case <-c.writeDeadlineCh: + c.writeDeadlineCh = closedDeadlineCh + return 0, ErrTimeout + case <-c.pc.stopCh: + releaseByteBuffer(b) + return 0, errConnectionClosed + } + } + + return len(p), nil +} + +func (c *pipeConn) Read(p []byte) (int, error) { + mayBlock := true + nn := 0 + for len(p) > 0 { + n, err := c.read(p, mayBlock) + nn += n + if err != nil { + if !mayBlock && err == errWouldBlock { + err = nil + } + return nn, err + } + p = p[n:] + mayBlock = false + } + + return nn, nil +} + +func (c *pipeConn) read(p []byte, mayBlock bool) (int, error) { + if len(c.bb) == 0 { + if err := c.readNextByteBuffer(mayBlock); err != nil { + return 0, err + } + } + n := copy(p, c.bb) + c.bb = c.bb[n:] + + return n, nil +} + +func (c *pipeConn) readNextByteBuffer(mayBlock bool) error { + releaseByteBuffer(c.b) + c.b = nil + + select { + case c.b = <-c.rCh: + default: + if !mayBlock { + return errWouldBlock + } + c.readDeadlineChLock.Lock() + readDeadlineCh := c.readDeadlineCh + c.readDeadlineChLock.Unlock() + select { + case c.b = <-c.rCh: + case <-readDeadlineCh: + c.readDeadlineChLock.Lock() + c.readDeadlineCh = closedDeadlineCh + c.readDeadlineChLock.Unlock() + // rCh may contain data when deadline is reached. + // Read the data before returning ErrTimeout. + select { + case c.b = <-c.rCh: + default: + return ErrTimeout + } + case <-c.pc.stopCh: + // rCh may contain data when stopCh is closed. + // Read the data before returning EOF. + select { + case c.b = <-c.rCh: + default: + return io.EOF + } + } + } + + c.bb = c.b.b + return nil +} + +var ( + errWouldBlock = errors.New("would block") + errConnectionClosed = errors.New("connection closed") +) + +type timeoutError struct { +} + +func (e *timeoutError) Error() string { + return "timeout" +} + +// Only implement the Timeout() function of the net.Error interface. +// This allows for checks like: +// +// if x, ok := err.(interface{ Timeout() bool }); ok && x.Timeout() { +func (e *timeoutError) Timeout() bool { + return true +} + +var ( + // ErrTimeout is returned from Read() or Write() on timeout. 
+ ErrTimeout = &timeoutError{} +) + +func (c *pipeConn) Close() error { + return c.pc.Close() +} + +func (c *pipeConn) LocalAddr() net.Addr { + return pipeAddr(0) +} + +func (c *pipeConn) RemoteAddr() net.Addr { + return pipeAddr(0) +} + +func (c *pipeConn) SetDeadline(deadline time.Time) error { + c.SetReadDeadline(deadline) //nolint:errcheck + c.SetWriteDeadline(deadline) //nolint:errcheck + return nil +} + +func (c *pipeConn) SetReadDeadline(deadline time.Time) error { + if c.readDeadlineTimer == nil { + c.readDeadlineTimer = time.NewTimer(time.Hour) + } + readDeadlineCh := updateTimer(c.readDeadlineTimer, deadline) + c.readDeadlineChLock.Lock() + c.readDeadlineCh = readDeadlineCh + c.readDeadlineChLock.Unlock() + return nil +} + +func (c *pipeConn) SetWriteDeadline(deadline time.Time) error { + if c.writeDeadlineTimer == nil { + c.writeDeadlineTimer = time.NewTimer(time.Hour) + } + c.writeDeadlineCh = updateTimer(c.writeDeadlineTimer, deadline) + return nil +} + +func updateTimer(t *time.Timer, deadline time.Time) <-chan time.Time { + if !t.Stop() { + select { + case <-t.C: + default: + } + } + if deadline.IsZero() { + return nil + } + d := -time.Since(deadline) + if d <= 0 { + return closedDeadlineCh + } + t.Reset(d) + return t.C +} + +var closedDeadlineCh = func() <-chan time.Time { + ch := make(chan time.Time) + close(ch) + return ch +}() + +type pipeAddr int + +func (pipeAddr) Network() string { + return "pipe" +} + +func (pipeAddr) String() string { + return "pipe" +} + +type byteBuffer struct { + b []byte +} + +func acquireByteBuffer() *byteBuffer { + return byteBufferPool.Get().(*byteBuffer) +} + +func releaseByteBuffer(b *byteBuffer) { + if b != nil { + byteBufferPool.Put(b) + } +} + +var byteBufferPool = &sync.Pool{ + New: func() interface{} { + return &byteBuffer{ + b: make([]byte, 1024), + } + }, +} diff --git a/vendor/github.com/valyala/fasthttp/fasthttputil/rsa.key b/vendor/github.com/valyala/fasthttp/fasthttputil/rsa.key new file mode 100644 index 000000000..00a79a3b5 --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/fasthttputil/rsa.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQD4IQusAs8PJdnG +3mURt/AXtgC+ceqLOatJ49JJE1VPTkMAy+oE1f1XvkMrYsHqmDf6GWVzgVXryL4U +wq2/nJSm56ddhN55nI8oSN3dtywUB8/ShelEN73nlN77PeD9tl6NksPwWaKrqxq0 +FlabRPZSQCfmgZbhDV8Sa8mfCkFU0G0lit6kLGceCKMvmW+9Bz7ebsYmVdmVMxmf +IJStFD44lWFTdUc65WISKEdW2ELcUefb0zOLw+0PCbXFGJH5x5ktksW8+BBk2Hkg +GeQRL/qPCccthbScO0VgNj3zJ3ZZL0ObSDAbvNDG85joeNjDNq5DT/BAZ0bOSbEF +sh+f9BAzAgMBAAECggEBAJWv2cq7Jw6MVwSRxYca38xuD6TUNBopgBvjREixURW2 +sNUaLuMb9Omp7fuOaE2N5rcJ+xnjPGIxh/oeN5MQctz9gwn3zf6vY+15h97pUb4D +uGvYPRDaT8YVGS+X9NMZ4ZCmqW2lpWzKnCFoGHcy8yZLbcaxBsRdvKzwOYGoPiFb +K2QuhXZ/1UPmqK9i2DFKtj40X6vBszTNboFxOVpXrPu0FJwLVSDf2hSZ4fMM0DH3 +YqwKcYf5te+hxGKgrqRA3tn0NCWii0in6QIwXMC+kMw1ebg/tZKqyDLMNptAK8J+ +DVw9m5X1seUHS5ehU/g2jrQrtK5WYn7MrFK4lBzlRwECgYEA/d1TeANYECDWRRDk +B0aaRZs87Rwl/J9PsvbsKvtU/bX+OfSOUjOa9iQBqn0LmU8GqusEET/QVUfocVwV +Bggf/5qDLxz100Rj0ags/yE/kNr0Bb31kkkKHFMnCT06YasR7qKllwrAlPJvQv9x +IzBKq+T/Dx08Wep9bCRSFhzRCnsCgYEA+jdeZXTDr/Vz+D2B3nAw1frqYFfGnEVY +wqmoK3VXMDkGuxsloO2rN+SyiUo3JNiQNPDub/t7175GH5pmKtZOlftePANsUjBj +wZ1D0rI5Bxu/71ibIUYIRVmXsTEQkh/ozoh3jXCZ9+bLgYiYx7789IUZZSokFQ3D +FICUT9KJ36kCgYAGoq9Y1rWJjmIrYfqj2guUQC+CfxbbGIrrwZqAsRsSmpwvhZ3m +tiSZxG0quKQB+NfSxdvQW5ulbwC7Xc3K35F+i9pb8+TVBdeaFkw+yu6vaZmxQLrX +fQM/pEjD7A7HmMIaO7QaU5SfEAsqdCTP56Y8AftMuNXn/8IRfo2KuGwaWwKBgFpU +ILzJoVdlad9E/Rw7LjYhZfkv1uBVXIyxyKcfrkEXZSmozDXDdxsvcZCEfVHM6Ipk 
+K/+7LuMcqp4AFEAEq8wTOdq6daFaHLkpt/FZK6M4TlruhtpFOPkoNc3e45eM83OT +6mziKINJC1CQ6m65sQHpBtjxlKMRG8rL/D6wx9s5AoGBAMRlqNPMwglT3hvDmsAt +9Lf9pdmhERUlHhD8bj8mDaBj2Aqv7f6VRJaYZqP403pKKQexuqcn80mtjkSAPFkN +Cj7BVt/RXm5uoxDTnfi26RF9F6yNDEJ7UU9+peBr99aazF/fTgW/1GcMkQnum8uV +c257YgaWmjK9uB0Y2r2VxS0G +-----END PRIVATE KEY----- diff --git a/vendor/github.com/valyala/fasthttp/fasthttputil/rsa.pem b/vendor/github.com/valyala/fasthttp/fasthttputil/rsa.pem new file mode 100644 index 000000000..93e77cd95 --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/fasthttputil/rsa.pem @@ -0,0 +1,17 @@ +-----BEGIN CERTIFICATE----- +MIICujCCAaKgAwIBAgIJAMbXnKZ/cikUMA0GCSqGSIb3DQEBCwUAMBUxEzARBgNV +BAMTCnVidW50dS5uYW4wHhcNMTUwMjA0MDgwMTM5WhcNMjUwMjAxMDgwMTM5WjAV +MRMwEQYDVQQDEwp1YnVudHUubmFuMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEA+CELrALPDyXZxt5lEbfwF7YAvnHqizmrSePSSRNVT05DAMvqBNX9V75D +K2LB6pg3+hllc4FV68i+FMKtv5yUpuenXYTeeZyPKEjd3bcsFAfP0oXpRDe955Te ++z3g/bZejZLD8Fmiq6satBZWm0T2UkAn5oGW4Q1fEmvJnwpBVNBtJYrepCxnHgij +L5lvvQc+3m7GJlXZlTMZnyCUrRQ+OJVhU3VHOuViEihHVthC3FHn29Mzi8PtDwm1 +xRiR+ceZLZLFvPgQZNh5IBnkES/6jwnHLYW0nDtFYDY98yd2WS9Dm0gwG7zQxvOY +6HjYwzauQ0/wQGdGzkmxBbIfn/QQMwIDAQABow0wCzAJBgNVHRMEAjAAMA0GCSqG +SIb3DQEBCwUAA4IBAQBQjKm/4KN/iTgXbLTL3i7zaxYXFLXsnT1tF+ay4VA8aj98 +L3JwRTciZ3A5iy/W4VSCt3eASwOaPWHKqDBB5RTtL73LoAqsWmO3APOGQAbixcQ2 +45GXi05OKeyiYRi1Nvq7Unv9jUkRDHUYVPZVSAjCpsXzPhFkmZoTRxmx5l0ZF7Li +K91lI5h+eFq0dwZwrmlPambyh1vQUi70VHv8DNToVU29kel7YLbxGbuqETfhrcy6 +X+Mha6RYITkAn5FqsZcKMsc9eYGEF4l3XV+oS7q6xfTxktYJMFTI18J0lQ2Lv/CI +whdMnYGntDQBE/iFCrJEGNsKGc38796GBOb5j+zd +-----END CERTIFICATE----- diff --git a/vendor/github.com/valyala/fasthttp/fs.go b/vendor/github.com/valyala/fasthttp/fs.go new file mode 100644 index 000000000..ca70f729a --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/fs.go @@ -0,0 +1,1273 @@ +package fasthttp + +import ( + "bytes" + "errors" + "fmt" + "html" + "io" + "io/ioutil" + "mime" + "net/http" + "os" + "path/filepath" + "sort" + "strings" + "sync" + "time" + + "github.com/klauspost/compress/gzip" + "github.com/valyala/bytebufferpool" +) + +// ServeFileBytesUncompressed returns HTTP response containing file contents +// from the given path. +// +// Directory contents is returned if path points to directory. +// +// ServeFileBytes may be used for saving network traffic when serving files +// with good compression ratio. +// +// See also RequestCtx.SendFileBytes. +func ServeFileBytesUncompressed(ctx *RequestCtx, path []byte) { + ServeFileUncompressed(ctx, b2s(path)) +} + +// ServeFileUncompressed returns HTTP response containing file contents +// from the given path. +// +// Directory contents is returned if path points to directory. +// +// ServeFile may be used for saving network traffic when serving files +// with good compression ratio. +// +// See also RequestCtx.SendFile. +func ServeFileUncompressed(ctx *RequestCtx, path string) { + ctx.Request.Header.DelBytes(strAcceptEncoding) + ServeFile(ctx, path) +} + +// ServeFileBytes returns HTTP response containing compressed file contents +// from the given path. +// +// HTTP response may contain uncompressed file contents in the following cases: +// +// * Missing 'Accept-Encoding: gzip' request header. +// * No write access to directory containing the file. +// +// Directory contents is returned if path points to directory. +// +// Use ServeFileBytesUncompressed is you don't need serving compressed +// file contents. +// +// See also RequestCtx.SendFileBytes. 
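[Editor's note: a hedged sketch of a static file server built on the FS type defined further down in this file, to make the serving helpers above concrete; the root directory and listen address are assumptions, not part of the patch.]

package main

import (
	"log"

	"github.com/valyala/fasthttp"
)

func main() {
	// Serve transparently compressed static files from ./public (assumed
	// directory), with generated directory index pages and byte ranges.
	fs := &fasthttp.FS{
		Root:               "./public",
		IndexNames:         []string{"index.html"},
		GenerateIndexPages: true,
		Compress:           true,
		AcceptByteRange:    true,
	}
	handler := fs.NewRequestHandler()

	// :8080 is an arbitrary example address.
	if err := fasthttp.ListenAndServe(":8080", handler); err != nil {
		log.Fatalf("error in ListenAndServe: %s", err)
	}
}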
+func ServeFileBytes(ctx *RequestCtx, path []byte) { + ServeFile(ctx, b2s(path)) +} + +// ServeFile returns HTTP response containing compressed file contents +// from the given path. +// +// HTTP response may contain uncompressed file contents in the following cases: +// +// * Missing 'Accept-Encoding: gzip' request header. +// * No write access to directory containing the file. +// +// Directory contents is returned if path points to directory. +// +// Use ServeFileUncompressed is you don't need serving compressed file contents. +// +// See also RequestCtx.SendFile. +func ServeFile(ctx *RequestCtx, path string) { + rootFSOnce.Do(func() { + rootFSHandler = rootFS.NewRequestHandler() + }) + if len(path) == 0 || path[0] != '/' { + // extend relative path to absolute path + var err error + if path, err = filepath.Abs(path); err != nil { + ctx.Logger().Printf("cannot resolve path %q to absolute file path: %s", path, err) + ctx.Error("Internal Server Error", StatusInternalServerError) + return + } + } + ctx.Request.SetRequestURI(path) + rootFSHandler(ctx) +} + +var ( + rootFSOnce sync.Once + rootFS = &FS{ + Root: "/", + GenerateIndexPages: true, + Compress: true, + AcceptByteRange: true, + } + rootFSHandler RequestHandler +) + +// PathRewriteFunc must return new request path based on arbitrary ctx +// info such as ctx.Path(). +// +// Path rewriter is used in FS for translating the current request +// to the local filesystem path relative to FS.Root. +// +// The returned path must not contain '/../' substrings due to security reasons, +// since such paths may refer files outside FS.Root. +// +// The returned path may refer to ctx members. For example, ctx.Path(). +type PathRewriteFunc func(ctx *RequestCtx) []byte + +// NewVHostPathRewriter returns path rewriter, which strips slashesCount +// leading slashes from the path and prepends the path with request's host, +// thus simplifying virtual hosting for static files. +// +// Examples: +// +// * host=foobar.com, slashesCount=0, original path="/foo/bar". +// Resulting path: "/foobar.com/foo/bar" +// +// * host=img.aaa.com, slashesCount=1, original path="/images/123/456.jpg" +// Resulting path: "/img.aaa.com/123/456.jpg" +// +func NewVHostPathRewriter(slashesCount int) PathRewriteFunc { + return func(ctx *RequestCtx) []byte { + path := stripLeadingSlashes(ctx.Path(), slashesCount) + host := ctx.Host() + if n := bytes.IndexByte(host, '/'); n >= 0 { + host = nil + } + if len(host) == 0 { + host = strInvalidHost + } + b := bytebufferpool.Get() + b.B = append(b.B, '/') + b.B = append(b.B, host...) + b.B = append(b.B, path...) + ctx.URI().SetPathBytes(b.B) + bytebufferpool.Put(b) + + return ctx.Path() + } +} + +var strInvalidHost = []byte("invalid-host") + +// NewPathSlashesStripper returns path rewriter, which strips slashesCount +// leading slashes from the path. +// +// Examples: +// +// * slashesCount = 0, original path: "/foo/bar", result: "/foo/bar" +// * slashesCount = 1, original path: "/foo/bar", result: "/bar" +// * slashesCount = 2, original path: "/foo/bar", result: "" +// +// The returned path rewriter may be used as FS.PathRewrite . +func NewPathSlashesStripper(slashesCount int) PathRewriteFunc { + return func(ctx *RequestCtx) []byte { + return stripLeadingSlashes(ctx.Path(), slashesCount) + } +} + +// NewPathPrefixStripper returns path rewriter, which removes prefixSize bytes +// from the path prefix. 
+// +// Examples: +// +// * prefixSize = 0, original path: "/foo/bar", result: "/foo/bar" +// * prefixSize = 3, original path: "/foo/bar", result: "o/bar" +// * prefixSize = 7, original path: "/foo/bar", result: "r" +// +// The returned path rewriter may be used as FS.PathRewrite . +func NewPathPrefixStripper(prefixSize int) PathRewriteFunc { + return func(ctx *RequestCtx) []byte { + path := ctx.Path() + if len(path) >= prefixSize { + path = path[prefixSize:] + } + return path + } +} + +// FS represents settings for request handler serving static files +// from the local filesystem. +// +// It is prohibited copying FS values. Create new values instead. +type FS struct { + noCopy noCopy //nolint:unused,structcheck + + // Path to the root directory to serve files from. + Root string + + // List of index file names to try opening during directory access. + // + // For example: + // + // * index.html + // * index.htm + // * my-super-index.xml + // + // By default the list is empty. + IndexNames []string + + // Index pages for directories without files matching IndexNames + // are automatically generated if set. + // + // Directory index generation may be quite slow for directories + // with many files (more than 1K), so it is discouraged enabling + // index pages' generation for such directories. + // + // By default index pages aren't generated. + GenerateIndexPages bool + + // Transparently compresses responses if set to true. + // + // The server tries minimizing CPU usage by caching compressed files. + // It adds CompressedFileSuffix suffix to the original file name and + // tries saving the resulting compressed file under the new file name. + // So it is advisable to give the server write access to Root + // and to all inner folders in order to minimize CPU usage when serving + // compressed responses. + // + // Transparent compression is disabled by default. + Compress bool + + // Enables byte range requests if set to true. + // + // Byte range requests are disabled by default. + AcceptByteRange bool + + // Path rewriting function. + // + // By default request path is not modified. + PathRewrite PathRewriteFunc + + // PathNotFound fires when file is not found in filesystem + // this functions tries to replace "Cannot open requested path" + // server response giving to the programmer the control of server flow. + // + // By default PathNotFound returns + // "Cannot open requested path" + PathNotFound RequestHandler + + // Expiration duration for inactive file handlers. + // + // FSHandlerCacheDuration is used by default. + CacheDuration time.Duration + + // Suffix to add to the name of cached compressed file. + // + // This value has sense only if Compress is set. + // + // FSCompressedFileSuffix is used by default. + CompressedFileSuffix string + + once sync.Once + h RequestHandler +} + +// FSCompressedFileSuffix is the suffix FS adds to the original file names +// when trying to store compressed file under the new file name. +// See FS.Compress for details. +const FSCompressedFileSuffix = ".fasthttp.gz" + +// FSHandlerCacheDuration is the default expiration duration for inactive +// file handlers opened by FS. +const FSHandlerCacheDuration = 10 * time.Second + +// FSHandler returns request handler serving static files from +// the given root folder. +// +// stripSlashes indicates how many leading slashes must be stripped +// from requested path before searching requested file in the root folder. 
+// Examples: +// +// * stripSlashes = 0, original path: "/foo/bar", result: "/foo/bar" +// * stripSlashes = 1, original path: "/foo/bar", result: "/bar" +// * stripSlashes = 2, original path: "/foo/bar", result: "" +// +// The returned request handler automatically generates index pages +// for directories without index.html. +// +// The returned handler caches requested file handles +// for FSHandlerCacheDuration. +// Make sure your program has enough 'max open files' limit aka +// 'ulimit -n' if root folder contains many files. +// +// Do not create multiple request handler instances for the same +// (root, stripSlashes) arguments - just reuse a single instance. +// Otherwise goroutine leak will occur. +func FSHandler(root string, stripSlashes int) RequestHandler { + fs := &FS{ + Root: root, + IndexNames: []string{"index.html"}, + GenerateIndexPages: true, + AcceptByteRange: true, + } + if stripSlashes > 0 { + fs.PathRewrite = NewPathSlashesStripper(stripSlashes) + } + return fs.NewRequestHandler() +} + +// NewRequestHandler returns new request handler with the given FS settings. +// +// The returned handler caches requested file handles +// for FS.CacheDuration. +// Make sure your program has enough 'max open files' limit aka +// 'ulimit -n' if FS.Root folder contains many files. +// +// Do not create multiple request handlers from a single FS instance - +// just reuse a single request handler. +func (fs *FS) NewRequestHandler() RequestHandler { + fs.once.Do(fs.initRequestHandler) + return fs.h +} + +func (fs *FS) initRequestHandler() { + root := fs.Root + + // serve files from the current working directory if root is empty + if len(root) == 0 { + root = "." + } + + // strip trailing slashes from the root path + for len(root) > 0 && root[len(root)-1] == '/' { + root = root[:len(root)-1] + } + + cacheDuration := fs.CacheDuration + if cacheDuration <= 0 { + cacheDuration = FSHandlerCacheDuration + } + compressedFileSuffix := fs.CompressedFileSuffix + if len(compressedFileSuffix) == 0 { + compressedFileSuffix = FSCompressedFileSuffix + } + + h := &fsHandler{ + root: root, + indexNames: fs.IndexNames, + pathRewrite: fs.PathRewrite, + generateIndexPages: fs.GenerateIndexPages, + compress: fs.Compress, + pathNotFound: fs.PathNotFound, + acceptByteRange: fs.AcceptByteRange, + cacheDuration: cacheDuration, + compressedFileSuffix: compressedFileSuffix, + cache: make(map[string]*fsFile), + compressedCache: make(map[string]*fsFile), + } + + go func() { + var pendingFiles []*fsFile + for { + time.Sleep(cacheDuration / 2) + pendingFiles = h.cleanCache(pendingFiles) + } + }() + + fs.h = h.handleRequest +} + +type fsHandler struct { + root string + indexNames []string + pathRewrite PathRewriteFunc + pathNotFound RequestHandler + generateIndexPages bool + compress bool + acceptByteRange bool + cacheDuration time.Duration + compressedFileSuffix string + + cache map[string]*fsFile + compressedCache map[string]*fsFile + cacheLock sync.Mutex + + smallFileReaderPool sync.Pool +} + +type fsFile struct { + h *fsHandler + f *os.File + dirIndex []byte + contentType string + contentLength int + compressed bool + + lastModified time.Time + lastModifiedStr []byte + + t time.Time + readersCount int + + bigFiles []*bigFileReader + bigFilesLock sync.Mutex +} + +func (ff *fsFile) NewReader() (io.Reader, error) { + if ff.isBig() { + r, err := ff.bigFileReader() + if err != nil { + ff.decReadersCount() + } + return r, err + } + return ff.smallFileReader(), nil +} + +func (ff *fsFile) smallFileReader() io.Reader { + v 
:= ff.h.smallFileReaderPool.Get() + if v == nil { + v = &fsSmallFileReader{} + } + r := v.(*fsSmallFileReader) + r.ff = ff + r.endPos = ff.contentLength + if r.startPos > 0 { + panic("BUG: fsSmallFileReader with non-nil startPos found in the pool") + } + return r +} + +// files bigger than this size are sent with sendfile +const maxSmallFileSize = 2 * 4096 + +func (ff *fsFile) isBig() bool { + return ff.contentLength > maxSmallFileSize && len(ff.dirIndex) == 0 +} + +func (ff *fsFile) bigFileReader() (io.Reader, error) { + if ff.f == nil { + panic("BUG: ff.f must be non-nil in bigFileReader") + } + + var r io.Reader + + ff.bigFilesLock.Lock() + n := len(ff.bigFiles) + if n > 0 { + r = ff.bigFiles[n-1] + ff.bigFiles = ff.bigFiles[:n-1] + } + ff.bigFilesLock.Unlock() + + if r != nil { + return r, nil + } + + f, err := os.Open(ff.f.Name()) + if err != nil { + return nil, fmt.Errorf("cannot open already opened file: %s", err) + } + return &bigFileReader{ + f: f, + ff: ff, + r: f, + }, nil +} + +func (ff *fsFile) Release() { + if ff.f != nil { + ff.f.Close() + + if ff.isBig() { + ff.bigFilesLock.Lock() + for _, r := range ff.bigFiles { + r.f.Close() + } + ff.bigFilesLock.Unlock() + } + } +} + +func (ff *fsFile) decReadersCount() { + ff.h.cacheLock.Lock() + ff.readersCount-- + if ff.readersCount < 0 { + panic("BUG: negative fsFile.readersCount!") + } + ff.h.cacheLock.Unlock() +} + +// bigFileReader attempts to trigger sendfile +// for sending big files over the wire. +type bigFileReader struct { + f *os.File + ff *fsFile + r io.Reader + lr io.LimitedReader +} + +func (r *bigFileReader) UpdateByteRange(startPos, endPos int) error { + if _, err := r.f.Seek(int64(startPos), 0); err != nil { + return err + } + r.r = &r.lr + r.lr.R = r.f + r.lr.N = int64(endPos - startPos + 1) + return nil +} + +func (r *bigFileReader) Read(p []byte) (int, error) { + return r.r.Read(p) +} + +func (r *bigFileReader) WriteTo(w io.Writer) (int64, error) { + if rf, ok := w.(io.ReaderFrom); ok { + // fast path. 
Senfile must be triggered + return rf.ReadFrom(r.r) + } + + // slow path + return copyZeroAlloc(w, r.r) +} + +func (r *bigFileReader) Close() error { + r.r = r.f + n, err := r.f.Seek(0, 0) + if err == nil { + if n != 0 { + panic("BUG: File.Seek(0,0) returned (non-zero, nil)") + } + + ff := r.ff + ff.bigFilesLock.Lock() + ff.bigFiles = append(ff.bigFiles, r) + ff.bigFilesLock.Unlock() + } else { + r.f.Close() + } + r.ff.decReadersCount() + return err +} + +type fsSmallFileReader struct { + ff *fsFile + startPos int + endPos int +} + +func (r *fsSmallFileReader) Close() error { + ff := r.ff + ff.decReadersCount() + r.ff = nil + r.startPos = 0 + r.endPos = 0 + ff.h.smallFileReaderPool.Put(r) + return nil +} + +func (r *fsSmallFileReader) UpdateByteRange(startPos, endPos int) error { + r.startPos = startPos + r.endPos = endPos + 1 + return nil +} + +func (r *fsSmallFileReader) Read(p []byte) (int, error) { + tailLen := r.endPos - r.startPos + if tailLen <= 0 { + return 0, io.EOF + } + if len(p) > tailLen { + p = p[:tailLen] + } + + ff := r.ff + if ff.f != nil { + n, err := ff.f.ReadAt(p, int64(r.startPos)) + r.startPos += n + return n, err + } + + n := copy(p, ff.dirIndex[r.startPos:]) + r.startPos += n + return n, nil +} + +func (r *fsSmallFileReader) WriteTo(w io.Writer) (int64, error) { + ff := r.ff + + var n int + var err error + if ff.f == nil { + n, err = w.Write(ff.dirIndex[r.startPos:r.endPos]) + return int64(n), err + } + + if rf, ok := w.(io.ReaderFrom); ok { + return rf.ReadFrom(r) + } + + curPos := r.startPos + bufv := copyBufPool.Get() + buf := bufv.([]byte) + for err == nil { + tailLen := r.endPos - curPos + if tailLen <= 0 { + break + } + if len(buf) > tailLen { + buf = buf[:tailLen] + } + n, err = ff.f.ReadAt(buf, int64(curPos)) + nw, errw := w.Write(buf[:n]) + curPos += nw + if errw == nil && nw != n { + panic("BUG: Write(p) returned (n, nil), where n != len(p)") + } + if err == nil { + err = errw + } + } + copyBufPool.Put(bufv) + + if err == io.EOF { + err = nil + } + return int64(curPos - r.startPos), err +} + +func (h *fsHandler) cleanCache(pendingFiles []*fsFile) []*fsFile { + var filesToRelease []*fsFile + + h.cacheLock.Lock() + + // Close files which couldn't be closed before due to non-zero + // readers count on the previous run. + var remainingFiles []*fsFile + for _, ff := range pendingFiles { + if ff.readersCount > 0 { + remainingFiles = append(remainingFiles, ff) + } else { + filesToRelease = append(filesToRelease, ff) + } + } + pendingFiles = remainingFiles + + pendingFiles, filesToRelease = cleanCacheNolock(h.cache, pendingFiles, filesToRelease, h.cacheDuration) + pendingFiles, filesToRelease = cleanCacheNolock(h.compressedCache, pendingFiles, filesToRelease, h.cacheDuration) + + h.cacheLock.Unlock() + + for _, ff := range filesToRelease { + ff.Release() + } + + return pendingFiles +} + +func cleanCacheNolock(cache map[string]*fsFile, pendingFiles, filesToRelease []*fsFile, cacheDuration time.Duration) ([]*fsFile, []*fsFile) { + t := time.Now() + for k, ff := range cache { + if t.Sub(ff.t) > cacheDuration { + if ff.readersCount > 0 { + // There are pending readers on stale file handle, + // so we cannot close it. Put it into pendingFiles + // so it will be closed later. 
+ pendingFiles = append(pendingFiles, ff) + } else { + filesToRelease = append(filesToRelease, ff) + } + delete(cache, k) + } + } + return pendingFiles, filesToRelease +} + +func (h *fsHandler) handleRequest(ctx *RequestCtx) { + var path []byte + if h.pathRewrite != nil { + path = h.pathRewrite(ctx) + } else { + path = ctx.Path() + } + path = stripTrailingSlashes(path) + + if n := bytes.IndexByte(path, 0); n >= 0 { + ctx.Logger().Printf("cannot serve path with nil byte at position %d: %q", n, path) + ctx.Error("Are you a hacker?", StatusBadRequest) + return + } + if h.pathRewrite != nil { + // There is no need to check for '/../' if path = ctx.Path(), + // since ctx.Path must normalize and sanitize the path. + + if n := bytes.Index(path, strSlashDotDotSlash); n >= 0 { + ctx.Logger().Printf("cannot serve path with '/../' at position %d due to security reasons: %q", n, path) + ctx.Error("Internal Server Error", StatusInternalServerError) + return + } + } + + mustCompress := false + fileCache := h.cache + byteRange := ctx.Request.Header.peek(strRange) + if len(byteRange) == 0 && h.compress && ctx.Request.Header.HasAcceptEncodingBytes(strGzip) { + mustCompress = true + fileCache = h.compressedCache + } + + h.cacheLock.Lock() + ff, ok := fileCache[string(path)] + if ok { + ff.readersCount++ + } + h.cacheLock.Unlock() + + if !ok { + pathStr := string(path) + filePath := h.root + pathStr + var err error + ff, err = h.openFSFile(filePath, mustCompress) + if mustCompress && err == errNoCreatePermission { + ctx.Logger().Printf("insufficient permissions for saving compressed file for %q. Serving uncompressed file. "+ + "Allow write access to the directory with this file in order to improve fasthttp performance", filePath) + mustCompress = false + ff, err = h.openFSFile(filePath, mustCompress) + } + if err == errDirIndexRequired { + ff, err = h.openIndexFile(ctx, filePath, mustCompress) + if err != nil { + ctx.Logger().Printf("cannot open dir index %q: %s", filePath, err) + ctx.Error("Directory index is forbidden", StatusForbidden) + return + } + } else if err != nil { + ctx.Logger().Printf("cannot open file %q: %s", filePath, err) + if h.pathNotFound == nil { + ctx.Error("Cannot open requested path", StatusNotFound) + } else { + ctx.SetStatusCode(StatusNotFound) + h.pathNotFound(ctx) + } + return + } + + h.cacheLock.Lock() + ff1, ok := fileCache[pathStr] + if !ok { + fileCache[pathStr] = ff + ff.readersCount++ + } else { + ff1.readersCount++ + } + h.cacheLock.Unlock() + + if ok { + // The file has been already opened by another + // goroutine, so close the current file and use + // the file opened by another goroutine instead. 
+ ff.Release() + ff = ff1 + } + } + + if !ctx.IfModifiedSince(ff.lastModified) { + ff.decReadersCount() + ctx.NotModified() + return + } + + r, err := ff.NewReader() + if err != nil { + ctx.Logger().Printf("cannot obtain file reader for path=%q: %s", path, err) + ctx.Error("Internal Server Error", StatusInternalServerError) + return + } + + hdr := &ctx.Response.Header + if ff.compressed { + hdr.SetCanonical(strContentEncoding, strGzip) + } + + statusCode := StatusOK + contentLength := ff.contentLength + if h.acceptByteRange { + hdr.SetCanonical(strAcceptRanges, strBytes) + if len(byteRange) > 0 { + startPos, endPos, err := ParseByteRange(byteRange, contentLength) + if err != nil { + r.(io.Closer).Close() + ctx.Logger().Printf("cannot parse byte range %q for path=%q: %s", byteRange, path, err) + ctx.Error("Range Not Satisfiable", StatusRequestedRangeNotSatisfiable) + return + } + + if err = r.(byteRangeUpdater).UpdateByteRange(startPos, endPos); err != nil { + r.(io.Closer).Close() + ctx.Logger().Printf("cannot seek byte range %q for path=%q: %s", byteRange, path, err) + ctx.Error("Internal Server Error", StatusInternalServerError) + return + } + + hdr.SetContentRange(startPos, endPos, contentLength) + contentLength = endPos - startPos + 1 + statusCode = StatusPartialContent + } + } + + hdr.SetCanonical(strLastModified, ff.lastModifiedStr) + if !ctx.IsHead() { + ctx.SetBodyStream(r, contentLength) + } else { + ctx.Response.ResetBody() + ctx.Response.SkipBody = true + ctx.Response.Header.SetContentLength(contentLength) + if rc, ok := r.(io.Closer); ok { + if err := rc.Close(); err != nil { + ctx.Logger().Printf("cannot close file reader: %s", err) + ctx.Error("Internal Server Error", StatusInternalServerError) + return + } + } + } + hdr.noDefaultContentType = true + if len(hdr.ContentType()) == 0 { + ctx.SetContentType(ff.contentType) + } + ctx.SetStatusCode(statusCode) +} + +type byteRangeUpdater interface { + UpdateByteRange(startPos, endPos int) error +} + +// ParseByteRange parses 'Range: bytes=...' header value. +// +// It follows https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35 . +func ParseByteRange(byteRange []byte, contentLength int) (startPos, endPos int, err error) { + b := byteRange + if !bytes.HasPrefix(b, strBytes) { + return 0, 0, fmt.Errorf("unsupported range units: %q. Expecting %q", byteRange, strBytes) + } + + b = b[len(strBytes):] + if len(b) == 0 || b[0] != '=' { + return 0, 0, fmt.Errorf("missing byte range in %q", byteRange) + } + b = b[1:] + + n := bytes.IndexByte(b, '-') + if n < 0 { + return 0, 0, fmt.Errorf("missing the end position of byte range in %q", byteRange) + } + + if n == 0 { + v, err := ParseUint(b[n+1:]) + if err != nil { + return 0, 0, err + } + startPos := contentLength - v + if startPos < 0 { + startPos = 0 + } + return startPos, contentLength - 1, nil + } + + if startPos, err = ParseUint(b[:n]); err != nil { + return 0, 0, err + } + if startPos >= contentLength { + return 0, 0, fmt.Errorf("the start position of byte range cannot exceed %d. byte range %q", contentLength-1, byteRange) + } + + b = b[n+1:] + if len(b) == 0 { + return startPos, contentLength - 1, nil + } + + if endPos, err = ParseUint(b); err != nil { + return 0, 0, err + } + if endPos >= contentLength { + endPos = contentLength - 1 + } + if endPos < startPos { + return 0, 0, fmt.Errorf("the start position of byte range cannot exceed the end position. 
byte range %q", byteRange)
+	}
+	return startPos, endPos, nil
+}
+
+func (h *fsHandler) openIndexFile(ctx *RequestCtx, dirPath string, mustCompress bool) (*fsFile, error) {
+	for _, indexName := range h.indexNames {
+		indexFilePath := dirPath + "/" + indexName
+		ff, err := h.openFSFile(indexFilePath, mustCompress)
+		if err == nil {
+			return ff, nil
+		}
+		if !os.IsNotExist(err) {
+			return nil, fmt.Errorf("cannot open file %q: %s", indexFilePath, err)
+		}
+	}
+
+	if !h.generateIndexPages {
+		return nil, fmt.Errorf("cannot access directory without index page. Directory %q", dirPath)
+	}
+
+	return h.createDirIndex(ctx.URI(), dirPath, mustCompress)
+}
+
+var (
+	errDirIndexRequired   = errors.New("directory index required")
+	errNoCreatePermission = errors.New("no 'create file' permissions")
+)
+
+func (h *fsHandler) createDirIndex(base *URI, dirPath string, mustCompress bool) (*fsFile, error) {
+	w := &bytebufferpool.ByteBuffer{}
+
+	basePathEscaped := html.EscapeString(string(base.Path()))
+	fmt.Fprintf(w, "<html><head><title>%s</title><style>.dir { font-weight: bold }</style></head><body>", basePathEscaped)
+	fmt.Fprintf(w, "<h1>%s</h1>", basePathEscaped)
+	fmt.Fprintf(w, "<ul>")
+
+	if len(basePathEscaped) > 1 {
+		var parentURI URI
+		base.CopyTo(&parentURI)
+		parentURI.Update(string(base.Path()) + "/..")
+		parentPathEscaped := html.EscapeString(string(parentURI.Path()))
+		fmt.Fprintf(w, `<li><a href="%s" class="dir">..</a></li>`, parentPathEscaped)
+	}
+
+	f, err := os.Open(dirPath)
+	if err != nil {
+		return nil, err
+	}
+
+	fileinfos, err := f.Readdir(0)
+	f.Close()
+	if err != nil {
+		return nil, err
+	}
+
+	fm := make(map[string]os.FileInfo, len(fileinfos))
+	filenames := make([]string, 0, len(fileinfos))
+	for _, fi := range fileinfos {
+		name := fi.Name()
+		if strings.HasSuffix(name, h.compressedFileSuffix) {
+			// Do not show compressed files on index page.
+			continue
+		}
+		fm[name] = fi
+		filenames = append(filenames, name)
+	}
+
+	var u URI
+	base.CopyTo(&u)
+	u.Update(string(u.Path()) + "/")
+
+	sort.Strings(filenames)
+	for _, name := range filenames {
+		u.Update(name)
+		pathEscaped := html.EscapeString(string(u.Path()))
+		fi := fm[name]
+		auxStr := "dir"
+		className := "dir"
+		if !fi.IsDir() {
+			auxStr = fmt.Sprintf("file, %d bytes", fi.Size())
+			className = "file"
+		}
+		fmt.Fprintf(w, `<li><a href="%s" class="%s">%s</a>, %s, last modified %s</li>`,
+			pathEscaped, className, html.EscapeString(name), auxStr, fsModTime(fi.ModTime()))
+	}
+
+	fmt.Fprintf(w, "</ul></body></html>
") + + if mustCompress { + var zbuf bytebufferpool.ByteBuffer + zbuf.B = AppendGzipBytesLevel(zbuf.B, w.B, CompressDefaultCompression) + w = &zbuf + } + + dirIndex := w.B + lastModified := time.Now() + ff := &fsFile{ + h: h, + dirIndex: dirIndex, + contentType: "text/html; charset=utf-8", + contentLength: len(dirIndex), + compressed: mustCompress, + lastModified: lastModified, + lastModifiedStr: AppendHTTPDate(nil, lastModified), + + t: lastModified, + } + return ff, nil +} + +const ( + fsMinCompressRatio = 0.8 + fsMaxCompressibleFileSize = 8 * 1024 * 1024 +) + +func (h *fsHandler) compressAndOpenFSFile(filePath string) (*fsFile, error) { + f, err := os.Open(filePath) + if err != nil { + return nil, err + } + + fileInfo, err := f.Stat() + if err != nil { + f.Close() + return nil, fmt.Errorf("cannot obtain info for file %q: %s", filePath, err) + } + + if fileInfo.IsDir() { + f.Close() + return nil, errDirIndexRequired + } + + if strings.HasSuffix(filePath, h.compressedFileSuffix) || + fileInfo.Size() > fsMaxCompressibleFileSize || + !isFileCompressible(f, fsMinCompressRatio) { + return h.newFSFile(f, fileInfo, false) + } + + compressedFilePath := filePath + h.compressedFileSuffix + absPath, err := filepath.Abs(compressedFilePath) + if err != nil { + f.Close() + return nil, fmt.Errorf("cannot determine absolute path for %q: %s", compressedFilePath, err) + } + + flock := getFileLock(absPath) + flock.Lock() + ff, err := h.compressFileNolock(f, fileInfo, filePath, compressedFilePath) + flock.Unlock() + + return ff, err +} + +func (h *fsHandler) compressFileNolock(f *os.File, fileInfo os.FileInfo, filePath, compressedFilePath string) (*fsFile, error) { + // Attempt to open compressed file created by another concurrent + // goroutine. + // It is safe opening such a file, since the file creation + // is guarded by file mutex - see getFileLock call. + if _, err := os.Stat(compressedFilePath); err == nil { + f.Close() + return h.newCompressedFSFile(compressedFilePath) + } + + // Create temporary file, so concurrent goroutines don't use + // it until it is created. 
+ tmpFilePath := compressedFilePath + ".tmp" + zf, err := os.Create(tmpFilePath) + if err != nil { + f.Close() + if !os.IsPermission(err) { + return nil, fmt.Errorf("cannot create temporary file %q: %s", tmpFilePath, err) + } + return nil, errNoCreatePermission + } + + zw := acquireStacklessGzipWriter(zf, CompressDefaultCompression) + _, err = copyZeroAlloc(zw, f) + if err1 := zw.Flush(); err == nil { + err = err1 + } + releaseStacklessGzipWriter(zw, CompressDefaultCompression) + zf.Close() + f.Close() + if err != nil { + return nil, fmt.Errorf("error when compressing file %q to %q: %s", filePath, tmpFilePath, err) + } + if err = os.Chtimes(tmpFilePath, time.Now(), fileInfo.ModTime()); err != nil { + return nil, fmt.Errorf("cannot change modification time to %s for tmp file %q: %s", + fileInfo.ModTime(), tmpFilePath, err) + } + if err = os.Rename(tmpFilePath, compressedFilePath); err != nil { + return nil, fmt.Errorf("cannot move compressed file from %q to %q: %s", tmpFilePath, compressedFilePath, err) + } + return h.newCompressedFSFile(compressedFilePath) +} + +func (h *fsHandler) newCompressedFSFile(filePath string) (*fsFile, error) { + f, err := os.Open(filePath) + if err != nil { + return nil, fmt.Errorf("cannot open compressed file %q: %s", filePath, err) + } + fileInfo, err := f.Stat() + if err != nil { + f.Close() + return nil, fmt.Errorf("cannot obtain info for compressed file %q: %s", filePath, err) + } + return h.newFSFile(f, fileInfo, true) +} + +func (h *fsHandler) openFSFile(filePath string, mustCompress bool) (*fsFile, error) { + filePathOriginal := filePath + if mustCompress { + filePath += h.compressedFileSuffix + } + + f, err := os.Open(filePath) + if err != nil { + if mustCompress && os.IsNotExist(err) { + return h.compressAndOpenFSFile(filePathOriginal) + } + return nil, err + } + + fileInfo, err := f.Stat() + if err != nil { + f.Close() + return nil, fmt.Errorf("cannot obtain info for file %q: %s", filePath, err) + } + + if fileInfo.IsDir() { + f.Close() + if mustCompress { + return nil, fmt.Errorf("directory with unexpected suffix found: %q. Suffix: %q", + filePath, h.compressedFileSuffix) + } + return nil, errDirIndexRequired + } + + if mustCompress { + fileInfoOriginal, err := os.Stat(filePathOriginal) + if err != nil { + f.Close() + return nil, fmt.Errorf("cannot obtain info for original file %q: %s", filePathOriginal, err) + } + + if fileInfoOriginal.ModTime() != fileInfo.ModTime() { + // The compressed file became stale. Re-create it. 
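+		// compressFileNolock copied the original file's ModTime onto the
+		// compressed file via os.Chtimes, so a mismatch here means the
+		// original was modified after it was compressed.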
+ f.Close() + os.Remove(filePath) + return h.compressAndOpenFSFile(filePathOriginal) + } + } + + return h.newFSFile(f, fileInfo, mustCompress) +} + +func (h *fsHandler) newFSFile(f *os.File, fileInfo os.FileInfo, compressed bool) (*fsFile, error) { + n := fileInfo.Size() + contentLength := int(n) + if n != int64(contentLength) { + f.Close() + return nil, fmt.Errorf("too big file: %d bytes", n) + } + + // detect content-type + ext := fileExtension(fileInfo.Name(), compressed, h.compressedFileSuffix) + contentType := mime.TypeByExtension(ext) + if len(contentType) == 0 { + data, err := readFileHeader(f, compressed) + if err != nil { + return nil, fmt.Errorf("cannot read header of the file %q: %s", f.Name(), err) + } + contentType = http.DetectContentType(data) + } + + lastModified := fileInfo.ModTime() + ff := &fsFile{ + h: h, + f: f, + contentType: contentType, + contentLength: contentLength, + compressed: compressed, + lastModified: lastModified, + lastModifiedStr: AppendHTTPDate(nil, lastModified), + + t: time.Now(), + } + return ff, nil +} + +func readFileHeader(f *os.File, compressed bool) ([]byte, error) { + r := io.Reader(f) + var zr *gzip.Reader + if compressed { + var err error + if zr, err = acquireGzipReader(f); err != nil { + return nil, err + } + r = zr + } + + lr := &io.LimitedReader{ + R: r, + N: 512, + } + data, err := ioutil.ReadAll(lr) + if _, err := f.Seek(0, 0); err != nil { + return nil, err + } + + if zr != nil { + releaseGzipReader(zr) + } + + return data, err +} + +func stripLeadingSlashes(path []byte, stripSlashes int) []byte { + for stripSlashes > 0 && len(path) > 0 { + if path[0] != '/' { + panic("BUG: path must start with slash") + } + n := bytes.IndexByte(path[1:], '/') + if n < 0 { + path = path[:0] + break + } + path = path[n+1:] + stripSlashes-- + } + return path +} + +func stripTrailingSlashes(path []byte) []byte { + for len(path) > 0 && path[len(path)-1] == '/' { + path = path[:len(path)-1] + } + return path +} + +func fileExtension(path string, compressed bool, compressedFileSuffix string) string { + if compressed && strings.HasSuffix(path, compressedFileSuffix) { + path = path[:len(path)-len(compressedFileSuffix)] + } + n := strings.LastIndexByte(path, '.') + if n < 0 { + return "" + } + return path[n:] +} + +// FileLastModified returns last modified time for the file. +func FileLastModified(path string) (time.Time, error) { + f, err := os.Open(path) + if err != nil { + return zeroTime, err + } + fileInfo, err := f.Stat() + f.Close() + if err != nil { + return zeroTime, err + } + return fsModTime(fileInfo.ModTime()), nil +} + +func fsModTime(t time.Time) time.Time { + return t.In(time.UTC).Truncate(time.Second) +} + +var ( + filesLockMap = make(map[string]*sync.Mutex) + filesLockMapLock sync.Mutex +) + +func getFileLock(absPath string) *sync.Mutex { + filesLockMapLock.Lock() + flock := filesLockMap[absPath] + if flock == nil { + flock = &sync.Mutex{} + filesLockMap[absPath] = flock + } + filesLockMapLock.Unlock() + return flock +} diff --git a/vendor/github.com/valyala/fasthttp/fuzzit.sh b/vendor/github.com/valyala/fasthttp/fuzzit.sh new file mode 100644 index 000000000..37f83db68 --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/fuzzit.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +set -xe + +## go-fuzz doesn't support modules for now, so ensure we do everything +## in the old style GOPATH way +export GO111MODULE="off" + +# We need to download these dependencies again after we set GO111MODULE="off" +go get -t -v ./... 
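+# Install the go-fuzz build tools and fetch the fuzzit CLI used below.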
+ +go get github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-build + +wget -q -O fuzzitbin https://github.com/fuzzitdev/fuzzit/releases/download/v2.4.52/fuzzit_Linux_x86_64 +chmod a+x fuzzitbin + +for w in request response cookie url; do + go-fuzz-build -libfuzzer -o fasthttp_$w.a ./fuzzit/$w/ + clang -fsanitize=fuzzer fasthttp_$w.a -o fasthttp_$w + + ./fuzzitbin create job --type $1 fasthttp/$w fasthttp_$w +done diff --git a/vendor/github.com/valyala/fasthttp/go.mod b/vendor/github.com/valyala/fasthttp/go.mod new file mode 100644 index 000000000..3e1c929b5 --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/go.mod @@ -0,0 +1,11 @@ +module github.com/valyala/fasthttp + +go 1.11 + +require ( + github.com/klauspost/compress v1.8.2 + github.com/klauspost/cpuid v1.2.1 // indirect + github.com/valyala/bytebufferpool v1.0.0 + github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a + golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 +) diff --git a/vendor/github.com/valyala/fasthttp/go.sum b/vendor/github.com/valyala/fasthttp/go.sum new file mode 100644 index 000000000..52407f3f4 --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/go.sum @@ -0,0 +1,13 @@ +github.com/klauspost/compress v1.8.2 h1:Bx0qjetmNjdFXASH02NSAREKpiaDwkO1DRZ3dV2KCcs= +github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/cpuid v1.2.1 h1:vJi+O/nMdFt0vqm8NZBI6wzALWdA2X+egi0ogNyrC/w= +github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a h1:0R4NLDRDZX6JcmhJgXi5E4b8Wg84ihbmUKp/GvSPEzc= +github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 h1:k7pJ2yAPLPgbskkFdhRCsA77k2fySZ1zf2zCjvQCiIM= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/vendor/github.com/valyala/fasthttp/header.go b/vendor/github.com/valyala/fasthttp/header.go new file mode 100644 index 000000000..b7a12ee21 --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/header.go @@ -0,0 +1,2342 @@ +package fasthttp + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "sync" + "sync/atomic" + "time" +) + +// ResponseHeader represents HTTP response header. +// +// It is forbidden copying ResponseHeader instances. +// Create new instances instead and use CopyTo. +// +// ResponseHeader instance MUST NOT be used from concurrently running +// goroutines. +type ResponseHeader struct { + noCopy noCopy //nolint:unused,structcheck + + disableNormalizing bool + noHTTP11 bool + connectionClose bool + noDefaultContentType bool + + statusCode int + contentLength int + contentLengthBytes []byte + + contentType []byte + server []byte + + h []argsKV + bufKV argsKV + + cookies []argsKV +} + +// RequestHeader represents HTTP request header. +// +// It is forbidden copying RequestHeader instances. 
+// Create new instances instead and use CopyTo. +// +// RequestHeader instance MUST NOT be used from concurrently running +// goroutines. +type RequestHeader struct { + noCopy noCopy //nolint:unused,structcheck + + disableNormalizing bool + noHTTP11 bool + connectionClose bool + + // These two fields have been moved close to other bool fields + // for reducing RequestHeader object size. + cookiesCollected bool + rawHeadersParsed bool + + contentLength int + contentLengthBytes []byte + + method []byte + requestURI []byte + host []byte + contentType []byte + userAgent []byte + + h []argsKV + bufKV argsKV + + cookies []argsKV + + rawHeaders []byte + + // stores an immutable copy of headers as they were received from the + // wire. + rawHeadersCopy []byte +} + +// SetContentRange sets 'Content-Range: bytes startPos-endPos/contentLength' +// header. +func (h *ResponseHeader) SetContentRange(startPos, endPos, contentLength int) { + b := h.bufKV.value[:0] + b = append(b, strBytes...) + b = append(b, ' ') + b = AppendUint(b, startPos) + b = append(b, '-') + b = AppendUint(b, endPos) + b = append(b, '/') + b = AppendUint(b, contentLength) + h.bufKV.value = b + + h.SetCanonical(strContentRange, h.bufKV.value) +} + +// SetByteRange sets 'Range: bytes=startPos-endPos' header. +// +// * If startPos is negative, then 'bytes=-startPos' value is set. +// * If endPos is negative, then 'bytes=startPos-' value is set. +func (h *RequestHeader) SetByteRange(startPos, endPos int) { + h.parseRawHeaders() + + b := h.bufKV.value[:0] + b = append(b, strBytes...) + b = append(b, '=') + if startPos >= 0 { + b = AppendUint(b, startPos) + } else { + endPos = -startPos + } + b = append(b, '-') + if endPos >= 0 { + b = AppendUint(b, endPos) + } + h.bufKV.value = b + + h.SetCanonical(strRange, h.bufKV.value) +} + +// StatusCode returns response status code. +func (h *ResponseHeader) StatusCode() int { + if h.statusCode == 0 { + return StatusOK + } + return h.statusCode +} + +// SetStatusCode sets response status code. +func (h *ResponseHeader) SetStatusCode(statusCode int) { + h.statusCode = statusCode +} + +// SetLastModified sets 'Last-Modified' header to the given value. +func (h *ResponseHeader) SetLastModified(t time.Time) { + h.bufKV.value = AppendHTTPDate(h.bufKV.value[:0], t) + h.SetCanonical(strLastModified, h.bufKV.value) +} + +// ConnectionClose returns true if 'Connection: close' header is set. +func (h *ResponseHeader) ConnectionClose() bool { + return h.connectionClose +} + +// SetConnectionClose sets 'Connection: close' header. +func (h *ResponseHeader) SetConnectionClose() { + h.connectionClose = true +} + +// ResetConnectionClose clears 'Connection: close' header if it exists. +func (h *ResponseHeader) ResetConnectionClose() { + if h.connectionClose { + h.connectionClose = false + h.h = delAllArgsBytes(h.h, strConnection) + } +} + +// ConnectionClose returns true if 'Connection: close' header is set. +func (h *RequestHeader) ConnectionClose() bool { + h.parseRawHeaders() + return h.connectionClose +} + +// SetConnectionClose sets 'Connection: close' header. +func (h *RequestHeader) SetConnectionClose() { + // h.parseRawHeaders() isn't called for performance reasons. + h.connectionClose = true +} + +// ResetConnectionClose clears 'Connection: close' header if it exists. 
+func (h *RequestHeader) ResetConnectionClose() { + h.parseRawHeaders() + if h.connectionClose { + h.connectionClose = false + h.h = delAllArgsBytes(h.h, strConnection) + } +} + +// ConnectionUpgrade returns true if 'Connection: Upgrade' header is set. +func (h *ResponseHeader) ConnectionUpgrade() bool { + return hasHeaderValue(h.Peek(HeaderConnection), strUpgrade) +} + +// ConnectionUpgrade returns true if 'Connection: Upgrade' header is set. +func (h *RequestHeader) ConnectionUpgrade() bool { + h.parseRawHeaders() + return hasHeaderValue(h.Peek(HeaderConnection), strUpgrade) +} + +// PeekCookie is able to returns cookie by a given key from response. +func (h *ResponseHeader) PeekCookie(key string) []byte { + return peekArgStr(h.cookies, key) +} + +// ContentLength returns Content-Length header value. +// +// It may be negative: +// -1 means Transfer-Encoding: chunked. +// -2 means Transfer-Encoding: identity. +func (h *ResponseHeader) ContentLength() int { + return h.contentLength +} + +// SetContentLength sets Content-Length header value. +// +// Content-Length may be negative: +// -1 means Transfer-Encoding: chunked. +// -2 means Transfer-Encoding: identity. +func (h *ResponseHeader) SetContentLength(contentLength int) { + if h.mustSkipContentLength() { + return + } + h.contentLength = contentLength + if contentLength >= 0 { + h.contentLengthBytes = AppendUint(h.contentLengthBytes[:0], contentLength) + h.h = delAllArgsBytes(h.h, strTransferEncoding) + } else { + h.contentLengthBytes = h.contentLengthBytes[:0] + value := strChunked + if contentLength == -2 { + h.SetConnectionClose() + value = strIdentity + } + h.h = setArgBytes(h.h, strTransferEncoding, value, argsHasValue) + } +} + +func (h *ResponseHeader) mustSkipContentLength() bool { + // From http/1.1 specs: + // All 1xx (informational), 204 (no content), and 304 (not modified) responses MUST NOT include a message-body + statusCode := h.StatusCode() + + // Fast path. + if statusCode < 100 || statusCode == StatusOK { + return false + } + + // Slow path. + return statusCode == StatusNotModified || statusCode == StatusNoContent || statusCode < 200 +} + +// ContentLength returns Content-Length header value. +// +// It may be negative: +// -1 means Transfer-Encoding: chunked. +func (h *RequestHeader) ContentLength() int { + return h.realContentLength() +} + +// realContentLength returns the actual Content-Length set in the request, +// including positive lengths for GET/HEAD requests. +func (h *RequestHeader) realContentLength() int { + h.parseRawHeaders() + return h.contentLength +} + +// SetContentLength sets Content-Length header value. +// +// Negative content-length sets 'Transfer-Encoding: chunked' header. +func (h *RequestHeader) SetContentLength(contentLength int) { + h.parseRawHeaders() + h.contentLength = contentLength + if contentLength >= 0 { + h.contentLengthBytes = AppendUint(h.contentLengthBytes[:0], contentLength) + h.h = delAllArgsBytes(h.h, strTransferEncoding) + } else { + h.contentLengthBytes = h.contentLengthBytes[:0] + h.h = setArgBytes(h.h, strTransferEncoding, strChunked, argsHasValue) + } +} + +func (h *ResponseHeader) isCompressibleContentType() bool { + contentType := h.ContentType() + return bytes.HasPrefix(contentType, strTextSlash) || + bytes.HasPrefix(contentType, strApplicationSlash) +} + +// ContentType returns Content-Type header value. 
+func (h *ResponseHeader) ContentType() []byte { + contentType := h.contentType + if !h.noDefaultContentType && len(h.contentType) == 0 { + contentType = defaultContentType + } + return contentType +} + +// SetContentType sets Content-Type header value. +func (h *ResponseHeader) SetContentType(contentType string) { + h.contentType = append(h.contentType[:0], contentType...) +} + +// SetContentTypeBytes sets Content-Type header value. +func (h *ResponseHeader) SetContentTypeBytes(contentType []byte) { + h.contentType = append(h.contentType[:0], contentType...) +} + +// Server returns Server header value. +func (h *ResponseHeader) Server() []byte { + return h.server +} + +// SetServer sets Server header value. +func (h *ResponseHeader) SetServer(server string) { + h.server = append(h.server[:0], server...) +} + +// SetServerBytes sets Server header value. +func (h *ResponseHeader) SetServerBytes(server []byte) { + h.server = append(h.server[:0], server...) +} + +// ContentType returns Content-Type header value. +func (h *RequestHeader) ContentType() []byte { + h.parseRawHeaders() + return h.contentType +} + +// SetContentType sets Content-Type header value. +func (h *RequestHeader) SetContentType(contentType string) { + h.parseRawHeaders() + h.contentType = append(h.contentType[:0], contentType...) +} + +// SetContentTypeBytes sets Content-Type header value. +func (h *RequestHeader) SetContentTypeBytes(contentType []byte) { + h.parseRawHeaders() + h.contentType = append(h.contentType[:0], contentType...) +} + +// SetMultipartFormBoundary sets the following Content-Type: +// 'multipart/form-data; boundary=...' +// where ... is substituted by the given boundary. +func (h *RequestHeader) SetMultipartFormBoundary(boundary string) { + h.parseRawHeaders() + + b := h.bufKV.value[:0] + b = append(b, strMultipartFormData...) + b = append(b, ';', ' ') + b = append(b, strBoundary...) + b = append(b, '=') + b = append(b, boundary...) + h.bufKV.value = b + + h.SetContentTypeBytes(h.bufKV.value) +} + +// SetMultipartFormBoundaryBytes sets the following Content-Type: +// 'multipart/form-data; boundary=...' +// where ... is substituted by the given boundary. +func (h *RequestHeader) SetMultipartFormBoundaryBytes(boundary []byte) { + h.parseRawHeaders() + + b := h.bufKV.value[:0] + b = append(b, strMultipartFormData...) + b = append(b, ';', ' ') + b = append(b, strBoundary...) + b = append(b, '=') + b = append(b, boundary...) + h.bufKV.value = b + + h.SetContentTypeBytes(h.bufKV.value) +} + +// MultipartFormBoundary returns boundary part +// from 'multipart/form-data; boundary=...' Content-Type. +func (h *RequestHeader) MultipartFormBoundary() []byte { + b := h.ContentType() + if !bytes.HasPrefix(b, strMultipartFormData) { + return nil + } + b = b[len(strMultipartFormData):] + if len(b) == 0 || b[0] != ';' { + return nil + } + + var n int + for len(b) > 0 { + n++ + for len(b) > n && b[n] == ' ' { + n++ + } + b = b[n:] + if !bytes.HasPrefix(b, strBoundary) { + if n = bytes.IndexByte(b, ';'); n < 0 { + return nil + } + continue + } + + b = b[len(strBoundary):] + if len(b) == 0 || b[0] != '=' { + return nil + } + b = b[1:] + if n = bytes.IndexByte(b, ';'); n >= 0 { + b = b[:n] + } + if len(b) > 1 && b[0] == '"' && b[len(b)-1] == '"' { + b = b[1 : len(b)-1] + } + return b + } + return nil +} + +// Host returns Host header value. +func (h *RequestHeader) Host() []byte { + if len(h.host) > 0 { + return h.host + } + if !h.rawHeadersParsed { + // fast path without employing full headers parsing. 
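+		// peekRawHeader scans the raw header bytes for the Host key only,
+		// without allocating or parsing the full header set.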
+ host := peekRawHeader(h.rawHeaders, strHost) + if len(host) > 0 { + h.host = append(h.host[:0], host...) + return h.host + } + } + + // slow path. + h.parseRawHeaders() + return h.host +} + +// SetHost sets Host header value. +func (h *RequestHeader) SetHost(host string) { + h.parseRawHeaders() + h.host = append(h.host[:0], host...) +} + +// SetHostBytes sets Host header value. +func (h *RequestHeader) SetHostBytes(host []byte) { + h.parseRawHeaders() + h.host = append(h.host[:0], host...) +} + +// UserAgent returns User-Agent header value. +func (h *RequestHeader) UserAgent() []byte { + h.parseRawHeaders() + return h.userAgent +} + +// SetUserAgent sets User-Agent header value. +func (h *RequestHeader) SetUserAgent(userAgent string) { + h.parseRawHeaders() + h.userAgent = append(h.userAgent[:0], userAgent...) +} + +// SetUserAgentBytes sets User-Agent header value. +func (h *RequestHeader) SetUserAgentBytes(userAgent []byte) { + h.parseRawHeaders() + h.userAgent = append(h.userAgent[:0], userAgent...) +} + +// Referer returns Referer header value. +func (h *RequestHeader) Referer() []byte { + return h.PeekBytes(strReferer) +} + +// SetReferer sets Referer header value. +func (h *RequestHeader) SetReferer(referer string) { + h.SetBytesK(strReferer, referer) +} + +// SetRefererBytes sets Referer header value. +func (h *RequestHeader) SetRefererBytes(referer []byte) { + h.SetCanonical(strReferer, referer) +} + +// Method returns HTTP request method. +func (h *RequestHeader) Method() []byte { + if len(h.method) == 0 { + return strGet + } + return h.method +} + +// SetMethod sets HTTP request method. +func (h *RequestHeader) SetMethod(method string) { + h.method = append(h.method[:0], method...) +} + +// SetMethodBytes sets HTTP request method. +func (h *RequestHeader) SetMethodBytes(method []byte) { + h.method = append(h.method[:0], method...) +} + +// RequestURI returns RequestURI from the first HTTP request line. +func (h *RequestHeader) RequestURI() []byte { + requestURI := h.requestURI + if len(requestURI) == 0 { + requestURI = strSlash + } + return requestURI +} + +// SetRequestURI sets RequestURI for the first HTTP request line. +// RequestURI must be properly encoded. +// Use URI.RequestURI for constructing proper RequestURI if unsure. +func (h *RequestHeader) SetRequestURI(requestURI string) { + h.requestURI = append(h.requestURI[:0], requestURI...) +} + +// SetRequestURIBytes sets RequestURI for the first HTTP request line. +// RequestURI must be properly encoded. +// Use URI.RequestURI for constructing proper RequestURI if unsure. +func (h *RequestHeader) SetRequestURIBytes(requestURI []byte) { + h.requestURI = append(h.requestURI[:0], requestURI...) +} + +// IsGet returns true if request method is GET. +func (h *RequestHeader) IsGet() bool { + return bytes.Equal(h.Method(), strGet) +} + +// IsPost returns true if request method is POST. +func (h *RequestHeader) IsPost() bool { + return bytes.Equal(h.Method(), strPost) +} + +// IsPut returns true if request method is PUT. +func (h *RequestHeader) IsPut() bool { + return bytes.Equal(h.Method(), strPut) +} + +// IsHead returns true if request method is HEAD. +func (h *RequestHeader) IsHead() bool { + return bytes.Equal(h.Method(), strHead) +} + +// IsDelete returns true if request method is DELETE. +func (h *RequestHeader) IsDelete() bool { + return bytes.Equal(h.Method(), strDelete) +} + +// IsConnect returns true if request method is CONNECT. 
+func (h *RequestHeader) IsConnect() bool { + return bytes.Equal(h.Method(), strConnect) +} + +// IsOptions returns true if request method is OPTIONS. +func (h *RequestHeader) IsOptions() bool { + return bytes.Equal(h.Method(), strOptions) +} + +// IsTrace returns true if request method is TRACE. +func (h *RequestHeader) IsTrace() bool { + return bytes.Equal(h.Method(), strTrace) +} + +// IsPatch returns true if request method is PATCH. +func (h *RequestHeader) IsPatch() bool { + return bytes.Equal(h.Method(), strPatch) +} + +// IsHTTP11 returns true if the request is HTTP/1.1. +func (h *RequestHeader) IsHTTP11() bool { + return !h.noHTTP11 +} + +// IsHTTP11 returns true if the response is HTTP/1.1. +func (h *ResponseHeader) IsHTTP11() bool { + return !h.noHTTP11 +} + +// HasAcceptEncoding returns true if the header contains +// the given Accept-Encoding value. +func (h *RequestHeader) HasAcceptEncoding(acceptEncoding string) bool { + h.bufKV.value = append(h.bufKV.value[:0], acceptEncoding...) + return h.HasAcceptEncodingBytes(h.bufKV.value) +} + +// HasAcceptEncodingBytes returns true if the header contains +// the given Accept-Encoding value. +func (h *RequestHeader) HasAcceptEncodingBytes(acceptEncoding []byte) bool { + ae := h.peek(strAcceptEncoding) + n := bytes.Index(ae, acceptEncoding) + if n < 0 { + return false + } + b := ae[n+len(acceptEncoding):] + if len(b) > 0 && b[0] != ',' { + return false + } + if n == 0 { + return true + } + return ae[n-1] == ' ' +} + +// Len returns the number of headers set, +// i.e. the number of times f is called in VisitAll. +func (h *ResponseHeader) Len() int { + n := 0 + h.VisitAll(func(k, v []byte) { n++ }) + return n +} + +// Len returns the number of headers set, +// i.e. the number of times f is called in VisitAll. +func (h *RequestHeader) Len() int { + n := 0 + h.VisitAll(func(k, v []byte) { n++ }) + return n +} + +// DisableNormalizing disables header names' normalization. +// +// By default all the header names are normalized by uppercasing +// the first letter and all the first letters following dashes, +// while lowercasing all the other letters. +// Examples: +// +// * CONNECTION -> Connection +// * conteNT-tYPE -> Content-Type +// * foo-bar-baz -> Foo-Bar-Baz +// +// Disable header names' normalization only if know what are you doing. +func (h *RequestHeader) DisableNormalizing() { + h.disableNormalizing = true +} + +// DisableNormalizing disables header names' normalization. +// +// By default all the header names are normalized by uppercasing +// the first letter and all the first letters following dashes, +// while lowercasing all the other letters. +// Examples: +// +// * CONNECTION -> Connection +// * conteNT-tYPE -> Content-Type +// * foo-bar-baz -> Foo-Bar-Baz +// +// Disable header names' normalization only if know what are you doing. +func (h *ResponseHeader) DisableNormalizing() { + h.disableNormalizing = true +} + +// Reset clears response header. +func (h *ResponseHeader) Reset() { + h.disableNormalizing = false + h.noDefaultContentType = false + h.resetSkipNormalize() +} + +func (h *ResponseHeader) resetSkipNormalize() { + h.noHTTP11 = false + h.connectionClose = false + + h.statusCode = 0 + h.contentLength = 0 + h.contentLengthBytes = h.contentLengthBytes[:0] + + h.contentType = h.contentType[:0] + h.server = h.server[:0] + + h.h = h.h[:0] + h.cookies = h.cookies[:0] +} + +// Reset clears request header. 
+func (h *RequestHeader) Reset() { + h.disableNormalizing = false + h.resetSkipNormalize() +} + +func (h *RequestHeader) resetSkipNormalize() { + h.noHTTP11 = false + h.connectionClose = false + + h.contentLength = 0 + h.contentLengthBytes = h.contentLengthBytes[:0] + + h.method = h.method[:0] + h.requestURI = h.requestURI[:0] + h.host = h.host[:0] + h.contentType = h.contentType[:0] + h.userAgent = h.userAgent[:0] + + h.h = h.h[:0] + h.cookies = h.cookies[:0] + h.cookiesCollected = false + + h.rawHeaders = h.rawHeaders[:0] + h.rawHeadersParsed = false +} + +// CopyTo copies all the headers to dst. +func (h *ResponseHeader) CopyTo(dst *ResponseHeader) { + dst.Reset() + + dst.disableNormalizing = h.disableNormalizing + dst.noHTTP11 = h.noHTTP11 + dst.connectionClose = h.connectionClose + dst.noDefaultContentType = h.noDefaultContentType + + dst.statusCode = h.statusCode + dst.contentLength = h.contentLength + dst.contentLengthBytes = append(dst.contentLengthBytes[:0], h.contentLengthBytes...) + dst.contentType = append(dst.contentType[:0], h.contentType...) + dst.server = append(dst.server[:0], h.server...) + dst.h = copyArgs(dst.h, h.h) + dst.cookies = copyArgs(dst.cookies, h.cookies) +} + +// CopyTo copies all the headers to dst. +func (h *RequestHeader) CopyTo(dst *RequestHeader) { + dst.Reset() + + dst.disableNormalizing = h.disableNormalizing + dst.noHTTP11 = h.noHTTP11 + dst.connectionClose = h.connectionClose + + dst.contentLength = h.contentLength + dst.contentLengthBytes = append(dst.contentLengthBytes[:0], h.contentLengthBytes...) + dst.method = append(dst.method[:0], h.method...) + dst.requestURI = append(dst.requestURI[:0], h.requestURI...) + dst.host = append(dst.host[:0], h.host...) + dst.contentType = append(dst.contentType[:0], h.contentType...) + dst.userAgent = append(dst.userAgent[:0], h.userAgent...) + dst.h = copyArgs(dst.h, h.h) + dst.cookies = copyArgs(dst.cookies, h.cookies) + dst.cookiesCollected = h.cookiesCollected + dst.rawHeaders = append(dst.rawHeaders[:0], h.rawHeaders...) + dst.rawHeadersParsed = h.rawHeadersParsed + dst.rawHeadersCopy = append(dst.rawHeadersCopy[:0], h.rawHeadersCopy...) +} + +// VisitAll calls f for each header. +// +// f must not retain references to key and/or value after returning. +// Copy key and/or value contents before returning if you need retaining them. +func (h *ResponseHeader) VisitAll(f func(key, value []byte)) { + if len(h.contentLengthBytes) > 0 { + f(strContentLength, h.contentLengthBytes) + } + contentType := h.ContentType() + if len(contentType) > 0 { + f(strContentType, contentType) + } + server := h.Server() + if len(server) > 0 { + f(strServer, server) + } + if len(h.cookies) > 0 { + visitArgs(h.cookies, func(k, v []byte) { + f(strSetCookie, v) + }) + } + visitArgs(h.h, f) + if h.ConnectionClose() { + f(strConnection, strClose) + } +} + +// VisitAllCookie calls f for each response cookie. +// +// Cookie name is passed in key and the whole Set-Cookie header value +// is passed in value on each f invocation. Value may be parsed +// with Cookie.ParseBytes(). +// +// f must not retain references to key and/or value after returning. +func (h *ResponseHeader) VisitAllCookie(f func(key, value []byte)) { + visitArgs(h.cookies, f) +} + +// VisitAllCookie calls f for each request cookie. +// +// f must not retain references to key and/or value after returning. 
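+//
+// A minimal usage sketch (the pairs variable is illustrative):
+//
+//	var pairs [][2]string
+//	h.VisitAllCookie(func(k, v []byte) {
+//		// Copy the bytes: k and v may be reused after the callback returns.
+//		pairs = append(pairs, [2]string{string(k), string(v)})
+//	})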
+func (h *RequestHeader) VisitAllCookie(f func(key, value []byte)) { + h.parseRawHeaders() + h.collectCookies() + visitArgs(h.cookies, f) +} + +// VisitAll calls f for each header. +// +// f must not retain references to key and/or value after returning. +// Copy key and/or value contents before returning if you need retaining them. +// +// To get the headers in order they were received use VisitAllInOrder. +func (h *RequestHeader) VisitAll(f func(key, value []byte)) { + h.parseRawHeaders() + host := h.Host() + if len(host) > 0 { + f(strHost, host) + } + if len(h.contentLengthBytes) > 0 { + f(strContentLength, h.contentLengthBytes) + } + contentType := h.ContentType() + if len(contentType) > 0 { + f(strContentType, contentType) + } + userAgent := h.UserAgent() + if len(userAgent) > 0 { + f(strUserAgent, userAgent) + } + + h.collectCookies() + if len(h.cookies) > 0 { + h.bufKV.value = appendRequestCookieBytes(h.bufKV.value[:0], h.cookies) + f(strCookie, h.bufKV.value) + } + visitArgs(h.h, f) + if h.ConnectionClose() { + f(strConnection, strClose) + } +} + +// VisitAllInOrder calls f for each header in the order they were received. +// +// f must not retain references to key and/or value after returning. +// Copy key and/or value contents before returning if you need retaining them. +// +// This function is slightly slower than VisitAll because it has to reparse the +// raw headers to get the order. +func (h *RequestHeader) VisitAllInOrder(f func(key, value []byte)) { + h.parseRawHeaders() + var s headerScanner + s.b = h.rawHeaders + s.disableNormalizing = h.disableNormalizing + for s.next() { + if len(s.key) > 0 { + f(s.key, s.value) + } + } +} + +// Del deletes header with the given key. +func (h *ResponseHeader) Del(key string) { + k := getHeaderKeyBytes(&h.bufKV, key, h.disableNormalizing) + h.del(k) +} + +// DelBytes deletes header with the given key. +func (h *ResponseHeader) DelBytes(key []byte) { + h.bufKV.key = append(h.bufKV.key[:0], key...) + normalizeHeaderKey(h.bufKV.key, h.disableNormalizing) + h.del(h.bufKV.key) +} + +func (h *ResponseHeader) del(key []byte) { + switch string(key) { + case HeaderContentType: + h.contentType = h.contentType[:0] + case HeaderServer: + h.server = h.server[:0] + case HeaderSetCookie: + h.cookies = h.cookies[:0] + case HeaderContentLength: + h.contentLength = 0 + h.contentLengthBytes = h.contentLengthBytes[:0] + case HeaderConnection: + h.connectionClose = false + } + h.h = delAllArgsBytes(h.h, key) +} + +// Del deletes header with the given key. +func (h *RequestHeader) Del(key string) { + h.parseRawHeaders() + k := getHeaderKeyBytes(&h.bufKV, key, h.disableNormalizing) + h.del(k) +} + +// DelBytes deletes header with the given key. +func (h *RequestHeader) DelBytes(key []byte) { + h.parseRawHeaders() + h.bufKV.key = append(h.bufKV.key[:0], key...) + normalizeHeaderKey(h.bufKV.key, h.disableNormalizing) + h.del(h.bufKV.key) +} + +func (h *RequestHeader) del(key []byte) { + switch string(key) { + case HeaderHost: + h.host = h.host[:0] + case HeaderContentType: + h.contentType = h.contentType[:0] + case HeaderUserAgent: + h.userAgent = h.userAgent[:0] + case HeaderCookie: + h.cookies = h.cookies[:0] + case HeaderContentLength: + h.contentLength = 0 + h.contentLengthBytes = h.contentLengthBytes[:0] + case HeaderConnection: + h.connectionClose = false + } + h.h = delAllArgsBytes(h.h, key) +} + +// Add adds the given 'key: value' header. +// +// Multiple headers with the same key may be added with this function. 
+// Use Set for setting a single header for the given key.
+func (h *ResponseHeader) Add(key, value string) {
+	k := getHeaderKeyBytes(&h.bufKV, key, h.disableNormalizing)
+	h.h = appendArg(h.h, b2s(k), value, argsHasValue)
+}
+
+// AddBytesK adds the given 'key: value' header.
+//
+// Multiple headers with the same key may be added with this function.
+// Use SetBytesK for setting a single header for the given key.
+func (h *ResponseHeader) AddBytesK(key []byte, value string) {
+	h.Add(b2s(key), value)
+}
+
+// AddBytesV adds the given 'key: value' header.
+//
+// Multiple headers with the same key may be added with this function.
+// Use SetBytesV for setting a single header for the given key.
+func (h *ResponseHeader) AddBytesV(key string, value []byte) {
+	h.Add(key, b2s(value))
+}
+
+// AddBytesKV adds the given 'key: value' header.
+//
+// Multiple headers with the same key may be added with this function.
+// Use SetBytesKV for setting a single header for the given key.
+func (h *ResponseHeader) AddBytesKV(key, value []byte) {
+	h.Add(b2s(key), b2s(value))
+}
+
+// Set sets the given 'key: value' header.
+//
+// Use Add for setting multiple header values under the same key.
+func (h *ResponseHeader) Set(key, value string) {
+	initHeaderKV(&h.bufKV, key, value, h.disableNormalizing)
+	h.SetCanonical(h.bufKV.key, h.bufKV.value)
+}
+
+// SetBytesK sets the given 'key: value' header.
+//
+// Use AddBytesK for setting multiple header values under the same key.
+func (h *ResponseHeader) SetBytesK(key []byte, value string) {
+	h.bufKV.value = append(h.bufKV.value[:0], value...)
+	h.SetBytesKV(key, h.bufKV.value)
+}
+
+// SetBytesV sets the given 'key: value' header.
+//
+// Use AddBytesV for setting multiple header values under the same key.
+func (h *ResponseHeader) SetBytesV(key string, value []byte) {
+	k := getHeaderKeyBytes(&h.bufKV, key, h.disableNormalizing)
+	h.SetCanonical(k, value)
+}
+
+// SetBytesKV sets the given 'key: value' header.
+//
+// Use AddBytesKV for setting multiple header values under the same key.
+func (h *ResponseHeader) SetBytesKV(key, value []byte) {
+	h.bufKV.key = append(h.bufKV.key[:0], key...)
+	normalizeHeaderKey(h.bufKV.key, h.disableNormalizing)
+	h.SetCanonical(h.bufKV.key, value)
+}
+
+// SetCanonical sets the given 'key: value' header assuming that
+// key is in canonical form.
+func (h *ResponseHeader) SetCanonical(key, value []byte) {
+	switch string(key) {
+	case HeaderContentType:
+		h.SetContentTypeBytes(value)
+	case HeaderServer:
+		h.SetServerBytes(value)
+	case HeaderSetCookie:
+		var kv *argsKV
+		h.cookies, kv = allocArg(h.cookies)
+		kv.key = getCookieKey(kv.key, value)
+		kv.value = append(kv.value[:0], value...)
+	case HeaderContentLength:
+		if contentLength, err := parseContentLength(value); err == nil {
+			h.contentLength = contentLength
+			h.contentLengthBytes = append(h.contentLengthBytes[:0], value...)
+		}
+	case HeaderConnection:
+		if bytes.Equal(strClose, value) {
+			h.SetConnectionClose()
+		} else {
+			h.ResetConnectionClose()
+			h.h = setArgBytes(h.h, key, value, argsHasValue)
+		}
+	case HeaderTransferEncoding:
+		// Transfer-Encoding is managed automatically.
+	case HeaderDate:
+		// Date is managed automatically.
+	default:
+		h.h = setArgBytes(h.h, key, value, argsHasValue)
+	}
+}
+
+// SetCookie sets the given response cookie.
+//
+// It is safe re-using the cookie after the function returns.
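+//
+// An illustrative sketch; the cookie name and value are arbitrary examples:
+//
+//	c := AcquireCookie()
+//	c.SetKey("session")
+//	c.SetValue("xyz")
+//	h.SetCookie(c)
+//	ReleaseCookie(c)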
+func (h *ResponseHeader) SetCookie(cookie *Cookie) { + h.cookies = setArgBytes(h.cookies, cookie.Key(), cookie.Cookie(), argsHasValue) +} + +// SetCookie sets 'key: value' cookies. +func (h *RequestHeader) SetCookie(key, value string) { + h.parseRawHeaders() + h.collectCookies() + h.cookies = setArg(h.cookies, key, value, argsHasValue) +} + +// SetCookieBytesK sets 'key: value' cookies. +func (h *RequestHeader) SetCookieBytesK(key []byte, value string) { + h.SetCookie(b2s(key), value) +} + +// SetCookieBytesKV sets 'key: value' cookies. +func (h *RequestHeader) SetCookieBytesKV(key, value []byte) { + h.SetCookie(b2s(key), b2s(value)) +} + +// DelClientCookie instructs the client to remove the given cookie. +// +// Use DelCookie if you want just removing the cookie from response header. +func (h *ResponseHeader) DelClientCookie(key string) { + h.DelCookie(key) + + c := AcquireCookie() + c.SetKey(key) + c.SetExpire(CookieExpireDelete) + h.SetCookie(c) + ReleaseCookie(c) +} + +// DelClientCookieBytes instructs the client to remove the given cookie. +// +// Use DelCookieBytes if you want just removing the cookie from response header. +func (h *ResponseHeader) DelClientCookieBytes(key []byte) { + h.DelClientCookie(b2s(key)) +} + +// DelCookie removes cookie under the given key from response header. +// +// Note that DelCookie doesn't remove the cookie from the client. +// Use DelClientCookie instead. +func (h *ResponseHeader) DelCookie(key string) { + h.cookies = delAllArgs(h.cookies, key) +} + +// DelCookieBytes removes cookie under the given key from response header. +// +// Note that DelCookieBytes doesn't remove the cookie from the client. +// Use DelClientCookieBytes instead. +func (h *ResponseHeader) DelCookieBytes(key []byte) { + h.DelCookie(b2s(key)) +} + +// DelCookie removes cookie under the given key. +func (h *RequestHeader) DelCookie(key string) { + h.parseRawHeaders() + h.collectCookies() + h.cookies = delAllArgs(h.cookies, key) +} + +// DelCookieBytes removes cookie under the given key. +func (h *RequestHeader) DelCookieBytes(key []byte) { + h.DelCookie(b2s(key)) +} + +// DelAllCookies removes all the cookies from response headers. +func (h *ResponseHeader) DelAllCookies() { + h.cookies = h.cookies[:0] +} + +// DelAllCookies removes all the cookies from request headers. +func (h *RequestHeader) DelAllCookies() { + h.parseRawHeaders() + h.collectCookies() + h.cookies = h.cookies[:0] +} + +// Add adds the given 'key: value' header. +// +// Multiple headers with the same key may be added with this function. +// Use Set for setting a single header for the given key. +func (h *RequestHeader) Add(key, value string) { + k := getHeaderKeyBytes(&h.bufKV, key, h.disableNormalizing) + h.h = appendArg(h.h, b2s(k), value, argsHasValue) +} + +// AddBytesK adds the given 'key: value' header. +// +// Multiple headers with the same key may be added with this function. +// Use SetBytesK for setting a single header for the given key. +func (h *RequestHeader) AddBytesK(key []byte, value string) { + h.Add(b2s(key), value) +} + +// AddBytesV adds the given 'key: value' header. +// +// Multiple headers with the same key may be added with this function. +// Use SetBytesV for setting a single header for the given key. +func (h *RequestHeader) AddBytesV(key string, value []byte) { + h.Add(key, b2s(value)) +} + +// AddBytesKV adds the given 'key: value' header. +// +// Multiple headers with the same key may be added with this function. +// Use SetBytesKV for setting a single header for the given key. 
+func (h *RequestHeader) AddBytesKV(key, value []byte) { + h.Add(b2s(key), b2s(value)) +} + +// Set sets the given 'key: value' header. +// +// Use Add for setting multiple header values under the same key. +func (h *RequestHeader) Set(key, value string) { + initHeaderKV(&h.bufKV, key, value, h.disableNormalizing) + h.SetCanonical(h.bufKV.key, h.bufKV.value) +} + +// SetBytesK sets the given 'key: value' header. +// +// Use AddBytesK for setting multiple header values under the same key. +func (h *RequestHeader) SetBytesK(key []byte, value string) { + h.bufKV.value = append(h.bufKV.value[:0], value...) + h.SetBytesKV(key, h.bufKV.value) +} + +// SetBytesV sets the given 'key: value' header. +// +// Use AddBytesV for setting multiple header values under the same key. +func (h *RequestHeader) SetBytesV(key string, value []byte) { + k := getHeaderKeyBytes(&h.bufKV, key, h.disableNormalizing) + h.SetCanonical(k, value) +} + +// SetBytesKV sets the given 'key: value' header. +// +// Use AddBytesKV for setting multiple header values under the same key. +func (h *RequestHeader) SetBytesKV(key, value []byte) { + h.bufKV.key = append(h.bufKV.key[:0], key...) + normalizeHeaderKey(h.bufKV.key, h.disableNormalizing) + h.SetCanonical(h.bufKV.key, value) +} + +// SetCanonical sets the given 'key: value' header assuming that +// key is in canonical form. +func (h *RequestHeader) SetCanonical(key, value []byte) { + h.parseRawHeaders() + switch string(key) { + case HeaderHost: + h.SetHostBytes(value) + case HeaderContentType: + h.SetContentTypeBytes(value) + case HeaderUserAgent: + h.SetUserAgentBytes(value) + case HeaderCookie: + h.collectCookies() + h.cookies = parseRequestCookies(h.cookies, value) + case HeaderContentLength: + if contentLength, err := parseContentLength(value); err == nil { + h.contentLength = contentLength + h.contentLengthBytes = append(h.contentLengthBytes[:0], value...) + } + case HeaderConnection: + if bytes.Equal(strClose, value) { + h.SetConnectionClose() + } else { + h.ResetConnectionClose() + h.h = setArgBytes(h.h, key, value, argsHasValue) + } + case HeaderTransferEncoding: + // Transfer-Encoding is managed automatically. + default: + h.h = setArgBytes(h.h, key, value, argsHasValue) + } +} + +// Peek returns header value for the given key. +// +// Returned value is valid until the next call to ResponseHeader. +// Do not store references to returned value. Make copies instead. +func (h *ResponseHeader) Peek(key string) []byte { + k := getHeaderKeyBytes(&h.bufKV, key, h.disableNormalizing) + return h.peek(k) +} + +// PeekBytes returns header value for the given key. +// +// Returned value is valid until the next call to ResponseHeader. +// Do not store references to returned value. Make copies instead. +func (h *ResponseHeader) PeekBytes(key []byte) []byte { + h.bufKV.key = append(h.bufKV.key[:0], key...) + normalizeHeaderKey(h.bufKV.key, h.disableNormalizing) + return h.peek(h.bufKV.key) +} + +// Peek returns header value for the given key. +// +// Returned value is valid until the next call to RequestHeader. +// Do not store references to returned value. Make copies instead. +func (h *RequestHeader) Peek(key string) []byte { + k := getHeaderKeyBytes(&h.bufKV, key, h.disableNormalizing) + return h.peek(k) +} + +// PeekBytes returns header value for the given key. +// +// Returned value is valid until the next call to RequestHeader. +// Do not store references to returned value. Make copies instead. 
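+//
+// An illustrative sketch; the header name is an arbitrary example and the
+// value is copied before it is stored anywhere:
+//
+//	v := h.PeekBytes([]byte("X-Request-Id"))
+//	id := append([]byte(nil), v...)
+//	_ = id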
+func (h *RequestHeader) PeekBytes(key []byte) []byte { + h.bufKV.key = append(h.bufKV.key[:0], key...) + normalizeHeaderKey(h.bufKV.key, h.disableNormalizing) + return h.peek(h.bufKV.key) +} + +func (h *ResponseHeader) peek(key []byte) []byte { + switch string(key) { + case HeaderContentType: + return h.ContentType() + case HeaderServer: + return h.Server() + case HeaderConnection: + if h.ConnectionClose() { + return strClose + } + return peekArgBytes(h.h, key) + case HeaderContentLength: + return h.contentLengthBytes + case HeaderSetCookie: + return appendResponseCookieBytes(nil, h.cookies) + default: + return peekArgBytes(h.h, key) + } +} + +func (h *RequestHeader) peek(key []byte) []byte { + h.parseRawHeaders() + switch string(key) { + case HeaderHost: + return h.Host() + case HeaderContentType: + return h.ContentType() + case HeaderUserAgent: + return h.UserAgent() + case HeaderConnection: + if h.ConnectionClose() { + return strClose + } + return peekArgBytes(h.h, key) + case HeaderContentLength: + return h.contentLengthBytes + case HeaderCookie: + if h.cookiesCollected { + return appendRequestCookieBytes(nil, h.cookies) + } + return peekArgBytes(h.h, key) + default: + return peekArgBytes(h.h, key) + } +} + +// Cookie returns cookie for the given key. +func (h *RequestHeader) Cookie(key string) []byte { + h.parseRawHeaders() + h.collectCookies() + return peekArgStr(h.cookies, key) +} + +// CookieBytes returns cookie for the given key. +func (h *RequestHeader) CookieBytes(key []byte) []byte { + h.parseRawHeaders() + h.collectCookies() + return peekArgBytes(h.cookies, key) +} + +// Cookie fills cookie for the given cookie.Key. +// +// Returns false if cookie with the given cookie.Key is missing. +func (h *ResponseHeader) Cookie(cookie *Cookie) bool { + v := peekArgBytes(h.cookies, cookie.Key()) + if v == nil { + return false + } + cookie.ParseBytes(v) //nolint:errcheck + return true +} + +// Read reads response header from r. +// +// io.EOF is returned if r is closed before reading the first header byte. +func (h *ResponseHeader) Read(r *bufio.Reader) error { + n := 1 + for { + err := h.tryRead(r, n) + if err == nil { + return nil + } + if err != errNeedMore { + h.resetSkipNormalize() + return err + } + n = r.Buffered() + 1 + } +} + +func (h *ResponseHeader) tryRead(r *bufio.Reader, n int) error { + h.resetSkipNormalize() + b, err := r.Peek(n) + if len(b) == 0 { + // Return ErrTimeout on any timeout. + if x, ok := err.(interface{ Timeout() bool }); ok && x.Timeout() { + return ErrTimeout + } + // treat all other errors on the first byte read as EOF + if n == 1 || err == io.EOF { + return io.EOF + } + + // This is for go 1.6 bug. See https://github.com/golang/go/issues/14121 . + if err == bufio.ErrBufferFull { + return &ErrSmallBuffer{ + error: fmt.Errorf("error when reading response headers: %s", errSmallBuffer), + } + } + + return fmt.Errorf("error when reading response headers: %s", err) + } + b = mustPeekBuffered(r) + headersLen, errParse := h.parse(b) + if errParse != nil { + return headerError("response", err, errParse, b) + } + mustDiscard(r, headersLen) + return nil +} + +func headerError(typ string, err, errParse error, b []byte) error { + if errParse != errNeedMore { + return headerErrorMsg(typ, errParse, b) + } + if err == nil { + return errNeedMore + } + + // Buggy servers may leave trailing CRLFs after http body. + // Treat this case as EOF. 
+ if isOnlyCRLF(b) { + return io.EOF + } + + if err != bufio.ErrBufferFull { + return headerErrorMsg(typ, err, b) + } + return &ErrSmallBuffer{ + error: headerErrorMsg(typ, errSmallBuffer, b), + } +} + +func headerErrorMsg(typ string, err error, b []byte) error { + return fmt.Errorf("error when reading %s headers: %s. Buffer size=%d, contents: %s", typ, err, len(b), bufferSnippet(b)) +} + +// Read reads request header from r. +// +// io.EOF is returned if r is closed before reading the first header byte. +func (h *RequestHeader) Read(r *bufio.Reader) error { + n := 1 + for { + err := h.tryRead(r, n) + if err == nil { + return nil + } + if err != errNeedMore { + h.resetSkipNormalize() + return err + } + n = r.Buffered() + 1 + } +} + +func (h *RequestHeader) tryRead(r *bufio.Reader, n int) error { + h.resetSkipNormalize() + b, err := r.Peek(n) + if len(b) == 0 { + if err == io.EOF { + return err + } + + if err == nil { + panic("bufio.Reader.Peek() returned nil, nil") + } + + // This is for go 1.6 bug. See https://github.com/golang/go/issues/14121 . + if err == bufio.ErrBufferFull { + return &ErrSmallBuffer{ + error: fmt.Errorf("error when reading request headers: %s", errSmallBuffer), + } + } + + // n == 1 on the first read for the request. + if n == 1 { + // We didn't read a single byte. + return errNothingRead{err} + } + + return fmt.Errorf("error when reading request headers: %s", err) + } + b = mustPeekBuffered(r) + headersLen, errParse := h.parse(b) + if errParse != nil { + return headerError("request", err, errParse, b) + } + mustDiscard(r, headersLen) + return nil +} + +func bufferSnippet(b []byte) string { + n := len(b) + start := 200 + end := n - start + if start >= end { + start = n + end = n + } + bStart, bEnd := b[:start], b[end:] + if len(bEnd) == 0 { + return fmt.Sprintf("%q", b) + } + return fmt.Sprintf("%q...%q", bStart, bEnd) +} + +func isOnlyCRLF(b []byte) bool { + for _, ch := range b { + if ch != '\r' && ch != '\n' { + return false + } + } + return true +} + +func updateServerDate() { + refreshServerDate() + go func() { + for { + time.Sleep(time.Second) + refreshServerDate() + } + }() +} + +var ( + serverDate atomic.Value + serverDateOnce sync.Once // serverDateOnce.Do(updateServerDate) +) + +func refreshServerDate() { + b := AppendHTTPDate(nil, time.Now()) + serverDate.Store(b) +} + +// Write writes response header to w. +func (h *ResponseHeader) Write(w *bufio.Writer) error { + _, err := w.Write(h.Header()) + return err +} + +// WriteTo writes response header to w. +// +// WriteTo implements io.WriterTo interface. +func (h *ResponseHeader) WriteTo(w io.Writer) (int64, error) { + n, err := w.Write(h.Header()) + return int64(n), err +} + +// Header returns response header representation. +// +// The returned value is valid until the next call to ResponseHeader methods. +func (h *ResponseHeader) Header() []byte { + h.bufKV.value = h.AppendBytes(h.bufKV.value[:0]) + return h.bufKV.value +} + +// String returns response header representation. +func (h *ResponseHeader) String() string { + return string(h.Header()) +} + +// AppendBytes appends response header representation to dst and returns +// the extended dst. +func (h *ResponseHeader) AppendBytes(dst []byte) []byte { + statusCode := h.StatusCode() + if statusCode < 0 { + statusCode = StatusOK + } + dst = append(dst, statusLine(statusCode)...) 
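+	// The well-known headers (Server, Date, Content-Type and Content-Length)
+	// are written first; user-set headers from h.h, the Set-Cookie headers
+	// and the optional 'Connection: close' header follow below.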
+ + server := h.Server() + if len(server) != 0 { + dst = appendHeaderLine(dst, strServer, server) + } + + serverDateOnce.Do(updateServerDate) + dst = appendHeaderLine(dst, strDate, serverDate.Load().([]byte)) + + // Append Content-Type only for non-zero responses + // or if it is explicitly set. + // See https://github.com/valyala/fasthttp/issues/28 . + if h.ContentLength() != 0 || len(h.contentType) > 0 { + contentType := h.ContentType() + if len(contentType) > 0 { + dst = appendHeaderLine(dst, strContentType, contentType) + } + } + + if len(h.contentLengthBytes) > 0 { + dst = appendHeaderLine(dst, strContentLength, h.contentLengthBytes) + } + + for i, n := 0, len(h.h); i < n; i++ { + kv := &h.h[i] + if !bytes.Equal(kv.key, strDate) { + dst = appendHeaderLine(dst, kv.key, kv.value) + } + } + + n := len(h.cookies) + if n > 0 { + for i := 0; i < n; i++ { + kv := &h.cookies[i] + dst = appendHeaderLine(dst, strSetCookie, kv.value) + } + } + + if h.ConnectionClose() { + dst = appendHeaderLine(dst, strConnection, strClose) + } + + return append(dst, strCRLF...) +} + +// Write writes request header to w. +func (h *RequestHeader) Write(w *bufio.Writer) error { + _, err := w.Write(h.Header()) + return err +} + +// WriteTo writes request header to w. +// +// WriteTo implements io.WriterTo interface. +func (h *RequestHeader) WriteTo(w io.Writer) (int64, error) { + n, err := w.Write(h.Header()) + return int64(n), err +} + +// Header returns request header representation. +// +// The returned representation is valid until the next call to RequestHeader methods. +func (h *RequestHeader) Header() []byte { + h.bufKV.value = h.AppendBytes(h.bufKV.value[:0]) + return h.bufKV.value +} + +// RawHeaders returns raw header key/value bytes. +// +// Depending on server configuration, header keys may be normalized to +// capital-case in place. +// +// This copy is set aside during parsing, so empty slice is returned for all +// cases where parsing did not happen. Similarly, request line is not stored +// during parsing and can not be returned. +// +// The slice is not safe to use after the handler returns. +func (h *RequestHeader) RawHeaders() []byte { + return h.rawHeadersCopy +} + +// String returns request header representation. +func (h *RequestHeader) String() string { + return string(h.Header()) +} + +// AppendBytes appends request header representation to dst and returns +// the extended dst. +func (h *RequestHeader) AppendBytes(dst []byte) []byte { + // there is no need in h.parseRawHeaders() here - raw headers are specially handled below. + dst = append(dst, h.Method()...) + dst = append(dst, ' ') + dst = append(dst, h.RequestURI()...) + dst = append(dst, ' ') + dst = append(dst, strHTTP11...) + dst = append(dst, strCRLF...) + + if !h.rawHeadersParsed && len(h.rawHeaders) > 0 { + return append(dst, h.rawHeaders...) 
+ } + + userAgent := h.UserAgent() + if len(userAgent) > 0 { + dst = appendHeaderLine(dst, strUserAgent, userAgent) + } + + host := h.Host() + if len(host) > 0 { + dst = appendHeaderLine(dst, strHost, host) + } + + contentType := h.ContentType() + if len(contentType) == 0 && !h.ignoreBody() { + contentType = strPostArgsContentType + } + if len(contentType) > 0 { + dst = appendHeaderLine(dst, strContentType, contentType) + } + if len(h.contentLengthBytes) > 0 { + dst = appendHeaderLine(dst, strContentLength, h.contentLengthBytes) + } + + for i, n := 0, len(h.h); i < n; i++ { + kv := &h.h[i] + dst = appendHeaderLine(dst, kv.key, kv.value) + } + + // there is no need in h.collectCookies() here, since if cookies aren't collected yet, + // they all are located in h.h. + n := len(h.cookies) + if n > 0 { + dst = append(dst, strCookie...) + dst = append(dst, strColonSpace...) + dst = appendRequestCookieBytes(dst, h.cookies) + dst = append(dst, strCRLF...) + } + + if h.ConnectionClose() { + dst = appendHeaderLine(dst, strConnection, strClose) + } + + return append(dst, strCRLF...) +} + +func appendHeaderLine(dst, key, value []byte) []byte { + dst = append(dst, key...) + dst = append(dst, strColonSpace...) + dst = append(dst, value...) + return append(dst, strCRLF...) +} + +func (h *ResponseHeader) parse(buf []byte) (int, error) { + m, err := h.parseFirstLine(buf) + if err != nil { + return 0, err + } + n, err := h.parseHeaders(buf[m:]) + if err != nil { + return 0, err + } + return m + n, nil +} + +func (h *RequestHeader) ignoreBody() bool { + return h.IsGet() || h.IsHead() +} + +func (h *RequestHeader) parse(buf []byte) (int, error) { + m, err := h.parseFirstLine(buf) + if err != nil { + return 0, err + } + + var rawHeaders []byte + rawHeaders, _, err = readRawHeaders(h.rawHeaders[:0], buf[m:]) + if err != nil { + return 0, err + } + h.rawHeadersCopy = append(h.rawHeadersCopy[:0], rawHeaders...) + var n int + n, err = h.parseHeaders(buf[m:]) + if err != nil { + return 0, err + } + h.rawHeaders = append(h.rawHeaders[:0], buf[m:m+n]...) + h.rawHeadersParsed = true + return m + n, nil +} + +func (h *ResponseHeader) parseFirstLine(buf []byte) (int, error) { + bNext := buf + var b []byte + var err error + for len(b) == 0 { + if b, bNext, err = nextLine(bNext); err != nil { + return 0, err + } + } + + // parse protocol + n := bytes.IndexByte(b, ' ') + if n < 0 { + return 0, fmt.Errorf("cannot find whitespace in the first line of response %q", buf) + } + h.noHTTP11 = !bytes.Equal(b[:n], strHTTP11) + b = b[n+1:] + + // parse status code + h.statusCode, n, err = parseUintBuf(b) + if err != nil { + return 0, fmt.Errorf("cannot parse response status code: %s. Response %q", err, buf) + } + if len(b) > n && b[n] != ' ' { + return 0, fmt.Errorf("unexpected char at the end of status code. Response %q", buf) + } + + return len(buf) - len(bNext), nil +} + +func (h *RequestHeader) parseFirstLine(buf []byte) (int, error) { + bNext := buf + var b []byte + var err error + for len(b) == 0 { + if b, bNext, err = nextLine(bNext); err != nil { + return 0, err + } + } + + // parse method + n := bytes.IndexByte(b, ' ') + if n <= 0 { + return 0, fmt.Errorf("cannot find http request method in %q", buf) + } + h.method = append(h.method[:0], b[:n]...) 
+ b = b[n+1:] + + // parse requestURI + n = bytes.LastIndexByte(b, ' ') + if n < 0 { + h.noHTTP11 = true + n = len(b) + } else if n == 0 { + return 0, fmt.Errorf("requestURI cannot be empty in %q", buf) + } else if !bytes.Equal(b[n+1:], strHTTP11) { + h.noHTTP11 = true + } + h.requestURI = append(h.requestURI[:0], b[:n]...) + + return len(buf) - len(bNext), nil +} + +func peekRawHeader(buf, key []byte) []byte { + n := bytes.Index(buf, key) + if n < 0 { + return nil + } + if n > 0 && buf[n-1] != '\n' { + return nil + } + n += len(key) + if n >= len(buf) { + return nil + } + if buf[n] != ':' { + return nil + } + n++ + if buf[n] != ' ' { + return nil + } + n++ + buf = buf[n:] + n = bytes.IndexByte(buf, '\n') + if n < 0 { + return nil + } + if n > 0 && buf[n-1] == '\r' { + n-- + } + return buf[:n] +} + +func readRawHeaders(dst, buf []byte) ([]byte, int, error) { + n := bytes.IndexByte(buf, '\n') + if n < 0 { + return nil, 0, errNeedMore + } + if (n == 1 && buf[0] == '\r') || n == 0 { + // empty headers + return dst, n + 1, nil + } + + n++ + b := buf + m := n + for { + b = b[m:] + m = bytes.IndexByte(b, '\n') + if m < 0 { + return nil, 0, errNeedMore + } + m++ + n += m + if (m == 2 && b[0] == '\r') || m == 1 { + dst = append(dst, buf[:n]...) + return dst, n, nil + } + } +} + +func (h *ResponseHeader) parseHeaders(buf []byte) (int, error) { + // 'identity' content-length by default + h.contentLength = -2 + + var s headerScanner + s.b = buf + s.disableNormalizing = h.disableNormalizing + var err error + var kv *argsKV + for s.next() { + if len(s.key) > 0 { + switch s.key[0] | 0x20 { + case 'c': + if caseInsensitiveCompare(s.key, strContentType) { + h.contentType = append(h.contentType[:0], s.value...) + continue + } + if caseInsensitiveCompare(s.key, strContentLength) { + if h.contentLength != -1 { + if h.contentLength, err = parseContentLength(s.value); err != nil { + h.contentLength = -2 + } else { + h.contentLengthBytes = append(h.contentLengthBytes[:0], s.value...) + } + } + continue + } + if caseInsensitiveCompare(s.key, strConnection) { + if bytes.Equal(s.value, strClose) { + h.connectionClose = true + } else { + h.connectionClose = false + h.h = appendArgBytes(h.h, s.key, s.value, argsHasValue) + } + continue + } + case 's': + if caseInsensitiveCompare(s.key, strServer) { + h.server = append(h.server[:0], s.value...) + continue + } + if caseInsensitiveCompare(s.key, strSetCookie) { + h.cookies, kv = allocArg(h.cookies) + kv.key = getCookieKey(kv.key, s.value) + kv.value = append(kv.value[:0], s.value...) + continue + } + case 't': + if caseInsensitiveCompare(s.key, strTransferEncoding) { + if !bytes.Equal(s.value, strIdentity) { + h.contentLength = -1 + h.h = setArgBytes(h.h, strTransferEncoding, strChunked, argsHasValue) + } + continue + } + } + h.h = appendArgBytes(h.h, s.key, s.value, argsHasValue) + } + } + if s.err != nil { + h.connectionClose = true + return 0, s.err + } + + if h.contentLength < 0 { + h.contentLengthBytes = h.contentLengthBytes[:0] + } + if h.contentLength == -2 && !h.ConnectionUpgrade() && !h.mustSkipContentLength() { + h.h = setArgBytes(h.h, strTransferEncoding, strIdentity, argsHasValue) + h.connectionClose = true + } + if h.noHTTP11 && !h.connectionClose { + // close connection for non-http/1.1 response unless 'Connection: keep-alive' is set. 
+ v := peekArgBytes(h.h, strConnection) + h.connectionClose = !hasHeaderValue(v, strKeepAlive) + } + + return len(buf) - len(s.b), nil +} + +func (h *RequestHeader) parseHeaders(buf []byte) (int, error) { + h.contentLength = -2 + + var s headerScanner + s.b = buf + s.disableNormalizing = h.disableNormalizing + var err error + for s.next() { + if len(s.key) > 0 { + // Spaces between the header key and colon are not allowed. + // See RFC 7230, Section 3.2.4. + if bytes.IndexByte(s.key, ' ') != -1 || bytes.IndexByte(s.key, '\t') != -1 { + err = fmt.Errorf("invalid header key %q", s.key) + continue + } + + switch s.key[0] | 0x20 { + case 'h': + if caseInsensitiveCompare(s.key, strHost) { + h.host = append(h.host[:0], s.value...) + continue + } + case 'u': + if caseInsensitiveCompare(s.key, strUserAgent) { + h.userAgent = append(h.userAgent[:0], s.value...) + continue + } + case 'c': + if caseInsensitiveCompare(s.key, strContentType) { + h.contentType = append(h.contentType[:0], s.value...) + continue + } + if caseInsensitiveCompare(s.key, strContentLength) { + if h.contentLength != -1 { + var nerr error + if h.contentLength, nerr = parseContentLength(s.value); nerr != nil { + if err == nil { + err = nerr + } + h.contentLength = -2 + } else { + h.contentLengthBytes = append(h.contentLengthBytes[:0], s.value...) + } + } + continue + } + if caseInsensitiveCompare(s.key, strConnection) { + if bytes.Equal(s.value, strClose) { + h.connectionClose = true + } else { + h.connectionClose = false + h.h = appendArgBytes(h.h, s.key, s.value, argsHasValue) + } + continue + } + case 't': + if caseInsensitiveCompare(s.key, strTransferEncoding) { + if !bytes.Equal(s.value, strIdentity) { + h.contentLength = -1 + h.h = setArgBytes(h.h, strTransferEncoding, strChunked, argsHasValue) + } + continue + } + } + } + h.h = appendArgBytes(h.h, s.key, s.value, argsHasValue) + } + if s.err != nil && err == nil { + err = s.err + } + if err != nil { + h.connectionClose = true + return 0, err + } + + if h.contentLength < 0 { + h.contentLengthBytes = h.contentLengthBytes[:0] + } + if h.noHTTP11 && !h.connectionClose { + // close connection for non-http/1.1 request unless 'Connection: keep-alive' is set. + v := peekArgBytes(h.h, strConnection) + h.connectionClose = !hasHeaderValue(v, strKeepAlive) + } + return s.hLen, nil +} + +func (h *RequestHeader) parseRawHeaders() { + if h.rawHeadersParsed { + return + } + h.rawHeadersParsed = true + if len(h.rawHeaders) == 0 { + return + } + h.parseHeaders(h.rawHeaders) //nolint:errcheck +} + +func (h *RequestHeader) collectCookies() { + if h.cookiesCollected { + return + } + + for i, n := 0, len(h.h); i < n; i++ { + kv := &h.h[i] + if bytes.Equal(kv.key, strCookie) { + h.cookies = parseRequestCookies(h.cookies, kv.value) + tmp := *kv + copy(h.h[i:], h.h[i+1:]) + n-- + i-- + h.h[n] = tmp + h.h = h.h[:n] + } + } + h.cookiesCollected = true +} + +func parseContentLength(b []byte) (int, error) { + v, n, err := parseUintBuf(b) + if err != nil { + return -1, err + } + if n != len(b) { + return -1, fmt.Errorf("non-numeric chars at the end of Content-Length") + } + return v, nil +} + +type headerScanner struct { + b []byte + key []byte + value []byte + err error + + // hLen stores header subslice len + hLen int + + disableNormalizing bool + + // by checking whether the next line contains a colon or not to tell + // it's a header entry or a multi line value of current header entry. 
+ // the side effect of this operation is that we know the index of the + // next colon and new line, so this can be used during next iteration, + // instead of find them again. + nextColon int + nextNewLine int + + initialized bool +} + +func (s *headerScanner) next() bool { + if !s.initialized { + s.nextColon = -1 + s.nextNewLine = -1 + s.initialized = true + } + bLen := len(s.b) + if bLen >= 2 && s.b[0] == '\r' && s.b[1] == '\n' { + s.b = s.b[2:] + s.hLen += 2 + return false + } + if bLen >= 1 && s.b[0] == '\n' { + s.b = s.b[1:] + s.hLen++ + return false + } + var n int + if s.nextColon >= 0 { + n = s.nextColon + s.nextColon = -1 + } else { + n = bytes.IndexByte(s.b, ':') + + // There can't be a \n inside the header name, check for this. + x := bytes.IndexByte(s.b, '\n') + if x < 0 { + // A header name should always at some point be followed by a \n + // even if it's the one that terminates the header block. + s.err = errNeedMore + return false + } + if x < n { + // There was a \n before the : + s.err = errInvalidName + return false + } + } + if n < 0 { + s.err = errNeedMore + return false + } + s.key = s.b[:n] + normalizeHeaderKey(s.key, s.disableNormalizing) + n++ + for len(s.b) > n && s.b[n] == ' ' { + n++ + // the newline index is a relative index, and lines below trimed `s.b` by `n`, + // so the relative newline index also shifted forward. it's safe to decrease + // to a minus value, it means it's invalid, and will find the newline again. + s.nextNewLine-- + } + s.hLen += n + s.b = s.b[n:] + if s.nextNewLine >= 0 { + n = s.nextNewLine + s.nextNewLine = -1 + } else { + n = bytes.IndexByte(s.b, '\n') + } + if n < 0 { + s.err = errNeedMore + return false + } + isMultiLineValue := false + for { + if n+1 >= len(s.b) { + break + } + if s.b[n+1] != ' ' && s.b[n+1] != '\t' { + break + } + d := bytes.IndexByte(s.b[n+1:], '\n') + if d <= 0 { + break + } else if d == 1 && s.b[n+1] == '\r' { + break + } + e := n + d + 1 + if c := bytes.IndexByte(s.b[n+1:e], ':'); c >= 0 { + s.nextColon = c + s.nextNewLine = d - c - 1 + break + } + isMultiLineValue = true + n = e + } + if n >= len(s.b) { + s.err = errNeedMore + return false + } + oldB := s.b + s.value = s.b[:n] + s.hLen += n + 1 + s.b = s.b[n+1:] + + if n > 0 && s.value[n-1] == '\r' { + n-- + } + for n > 0 && s.value[n-1] == ' ' { + n-- + } + s.value = s.value[:n] + if isMultiLineValue { + s.value, s.b, s.hLen = normalizeHeaderValue(s.value, oldB, s.hLen) + } + return true +} + +type headerValueScanner struct { + b []byte + value []byte +} + +func (s *headerValueScanner) next() bool { + b := s.b + if len(b) == 0 { + return false + } + n := bytes.IndexByte(b, ',') + if n < 0 { + s.value = stripSpace(b) + s.b = b[len(b):] + return true + } + s.value = stripSpace(b[:n]) + s.b = b[n+1:] + return true +} + +func stripSpace(b []byte) []byte { + for len(b) > 0 && b[0] == ' ' { + b = b[1:] + } + for len(b) > 0 && b[len(b)-1] == ' ' { + b = b[:len(b)-1] + } + return b +} + +func hasHeaderValue(s, value []byte) bool { + var vs headerValueScanner + vs.b = s + for vs.next() { + if caseInsensitiveCompare(vs.value, value) { + return true + } + } + return false +} + +func nextLine(b []byte) ([]byte, []byte, error) { + nNext := bytes.IndexByte(b, '\n') + if nNext < 0 { + return nil, nil, errNeedMore + } + n := nNext + if n > 0 && b[n-1] == '\r' { + n-- + } + return b[:n], b[nNext+1:], nil +} + +func initHeaderKV(kv *argsKV, key, value string, disableNormalizing bool) { + kv.key = getHeaderKeyBytes(kv, key, disableNormalizing) + kv.value = append(kv.value[:0], 
value...) +} + +func getHeaderKeyBytes(kv *argsKV, key string, disableNormalizing bool) []byte { + kv.key = append(kv.key[:0], key...) + normalizeHeaderKey(kv.key, disableNormalizing) + return kv.key +} + +func normalizeHeaderValue(ov, ob []byte, headerLength int) (nv, nb []byte, nhl int) { + nv = ov + length := len(ov) + if length <= 0 { + return + } + write := 0 + shrunk := 0 + lineStart := false + for read := 0; read < length; read++ { + c := ov[read] + if c == '\r' || c == '\n' { + shrunk++ + if c == '\n' { + lineStart = true + } + continue + } else if lineStart && c == '\t' { + c = ' ' + } else { + lineStart = false + } + nv[write] = c + write++ + } + + nv = nv[:write] + copy(ob[write:], ob[write+shrunk:]) + + // Check if we need to skip \r\n or just \n + skip := 0 + if ob[write] == '\r' { + if ob[write+1] == '\n' { + skip += 2 + } else { + skip++ + } + } else if ob[write] == '\n' { + skip++ + } + + nb = ob[write+skip : len(ob)-shrunk] + nhl = headerLength - shrunk + return +} + +func normalizeHeaderKey(b []byte, disableNormalizing bool) { + if disableNormalizing { + return + } + + n := len(b) + if n == 0 { + return + } + + b[0] = toUpperTable[b[0]] + for i := 1; i < n; i++ { + p := &b[i] + if *p == '-' { + i++ + if i < n { + b[i] = toUpperTable[b[i]] + } + continue + } + *p = toLowerTable[*p] + } +} + +// AppendNormalizedHeaderKey appends normalized header key (name) to dst +// and returns the resulting dst. +// +// Normalized header key starts with uppercase letter. The first letters +// after dashes are also uppercased. All the other letters are lowercased. +// Examples: +// +// * coNTENT-TYPe -> Content-Type +// * HOST -> Host +// * foo-bar-baz -> Foo-Bar-Baz +func AppendNormalizedHeaderKey(dst []byte, key string) []byte { + dst = append(dst, key...) + normalizeHeaderKey(dst[len(dst)-len(key):], false) + return dst +} + +// AppendNormalizedHeaderKeyBytes appends normalized header key (name) to dst +// and returns the resulting dst. +// +// Normalized header key starts with uppercase letter. The first letters +// after dashes are also uppercased. All the other letters are lowercased. +// Examples: +// +// * coNTENT-TYPe -> Content-Type +// * HOST -> Host +// * foo-bar-baz -> Foo-Bar-Baz +func AppendNormalizedHeaderKeyBytes(dst, key []byte) []byte { + return AppendNormalizedHeaderKey(dst, b2s(key)) +} + +var ( + errNeedMore = errors.New("need more data: cannot find trailing lf") + errInvalidName = errors.New("invalid header name") + errSmallBuffer = errors.New("small read buffer. Increase ReadBufferSize") +) + +type errNothingRead struct { + error +} + +// ErrSmallBuffer is returned when the provided buffer size is too small +// for reading request and/or response headers. +// +// ReadBufferSize value from Server or clients should reduce the number +// of such errors. 
+type ErrSmallBuffer struct { + error +} + +func mustPeekBuffered(r *bufio.Reader) []byte { + buf, err := r.Peek(r.Buffered()) + if len(buf) == 0 || err != nil { + panic(fmt.Sprintf("bufio.Reader.Peek() returned unexpected data (%q, %v)", buf, err)) + } + return buf +} + +func mustDiscard(r *bufio.Reader, n int) { + if _, err := r.Discard(n); err != nil { + panic(fmt.Sprintf("bufio.Reader.Discard(%d) failed: %s", n, err)) + } +} diff --git a/vendor/github.com/valyala/fasthttp/headers.go b/vendor/github.com/valyala/fasthttp/headers.go new file mode 100644 index 000000000..378dfec8f --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/headers.go @@ -0,0 +1,164 @@ +package fasthttp + +// Headers +const ( + // Authentication + HeaderAuthorization = "Authorization" + HeaderProxyAuthenticate = "Proxy-Authenticate" + HeaderProxyAuthorization = "Proxy-Authorization" + HeaderWWWAuthenticate = "WWW-Authenticate" + + // Caching + HeaderAge = "Age" + HeaderCacheControl = "Cache-Control" + HeaderClearSiteData = "Clear-Site-Data" + HeaderExpires = "Expires" + HeaderPragma = "Pragma" + HeaderWarning = "Warning" + + // Client hints + HeaderAcceptCH = "Accept-CH" + HeaderAcceptCHLifetime = "Accept-CH-Lifetime" + HeaderContentDPR = "Content-DPR" + HeaderDPR = "DPR" + HeaderEarlyData = "Early-Data" + HeaderSaveData = "Save-Data" + HeaderViewportWidth = "Viewport-Width" + HeaderWidth = "Width" + + // Conditionals + HeaderETag = "ETag" + HeaderIfMatch = "If-Match" + HeaderIfModifiedSince = "If-Modified-Since" + HeaderIfNoneMatch = "If-None-Match" + HeaderIfUnmodifiedSince = "If-Unmodified-Since" + HeaderLastModified = "Last-Modified" + HeaderVary = "Vary" + + // Connection management + HeaderConnection = "Connection" + HeaderKeepAlive = "Keep-Alive" + + // Content negotiation + HeaderAccept = "Accept" + HeaderAcceptCharset = "Accept-Charset" + HeaderAcceptEncoding = "Accept-Encoding" + HeaderAcceptLanguage = "Accept-Language" + + // Controls + HeaderCookie = "Cookie" + HeaderExpect = "Expect" + HeaderMaxForwards = "Max-Forwards" + HeaderSetCookie = "Set-Cookie" + + // CORS + HeaderAccessControlAllowCredentials = "Access-Control-Allow-Credentials" + HeaderAccessControlAllowHeaders = "Access-Control-Allow-Headers" + HeaderAccessControlAllowMethods = "Access-Control-Allow-Methods" + HeaderAccessControlAllowOrigin = "Access-Control-Allow-Origin" + HeaderAccessControlExposeHeaders = "Access-Control-Expose-Headers" + HeaderAccessControlMaxAge = "Access-Control-Max-Age" + HeaderAccessControlRequestHeaders = "Access-Control-Request-Headers" + HeaderAccessControlRequestMethod = "Access-Control-Request-Method" + HeaderOrigin = "Origin" + HeaderTimingAllowOrigin = "Timing-Allow-Origin" + HeaderXPermittedCrossDomainPolicies = "X-Permitted-Cross-Domain-Policies" + + // Do Not Track + HeaderDNT = "DNT" + HeaderTk = "Tk" + + // Downloads + HeaderContentDisposition = "Content-Disposition" + + // Message body information + HeaderContentEncoding = "Content-Encoding" + HeaderContentLanguage = "Content-Language" + HeaderContentLength = "Content-Length" + HeaderContentLocation = "Content-Location" + HeaderContentType = "Content-Type" + + // Proxies + HeaderForwarded = "Forwarded" + HeaderVia = "Via" + HeaderXForwardedFor = "X-Forwarded-For" + HeaderXForwardedHost = "X-Forwarded-Host" + HeaderXForwardedProto = "X-Forwarded-Proto" + + // Redirects + HeaderLocation = "Location" + + // Request context + HeaderFrom = "From" + HeaderHost = "Host" + HeaderReferer = "Referer" + HeaderReferrerPolicy = "Referrer-Policy" + HeaderUserAgent 
= "User-Agent" + + // Response context + HeaderAllow = "Allow" + HeaderServer = "Server" + + // Range requests + HeaderAcceptRanges = "Accept-Ranges" + HeaderContentRange = "Content-Range" + HeaderIfRange = "If-Range" + HeaderRange = "Range" + + // Security + HeaderContentSecurityPolicy = "Content-Security-Policy" + HeaderContentSecurityPolicyReportOnly = "Content-Security-Policy-Report-Only" + HeaderCrossOriginResourcePolicy = "Cross-Origin-Resource-Policy" + HeaderExpectCT = "Expect-CT" + HeaderFeaturePolicy = "Feature-Policy" + HeaderPublicKeyPins = "Public-Key-Pins" + HeaderPublicKeyPinsReportOnly = "Public-Key-Pins-Report-Only" + HeaderStrictTransportSecurity = "Strict-Transport-Security" + HeaderUpgradeInsecureRequests = "Upgrade-Insecure-Requests" + HeaderXContentTypeOptions = "X-Content-Type-Options" + HeaderXDownloadOptions = "X-Download-Options" + HeaderXFrameOptions = "X-Frame-Options" + HeaderXPoweredBy = "X-Powered-By" + HeaderXXSSProtection = "X-XSS-Protection" + + // Server-sent event + HeaderLastEventID = "Last-Event-ID" + HeaderNEL = "NEL" + HeaderPingFrom = "Ping-From" + HeaderPingTo = "Ping-To" + HeaderReportTo = "Report-To" + + // Transfer coding + HeaderTE = "TE" + HeaderTrailer = "Trailer" + HeaderTransferEncoding = "Transfer-Encoding" + + // WebSockets + HeaderSecWebSocketAccept = "Sec-WebSocket-Accept" + HeaderSecWebSocketExtensions = "Sec-WebSocket-Extensions" + HeaderSecWebSocketKey = "Sec-WebSocket-Key" + HeaderSecWebSocketProtocol = "Sec-WebSocket-Protocol" + HeaderSecWebSocketVersion = "Sec-WebSocket-Version" + + // Other + HeaderAcceptPatch = "Accept-Patch" + HeaderAcceptPushPolicy = "Accept-Push-Policy" + HeaderAcceptSignature = "Accept-Signature" + HeaderAltSvc = "Alt-Svc" + HeaderDate = "Date" + HeaderIndex = "Index" + HeaderLargeAllocation = "Large-Allocation" + HeaderLink = "Link" + HeaderPushPolicy = "Push-Policy" + HeaderRetryAfter = "Retry-After" + HeaderServerTiming = "Server-Timing" + HeaderSignature = "Signature" + HeaderSignedHeaders = "Signed-Headers" + HeaderSourceMap = "SourceMap" + HeaderUpgrade = "Upgrade" + HeaderXDNSPrefetchControl = "X-DNS-Prefetch-Control" + HeaderXPingback = "X-Pingback" + HeaderXRequestedWith = "X-Requested-With" + HeaderXRobotsTag = "X-Robots-Tag" + HeaderXUACompatible = "X-UA-Compatible" +) diff --git a/vendor/github.com/valyala/fasthttp/http.go b/vendor/github.com/valyala/fasthttp/http.go new file mode 100644 index 000000000..9b1bcc02f --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/http.go @@ -0,0 +1,1844 @@ +package fasthttp + +import ( + "bufio" + "bytes" + "encoding/base64" + "errors" + "fmt" + "io" + "mime/multipart" + "net" + "os" + "sync" + + "github.com/valyala/bytebufferpool" +) + +// Request represents HTTP request. +// +// It is forbidden copying Request instances. Create new instances +// and use CopyTo instead. +// +// Request instance MUST NOT be used from concurrently running goroutines. +type Request struct { + noCopy noCopy //nolint:unused,structcheck + + // Request header + // + // Copying Header by value is forbidden. Use pointer to Header instead. + Header RequestHeader + + uri URI + postArgs Args + + bodyStream io.Reader + w requestBodyWriter + body *bytebufferpool.ByteBuffer + + multipartForm *multipart.Form + multipartFormBoundary string + + // Group bool members in order to reduce Request object size. + parsedURI bool + parsedPostArgs bool + + keepBodyBuffer bool + + isTLS bool + + // To detect scheme changes in redirects + schemaUpdate bool +} + +// Response represents HTTP response. 
+//
+// It is forbidden copying Response instances. Create new instances
+// and use CopyTo instead.
+//
+// Response instance MUST NOT be used from concurrently running goroutines.
+type Response struct {
+	noCopy noCopy //nolint:unused,structcheck
+
+	// Response header
+	//
+	// Copying Header by value is forbidden. Use pointer to Header instead.
+	Header ResponseHeader
+
+	// Flush headers as soon as possible without waiting for first body bytes.
+	// Relevant for bodyStream only.
+	ImmediateHeaderFlush bool
+
+	bodyStream io.Reader
+	w          responseBodyWriter
+	body       *bytebufferpool.ByteBuffer
+	bodyRaw    []byte
+
+	// Response.Read() skips reading body if set to true.
+	// Use it for reading HEAD responses.
+	//
+	// Response.Write() skips writing body if set to true.
+	// Use it for writing HEAD responses.
+	SkipBody bool
+
+	keepBodyBuffer bool
+
+	// Remote TCPAddr from the underlying net.Conn
+	raddr net.Addr
+	// Local TCPAddr from the underlying net.Conn
+	laddr net.Addr
+}
+
+// SetHost sets host for the request.
+func (req *Request) SetHost(host string) {
+	req.URI().SetHost(host)
+}
+
+// SetHostBytes sets host for the request.
+func (req *Request) SetHostBytes(host []byte) {
+	req.URI().SetHostBytes(host)
+}
+
+// Host returns the host for the given request.
+func (req *Request) Host() []byte {
+	return req.URI().Host()
+}
+
+// SetRequestURI sets RequestURI.
+func (req *Request) SetRequestURI(requestURI string) {
+	req.Header.SetRequestURI(requestURI)
+	req.parsedURI = false
+}
+
+// SetRequestURIBytes sets RequestURI.
+func (req *Request) SetRequestURIBytes(requestURI []byte) {
+	req.Header.SetRequestURIBytes(requestURI)
+	req.parsedURI = false
+}
+
+// RequestURI returns request's URI.
+func (req *Request) RequestURI() []byte {
+	if req.parsedURI {
+		requestURI := req.uri.RequestURI()
+		req.SetRequestURIBytes(requestURI)
+	}
+	return req.Header.RequestURI()
+}
+
+// StatusCode returns response status code.
+func (resp *Response) StatusCode() int {
+	return resp.Header.StatusCode()
+}
+
+// SetStatusCode sets response status code.
+func (resp *Response) SetStatusCode(statusCode int) {
+	resp.Header.SetStatusCode(statusCode)
+}
+
+// ConnectionClose returns true if 'Connection: close' header is set.
+func (resp *Response) ConnectionClose() bool {
+	return resp.Header.ConnectionClose()
+}
+
+// SetConnectionClose sets 'Connection: close' header.
+func (resp *Response) SetConnectionClose() {
+	resp.Header.SetConnectionClose()
+}
+
+// ConnectionClose returns true if 'Connection: close' header is set.
+func (req *Request) ConnectionClose() bool {
+	return req.Header.ConnectionClose()
+}
+
+// SetConnectionClose sets 'Connection: close' header.
+func (req *Request) SetConnectionClose() {
+	req.Header.SetConnectionClose()
+}
+
+// SendFile registers file on the given path to be used as response body
+// when Write is called.
+//
+// Note that SendFile doesn't set Content-Type, so set it yourself
+// with Header.SetContentType.
+func (resp *Response) SendFile(path string) error {
+	f, err := os.Open(path)
+	if err != nil {
+		return err
+	}
+	fileInfo, err := f.Stat()
+	if err != nil {
+		f.Close()
+		return err
+	}
+	size64 := fileInfo.Size()
+	size := int(size64)
+	if int64(size) != size64 {
+		size = -1
+	}
+
+	resp.Header.SetLastModified(fileInfo.ModTime())
+	resp.SetBodyStream(f, size)
+	return nil
+}
+
+// SetBodyStream sets request body stream and, optionally, body size.
+//
+// If bodySize is >= 0, then the bodyStream must provide exactly bodySize bytes
+// before returning io.EOF.
+//
+// If bodySize < 0, then bodyStream is read until io.EOF.
+//
+// bodyStream.Close() is called after finishing reading all body data
+// if it implements io.Closer.
+//
+// Note that GET and HEAD requests cannot have body.
+//
+// See also SetBodyStreamWriter.
+func (req *Request) SetBodyStream(bodyStream io.Reader, bodySize int) {
+	req.ResetBody()
+	req.bodyStream = bodyStream
+	req.Header.SetContentLength(bodySize)
+}
+
+// SetBodyStream sets response body stream and, optionally, body size.
+//
+// If bodySize is >= 0, then the bodyStream must provide exactly bodySize bytes
+// before returning io.EOF.
+//
+// If bodySize < 0, then bodyStream is read until io.EOF.
+//
+// bodyStream.Close() is called after finishing reading all body data
+// if it implements io.Closer.
+//
+// See also SetBodyStreamWriter.
+func (resp *Response) SetBodyStream(bodyStream io.Reader, bodySize int) {
+	resp.ResetBody()
+	resp.bodyStream = bodyStream
+	resp.Header.SetContentLength(bodySize)
+}
+
+// IsBodyStream returns true if body is set via SetBodyStream*
+func (req *Request) IsBodyStream() bool {
+	return req.bodyStream != nil
+}
+
+// IsBodyStream returns true if body is set via SetBodyStream*
+func (resp *Response) IsBodyStream() bool {
+	return resp.bodyStream != nil
+}
+
+// SetBodyStreamWriter registers the given sw for populating request body.
+//
+// This function may be used in the following cases:
+//
+// * if request body is too big (more than 10MB).
+// * if request body is streamed from slow external sources.
+// * if request body must be streamed to the server in chunks
+// (aka `http client push` or `chunked transfer-encoding`).
+//
+// Note that GET and HEAD requests cannot have body.
+//
+// See also SetBodyStream.
+func (req *Request) SetBodyStreamWriter(sw StreamWriter) {
+	sr := NewStreamReader(sw)
+	req.SetBodyStream(sr, -1)
+}
+
+// SetBodyStreamWriter registers the given sw for populating response body.
+//
+// This function may be used in the following cases:
+//
+// * if response body is too big (more than 10MB).
+// * if response body is streamed from slow external sources.
+// * if response body must be streamed to the client in chunks
+// (aka `http server push` or `chunked transfer-encoding`).
+//
+// See also SetBodyStream.
+func (resp *Response) SetBodyStreamWriter(sw StreamWriter) {
+	sr := NewStreamReader(sw)
+	resp.SetBodyStream(sr, -1)
+}
+
+// BodyWriter returns writer for populating response body.
+//
+// If used inside RequestHandler, the returned writer must not be used
+// after returning from RequestHandler. Use RequestCtx.Write
+// or SetBodyStreamWriter in this case.
+func (resp *Response) BodyWriter() io.Writer {
+	resp.w.r = resp
+	return &resp.w
+}
+
+// BodyWriter returns writer for populating request body.
+func (req *Request) BodyWriter() io.Writer {
+	req.w.r = req
+	return &req.w
+}
+
+type responseBodyWriter struct {
+	r *Response
+}
+
+func (w *responseBodyWriter) Write(p []byte) (int, error) {
+	w.r.AppendBody(p)
+	return len(p), nil
+}
+
+type requestBodyWriter struct {
+	r *Request
+}
+
+func (w *requestBodyWriter) Write(p []byte) (int, error) {
+	w.r.AppendBody(p)
+	return len(p), nil
+}
+
+func (resp *Response) parseNetConn(conn net.Conn) {
+	resp.raddr = conn.RemoteAddr()
+	resp.laddr = conn.LocalAddr()
+}
+
+// RemoteAddr returns the remote network address. The Addr returned is shared
+// by all invocations of RemoteAddr, so do not modify it.
+func (resp *Response) RemoteAddr() net.Addr { + return resp.raddr +} + +// LocalAddr returns the local network address. The Addr returned is shared +// by all invocations of LocalAddr, so do not modify it. +func (resp *Response) LocalAddr() net.Addr { + return resp.laddr +} + +// Body returns response body. +// +// The returned body is valid until the response modification. +func (resp *Response) Body() []byte { + if resp.bodyStream != nil { + bodyBuf := resp.bodyBuffer() + bodyBuf.Reset() + _, err := copyZeroAlloc(bodyBuf, resp.bodyStream) + resp.closeBodyStream() //nolint:errcheck + if err != nil { + bodyBuf.SetString(err.Error()) + } + } + return resp.bodyBytes() +} + +func (resp *Response) bodyBytes() []byte { + if resp.bodyRaw != nil { + return resp.bodyRaw + } + if resp.body == nil { + return nil + } + return resp.body.B +} + +func (req *Request) bodyBytes() []byte { + if req.body == nil { + return nil + } + return req.body.B +} + +func (resp *Response) bodyBuffer() *bytebufferpool.ByteBuffer { + if resp.body == nil { + resp.body = responseBodyPool.Get() + } + resp.bodyRaw = nil + return resp.body +} + +func (req *Request) bodyBuffer() *bytebufferpool.ByteBuffer { + if req.body == nil { + req.body = requestBodyPool.Get() + } + return req.body +} + +var ( + responseBodyPool bytebufferpool.Pool + requestBodyPool bytebufferpool.Pool +) + +// BodyGunzip returns un-gzipped body data. +// +// This method may be used if the request header contains +// 'Content-Encoding: gzip' for reading un-gzipped body. +// Use Body for reading gzipped request body. +func (req *Request) BodyGunzip() ([]byte, error) { + return gunzipData(req.Body()) +} + +// BodyGunzip returns un-gzipped body data. +// +// This method may be used if the response header contains +// 'Content-Encoding: gzip' for reading un-gzipped body. +// Use Body for reading gzipped response body. +func (resp *Response) BodyGunzip() ([]byte, error) { + return gunzipData(resp.Body()) +} + +func gunzipData(p []byte) ([]byte, error) { + var bb bytebufferpool.ByteBuffer + _, err := WriteGunzip(&bb, p) + if err != nil { + return nil, err + } + return bb.B, nil +} + +// BodyInflate returns inflated body data. +// +// This method may be used if the response header contains +// 'Content-Encoding: deflate' for reading inflated request body. +// Use Body for reading deflated request body. +func (req *Request) BodyInflate() ([]byte, error) { + return inflateData(req.Body()) +} + +// BodyInflate returns inflated body data. +// +// This method may be used if the response header contains +// 'Content-Encoding: deflate' for reading inflated response body. +// Use Body for reading deflated response body. +func (resp *Response) BodyInflate() ([]byte, error) { + return inflateData(resp.Body()) +} + +func inflateData(p []byte) ([]byte, error) { + var bb bytebufferpool.ByteBuffer + _, err := WriteInflate(&bb, p) + if err != nil { + return nil, err + } + return bb.B, nil +} + +// BodyWriteTo writes request body to w. +func (req *Request) BodyWriteTo(w io.Writer) error { + if req.bodyStream != nil { + _, err := copyZeroAlloc(w, req.bodyStream) + req.closeBodyStream() //nolint:errcheck + return err + } + if req.onlyMultipartForm() { + return WriteMultipartForm(w, req.multipartForm, req.multipartFormBoundary) + } + _, err := w.Write(req.bodyBytes()) + return err +} + +// BodyWriteTo writes response body to w. 
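+//
+// An illustrative sketch, assuming any io.Writer w:
+//
+//	if err := resp.BodyWriteTo(w); err != nil {
+//		// handle the write/stream error
+//	}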
+func (resp *Response) BodyWriteTo(w io.Writer) error { + if resp.bodyStream != nil { + _, err := copyZeroAlloc(w, resp.bodyStream) + resp.closeBodyStream() //nolint:errcheck + return err + } + _, err := w.Write(resp.bodyBytes()) + return err +} + +// AppendBody appends p to response body. +// +// It is safe re-using p after the function returns. +func (resp *Response) AppendBody(p []byte) { + resp.AppendBodyString(b2s(p)) +} + +// AppendBodyString appends s to response body. +func (resp *Response) AppendBodyString(s string) { + resp.closeBodyStream() //nolint:errcheck + resp.bodyBuffer().WriteString(s) //nolint:errcheck +} + +// SetBody sets response body. +// +// It is safe re-using body argument after the function returns. +func (resp *Response) SetBody(body []byte) { + resp.SetBodyString(b2s(body)) +} + +// SetBodyString sets response body. +func (resp *Response) SetBodyString(body string) { + resp.closeBodyStream() //nolint:errcheck + bodyBuf := resp.bodyBuffer() + bodyBuf.Reset() + bodyBuf.WriteString(body) //nolint:errcheck +} + +// ResetBody resets response body. +func (resp *Response) ResetBody() { + resp.bodyRaw = nil + resp.closeBodyStream() //nolint:errcheck + if resp.body != nil { + if resp.keepBodyBuffer { + resp.body.Reset() + } else { + responseBodyPool.Put(resp.body) + resp.body = nil + } + } +} + +// SetBodyRaw sets response body, but without copying it. +// +// From this point onward the body argument must not be changed. +func (resp *Response) SetBodyRaw(body []byte) { + resp.ResetBody() + resp.bodyRaw = body +} + +// ReleaseBody retires the response body if it is greater than "size" bytes. +// +// This permits GC to reclaim the large buffer. If used, must be before +// ReleaseResponse. +// +// Use this method only if you really understand how it works. +// The majority of workloads don't need this method. +func (resp *Response) ReleaseBody(size int) { + resp.bodyRaw = nil + if cap(resp.body.B) > size { + resp.closeBodyStream() //nolint:errcheck + resp.body = nil + } +} + +// ReleaseBody retires the request body if it is greater than "size" bytes. +// +// This permits GC to reclaim the large buffer. If used, must be before +// ReleaseRequest. +// +// Use this method only if you really understand how it works. +// The majority of workloads don't need this method. +func (req *Request) ReleaseBody(size int) { + if cap(req.body.B) > size { + req.closeBodyStream() //nolint:errcheck + req.body = nil + } +} + +// SwapBody swaps response body with the given body and returns +// the previous response body. +// +// It is forbidden to use the body passed to SwapBody after +// the function returns. +func (resp *Response) SwapBody(body []byte) []byte { + bb := resp.bodyBuffer() + + if resp.bodyStream != nil { + bb.Reset() + _, err := copyZeroAlloc(bb, resp.bodyStream) + resp.closeBodyStream() //nolint:errcheck + if err != nil { + bb.Reset() + bb.SetString(err.Error()) + } + } + + resp.bodyRaw = nil + + oldBody := bb.B + bb.B = body + return oldBody +} + +// SwapBody swaps request body with the given body and returns +// the previous request body. +// +// It is forbidden to use the body passed to SwapBody after +// the function returns. 
+func (req *Request) SwapBody(body []byte) []byte { + bb := req.bodyBuffer() + + if req.bodyStream != nil { + bb.Reset() + _, err := copyZeroAlloc(bb, req.bodyStream) + req.closeBodyStream() //nolint:errcheck + if err != nil { + bb.Reset() + bb.SetString(err.Error()) + } + } + + oldBody := bb.B + bb.B = body + return oldBody +} + +// Body returns request body. +// +// The returned body is valid until the request modification. +func (req *Request) Body() []byte { + if req.bodyStream != nil { + bodyBuf := req.bodyBuffer() + bodyBuf.Reset() + _, err := copyZeroAlloc(bodyBuf, req.bodyStream) + req.closeBodyStream() //nolint:errcheck + if err != nil { + bodyBuf.SetString(err.Error()) + } + } else if req.onlyMultipartForm() { + body, err := marshalMultipartForm(req.multipartForm, req.multipartFormBoundary) + if err != nil { + return []byte(err.Error()) + } + return body + } + return req.bodyBytes() +} + +// AppendBody appends p to request body. +// +// It is safe re-using p after the function returns. +func (req *Request) AppendBody(p []byte) { + req.AppendBodyString(b2s(p)) +} + +// AppendBodyString appends s to request body. +func (req *Request) AppendBodyString(s string) { + req.RemoveMultipartFormFiles() + req.closeBodyStream() //nolint:errcheck + req.bodyBuffer().WriteString(s) //nolint:errcheck +} + +// SetBody sets request body. +// +// It is safe re-using body argument after the function returns. +func (req *Request) SetBody(body []byte) { + req.SetBodyString(b2s(body)) +} + +// SetBodyString sets request body. +func (req *Request) SetBodyString(body string) { + req.RemoveMultipartFormFiles() + req.closeBodyStream() //nolint:errcheck + req.bodyBuffer().SetString(body) +} + +// ResetBody resets request body. +func (req *Request) ResetBody() { + req.RemoveMultipartFormFiles() + req.closeBodyStream() //nolint:errcheck + if req.body != nil { + if req.keepBodyBuffer { + req.body.Reset() + } else { + requestBodyPool.Put(req.body) + req.body = nil + } + } +} + +// CopyTo copies req contents to dst except of body stream. +func (req *Request) CopyTo(dst *Request) { + req.copyToSkipBody(dst) + if req.body != nil { + dst.bodyBuffer().Set(req.body.B) + } else if dst.body != nil { + dst.body.Reset() + } +} + +func (req *Request) copyToSkipBody(dst *Request) { + dst.Reset() + req.Header.CopyTo(&dst.Header) + + req.uri.CopyTo(&dst.uri) + dst.parsedURI = req.parsedURI + + req.postArgs.CopyTo(&dst.postArgs) + dst.parsedPostArgs = req.parsedPostArgs + dst.isTLS = req.isTLS + + // do not copy multipartForm - it will be automatically + // re-created on the first call to MultipartForm. +} + +// CopyTo copies resp contents to dst except of body stream. 
+func (resp *Response) CopyTo(dst *Response) { + resp.copyToSkipBody(dst) + if resp.bodyRaw != nil { + dst.bodyRaw = resp.bodyRaw + if dst.body != nil { + dst.body.Reset() + } + } else if resp.body != nil { + dst.bodyBuffer().Set(resp.body.B) + } else if dst.body != nil { + dst.body.Reset() + } +} + +func (resp *Response) copyToSkipBody(dst *Response) { + dst.Reset() + resp.Header.CopyTo(&dst.Header) + dst.SkipBody = resp.SkipBody + dst.raddr = resp.raddr + dst.laddr = resp.laddr +} + +func swapRequestBody(a, b *Request) { + a.body, b.body = b.body, a.body + a.bodyStream, b.bodyStream = b.bodyStream, a.bodyStream +} + +func swapResponseBody(a, b *Response) { + a.body, b.body = b.body, a.body + a.bodyRaw, b.bodyRaw = b.bodyRaw, a.bodyRaw + a.bodyStream, b.bodyStream = b.bodyStream, a.bodyStream +} + +// URI returns request URI +func (req *Request) URI() *URI { + req.parseURI() + return &req.uri +} + +func (req *Request) parseURI() { + if req.parsedURI { + return + } + req.parsedURI = true + + req.uri.parse(req.Header.Host(), req.Header.RequestURI(), req.isTLS) +} + +// PostArgs returns POST arguments. +func (req *Request) PostArgs() *Args { + req.parsePostArgs() + return &req.postArgs +} + +func (req *Request) parsePostArgs() { + if req.parsedPostArgs { + return + } + req.parsedPostArgs = true + + if !bytes.HasPrefix(req.Header.ContentType(), strPostArgsContentType) { + return + } + req.postArgs.ParseBytes(req.bodyBytes()) +} + +// ErrNoMultipartForm means that the request's Content-Type +// isn't 'multipart/form-data'. +var ErrNoMultipartForm = errors.New("request has no multipart/form-data Content-Type") + +// MultipartForm returns requests's multipart form. +// +// Returns ErrNoMultipartForm if request's Content-Type +// isn't 'multipart/form-data'. +// +// RemoveMultipartFormFiles must be called after returned multipart form +// is processed. +func (req *Request) MultipartForm() (*multipart.Form, error) { + if req.multipartForm != nil { + return req.multipartForm, nil + } + + req.multipartFormBoundary = string(req.Header.MultipartFormBoundary()) + if len(req.multipartFormBoundary) == 0 { + return nil, ErrNoMultipartForm + } + + ce := req.Header.peek(strContentEncoding) + body := req.bodyBytes() + if bytes.Equal(ce, strGzip) { + // Do not care about memory usage here. + var err error + if body, err = AppendGunzipBytes(nil, body); err != nil { + return nil, fmt.Errorf("cannot gunzip request body: %s", err) + } + } else if len(ce) > 0 { + return nil, fmt.Errorf("unsupported Content-Encoding: %q", ce) + } + + f, err := readMultipartForm(bytes.NewReader(body), req.multipartFormBoundary, len(body), len(body)) + if err != nil { + return nil, err + } + req.multipartForm = f + return f, nil +} + +func marshalMultipartForm(f *multipart.Form, boundary string) ([]byte, error) { + var buf bytebufferpool.ByteBuffer + if err := WriteMultipartForm(&buf, f, boundary); err != nil { + return nil, err + } + return buf.B, nil +} + +// WriteMultipartForm writes the given multipart form f with the given +// boundary to w. +func WriteMultipartForm(w io.Writer, f *multipart.Form, boundary string) error { + // Do not care about memory allocations here, since multipart + // form processing is slow. 
+ if len(boundary) == 0 { + panic("BUG: form boundary cannot be empty") + } + + mw := multipart.NewWriter(w) + if err := mw.SetBoundary(boundary); err != nil { + return fmt.Errorf("cannot use form boundary %q: %s", boundary, err) + } + + // marshal values + for k, vv := range f.Value { + for _, v := range vv { + if err := mw.WriteField(k, v); err != nil { + return fmt.Errorf("cannot write form field %q value %q: %s", k, v, err) + } + } + } + + // marshal files + for k, fvv := range f.File { + for _, fv := range fvv { + vw, err := mw.CreatePart(fv.Header) + if err != nil { + return fmt.Errorf("cannot create form file %q (%q): %s", k, fv.Filename, err) + } + fh, err := fv.Open() + if err != nil { + return fmt.Errorf("cannot open form file %q (%q): %s", k, fv.Filename, err) + } + if _, err = copyZeroAlloc(vw, fh); err != nil { + return fmt.Errorf("error when copying form file %q (%q): %s", k, fv.Filename, err) + } + if err = fh.Close(); err != nil { + return fmt.Errorf("cannot close form file %q (%q): %s", k, fv.Filename, err) + } + } + } + + if err := mw.Close(); err != nil { + return fmt.Errorf("error when closing multipart form writer: %s", err) + } + + return nil +} + +func readMultipartForm(r io.Reader, boundary string, size, maxInMemoryFileSize int) (*multipart.Form, error) { + // Do not care about memory allocations here, since they are tiny + // compared to multipart data (aka multi-MB files) usually sent + // in multipart/form-data requests. + + if size <= 0 { + return nil, fmt.Errorf("form size must be greater than 0. Given %d", size) + } + lr := io.LimitReader(r, int64(size)) + mr := multipart.NewReader(lr, boundary) + f, err := mr.ReadForm(int64(maxInMemoryFileSize)) + if err != nil { + return nil, fmt.Errorf("cannot read multipart/form-data body: %s", err) + } + return f, nil +} + +// Reset clears request contents. +func (req *Request) Reset() { + req.Header.Reset() + req.resetSkipHeader() +} + +func (req *Request) resetSkipHeader() { + req.ResetBody() + req.uri.Reset() + req.parsedURI = false + req.postArgs.Reset() + req.parsedPostArgs = false + req.isTLS = false +} + +// RemoveMultipartFormFiles removes multipart/form-data temporary files +// associated with the request. +func (req *Request) RemoveMultipartFormFiles() { + if req.multipartForm != nil { + // Do not check for error, since these files may be deleted or moved + // to new places by user code. + req.multipartForm.RemoveAll() //nolint:errcheck + req.multipartForm = nil + } + req.multipartFormBoundary = "" +} + +// Reset clears response contents. +func (resp *Response) Reset() { + resp.Header.Reset() + resp.resetSkipHeader() + resp.SkipBody = false + resp.raddr = nil + resp.laddr = nil + resp.ImmediateHeaderFlush = false +} + +func (resp *Response) resetSkipHeader() { + resp.ResetBody() +} + +// Read reads request (including body) from the given r. +// +// RemoveMultipartFormFiles or Reset must be called after +// reading multipart/form-data request in order to delete temporarily +// uploaded files. +// +// If MayContinue returns true, the caller must: +// +// - Either send StatusExpectationFailed response if request headers don't +// satisfy the caller. +// - Or send StatusContinue response before reading request body +// with ContinueReadBody. +// - Or close the connection. +// +// io.EOF is returned if r is closed before reading the first header byte. 
+func (req *Request) Read(r *bufio.Reader) error { + return req.ReadLimitBody(r, 0) +} + +const defaultMaxInMemoryFileSize = 16 * 1024 * 1024 + +// ErrGetOnly is returned when server expects only GET requests, +// but some other type of request came (Server.GetOnly option is true). +var ErrGetOnly = errors.New("non-GET request received") + +// ReadLimitBody reads request from the given r, limiting the body size. +// +// If maxBodySize > 0 and the body size exceeds maxBodySize, +// then ErrBodyTooLarge is returned. +// +// RemoveMultipartFormFiles or Reset must be called after +// reading multipart/form-data request in order to delete temporarily +// uploaded files. +// +// If MayContinue returns true, the caller must: +// +// - Either send StatusExpectationFailed response if request headers don't +// satisfy the caller. +// - Or send StatusContinue response before reading request body +// with ContinueReadBody. +// - Or close the connection. +// +// io.EOF is returned if r is closed before reading the first header byte. +func (req *Request) ReadLimitBody(r *bufio.Reader, maxBodySize int) error { + req.resetSkipHeader() + if err := req.Header.Read(r); err != nil { + return err + } + + return req.readLimitBody(r, maxBodySize, false) +} + +func (req *Request) readLimitBody(r *bufio.Reader, maxBodySize int, getOnly bool) error { + // Do not reset the request here - the caller must reset it before + // calling this method. + + if getOnly && !req.Header.IsGet() { + return ErrGetOnly + } + + if req.MayContinue() { + // 'Expect: 100-continue' header found. Let the caller deciding + // whether to read request body or + // to return StatusExpectationFailed. + return nil + } + + return req.ContinueReadBody(r, maxBodySize) +} + +// MayContinue returns true if the request contains +// 'Expect: 100-continue' header. +// +// The caller must do one of the following actions if MayContinue returns true: +// +// - Either send StatusExpectationFailed response if request headers don't +// satisfy the caller. +// - Or send StatusContinue response before reading request body +// with ContinueReadBody. +// - Or close the connection. +func (req *Request) MayContinue() bool { + return bytes.Equal(req.Header.peek(strExpect), str100Continue) +} + +// ContinueReadBody reads request body if request header contains +// 'Expect: 100-continue'. +// +// The caller must send StatusContinue response before calling this method. +// +// If maxBodySize > 0 and the body size exceeds maxBodySize, +// then ErrBodyTooLarge is returned. +func (req *Request) ContinueReadBody(r *bufio.Reader, maxBodySize int) error { + var err error + contentLength := req.Header.realContentLength() + if contentLength > 0 { + if maxBodySize > 0 && contentLength > maxBodySize { + return ErrBodyTooLarge + } + + // Pre-read multipart form data of known length. + // This way we limit memory usage for large file uploads, since their contents + // is streamed into temporary files if file size exceeds defaultMaxInMemoryFileSize. + req.multipartFormBoundary = string(req.Header.MultipartFormBoundary()) + if len(req.multipartFormBoundary) > 0 && len(req.Header.peek(strContentEncoding)) == 0 { + req.multipartForm, err = readMultipartForm(r, req.multipartFormBoundary, contentLength, defaultMaxInMemoryFileSize) + if err != nil { + req.Reset() + } + return err + } + } + + if contentLength == -2 { + // identity body has no sense for http requests, since + // the end of body is determined by connection close. 
+ // So just ignore request body for requests without + // 'Content-Length' and 'Transfer-Encoding' headers. + req.Header.SetContentLength(0) + return nil + } + + bodyBuf := req.bodyBuffer() + bodyBuf.Reset() + bodyBuf.B, err = readBody(r, contentLength, maxBodySize, bodyBuf.B) + if err != nil { + req.Reset() + return err + } + req.Header.SetContentLength(len(bodyBuf.B)) + return nil +} + +// Read reads response (including body) from the given r. +// +// io.EOF is returned if r is closed before reading the first header byte. +func (resp *Response) Read(r *bufio.Reader) error { + return resp.ReadLimitBody(r, 0) +} + +// ReadLimitBody reads response from the given r, limiting the body size. +// +// If maxBodySize > 0 and the body size exceeds maxBodySize, +// then ErrBodyTooLarge is returned. +// +// io.EOF is returned if r is closed before reading the first header byte. +func (resp *Response) ReadLimitBody(r *bufio.Reader, maxBodySize int) error { + resp.resetSkipHeader() + err := resp.Header.Read(r) + if err != nil { + return err + } + if resp.Header.StatusCode() == StatusContinue { + // Read the next response according to http://www.w3.org/Protocols/rfc2616/rfc2616-sec8.html . + if err = resp.Header.Read(r); err != nil { + return err + } + } + + if !resp.mustSkipBody() { + bodyBuf := resp.bodyBuffer() + bodyBuf.Reset() + bodyBuf.B, err = readBody(r, resp.Header.ContentLength(), maxBodySize, bodyBuf.B) + if err != nil { + return err + } + resp.Header.SetContentLength(len(bodyBuf.B)) + } + return nil +} + +func (resp *Response) mustSkipBody() bool { + return resp.SkipBody || resp.Header.mustSkipContentLength() +} + +var errRequestHostRequired = errors.New("missing required Host header in request") + +// WriteTo writes request to w. It implements io.WriterTo. +func (req *Request) WriteTo(w io.Writer) (int64, error) { + return writeBufio(req, w) +} + +// WriteTo writes response to w. It implements io.WriterTo. +func (resp *Response) WriteTo(w io.Writer) (int64, error) { + return writeBufio(resp, w) +} + +func writeBufio(hw httpWriter, w io.Writer) (int64, error) { + sw := acquireStatsWriter(w) + bw := acquireBufioWriter(sw) + err1 := hw.Write(bw) + err2 := bw.Flush() + releaseBufioWriter(bw) + n := sw.bytesWritten + releaseStatsWriter(sw) + + err := err1 + if err == nil { + err = err2 + } + return n, err +} + +type statsWriter struct { + w io.Writer + bytesWritten int64 +} + +func (w *statsWriter) Write(p []byte) (int, error) { + n, err := w.w.Write(p) + w.bytesWritten += int64(n) + return n, err +} + +func acquireStatsWriter(w io.Writer) *statsWriter { + v := statsWriterPool.Get() + if v == nil { + return &statsWriter{ + w: w, + } + } + sw := v.(*statsWriter) + sw.w = w + return sw +} + +func releaseStatsWriter(sw *statsWriter) { + sw.w = nil + sw.bytesWritten = 0 + statsWriterPool.Put(sw) +} + +var statsWriterPool sync.Pool + +func acquireBufioWriter(w io.Writer) *bufio.Writer { + v := bufioWriterPool.Get() + if v == nil { + return bufio.NewWriter(w) + } + bw := v.(*bufio.Writer) + bw.Reset(w) + return bw +} + +func releaseBufioWriter(bw *bufio.Writer) { + bufioWriterPool.Put(bw) +} + +var bufioWriterPool sync.Pool + +func (req *Request) onlyMultipartForm() bool { + return req.multipartForm != nil && (req.body == nil || len(req.body.B) == 0) +} + +// Write writes request to w. +// +// Write doesn't flush request to w for performance reasons. +// +// See also WriteTo. 
+func (req *Request) Write(w *bufio.Writer) error { + if len(req.Header.Host()) == 0 || req.parsedURI { + uri := req.URI() + host := uri.Host() + if len(host) == 0 { + return errRequestHostRequired + } + req.Header.SetHostBytes(host) + req.Header.SetRequestURIBytes(uri.RequestURI()) + + if len(uri.username) > 0 { + // RequestHeader.SetBytesKV only uses RequestHeader.bufKV.key + // So we are free to use RequestHeader.bufKV.value as a scratch pad for + // the base64 encoding. + nl := len(uri.username) + len(uri.password) + 1 + nb := nl + len(strBasicSpace) + tl := nb + base64.StdEncoding.EncodedLen(nl) + if tl > cap(req.Header.bufKV.value) { + req.Header.bufKV.value = make([]byte, 0, tl) + } + buf := req.Header.bufKV.value[:0] + buf = append(buf, uri.username...) + buf = append(buf, strColon...) + buf = append(buf, uri.password...) + buf = append(buf, strBasicSpace...) + base64.StdEncoding.Encode(buf[nb:tl], buf[:nl]) + req.Header.SetBytesKV(strAuthorization, buf[nl:tl]) + } + } + + if req.bodyStream != nil { + return req.writeBodyStream(w) + } + + body := req.bodyBytes() + var err error + if req.onlyMultipartForm() { + body, err = marshalMultipartForm(req.multipartForm, req.multipartFormBoundary) + if err != nil { + return fmt.Errorf("error when marshaling multipart form: %s", err) + } + req.Header.SetMultipartFormBoundary(req.multipartFormBoundary) + } + + hasBody := false + if len(body) == 0 { + body = req.postArgs.QueryString() + } + if len(body) != 0 || !req.Header.ignoreBody() { + hasBody = true + req.Header.SetContentLength(len(body)) + } + if err = req.Header.Write(w); err != nil { + return err + } + if hasBody { + _, err = w.Write(body) + } else if len(body) > 0 { + return fmt.Errorf("non-zero body for non-POST request. body=%q", body) + } + return err +} + +// WriteGzip writes response with gzipped body to w. +// +// The method gzips response body and sets 'Content-Encoding: gzip' +// header before writing response to w. +// +// WriteGzip doesn't flush response to w for performance reasons. +func (resp *Response) WriteGzip(w *bufio.Writer) error { + return resp.WriteGzipLevel(w, CompressDefaultCompression) +} + +// WriteGzipLevel writes response with gzipped body to w. +// +// Level is the desired compression level: +// +// * CompressNoCompression +// * CompressBestSpeed +// * CompressBestCompression +// * CompressDefaultCompression +// * CompressHuffmanOnly +// +// The method gzips response body and sets 'Content-Encoding: gzip' +// header before writing response to w. +// +// WriteGzipLevel doesn't flush response to w for performance reasons. +func (resp *Response) WriteGzipLevel(w *bufio.Writer, level int) error { + if err := resp.gzipBody(level); err != nil { + return err + } + return resp.Write(w) +} + +// WriteDeflate writes response with deflated body to w. +// +// The method deflates response body and sets 'Content-Encoding: deflate' +// header before writing response to w. +// +// WriteDeflate doesn't flush response to w for performance reasons. +func (resp *Response) WriteDeflate(w *bufio.Writer) error { + return resp.WriteDeflateLevel(w, CompressDefaultCompression) +} + +// WriteDeflateLevel writes response with deflated body to w. +// +// Level is the desired compression level: +// +// * CompressNoCompression +// * CompressBestSpeed +// * CompressBestCompression +// * CompressDefaultCompression +// * CompressHuffmanOnly +// +// The method deflates response body and sets 'Content-Encoding: deflate' +// header before writing response to w. 
+// +// WriteDeflateLevel doesn't flush response to w for performance reasons. +func (resp *Response) WriteDeflateLevel(w *bufio.Writer, level int) error { + if err := resp.deflateBody(level); err != nil { + return err + } + return resp.Write(w) +} + +func (resp *Response) gzipBody(level int) error { + if len(resp.Header.peek(strContentEncoding)) > 0 { + // It looks like the body is already compressed. + // Do not compress it again. + return nil + } + + if !resp.Header.isCompressibleContentType() { + // The content-type cannot be compressed. + return nil + } + + if resp.bodyStream != nil { + // Reset Content-Length to -1, since it is impossible + // to determine body size beforehand of streamed compression. + // For https://github.com/valyala/fasthttp/issues/176 . + resp.Header.SetContentLength(-1) + + // Do not care about memory allocations here, since gzip is slow + // and allocates a lot of memory by itself. + bs := resp.bodyStream + resp.bodyStream = NewStreamReader(func(sw *bufio.Writer) { + zw := acquireStacklessGzipWriter(sw, level) + fw := &flushWriter{ + wf: zw, + bw: sw, + } + copyZeroAlloc(fw, bs) //nolint:errcheck + releaseStacklessGzipWriter(zw, level) + if bsc, ok := bs.(io.Closer); ok { + bsc.Close() + } + }) + } else { + bodyBytes := resp.bodyBytes() + if len(bodyBytes) < minCompressLen { + // There is no sense in spending CPU time on small body compression, + // since there is a very high probability that the compressed + // body size will be bigger than the original body size. + return nil + } + w := responseBodyPool.Get() + w.B = AppendGzipBytesLevel(w.B, bodyBytes, level) + + // Hack: swap resp.body with w. + if resp.body != nil { + responseBodyPool.Put(resp.body) + } + resp.body = w + resp.bodyRaw = nil + } + resp.Header.SetCanonical(strContentEncoding, strGzip) + return nil +} + +func (resp *Response) deflateBody(level int) error { + if len(resp.Header.peek(strContentEncoding)) > 0 { + // It looks like the body is already compressed. + // Do not compress it again. + return nil + } + + if !resp.Header.isCompressibleContentType() { + // The content-type cannot be compressed. + return nil + } + + if resp.bodyStream != nil { + // Reset Content-Length to -1, since it is impossible + // to determine body size beforehand of streamed compression. + // For https://github.com/valyala/fasthttp/issues/176 . + resp.Header.SetContentLength(-1) + + // Do not care about memory allocations here, since flate is slow + // and allocates a lot of memory by itself. + bs := resp.bodyStream + resp.bodyStream = NewStreamReader(func(sw *bufio.Writer) { + zw := acquireStacklessDeflateWriter(sw, level) + fw := &flushWriter{ + wf: zw, + bw: sw, + } + copyZeroAlloc(fw, bs) //nolint:errcheck + releaseStacklessDeflateWriter(zw, level) + if bsc, ok := bs.(io.Closer); ok { + bsc.Close() + } + }) + } else { + bodyBytes := resp.bodyBytes() + if len(bodyBytes) < minCompressLen { + // There is no sense in spending CPU time on small body compression, + // since there is a very high probability that the compressed + // body size will be bigger than the original body size. + return nil + } + w := responseBodyPool.Get() + w.B = AppendDeflateBytesLevel(w.B, bodyBytes, level) + + // Hack: swap resp.body with w. 
+ if resp.body != nil { + responseBodyPool.Put(resp.body) + } + resp.body = w + resp.bodyRaw = nil + } + resp.Header.SetCanonical(strContentEncoding, strDeflate) + return nil +} + +// Bodies with sizes smaller than minCompressLen aren't compressed at all +const minCompressLen = 200 + +type writeFlusher interface { + io.Writer + Flush() error +} + +type flushWriter struct { + wf writeFlusher + bw *bufio.Writer +} + +func (w *flushWriter) Write(p []byte) (int, error) { + n, err := w.wf.Write(p) + if err != nil { + return 0, err + } + if err = w.wf.Flush(); err != nil { + return 0, err + } + if err = w.bw.Flush(); err != nil { + return 0, err + } + return n, nil +} + +// Write writes response to w. +// +// Write doesn't flush response to w for performance reasons. +// +// See also WriteTo. +func (resp *Response) Write(w *bufio.Writer) error { + sendBody := !resp.mustSkipBody() + + if resp.bodyStream != nil { + return resp.writeBodyStream(w, sendBody) + } + + body := resp.bodyBytes() + bodyLen := len(body) + if sendBody || bodyLen > 0 { + resp.Header.SetContentLength(bodyLen) + } + if err := resp.Header.Write(w); err != nil { + return err + } + if sendBody { + if _, err := w.Write(body); err != nil { + return err + } + } + return nil +} + +func (req *Request) writeBodyStream(w *bufio.Writer) error { + var err error + + contentLength := req.Header.ContentLength() + if contentLength < 0 { + lrSize := limitedReaderSize(req.bodyStream) + if lrSize >= 0 { + contentLength = int(lrSize) + if int64(contentLength) != lrSize { + contentLength = -1 + } + if contentLength >= 0 { + req.Header.SetContentLength(contentLength) + } + } + } + if contentLength >= 0 { + if err = req.Header.Write(w); err == nil { + err = writeBodyFixedSize(w, req.bodyStream, int64(contentLength)) + } + } else { + req.Header.SetContentLength(-1) + if err = req.Header.Write(w); err == nil { + err = writeBodyChunked(w, req.bodyStream) + } + } + err1 := req.closeBodyStream() + if err == nil { + err = err1 + } + return err +} + +// ErrBodyStreamWritePanic is returned when panic happens during writing body stream. 
+type ErrBodyStreamWritePanic struct { + error +} + +func (resp *Response) writeBodyStream(w *bufio.Writer, sendBody bool) (err error) { + defer func() { + if r := recover(); r != nil { + err = &ErrBodyStreamWritePanic{ + error: fmt.Errorf("panic while writing body stream: %+v", r), + } + } + }() + + contentLength := resp.Header.ContentLength() + if contentLength < 0 { + lrSize := limitedReaderSize(resp.bodyStream) + if lrSize >= 0 { + contentLength = int(lrSize) + if int64(contentLength) != lrSize { + contentLength = -1 + } + if contentLength >= 0 { + resp.Header.SetContentLength(contentLength) + } + } + } + if contentLength >= 0 { + if err = resp.Header.Write(w); err == nil && sendBody { + if resp.ImmediateHeaderFlush { + err = w.Flush() + } + if err == nil { + err = writeBodyFixedSize(w, resp.bodyStream, int64(contentLength)) + } + } + } else { + resp.Header.SetContentLength(-1) + if err = resp.Header.Write(w); err == nil && sendBody { + if resp.ImmediateHeaderFlush { + err = w.Flush() + } + if err == nil { + err = writeBodyChunked(w, resp.bodyStream) + } + } + } + err1 := resp.closeBodyStream() + if err == nil { + err = err1 + } + return err +} + +func (req *Request) closeBodyStream() error { + if req.bodyStream == nil { + return nil + } + var err error + if bsc, ok := req.bodyStream.(io.Closer); ok { + err = bsc.Close() + } + req.bodyStream = nil + return err +} + +func (resp *Response) closeBodyStream() error { + if resp.bodyStream == nil { + return nil + } + var err error + if bsc, ok := resp.bodyStream.(io.Closer); ok { + err = bsc.Close() + } + resp.bodyStream = nil + return err +} + +// String returns request representation. +// +// Returns error message instead of request representation on error. +// +// Use Write instead of String for performance-critical code. +func (req *Request) String() string { + return getHTTPString(req) +} + +// String returns response representation. +// +// Returns error message instead of response representation on error. +// +// Use Write instead of String for performance-critical code. +func (resp *Response) String() string { + return getHTTPString(resp) +} + +func getHTTPString(hw httpWriter) string { + w := bytebufferpool.Get() + bw := bufio.NewWriter(w) + if err := hw.Write(bw); err != nil { + return err.Error() + } + if err := bw.Flush(); err != nil { + return err.Error() + } + s := string(w.B) + bytebufferpool.Put(w) + return s +} + +type httpWriter interface { + Write(w *bufio.Writer) error +} + +func writeBodyChunked(w *bufio.Writer, r io.Reader) error { + vbuf := copyBufPool.Get() + buf := vbuf.([]byte) + + var err error + var n int + for { + n, err = r.Read(buf) + if n == 0 { + if err == nil { + panic("BUG: io.Reader returned 0, nil") + } + if err == io.EOF { + if err = writeChunk(w, buf[:0]); err != nil { + break + } + err = nil + } + break + } + if err = writeChunk(w, buf[:n]); err != nil { + break + } + } + + copyBufPool.Put(vbuf) + return err +} + +func limitedReaderSize(r io.Reader) int64 { + lr, ok := r.(*io.LimitedReader) + if !ok { + return -1 + } + return lr.N +} + +func writeBodyFixedSize(w *bufio.Writer, r io.Reader, size int64) error { + if size > maxSmallFileSize { + // w buffer must be empty for triggering + // sendfile path in bufio.Writer.ReadFrom. + if err := w.Flush(); err != nil { + return err + } + } + + // Unwrap a single limited reader for triggering sendfile path + // in net.TCPConn.ReadFrom. 
+ lr, ok := r.(*io.LimitedReader) + if ok { + r = lr.R + } + + n, err := copyZeroAlloc(w, r) + + if ok { + lr.N -= n + } + + if n != size && err == nil { + err = fmt.Errorf("copied %d bytes from body stream instead of %d bytes", n, size) + } + return err +} + +func copyZeroAlloc(w io.Writer, r io.Reader) (int64, error) { + vbuf := copyBufPool.Get() + buf := vbuf.([]byte) + n, err := io.CopyBuffer(w, r, buf) + copyBufPool.Put(vbuf) + return n, err +} + +var copyBufPool = sync.Pool{ + New: func() interface{} { + return make([]byte, 4096) + }, +} + +func writeChunk(w *bufio.Writer, b []byte) error { + n := len(b) + if err := writeHexInt(w, n); err != nil { + return err + } + if _, err := w.Write(strCRLF); err != nil { + return err + } + if _, err := w.Write(b); err != nil { + return err + } + _, err := w.Write(strCRLF) + err1 := w.Flush() + if err == nil { + err = err1 + } + return err +} + +// ErrBodyTooLarge is returned if either request or response body exceeds +// the given limit. +var ErrBodyTooLarge = errors.New("body size exceeds the given limit") + +func readBody(r *bufio.Reader, contentLength int, maxBodySize int, dst []byte) ([]byte, error) { + dst = dst[:0] + if contentLength >= 0 { + if maxBodySize > 0 && contentLength > maxBodySize { + return dst, ErrBodyTooLarge + } + return appendBodyFixedSize(r, dst, contentLength) + } + if contentLength == -1 { + return readBodyChunked(r, maxBodySize, dst) + } + return readBodyIdentity(r, maxBodySize, dst) +} + +func readBodyIdentity(r *bufio.Reader, maxBodySize int, dst []byte) ([]byte, error) { + dst = dst[:cap(dst)] + if len(dst) == 0 { + dst = make([]byte, 1024) + } + offset := 0 + for { + nn, err := r.Read(dst[offset:]) + if nn <= 0 { + if err != nil { + if err == io.EOF { + return dst[:offset], nil + } + return dst[:offset], err + } + panic(fmt.Sprintf("BUG: bufio.Read() returned (%d, nil)", nn)) + } + offset += nn + if maxBodySize > 0 && offset > maxBodySize { + return dst[:offset], ErrBodyTooLarge + } + if len(dst) == offset { + n := round2(2 * offset) + if maxBodySize > 0 && n > maxBodySize { + n = maxBodySize + 1 + } + b := make([]byte, n) + copy(b, dst) + dst = b + } + } +} + +func appendBodyFixedSize(r *bufio.Reader, dst []byte, n int) ([]byte, error) { + if n == 0 { + return dst, nil + } + + offset := len(dst) + dstLen := offset + n + if cap(dst) < dstLen { + b := make([]byte, round2(dstLen)) + copy(b, dst) + dst = b + } + dst = dst[:dstLen] + + for { + nn, err := r.Read(dst[offset:]) + if nn <= 0 { + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return dst[:offset], err + } + panic(fmt.Sprintf("BUG: bufio.Read() returned (%d, nil)", nn)) + } + offset += nn + if offset == dstLen { + return dst, nil + } + } +} + +// ErrBrokenChunk is returned when server receives a broken chunked body (Transfer-Encoding: chunked). 
+type ErrBrokenChunk struct { + error +} + +func readBodyChunked(r *bufio.Reader, maxBodySize int, dst []byte) ([]byte, error) { + if len(dst) > 0 { + panic("BUG: expected zero-length buffer") + } + + strCRLFLen := len(strCRLF) + for { + chunkSize, err := parseChunkSize(r) + if err != nil { + return dst, err + } + if maxBodySize > 0 && len(dst)+chunkSize > maxBodySize { + return dst, ErrBodyTooLarge + } + dst, err = appendBodyFixedSize(r, dst, chunkSize+strCRLFLen) + if err != nil { + return dst, err + } + if !bytes.Equal(dst[len(dst)-strCRLFLen:], strCRLF) { + return dst, ErrBrokenChunk{ + error: fmt.Errorf("cannot find crlf at the end of chunk"), + } + } + dst = dst[:len(dst)-strCRLFLen] + if chunkSize == 0 { + return dst, nil + } + } +} + +func parseChunkSize(r *bufio.Reader) (int, error) { + n, err := readHexInt(r) + if err != nil { + return -1, err + } + for { + c, err := r.ReadByte() + if err != nil { + return -1, ErrBrokenChunk{ + error: fmt.Errorf("cannot read '\r' char at the end of chunk size: %s", err), + } + } + // Skip any trailing whitespace after chunk size. + if c == ' ' { + continue + } + if c != '\r' { + return -1, ErrBrokenChunk{ + error: fmt.Errorf("unexpected char %q at the end of chunk size. Expected %q", c, '\r'), + } + } + break + } + c, err := r.ReadByte() + if err != nil { + return -1, ErrBrokenChunk{ + error: fmt.Errorf("cannot read '\n' char at the end of chunk size: %s", err), + } + } + if c != '\n' { + return -1, ErrBrokenChunk{ + error: fmt.Errorf("unexpected char %q at the end of chunk size. Expected %q", c, '\n'), + } + } + return n, nil +} + +func round2(n int) int { + if n <= 0 { + return 0 + } + n-- + x := uint(0) + for n > 0 { + n >>= 1 + x++ + } + return 1 << x +} diff --git a/vendor/github.com/valyala/fasthttp/lbclient.go b/vendor/github.com/valyala/fasthttp/lbclient.go new file mode 100644 index 000000000..46d14b75c --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/lbclient.go @@ -0,0 +1,165 @@ +package fasthttp + +import ( + "sync" + "sync/atomic" + "time" +) + +// BalancingClient is the interface for clients, which may be passed +// to LBClient.Clients. +type BalancingClient interface { + DoDeadline(req *Request, resp *Response, deadline time.Time) error + PendingRequests() int +} + +// LBClient balances requests among available LBClient.Clients. +// +// It has the following features: +// +// - Balances load among available clients using 'least loaded' + 'least total' +// hybrid technique. +// - Dynamically decreases load on unhealthy clients. +// +// It is forbidden copying LBClient instances. Create new instances instead. +// +// It is safe calling LBClient methods from concurrently running goroutines. +type LBClient struct { + noCopy noCopy //nolint:unused,structcheck + + // Clients must contain non-zero clients list. + // Incoming requests are balanced among these clients. + Clients []BalancingClient + + // HealthCheck is a callback called after each request. + // + // The request, response and the error returned by the client + // is passed to HealthCheck, so the callback may determine whether + // the client is healthy. + // + // Load on the current client is decreased if HealthCheck returns false. + // + // By default HealthCheck returns false if err != nil. + HealthCheck func(req *Request, resp *Response, err error) bool + + // Timeout is the request timeout used when calling LBClient.Do. + // + // DefaultLBClientTimeout is used by default. 
+ Timeout time.Duration + + cs []*lbClient + + once sync.Once +} + +// DefaultLBClientTimeout is the default request timeout used by LBClient +// when calling LBClient.Do. +// +// The timeout may be overridden via LBClient.Timeout. +const DefaultLBClientTimeout = time.Second + +// DoDeadline calls DoDeadline on the least loaded client +func (cc *LBClient) DoDeadline(req *Request, resp *Response, deadline time.Time) error { + return cc.get().DoDeadline(req, resp, deadline) +} + +// DoTimeout calculates deadline and calls DoDeadline on the least loaded client +func (cc *LBClient) DoTimeout(req *Request, resp *Response, timeout time.Duration) error { + deadline := time.Now().Add(timeout) + return cc.get().DoDeadline(req, resp, deadline) +} + +// Do calls calculates deadline using LBClient.Timeout and calls DoDeadline +// on the least loaded client. +func (cc *LBClient) Do(req *Request, resp *Response) error { + timeout := cc.Timeout + if timeout <= 0 { + timeout = DefaultLBClientTimeout + } + return cc.DoTimeout(req, resp, timeout) +} + +func (cc *LBClient) init() { + if len(cc.Clients) == 0 { + panic("BUG: LBClient.Clients cannot be empty") + } + for _, c := range cc.Clients { + cc.cs = append(cc.cs, &lbClient{ + c: c, + healthCheck: cc.HealthCheck, + }) + } +} + +func (cc *LBClient) get() *lbClient { + cc.once.Do(cc.init) + + cs := cc.cs + + minC := cs[0] + minN := minC.PendingRequests() + minT := atomic.LoadUint64(&minC.total) + for _, c := range cs[1:] { + n := c.PendingRequests() + t := atomic.LoadUint64(&c.total) + if n < minN || (n == minN && t < minT) { + minC = c + minN = n + minT = t + } + } + return minC +} + +type lbClient struct { + c BalancingClient + healthCheck func(req *Request, resp *Response, err error) bool + penalty uint32 + + // total amount of requests handled. + total uint64 +} + +func (c *lbClient) DoDeadline(req *Request, resp *Response, deadline time.Time) error { + err := c.c.DoDeadline(req, resp, deadline) + if !c.isHealthy(req, resp, err) && c.incPenalty() { + // Penalize the client returning error, so the next requests + // are routed to another clients. + time.AfterFunc(penaltyDuration, c.decPenalty) + } else { + atomic.AddUint64(&c.total, 1) + } + return err +} + +func (c *lbClient) PendingRequests() int { + n := c.c.PendingRequests() + m := atomic.LoadUint32(&c.penalty) + return n + int(m) +} + +func (c *lbClient) isHealthy(req *Request, resp *Response, err error) bool { + if c.healthCheck == nil { + return err == nil + } + return c.healthCheck(req, resp, err) +} + +func (c *lbClient) incPenalty() bool { + m := atomic.AddUint32(&c.penalty, 1) + if m > maxPenalty { + c.decPenalty() + return false + } + return true +} + +func (c *lbClient) decPenalty() { + atomic.AddUint32(&c.penalty, ^uint32(0)) +} + +const ( + maxPenalty = 300 + + penaltyDuration = 3 * time.Second +) diff --git a/vendor/github.com/valyala/fasthttp/methods.go b/vendor/github.com/valyala/fasthttp/methods.go new file mode 100644 index 000000000..a614584e0 --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/methods.go @@ -0,0 +1,14 @@ +package fasthttp + +// HTTP methods were copied from net/http. 
+const ( + MethodGet = "GET" // RFC 7231, 4.3.1 + MethodHead = "HEAD" // RFC 7231, 4.3.2 + MethodPost = "POST" // RFC 7231, 4.3.3 + MethodPut = "PUT" // RFC 7231, 4.3.4 + MethodPatch = "PATCH" // RFC 5789 + MethodDelete = "DELETE" // RFC 7231, 4.3.5 + MethodConnect = "CONNECT" // RFC 7231, 4.3.6 + MethodOptions = "OPTIONS" // RFC 7231, 4.3.7 + MethodTrace = "TRACE" // RFC 7231, 4.3.8 +) diff --git a/vendor/github.com/valyala/fasthttp/nocopy.go b/vendor/github.com/valyala/fasthttp/nocopy.go new file mode 100644 index 000000000..9664cb06b --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/nocopy.go @@ -0,0 +1,11 @@ +package fasthttp + +// Embed this type into a struct, which mustn't be copied, +// so `go vet` gives a warning if this struct is copied. +// +// See https://github.com/golang/go/issues/8005#issuecomment-190753527 for details. +// and also: https://stackoverflow.com/questions/52494458/nocopy-minimal-example +type noCopy struct{} //nolint:unused + +func (*noCopy) Lock() {} +func (*noCopy) Unlock() {} diff --git a/vendor/github.com/valyala/fasthttp/peripconn.go b/vendor/github.com/valyala/fasthttp/peripconn.go new file mode 100644 index 000000000..afd2a9270 --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/peripconn.go @@ -0,0 +1,100 @@ +package fasthttp + +import ( + "fmt" + "net" + "sync" +) + +type perIPConnCounter struct { + pool sync.Pool + lock sync.Mutex + m map[uint32]int +} + +func (cc *perIPConnCounter) Register(ip uint32) int { + cc.lock.Lock() + if cc.m == nil { + cc.m = make(map[uint32]int) + } + n := cc.m[ip] + 1 + cc.m[ip] = n + cc.lock.Unlock() + return n +} + +func (cc *perIPConnCounter) Unregister(ip uint32) { + cc.lock.Lock() + if cc.m == nil { + cc.lock.Unlock() + panic("BUG: perIPConnCounter.Register() wasn't called") + } + n := cc.m[ip] - 1 + if n < 0 { + cc.lock.Unlock() + panic(fmt.Sprintf("BUG: negative per-ip counter=%d for ip=%d", n, ip)) + } + cc.m[ip] = n + cc.lock.Unlock() +} + +type perIPConn struct { + net.Conn + + ip uint32 + perIPConnCounter *perIPConnCounter +} + +func acquirePerIPConn(conn net.Conn, ip uint32, counter *perIPConnCounter) *perIPConn { + v := counter.pool.Get() + if v == nil { + v = &perIPConn{ + perIPConnCounter: counter, + } + } + c := v.(*perIPConn) + c.Conn = conn + c.ip = ip + return c +} + +func releasePerIPConn(c *perIPConn) { + c.Conn = nil + c.perIPConnCounter.pool.Put(c) +} + +func (c *perIPConn) Close() error { + err := c.Conn.Close() + c.perIPConnCounter.Unregister(c.ip) + releasePerIPConn(c) + return err +} + +func getUint32IP(c net.Conn) uint32 { + return ip2uint32(getConnIP4(c)) +} + +func getConnIP4(c net.Conn) net.IP { + addr := c.RemoteAddr() + ipAddr, ok := addr.(*net.TCPAddr) + if !ok { + return net.IPv4zero + } + return ipAddr.IP.To4() +} + +func ip2uint32(ip net.IP) uint32 { + if len(ip) != 4 { + return 0 + } + return uint32(ip[0])<<24 | uint32(ip[1])<<16 | uint32(ip[2])<<8 | uint32(ip[3]) +} + +func uint322ip(ip uint32) net.IP { + b := make([]byte, 4) + b[0] = byte(ip >> 24) + b[1] = byte(ip >> 16) + b[2] = byte(ip >> 8) + b[3] = byte(ip) + return b +} diff --git a/vendor/github.com/valyala/fasthttp/server.go b/vendor/github.com/valyala/fasthttp/server.go new file mode 100644 index 000000000..7b017b24b --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/server.go @@ -0,0 +1,2587 @@ +package fasthttp + +import ( + "bufio" + "context" + "crypto/tls" + "errors" + "fmt" + "io" + "log" + "mime/multipart" + "net" + "os" + "strings" + "sync" + "sync/atomic" + "time" +) + +var errNoCertOrKeyProvided = 
errors.New("cert or key has not provided") + +var ( + // ErrAlreadyServing is returned when calling Serve on a Server + // that is already serving connections. + ErrAlreadyServing = errors.New("Server is already serving connections") +) + +// ServeConn serves HTTP requests from the given connection +// using the given handler. +// +// ServeConn returns nil if all requests from the c are successfully served. +// It returns non-nil error otherwise. +// +// Connection c must immediately propagate all the data passed to Write() +// to the client. Otherwise requests' processing may hang. +// +// ServeConn closes c before returning. +func ServeConn(c net.Conn, handler RequestHandler) error { + v := serverPool.Get() + if v == nil { + v = &Server{} + } + s := v.(*Server) + s.Handler = handler + err := s.ServeConn(c) + s.Handler = nil + serverPool.Put(v) + return err +} + +var serverPool sync.Pool + +// Serve serves incoming connections from the given listener +// using the given handler. +// +// Serve blocks until the given listener returns permanent error. +func Serve(ln net.Listener, handler RequestHandler) error { + s := &Server{ + Handler: handler, + } + return s.Serve(ln) +} + +// ServeTLS serves HTTPS requests from the given net.Listener +// using the given handler. +// +// certFile and keyFile are paths to TLS certificate and key files. +func ServeTLS(ln net.Listener, certFile, keyFile string, handler RequestHandler) error { + s := &Server{ + Handler: handler, + } + return s.ServeTLS(ln, certFile, keyFile) +} + +// ServeTLSEmbed serves HTTPS requests from the given net.Listener +// using the given handler. +// +// certData and keyData must contain valid TLS certificate and key data. +func ServeTLSEmbed(ln net.Listener, certData, keyData []byte, handler RequestHandler) error { + s := &Server{ + Handler: handler, + } + return s.ServeTLSEmbed(ln, certData, keyData) +} + +// ListenAndServe serves HTTP requests from the given TCP addr +// using the given handler. +func ListenAndServe(addr string, handler RequestHandler) error { + s := &Server{ + Handler: handler, + } + return s.ListenAndServe(addr) +} + +// ListenAndServeUNIX serves HTTP requests from the given UNIX addr +// using the given handler. +// +// The function deletes existing file at addr before starting serving. +// +// The server sets the given file mode for the UNIX addr. +func ListenAndServeUNIX(addr string, mode os.FileMode, handler RequestHandler) error { + s := &Server{ + Handler: handler, + } + return s.ListenAndServeUNIX(addr, mode) +} + +// ListenAndServeTLS serves HTTPS requests from the given TCP addr +// using the given handler. +// +// certFile and keyFile are paths to TLS certificate and key files. +func ListenAndServeTLS(addr, certFile, keyFile string, handler RequestHandler) error { + s := &Server{ + Handler: handler, + } + return s.ListenAndServeTLS(addr, certFile, keyFile) +} + +// ListenAndServeTLSEmbed serves HTTPS requests from the given TCP addr +// using the given handler. +// +// certData and keyData must contain valid TLS certificate and key data. +func ListenAndServeTLSEmbed(addr string, certData, keyData []byte, handler RequestHandler) error { + s := &Server{ + Handler: handler, + } + return s.ListenAndServeTLSEmbed(addr, certData, keyData) +} + +// RequestHandler must process incoming requests. +// +// RequestHandler must call ctx.TimeoutError() before returning +// if it keeps references to ctx and/or its' members after the return. 
+// Consider wrapping RequestHandler into TimeoutHandler if response time +// must be limited. +type RequestHandler func(ctx *RequestCtx) + +// ServeHandler must process tls.Config.NextProto negotiated requests. +type ServeHandler func(c net.Conn) error + +// Server implements HTTP server. +// +// Default Server settings should satisfy the majority of Server users. +// Adjust Server settings only if you really understand the consequences. +// +// It is forbidden copying Server instances. Create new Server instances +// instead. +// +// It is safe to call Server methods from concurrently running goroutines. +type Server struct { + noCopy noCopy //nolint:unused,structcheck + + // Handler for processing incoming requests. + // + // Take into account that no `panic` recovery is done by `fasthttp` (thus any `panic` will take down the entire server). + // Instead the user should use `recover` to handle these situations. + Handler RequestHandler + + // ErrorHandler for returning a response in case of an error while receiving or parsing the request. + // + // The following is a non-exhaustive list of errors that can be expected as argument: + // * io.EOF + // * io.ErrUnexpectedEOF + // * ErrGetOnly + // * ErrSmallBuffer + // * ErrBodyTooLarge + // * ErrBrokenChunks + ErrorHandler func(ctx *RequestCtx, err error) + + // HeaderReceived is called after receiving the header + // + // non zero RequestConfig field values will overwrite the default configs + HeaderReceived func(header *RequestHeader) RequestConfig + + // Server name for sending in response headers. + // + // Default server name is used if left blank. + Name string + + // The maximum number of concurrent connections the server may serve. + // + // DefaultConcurrency is used if not set. + Concurrency int + + // Whether to disable keep-alive connections. + // + // The server will close all the incoming connections after sending + // the first response to client if this option is set to true. + // + // By default keep-alive connections are enabled. + DisableKeepalive bool + + // Per-connection buffer size for requests' reading. + // This also limits the maximum header size. + // + // Increase this buffer if your clients send multi-KB RequestURIs + // and/or multi-KB headers (for example, BIG cookies). + // + // Default buffer size is used if not set. + ReadBufferSize int + + // Per-connection buffer size for responses' writing. + // + // Default buffer size is used if not set. + WriteBufferSize int + + // ReadTimeout is the amount of time allowed to read + // the full request including body. The connection's read + // deadline is reset when the connection opens, or for + // keep-alive connections after the first byte has been read. + // + // By default request read timeout is unlimited. + ReadTimeout time.Duration + + // WriteTimeout is the maximum duration before timing out + // writes of the response. It is reset after the request handler + // has returned. + // + // By default response write timeout is unlimited. + WriteTimeout time.Duration + + // IdleTimeout is the maximum amount of time to wait for the + // next request when keep-alive is enabled. If IdleTimeout + // is zero, the value of ReadTimeout is used. + IdleTimeout time.Duration + + // Maximum number of concurrent client connections allowed per IP. + // + // By default unlimited number of concurrent connections + // may be established to the server from a single IP address. + MaxConnsPerIP int + + // Maximum number of requests served per connection. 
+ // + // The server closes connection after the last request. + // 'Connection: close' header is added to the last response. + // + // By default unlimited number of requests may be served per connection. + MaxRequestsPerConn int + + // MaxKeepaliveDuration is a no-op and only left here for backwards compatibility. + // Deprecated: Use IdleTimeout instead. + MaxKeepaliveDuration time.Duration + + // Whether to enable tcp keep-alive connections. + // + // Whether the operating system should send tcp keep-alive messages on the tcp connection. + // + // By default tcp keep-alive connections are disabled. + TCPKeepalive bool + + // Period between tcp keep-alive messages. + // + // TCP keep-alive period is determined by operation system by default. + TCPKeepalivePeriod time.Duration + + // Maximum request body size. + // + // The server rejects requests with bodies exceeding this limit. + // + // Request body size is limited by DefaultMaxRequestBodySize by default. + MaxRequestBodySize int + + // Aggressively reduces memory usage at the cost of higher CPU usage + // if set to true. + // + // Try enabling this option only if the server consumes too much memory + // serving mostly idle keep-alive connections. This may reduce memory + // usage by more than 50%. + // + // Aggressive memory usage reduction is disabled by default. + ReduceMemoryUsage bool + + // Rejects all non-GET requests if set to true. + // + // This option is useful as anti-DoS protection for servers + // accepting only GET requests. The request size is limited + // by ReadBufferSize if GetOnly is set. + // + // Server accepts all the requests by default. + GetOnly bool + + // Logs all errors, including the most frequent + // 'connection reset by peer', 'broken pipe' and 'connection timeout' + // errors. Such errors are common in production serving real-world + // clients. + // + // By default the most frequent errors such as + // 'connection reset by peer', 'broken pipe' and 'connection timeout' + // are suppressed in order to limit output log traffic. + LogAllErrors bool + + // Header names are passed as-is without normalization + // if this option is set. + // + // Disabled header names' normalization may be useful only for proxying + // incoming requests to other servers expecting case-sensitive + // header names. See https://github.com/valyala/fasthttp/issues/57 + // for details. + // + // By default request and response header names are normalized, i.e. + // The first letter and the first letters following dashes + // are uppercased, while all the other letters are lowercased. + // Examples: + // + // * HOST -> Host + // * content-type -> Content-Type + // * cONTENT-lenGTH -> Content-Length + DisableHeaderNamesNormalizing bool + + // SleepWhenConcurrencyLimitsExceeded is a duration to be slept of if + // the concurrency limit in exceeded (default [when is 0]: don't sleep + // and accept new connections immidiatelly). + SleepWhenConcurrencyLimitsExceeded time.Duration + + // NoDefaultServerHeader, when set to true, causes the default Server header + // to be excluded from the Response. + // + // The default Server header value is the value of the Name field or an + // internal default value in its absence. With this option set to true, + // the only time a Server header will be sent is if a non-zero length + // value is explicitly provided during a request. + NoDefaultServerHeader bool + + // NoDefaultContentType, when set to true, causes the default Content-Type + // header to be excluded from the Response. 
+ // + // The default Content-Type header value is the internal default value. When + // set to true, the Content-Type will not be present. + NoDefaultContentType bool + + // ConnState specifies an optional callback function that is + // called when a client connection changes state. See the + // ConnState type and associated constants for details. + ConnState func(net.Conn, ConnState) + + // Logger, which is used by RequestCtx.Logger(). + // + // By default standard logger from log package is used. + Logger Logger + + // KeepHijackedConns is an opt-in disable of connection + // close by fasthttp after connections' HijackHandler returns. + // This allows to save goroutines, e.g. when fasthttp used to upgrade + // http connections to WS and connection goes to another handler, + // which will close it when needed. + KeepHijackedConns bool + + tlsConfig *tls.Config + nextProtos map[string]ServeHandler + + concurrency uint32 + concurrencyCh chan struct{} + perIPConnCounter perIPConnCounter + serverName atomic.Value + + ctxPool sync.Pool + readerPool sync.Pool + writerPool sync.Pool + hijackConnPool sync.Pool + + // We need to know our listeners so we can close them in Shutdown(). + ln []net.Listener + + mu sync.Mutex + open int32 + stop int32 + done chan struct{} +} + +// TimeoutHandler creates RequestHandler, which returns StatusRequestTimeout +// error with the given msg to the client if h didn't return during +// the given duration. +// +// The returned handler may return StatusTooManyRequests error with the given +// msg to the client if there are more than Server.Concurrency concurrent +// handlers h are running at the moment. +func TimeoutHandler(h RequestHandler, timeout time.Duration, msg string) RequestHandler { + return TimeoutWithCodeHandler(h, timeout, msg, StatusRequestTimeout) +} + +// TimeoutWithCodeHandler creates RequestHandler, which returns an error with +// the given msg and status code to the client if h didn't return during +// the given duration. +// +// The returned handler may return StatusTooManyRequests error with the given +// msg to the client if there are more than Server.Concurrency concurrent +// handlers h are running at the moment. +func TimeoutWithCodeHandler(h RequestHandler, timeout time.Duration, msg string, statusCode int) RequestHandler { + if timeout <= 0 { + return h + } + + return func(ctx *RequestCtx) { + concurrencyCh := ctx.s.concurrencyCh + select { + case concurrencyCh <- struct{}{}: + default: + ctx.Error(msg, StatusTooManyRequests) + return + } + + ch := ctx.timeoutCh + if ch == nil { + ch = make(chan struct{}, 1) + ctx.timeoutCh = ch + } + go func() { + h(ctx) + ch <- struct{}{} + <-concurrencyCh + }() + ctx.timeoutTimer = initTimer(ctx.timeoutTimer, timeout) + select { + case <-ch: + case <-ctx.timeoutTimer.C: + ctx.TimeoutErrorWithCode(msg, statusCode) + } + stopTimer(ctx.timeoutTimer) + } +} + +//RequestConfig configure the per request deadline and body limits +type RequestConfig struct { + // ReadTimeout is the maximum duration for reading the entire + // request body. + // a zero value means that default values will be honored + ReadTimeout time.Duration + // WriteTimeout is the maximum duration before timing out + // writes of the response. + // a zero value means that default values will be honored + WriteTimeout time.Duration + // Maximum request body size. 
+ // a zero value means that default values will be honored + MaxRequestBodySize int +} + +// CompressHandler returns RequestHandler that transparently compresses +// response body generated by h if the request contains 'gzip' or 'deflate' +// 'Accept-Encoding' header. +func CompressHandler(h RequestHandler) RequestHandler { + return CompressHandlerLevel(h, CompressDefaultCompression) +} + +// CompressHandlerLevel returns RequestHandler that transparently compresses +// response body generated by h if the request contains 'gzip' or 'deflate' +// 'Accept-Encoding' header. +// +// Level is the desired compression level: +// +// * CompressNoCompression +// * CompressBestSpeed +// * CompressBestCompression +// * CompressDefaultCompression +// * CompressHuffmanOnly +func CompressHandlerLevel(h RequestHandler, level int) RequestHandler { + return func(ctx *RequestCtx) { + h(ctx) + if ctx.Request.Header.HasAcceptEncodingBytes(strGzip) { + ctx.Response.gzipBody(level) //nolint:errcheck + } else if ctx.Request.Header.HasAcceptEncodingBytes(strDeflate) { + ctx.Response.deflateBody(level) //nolint:errcheck + } + } +} + +// RequestCtx contains incoming request and manages outgoing response. +// +// It is forbidden copying RequestCtx instances. +// +// RequestHandler should avoid holding references to incoming RequestCtx and/or +// its' members after the return. +// If holding RequestCtx references after the return is unavoidable +// (for instance, ctx is passed to a separate goroutine and ctx lifetime cannot +// be controlled), then the RequestHandler MUST call ctx.TimeoutError() +// before return. +// +// It is unsafe modifying/reading RequestCtx instance from concurrently +// running goroutines. The only exception is TimeoutError*, which may be called +// while other goroutines accessing RequestCtx. +type RequestCtx struct { + noCopy noCopy //nolint:unused,structcheck + + // Incoming request. + // + // Copying Request by value is forbidden. Use pointer to Request instead. + Request Request + + // Outgoing response. + // + // Copying Response by value is forbidden. Use pointer to Response instead. + Response Response + + userValues userData + + connID uint64 + connRequestNum uint64 + connTime time.Time + + time time.Time + + logger ctxLogger + s *Server + c net.Conn + fbr firstByteReader + + timeoutResponse *Response + timeoutCh chan struct{} + timeoutTimer *time.Timer + + hijackHandler HijackHandler + hijackNoResponse bool +} + +// HijackHandler must process the hijacked connection c. +// +// If KeepHijackedConns is disabled, which is by default, +// the connection c is automatically closed after returning from HijackHandler. +// +// The connection c must not be used after returning from the handler, if KeepHijackedConns is disabled. +// +// When KeepHijackedConns enabled, fasthttp will not Close() the connection, +// you must do it when you need it. You must not use c in any way after calling Close(). +type HijackHandler func(c net.Conn) + +// Hijack registers the given handler for connection hijacking. +// +// The handler is called after returning from RequestHandler +// and sending http response. The current connection is passed +// to the handler. The connection is automatically closed after +// returning from the handler. +// +// The server skips calling the handler in the following cases: +// +// * 'Connection: close' header exists in either request or response. +// * Unexpected error during response writing to the connection. 
+// +// The server stops processing requests from hijacked connections. +// +// Server limits such as Concurrency, ReadTimeout, WriteTimeout, etc. +// aren't applied to hijacked connections. +// +// The handler must not retain references to ctx members. +// +// Arbitrary 'Connection: Upgrade' protocols may be implemented +// with HijackHandler. For instance, +// +// * WebSocket ( https://en.wikipedia.org/wiki/WebSocket ) +// * HTTP/2.0 ( https://en.wikipedia.org/wiki/HTTP/2 ) +// +func (ctx *RequestCtx) Hijack(handler HijackHandler) { + ctx.hijackHandler = handler +} + +// HijackSetNoResponse changes the behavior of hijacking a request. +// If HijackSetNoResponse is called with false fasthttp will send a response +// to the client before calling the HijackHandler (default). If HijackSetNoResponse +// is called with true no response is send back before calling the +// HijackHandler supplied in the Hijack function. +func (ctx *RequestCtx) HijackSetNoResponse(noResponse bool) { + ctx.hijackNoResponse = noResponse +} + +// Hijacked returns true after Hijack is called. +func (ctx *RequestCtx) Hijacked() bool { + return ctx.hijackHandler != nil +} + +// SetUserValue stores the given value (arbitrary object) +// under the given key in ctx. +// +// The value stored in ctx may be obtained by UserValue*. +// +// This functionality may be useful for passing arbitrary values between +// functions involved in request processing. +// +// All the values are removed from ctx after returning from the top +// RequestHandler. Additionally, Close method is called on each value +// implementing io.Closer before removing the value from ctx. +func (ctx *RequestCtx) SetUserValue(key string, value interface{}) { + ctx.userValues.Set(key, value) +} + +// SetUserValueBytes stores the given value (arbitrary object) +// under the given key in ctx. +// +// The value stored in ctx may be obtained by UserValue*. +// +// This functionality may be useful for passing arbitrary values between +// functions involved in request processing. +// +// All the values stored in ctx are deleted after returning from RequestHandler. +func (ctx *RequestCtx) SetUserValueBytes(key []byte, value interface{}) { + ctx.userValues.SetBytes(key, value) +} + +// UserValue returns the value stored via SetUserValue* under the given key. +func (ctx *RequestCtx) UserValue(key string) interface{} { + return ctx.userValues.Get(key) +} + +// UserValueBytes returns the value stored via SetUserValue* +// under the given key. +func (ctx *RequestCtx) UserValueBytes(key []byte) interface{} { + return ctx.userValues.GetBytes(key) +} + +// VisitUserValues calls visitor for each existing userValue. +// +// visitor must not retain references to key and value after returning. +// Make key and/or value copies if you need storing them after returning. +func (ctx *RequestCtx) VisitUserValues(visitor func([]byte, interface{})) { + for i, n := 0, len(ctx.userValues); i < n; i++ { + kv := &ctx.userValues[i] + visitor(kv.key, kv.value) + } +} + +type connTLSer interface { + Handshake() error + ConnectionState() tls.ConnectionState +} + +// IsTLS returns true if the underlying connection is tls.Conn. +// +// tls.Conn is an encrypted connection (aka SSL, HTTPS). 
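+//
+// An illustrative usage sketch, not part of the package itself (the handler
+// below is a placeholder):
+//
+//	func secureOnly(ctx *fasthttp.RequestCtx) {
+//		if !ctx.IsTLS() {
+//			ctx.Error("TLS is required", fasthttp.StatusBadRequest)
+//			return
+//		}
+//		if state := ctx.TLSConnectionState(); state != nil {
+//			ctx.Logger().Printf("TLS version: %x", state.Version)
+//		}
+//		ctx.SuccessString("text/plain", "hello over TLS")
+//	}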
+func (ctx *RequestCtx) IsTLS() bool { + // cast to (connTLSer) instead of (*tls.Conn), since it catches + // cases with overridden tls.Conn such as: + // + // type customConn struct { + // *tls.Conn + // + // // other custom fields here + // } + _, ok := ctx.c.(connTLSer) + return ok +} + +// TLSConnectionState returns TLS connection state. +// +// The function returns nil if the underlying connection isn't tls.Conn. +// +// The returned state may be used for verifying TLS version, client certificates, +// etc. +func (ctx *RequestCtx) TLSConnectionState() *tls.ConnectionState { + tlsConn, ok := ctx.c.(connTLSer) + if !ok { + return nil + } + state := tlsConn.ConnectionState() + return &state +} + +// Conn returns a reference to the underlying net.Conn. +// +// WARNING: Only use this method if you know what you are doing! +// +// Reading from or writing to the returned connection will end badly! +func (ctx *RequestCtx) Conn() net.Conn { + return ctx.c +} + +type firstByteReader struct { + c net.Conn + ch byte + byteRead bool +} + +func (r *firstByteReader) Read(b []byte) (int, error) { + if len(b) == 0 { + return 0, nil + } + nn := 0 + if !r.byteRead { + b[0] = r.ch + b = b[1:] + r.byteRead = true + nn = 1 + } + n, err := r.c.Read(b) + return n + nn, err +} + +// Logger is used for logging formatted messages. +type Logger interface { + // Printf must have the same semantics as log.Printf. + Printf(format string, args ...interface{}) +} + +var ctxLoggerLock sync.Mutex + +type ctxLogger struct { + ctx *RequestCtx + logger Logger +} + +func (cl *ctxLogger) Printf(format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + ctxLoggerLock.Lock() + cl.logger.Printf("%.3f %s - %s", time.Since(cl.ctx.ConnTime()).Seconds(), cl.ctx.String(), msg) + ctxLoggerLock.Unlock() +} + +var zeroTCPAddr = &net.TCPAddr{ + IP: net.IPv4zero, +} + +// String returns unique string representation of the ctx. +// +// The returned value may be useful for logging. +func (ctx *RequestCtx) String() string { + return fmt.Sprintf("#%016X - %s<->%s - %s %s", ctx.ID(), ctx.LocalAddr(), ctx.RemoteAddr(), ctx.Request.Header.Method(), ctx.URI().FullURI()) +} + +// ID returns unique ID of the request. +func (ctx *RequestCtx) ID() uint64 { + return (ctx.connID << 32) | ctx.connRequestNum +} + +// ConnID returns unique connection ID. +// +// This ID may be used to match distinct requests to the same incoming +// connection. +func (ctx *RequestCtx) ConnID() uint64 { + return ctx.connID +} + +// Time returns RequestHandler call time. +func (ctx *RequestCtx) Time() time.Time { + return ctx.time +} + +// ConnTime returns the time the server started serving the connection +// the current request came from. +func (ctx *RequestCtx) ConnTime() time.Time { + return ctx.connTime +} + +// ConnRequestNum returns request sequence number +// for the current connection. +// +// Sequence starts with 1. +func (ctx *RequestCtx) ConnRequestNum() uint64 { + return ctx.connRequestNum +} + +// SetConnectionClose sets 'Connection: close' response header and closes +// connection after the RequestHandler returns. +func (ctx *RequestCtx) SetConnectionClose() { + ctx.Response.SetConnectionClose() +} + +// SetStatusCode sets response status code. +func (ctx *RequestCtx) SetStatusCode(statusCode int) { + ctx.Response.SetStatusCode(statusCode) +} + +// SetContentType sets response Content-Type. 
+func (ctx *RequestCtx) SetContentType(contentType string) { + ctx.Response.Header.SetContentType(contentType) +} + +// SetContentTypeBytes sets response Content-Type. +// +// It is safe modifying contentType buffer after function return. +func (ctx *RequestCtx) SetContentTypeBytes(contentType []byte) { + ctx.Response.Header.SetContentTypeBytes(contentType) +} + +// RequestURI returns RequestURI. +// +// This uri is valid until returning from RequestHandler. +func (ctx *RequestCtx) RequestURI() []byte { + return ctx.Request.Header.RequestURI() +} + +// URI returns requested uri. +// +// The uri is valid until returning from RequestHandler. +func (ctx *RequestCtx) URI() *URI { + return ctx.Request.URI() +} + +// Referer returns request referer. +// +// The referer is valid until returning from RequestHandler. +func (ctx *RequestCtx) Referer() []byte { + return ctx.Request.Header.Referer() +} + +// UserAgent returns User-Agent header value from the request. +func (ctx *RequestCtx) UserAgent() []byte { + return ctx.Request.Header.UserAgent() +} + +// Path returns requested path. +// +// The path is valid until returning from RequestHandler. +func (ctx *RequestCtx) Path() []byte { + return ctx.URI().Path() +} + +// Host returns requested host. +// +// The host is valid until returning from RequestHandler. +func (ctx *RequestCtx) Host() []byte { + return ctx.URI().Host() +} + +// QueryArgs returns query arguments from RequestURI. +// +// It doesn't return POST'ed arguments - use PostArgs() for this. +// +// Returned arguments are valid until returning from RequestHandler. +// +// See also PostArgs, FormValue and FormFile. +func (ctx *RequestCtx) QueryArgs() *Args { + return ctx.URI().QueryArgs() +} + +// PostArgs returns POST arguments. +// +// It doesn't return query arguments from RequestURI - use QueryArgs for this. +// +// Returned arguments are valid until returning from RequestHandler. +// +// See also QueryArgs, FormValue and FormFile. +func (ctx *RequestCtx) PostArgs() *Args { + return ctx.Request.PostArgs() +} + +// MultipartForm returns requests's multipart form. +// +// Returns ErrNoMultipartForm if request's content-type +// isn't 'multipart/form-data'. +// +// All uploaded temporary files are automatically deleted after +// returning from RequestHandler. Either move or copy uploaded files +// into new place if you want retaining them. +// +// Use SaveMultipartFile function for permanently saving uploaded file. +// +// The returned form is valid until returning from RequestHandler. +// +// See also FormFile and FormValue. +func (ctx *RequestCtx) MultipartForm() (*multipart.Form, error) { + return ctx.Request.MultipartForm() +} + +// FormFile returns uploaded file associated with the given multipart form key. +// +// The file is automatically deleted after returning from RequestHandler, +// so either move or copy uploaded file into new place if you want retaining it. +// +// Use SaveMultipartFile function for permanently saving uploaded file. +// +// The returned file header is valid until returning from RequestHandler. +func (ctx *RequestCtx) FormFile(key string) (*multipart.FileHeader, error) { + mf, err := ctx.MultipartForm() + if err != nil { + return nil, err + } + if mf.File == nil { + return nil, err + } + fhh := mf.File[key] + if fhh == nil { + return nil, ErrMissingFile + } + return fhh[0], nil +} + +// ErrMissingFile may be returned from FormFile when the is no uploaded file +// associated with the given multipart form key. 
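+//
+// An illustrative upload-handling sketch for FormFile and SaveMultipartFile
+// (the form field name and the target directory are placeholders; sanitize
+// fh.Filename before using it in a real path):
+//
+//	func uploadHandler(ctx *fasthttp.RequestCtx) {
+//		fh, err := ctx.FormFile("file")
+//		if err != nil {
+//			ctx.Error(err.Error(), fasthttp.StatusBadRequest)
+//			return
+//		}
+//		if err := fasthttp.SaveMultipartFile(fh, "/tmp/uploads/"+fh.Filename); err != nil {
+//			ctx.Error(err.Error(), fasthttp.StatusInternalServerError)
+//			return
+//		}
+//		ctx.SetStatusCode(fasthttp.StatusCreated)
+//	}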
+var ErrMissingFile = errors.New("there is no uploaded file associated with the given key") + +// SaveMultipartFile saves multipart file fh under the given filename path. +func SaveMultipartFile(fh *multipart.FileHeader, path string) error { + f, err := fh.Open() + if err != nil { + return err + } + + if ff, ok := f.(*os.File); ok { + // Windows can't rename files that are opened. + if err := f.Close(); err != nil { + return err + } + + // If renaming fails we try the normal copying method. + // Renaming could fail if the files are on different devices. + if os.Rename(ff.Name(), path) == nil { + return nil + } + + // Reopen f for the code below. + f, err = fh.Open() + if err != nil { + return err + } + } + + defer f.Close() + + ff, err := os.Create(path) + if err != nil { + return err + } + defer ff.Close() + _, err = copyZeroAlloc(ff, f) + return err +} + +// FormValue returns form value associated with the given key. +// +// The value is searched in the following places: +// +// * Query string. +// * POST or PUT body. +// +// There are more fine-grained methods for obtaining form values: +// +// * QueryArgs for obtaining values from query string. +// * PostArgs for obtaining values from POST or PUT body. +// * MultipartForm for obtaining values from multipart form. +// * FormFile for obtaining uploaded files. +// +// The returned value is valid until returning from RequestHandler. +func (ctx *RequestCtx) FormValue(key string) []byte { + v := ctx.QueryArgs().Peek(key) + if len(v) > 0 { + return v + } + v = ctx.PostArgs().Peek(key) + if len(v) > 0 { + return v + } + mf, err := ctx.MultipartForm() + if err == nil && mf.Value != nil { + vv := mf.Value[key] + if len(vv) > 0 { + return []byte(vv[0]) + } + } + return nil +} + +// IsGet returns true if request method is GET. +func (ctx *RequestCtx) IsGet() bool { + return ctx.Request.Header.IsGet() +} + +// IsPost returns true if request method is POST. +func (ctx *RequestCtx) IsPost() bool { + return ctx.Request.Header.IsPost() +} + +// IsPut returns true if request method is PUT. +func (ctx *RequestCtx) IsPut() bool { + return ctx.Request.Header.IsPut() +} + +// IsDelete returns true if request method is DELETE. +func (ctx *RequestCtx) IsDelete() bool { + return ctx.Request.Header.IsDelete() +} + +// IsConnect returns true if request method is CONNECT. +func (ctx *RequestCtx) IsConnect() bool { + return ctx.Request.Header.IsConnect() +} + +// IsOptions returns true if request method is OPTIONS. +func (ctx *RequestCtx) IsOptions() bool { + return ctx.Request.Header.IsOptions() +} + +// IsTrace returns true if request method is TRACE. +func (ctx *RequestCtx) IsTrace() bool { + return ctx.Request.Header.IsTrace() +} + +// IsPatch returns true if request method is PATCH. +func (ctx *RequestCtx) IsPatch() bool { + return ctx.Request.Header.IsPatch() +} + +// Method return request method. +// +// Returned value is valid until returning from RequestHandler. +func (ctx *RequestCtx) Method() []byte { + return ctx.Request.Header.Method() +} + +// IsHead returns true if request method is HEAD. +func (ctx *RequestCtx) IsHead() bool { + return ctx.Request.Header.IsHead() +} + +// RemoteAddr returns client address for the given request. +// +// Always returns non-nil result. +func (ctx *RequestCtx) RemoteAddr() net.Addr { + if ctx.c == nil { + return zeroTCPAddr + } + addr := ctx.c.RemoteAddr() + if addr == nil { + return zeroTCPAddr + } + return addr +} + +// LocalAddr returns server address for the given request. +// +// Always returns non-nil result. 
+func (ctx *RequestCtx) LocalAddr() net.Addr { + if ctx.c == nil { + return zeroTCPAddr + } + addr := ctx.c.LocalAddr() + if addr == nil { + return zeroTCPAddr + } + return addr +} + +// RemoteIP returns the client ip the request came from. +// +// Always returns non-nil result. +func (ctx *RequestCtx) RemoteIP() net.IP { + return addrToIP(ctx.RemoteAddr()) +} + +// LocalIP returns the server ip the request came to. +// +// Always returns non-nil result. +func (ctx *RequestCtx) LocalIP() net.IP { + return addrToIP(ctx.LocalAddr()) +} + +func addrToIP(addr net.Addr) net.IP { + x, ok := addr.(*net.TCPAddr) + if !ok { + return net.IPv4zero + } + return x.IP +} + +// Error sets response status code to the given value and sets response body +// to the given message. +// +// Warning: this will reset the response headers and body already set! +func (ctx *RequestCtx) Error(msg string, statusCode int) { + ctx.Response.Reset() + ctx.SetStatusCode(statusCode) + ctx.SetContentTypeBytes(defaultContentType) + ctx.SetBodyString(msg) +} + +// Success sets response Content-Type and body to the given values. +func (ctx *RequestCtx) Success(contentType string, body []byte) { + ctx.SetContentType(contentType) + ctx.SetBody(body) +} + +// SuccessString sets response Content-Type and body to the given values. +func (ctx *RequestCtx) SuccessString(contentType, body string) { + ctx.SetContentType(contentType) + ctx.SetBodyString(body) +} + +// Redirect sets 'Location: uri' response header and sets the given statusCode. +// +// statusCode must have one of the following values: +// +// * StatusMovedPermanently (301) +// * StatusFound (302) +// * StatusSeeOther (303) +// * StatusTemporaryRedirect (307) +// * StatusPermanentRedirect (308) +// +// All other statusCode values are replaced by StatusFound (302). +// +// The redirect uri may be either absolute or relative to the current +// request uri. Fasthttp will always send an absolute uri back to the client. +// To send a relative uri you can use the following code: +// +// strLocation = []byte("Location") // Put this with your top level var () declarations. +// ctx.Response.Header.SetCanonical(strLocation, "/relative?uri") +// ctx.Response.SetStatusCode(fasthttp.StatusMovedPermanently) +// +func (ctx *RequestCtx) Redirect(uri string, statusCode int) { + u := AcquireURI() + ctx.URI().CopyTo(u) + u.Update(uri) + ctx.redirect(u.FullURI(), statusCode) + ReleaseURI(u) +} + +// RedirectBytes sets 'Location: uri' response header and sets +// the given statusCode. +// +// statusCode must have one of the following values: +// +// * StatusMovedPermanently (301) +// * StatusFound (302) +// * StatusSeeOther (303) +// * StatusTemporaryRedirect (307) +// * StatusPermanentRedirect (308) +// +// All other statusCode values are replaced by StatusFound (302). +// +// The redirect uri may be either absolute or relative to the current +// request uri. Fasthttp will always send an absolute uri back to the client. +// To send a relative uri you can use the following code: +// +// strLocation = []byte("Location") // Put this with your top level var () declarations. 
+// ctx.Response.Header.SetCanonical(strLocation, "/relative?uri") +// ctx.Response.SetStatusCode(fasthttp.StatusMovedPermanently) +// +func (ctx *RequestCtx) RedirectBytes(uri []byte, statusCode int) { + s := b2s(uri) + ctx.Redirect(s, statusCode) +} + +func (ctx *RequestCtx) redirect(uri []byte, statusCode int) { + ctx.Response.Header.SetCanonical(strLocation, uri) + statusCode = getRedirectStatusCode(statusCode) + ctx.Response.SetStatusCode(statusCode) +} + +func getRedirectStatusCode(statusCode int) int { + if statusCode == StatusMovedPermanently || statusCode == StatusFound || + statusCode == StatusSeeOther || statusCode == StatusTemporaryRedirect || + statusCode == StatusPermanentRedirect { + return statusCode + } + return StatusFound +} + +// SetBody sets response body to the given value. +// +// It is safe re-using body argument after the function returns. +func (ctx *RequestCtx) SetBody(body []byte) { + ctx.Response.SetBody(body) +} + +// SetBodyString sets response body to the given value. +func (ctx *RequestCtx) SetBodyString(body string) { + ctx.Response.SetBodyString(body) +} + +// ResetBody resets response body contents. +func (ctx *RequestCtx) ResetBody() { + ctx.Response.ResetBody() +} + +// SendFile sends local file contents from the given path as response body. +// +// This is a shortcut to ServeFile(ctx, path). +// +// SendFile logs all the errors via ctx.Logger. +// +// See also ServeFile, FSHandler and FS. +func (ctx *RequestCtx) SendFile(path string) { + ServeFile(ctx, path) +} + +// SendFileBytes sends local file contents from the given path as response body. +// +// This is a shortcut to ServeFileBytes(ctx, path). +// +// SendFileBytes logs all the errors via ctx.Logger. +// +// See also ServeFileBytes, FSHandler and FS. +func (ctx *RequestCtx) SendFileBytes(path []byte) { + ServeFileBytes(ctx, path) +} + +// IfModifiedSince returns true if lastModified exceeds 'If-Modified-Since' +// value from the request header. +// +// The function returns true also 'If-Modified-Since' request header is missing. +func (ctx *RequestCtx) IfModifiedSince(lastModified time.Time) bool { + ifModStr := ctx.Request.Header.peek(strIfModifiedSince) + if len(ifModStr) == 0 { + return true + } + ifMod, err := ParseHTTPDate(ifModStr) + if err != nil { + return true + } + lastModified = lastModified.Truncate(time.Second) + return ifMod.Before(lastModified) +} + +// NotModified resets response and sets '304 Not Modified' response status code. +func (ctx *RequestCtx) NotModified() { + ctx.Response.Reset() + ctx.SetStatusCode(StatusNotModified) +} + +// NotFound resets response and sets '404 Not Found' response status code. +func (ctx *RequestCtx) NotFound() { + ctx.Response.Reset() + ctx.SetStatusCode(StatusNotFound) + ctx.SetBodyString("404 Page not found") +} + +// Write writes p into response body. +func (ctx *RequestCtx) Write(p []byte) (int, error) { + ctx.Response.AppendBody(p) + return len(p), nil +} + +// WriteString appends s to response body. +func (ctx *RequestCtx) WriteString(s string) (int, error) { + ctx.Response.AppendBodyString(s) + return len(s), nil +} + +// PostBody returns POST request body. +// +// The returned value is valid until RequestHandler return. +func (ctx *RequestCtx) PostBody() []byte { + return ctx.Request.Body() +} + +// SetBodyStream sets response body stream and, optionally body size. +// +// bodyStream.Close() is called after finishing reading all body data +// if it implements io.Closer. 
+// +// If bodySize is >= 0, then bodySize bytes must be provided by bodyStream +// before returning io.EOF. +// +// If bodySize < 0, then bodyStream is read until io.EOF. +// +// See also SetBodyStreamWriter. +func (ctx *RequestCtx) SetBodyStream(bodyStream io.Reader, bodySize int) { + ctx.Response.SetBodyStream(bodyStream, bodySize) +} + +// SetBodyStreamWriter registers the given stream writer for populating +// response body. +// +// Access to RequestCtx and/or its' members is forbidden from sw. +// +// This function may be used in the following cases: +// +// * if response body is too big (more than 10MB). +// * if response body is streamed from slow external sources. +// * if response body must be streamed to the client in chunks. +// (aka `http server push`). +func (ctx *RequestCtx) SetBodyStreamWriter(sw StreamWriter) { + ctx.Response.SetBodyStreamWriter(sw) +} + +// IsBodyStream returns true if response body is set via SetBodyStream*. +func (ctx *RequestCtx) IsBodyStream() bool { + return ctx.Response.IsBodyStream() +} + +// Logger returns logger, which may be used for logging arbitrary +// request-specific messages inside RequestHandler. +// +// Each message logged via returned logger contains request-specific information +// such as request id, request duration, local address, remote address, +// request method and request url. +// +// It is safe re-using returned logger for logging multiple messages +// for the current request. +// +// The returned logger is valid until returning from RequestHandler. +func (ctx *RequestCtx) Logger() Logger { + if ctx.logger.ctx == nil { + ctx.logger.ctx = ctx + } + if ctx.logger.logger == nil { + ctx.logger.logger = ctx.s.logger() + } + return &ctx.logger +} + +// TimeoutError sets response status code to StatusRequestTimeout and sets +// body to the given msg. +// +// All response modifications after TimeoutError call are ignored. +// +// TimeoutError MUST be called before returning from RequestHandler if there are +// references to ctx and/or its members in other goroutines remain. +// +// Usage of this function is discouraged. Prefer eliminating ctx references +// from pending goroutines instead of using this function. +func (ctx *RequestCtx) TimeoutError(msg string) { + ctx.TimeoutErrorWithCode(msg, StatusRequestTimeout) +} + +// TimeoutErrorWithCode sets response body to msg and response status +// code to statusCode. +// +// All response modifications after TimeoutErrorWithCode call are ignored. +// +// TimeoutErrorWithCode MUST be called before returning from RequestHandler +// if there are references to ctx and/or its members in other goroutines remain. +// +// Usage of this function is discouraged. Prefer eliminating ctx references +// from pending goroutines instead of using this function. +func (ctx *RequestCtx) TimeoutErrorWithCode(msg string, statusCode int) { + var resp Response + resp.SetStatusCode(statusCode) + resp.SetBodyString(msg) + ctx.TimeoutErrorWithResponse(&resp) +} + +// TimeoutErrorWithResponse marks the ctx as timed out and sends the given +// response to the client. +// +// All ctx modifications after TimeoutErrorWithResponse call are ignored. +// +// TimeoutErrorWithResponse MUST be called before returning from RequestHandler +// if there are references to ctx and/or its members in other goroutines remain. +// +// Usage of this function is discouraged. Prefer eliminating ctx references +// from pending goroutines instead of using this function. 
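+//
+// An illustrative sketch of the case the TimeoutError* helpers exist for:
+// a handler that hands ctx to a background goroutine must mark the ctx as
+// timed out before returning (doSlowWork below is a placeholder):
+//
+//	func asyncHandler(ctx *fasthttp.RequestCtx) {
+//		go func() {
+//			// ctx is still referenced after asyncHandler returns.
+//			doSlowWork(ctx)
+//		}()
+//		ctx.TimeoutError("the request is processed in background")
+//	}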
+func (ctx *RequestCtx) TimeoutErrorWithResponse(resp *Response) { + respCopy := &Response{} + resp.CopyTo(respCopy) + ctx.timeoutResponse = respCopy +} + +// NextProto adds nph to be processed when key is negotiated when TLS +// connection is established. +// +// This function can only be called before the server is started. +func (s *Server) NextProto(key string, nph ServeHandler) { + if s.nextProtos == nil { + s.nextProtos = make(map[string]ServeHandler) + } + s.configTLS() + s.tlsConfig.NextProtos = append(s.tlsConfig.NextProtos, key) + s.nextProtos[key] = nph +} + +func (s *Server) getNextProto(c net.Conn) (proto string, err error) { + if tlsConn, ok := c.(connTLSer); ok { + err = tlsConn.Handshake() + if err == nil { + proto = tlsConn.ConnectionState().NegotiatedProtocol + } + } + return +} + +// tcpKeepAliveListener sets TCP keep-alive timeouts on accepted +// connections. It's used by ListenAndServe, ListenAndServeTLS and +// ListenAndServeTLSEmbed so dead TCP connections (e.g. closing laptop mid-download) +// eventually go away. +type tcpKeepaliveListener struct { + *net.TCPListener + keepalivePeriod time.Duration +} + +func (ln tcpKeepaliveListener) Accept() (net.Conn, error) { + tc, err := ln.AcceptTCP() + if err != nil { + return nil, err + } + if err := tc.SetKeepAlive(true); err != nil { + tc.Close() //nolint:errcheck + return nil, err + } + if ln.keepalivePeriod > 0 { + if err := tc.SetKeepAlivePeriod(ln.keepalivePeriod); err != nil { + tc.Close() //nolint:errcheck + return nil, err + } + } + return tc, nil +} + +// ListenAndServe serves HTTP requests from the given TCP4 addr. +// +// Pass custom listener to Serve if you need listening on non-TCP4 media +// such as IPv6. +// +// Accepted connections are configured to enable TCP keep-alives. +func (s *Server) ListenAndServe(addr string) error { + ln, err := net.Listen("tcp4", addr) + if err != nil { + return err + } + if s.TCPKeepalive { + if tcpln, ok := ln.(*net.TCPListener); ok { + return s.Serve(tcpKeepaliveListener{ + TCPListener: tcpln, + keepalivePeriod: s.TCPKeepalivePeriod, + }) + } + } + return s.Serve(ln) +} + +// ListenAndServeUNIX serves HTTP requests from the given UNIX addr. +// +// The function deletes existing file at addr before starting serving. +// +// The server sets the given file mode for the UNIX addr. +func (s *Server) ListenAndServeUNIX(addr string, mode os.FileMode) error { + if err := os.Remove(addr); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("unexpected error when trying to remove unix socket file %q: %s", addr, err) + } + ln, err := net.Listen("unix", addr) + if err != nil { + return err + } + if err = os.Chmod(addr, mode); err != nil { + return fmt.Errorf("cannot chmod %#o for %q: %s", mode, addr, err) + } + return s.Serve(ln) +} + +// ListenAndServeTLS serves HTTPS requests from the given TCP4 addr. +// +// certFile and keyFile are paths to TLS certificate and key files. +// +// Pass custom listener to Serve if you need listening on non-TCP4 media +// such as IPv6. +// +// If the certFile or keyFile has not been provided to the server structure, +// the function will use the previously added TLS configuration. +// +// Accepted connections are configured to enable TCP keep-alives. 
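+//
+// An illustrative usage sketch (requestHandler, the address and the file
+// paths are placeholders):
+//
+//	s := &fasthttp.Server{
+//		Handler: requestHandler,
+//	}
+//	if err := s.ListenAndServeTLS(":8443", "/etc/ssl/example.crt", "/etc/ssl/example.key"); err != nil {
+//		log.Fatalf("error in ListenAndServeTLS: %s", err)
+//	}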
+func (s *Server) ListenAndServeTLS(addr, certFile, keyFile string) error { + ln, err := net.Listen("tcp4", addr) + if err != nil { + return err + } + if s.TCPKeepalive { + if tcpln, ok := ln.(*net.TCPListener); ok { + return s.ServeTLS(tcpKeepaliveListener{ + TCPListener: tcpln, + keepalivePeriod: s.TCPKeepalivePeriod, + }, certFile, keyFile) + } + } + return s.ServeTLS(ln, certFile, keyFile) +} + +// ListenAndServeTLSEmbed serves HTTPS requests from the given TCP4 addr. +// +// certData and keyData must contain valid TLS certificate and key data. +// +// Pass custom listener to Serve if you need listening on arbitrary media +// such as IPv6. +// +// If the certFile or keyFile has not been provided the server structure, +// the function will use previously added TLS configuration. +// +// Accepted connections are configured to enable TCP keep-alives. +func (s *Server) ListenAndServeTLSEmbed(addr string, certData, keyData []byte) error { + ln, err := net.Listen("tcp4", addr) + if err != nil { + return err + } + if s.TCPKeepalive { + if tcpln, ok := ln.(*net.TCPListener); ok { + return s.ServeTLSEmbed(tcpKeepaliveListener{ + TCPListener: tcpln, + keepalivePeriod: s.TCPKeepalivePeriod, + }, certData, keyData) + } + } + return s.ServeTLSEmbed(ln, certData, keyData) +} + +// ServeTLS serves HTTPS requests from the given listener. +// +// certFile and keyFile are paths to TLS certificate and key files. +// +// If the certFile or keyFile has not been provided the server structure, +// the function will use previously added TLS configuration. +func (s *Server) ServeTLS(ln net.Listener, certFile, keyFile string) error { + err := s.AppendCert(certFile, keyFile) + if err != nil && err != errNoCertOrKeyProvided { + return err + } + if s.tlsConfig == nil { + return errNoCertOrKeyProvided + } + s.tlsConfig.BuildNameToCertificate() + + return s.Serve( + tls.NewListener(ln, s.tlsConfig), + ) +} + +// ServeTLSEmbed serves HTTPS requests from the given listener. +// +// certData and keyData must contain valid TLS certificate and key data. +// +// If the certFile or keyFile has not been provided the server structure, +// the function will use previously added TLS configuration. +func (s *Server) ServeTLSEmbed(ln net.Listener, certData, keyData []byte) error { + err := s.AppendCertEmbed(certData, keyData) + if err != nil && err != errNoCertOrKeyProvided { + return err + } + if s.tlsConfig == nil { + return errNoCertOrKeyProvided + } + s.tlsConfig.BuildNameToCertificate() + + return s.Serve( + tls.NewListener(ln, s.tlsConfig), + ) +} + +// AppendCert appends certificate and keyfile to TLS Configuration. +// +// This function allows programmer to handle multiple domains +// in one server structure. See examples/multidomain +func (s *Server) AppendCert(certFile, keyFile string) error { + if len(certFile) == 0 && len(keyFile) == 0 { + return errNoCertOrKeyProvided + } + + cert, err := tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + return fmt.Errorf("cannot load TLS key pair from certFile=%q and keyFile=%q: %s", certFile, keyFile, err) + } + + s.configTLS() + + s.tlsConfig.Certificates = append(s.tlsConfig.Certificates, cert) + return nil +} + +// AppendCertEmbed does the same as AppendCert but using in-memory data. 
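+//
+// An illustrative multi-domain sketch for AppendCert (requestHandler, ln and
+// the file names are placeholders): certificates for several domains may be
+// appended before serving, and ServeTLS with empty certFile/keyFile reuses
+// the previously added configuration:
+//
+//	s := &fasthttp.Server{Handler: requestHandler}
+//	if err := s.AppendCert("example.com.crt", "example.com.key"); err != nil {
+//		log.Fatal(err)
+//	}
+//	if err := s.AppendCert("example.org.crt", "example.org.key"); err != nil {
+//		log.Fatal(err)
+//	}
+//	log.Fatal(s.ServeTLS(ln, "", ""))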
+func (s *Server) AppendCertEmbed(certData, keyData []byte) error { + if len(certData) == 0 && len(keyData) == 0 { + return errNoCertOrKeyProvided + } + + cert, err := tls.X509KeyPair(certData, keyData) + if err != nil { + return fmt.Errorf("cannot load TLS key pair from the provided certData(%d) and keyData(%d): %s", + len(certData), len(keyData), err) + } + + s.configTLS() + + s.tlsConfig.Certificates = append(s.tlsConfig.Certificates, cert) + return nil +} + +func (s *Server) configTLS() { + if s.tlsConfig == nil { + s.tlsConfig = &tls.Config{ + PreferServerCipherSuites: true, + } + } +} + +// DefaultConcurrency is the maximum number of concurrent connections +// the Server may serve by default (i.e. if Server.Concurrency isn't set). +const DefaultConcurrency = 256 * 1024 + +// Serve serves incoming connections from the given listener. +// +// Serve blocks until the given listener returns permanent error. +func (s *Server) Serve(ln net.Listener) error { + var lastOverflowErrorTime time.Time + var lastPerIPErrorTime time.Time + var c net.Conn + var err error + + maxWorkersCount := s.getConcurrency() + + s.mu.Lock() + { + s.ln = append(s.ln, ln) + if s.done == nil { + s.done = make(chan struct{}) + } + + if s.concurrencyCh == nil { + s.concurrencyCh = make(chan struct{}, maxWorkersCount) + } + } + s.mu.Unlock() + + wp := &workerPool{ + WorkerFunc: s.serveConn, + MaxWorkersCount: maxWorkersCount, + LogAllErrors: s.LogAllErrors, + Logger: s.logger(), + connState: s.setState, + } + wp.Start() + + // Count our waiting to accept a connection as an open connection. + // This way we can't get into any weird state where just after accepting + // a connection Shutdown is called which reads open as 0 because it isn't + // incremented yet. + atomic.AddInt32(&s.open, 1) + defer atomic.AddInt32(&s.open, -1) + + for { + if c, err = acceptConn(s, ln, &lastPerIPErrorTime); err != nil { + wp.Stop() + if err == io.EOF { + return nil + } + return err + } + s.setState(c, StateNew) + atomic.AddInt32(&s.open, 1) + if !wp.Serve(c) { + atomic.AddInt32(&s.open, -1) + s.writeFastError(c, StatusServiceUnavailable, + "The connection cannot be served because Server.Concurrency limit exceeded") + c.Close() + s.setState(c, StateClosed) + if time.Since(lastOverflowErrorTime) > time.Minute { + s.logger().Printf("The incoming connection cannot be served, because %d concurrent connections are served. "+ + "Try increasing Server.Concurrency", maxWorkersCount) + lastOverflowErrorTime = time.Now() + } + + // The current server reached concurrency limit, + // so give other concurrently running servers a chance + // accepting incoming connections on the same address. + // + // There is a hope other servers didn't reach their + // concurrency limits yet :) + // + // See also: https://github.com/valyala/fasthttp/pull/485#discussion_r239994990 + if s.SleepWhenConcurrencyLimitsExceeded > 0 { + time.Sleep(s.SleepWhenConcurrencyLimitsExceeded) + } + } + c = nil + } +} + +// Shutdown gracefully shuts down the server without interrupting any active connections. +// Shutdown works by first closing all open listeners and then waiting indefinitely for all connections to return to idle and then shut down. +// +// When Shutdown is called, Serve, ListenAndServe, and ListenAndServeTLS immediately return nil. +// Make sure the program doesn't exit and waits instead for Shutdown to return. +// +// Shutdown does not close keepalive connections so its recommended to set ReadTimeout to something else than 0. 
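+//
+// An illustrative graceful-shutdown sketch (requestHandler, the address and
+// the signal handling details are placeholders):
+//
+//	s := &fasthttp.Server{
+//		Handler:     requestHandler,
+//		ReadTimeout: 5 * time.Second, // so idle keep-alive connections drain
+//	}
+//	go func() {
+//		if err := s.ListenAndServe(":8080"); err != nil {
+//			log.Fatalf("error in ListenAndServe: %s", err)
+//		}
+//	}()
+//	stop := make(chan os.Signal, 1)
+//	signal.Notify(stop, os.Interrupt)
+//	<-stop
+//	if err := s.Shutdown(); err != nil {
+//		log.Fatalf("error in Shutdown: %s", err)
+//	}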
+func (s *Server) Shutdown() error { + s.mu.Lock() + defer s.mu.Unlock() + + atomic.StoreInt32(&s.stop, 1) + defer atomic.StoreInt32(&s.stop, 0) + + if s.ln == nil { + return nil + } + + for _, ln := range s.ln { + if err := ln.Close(); err != nil { + return err + } + } + + if s.done != nil { + close(s.done) + } + + // Closing the listener will make Serve() call Stop on the worker pool. + // Setting .stop to 1 will make serveConn() break out of its loop. + // Now we just have to wait until all workers are done. + for { + if open := atomic.LoadInt32(&s.open); open == 0 { + break + } + // This is not an optimal solution but using a sync.WaitGroup + // here causes data races as it's hard to prevent Add() to be called + // while Wait() is waiting. + time.Sleep(time.Millisecond * 100) + } + + s.done = nil + s.ln = nil + return nil +} + +func acceptConn(s *Server, ln net.Listener, lastPerIPErrorTime *time.Time) (net.Conn, error) { + for { + c, err := ln.Accept() + if err != nil { + if c != nil { + panic("BUG: net.Listener returned non-nil conn and non-nil error") + } + if netErr, ok := err.(net.Error); ok && netErr.Temporary() { + s.logger().Printf("Temporary error when accepting new connections: %s", netErr) + time.Sleep(time.Second) + continue + } + if err != io.EOF && !strings.Contains(err.Error(), "use of closed network connection") { + s.logger().Printf("Permanent error when accepting new connections: %s", err) + return nil, err + } + return nil, io.EOF + } + if c == nil { + panic("BUG: net.Listener returned (nil, nil)") + } + if s.MaxConnsPerIP > 0 { + pic := wrapPerIPConn(s, c) + if pic == nil { + if time.Since(*lastPerIPErrorTime) > time.Minute { + s.logger().Printf("The number of connections from %s exceeds MaxConnsPerIP=%d", + getConnIP4(c), s.MaxConnsPerIP) + *lastPerIPErrorTime = time.Now() + } + continue + } + c = pic + } + return c, nil + } +} + +func wrapPerIPConn(s *Server, c net.Conn) net.Conn { + ip := getUint32IP(c) + if ip == 0 { + return c + } + n := s.perIPConnCounter.Register(ip) + if n > s.MaxConnsPerIP { + s.perIPConnCounter.Unregister(ip) + s.writeFastError(c, StatusTooManyRequests, "The number of connections from your ip exceeds MaxConnsPerIP") + c.Close() + return nil + } + return acquirePerIPConn(c, ip, &s.perIPConnCounter) +} + +var defaultLogger = Logger(log.New(os.Stderr, "", log.LstdFlags)) + +func (s *Server) logger() Logger { + if s.Logger != nil { + return s.Logger + } + return defaultLogger +} + +var ( + // ErrPerIPConnLimit may be returned from ServeConn if the number of connections + // per ip exceeds Server.MaxConnsPerIP. + ErrPerIPConnLimit = errors.New("too many connections per ip") + + // ErrConcurrencyLimit may be returned from ServeConn if the number + // of concurrently served connections exceeds Server.Concurrency. + ErrConcurrencyLimit = errors.New("cannot serve the connection because Server.Concurrency concurrent connections are served") +) + +// ServeConn serves HTTP requests from the given connection. +// +// ServeConn returns nil if all requests from the c are successfully served. +// It returns non-nil error otherwise. +// +// Connection c must immediately propagate all the data passed to Write() +// to the client. Otherwise requests' processing may hang. +// +// ServeConn closes c before returning. 
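+//
+// An illustrative sketch of serving connections from a custom accept loop
+// (s is the *Server, ln is a placeholder net.Listener):
+//
+//	for {
+//		c, err := ln.Accept()
+//		if err != nil {
+//			break
+//		}
+//		go func(c net.Conn) {
+//			if err := s.ServeConn(c); err != nil {
+//				log.Printf("error while serving the connection: %s", err)
+//			}
+//		}(c)
+//	}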
+func (s *Server) ServeConn(c net.Conn) error { + if s.MaxConnsPerIP > 0 { + pic := wrapPerIPConn(s, c) + if pic == nil { + return ErrPerIPConnLimit + } + c = pic + } + + n := atomic.AddUint32(&s.concurrency, 1) + if n > uint32(s.getConcurrency()) { + atomic.AddUint32(&s.concurrency, ^uint32(0)) + s.writeFastError(c, StatusServiceUnavailable, "The connection cannot be served because Server.Concurrency limit exceeded") + c.Close() + return ErrConcurrencyLimit + } + + atomic.AddInt32(&s.open, 1) + + err := s.serveConn(c) + + atomic.AddUint32(&s.concurrency, ^uint32(0)) + + if err != errHijacked { + err1 := c.Close() + s.setState(c, StateClosed) + if err == nil { + err = err1 + } + } else { + err = nil + s.setState(c, StateHijacked) + } + return err +} + +var errHijacked = errors.New("connection has been hijacked") + +// GetCurrentConcurrency returns a number of currently served +// connections. +// +// This function is intended be used by monitoring systems +func (s *Server) GetCurrentConcurrency() uint32 { + return atomic.LoadUint32(&s.concurrency) +} + +// GetOpenConnectionsCount returns a number of opened connections. +// +// This function is intended be used by monitoring systems +func (s *Server) GetOpenConnectionsCount() int32 { + return atomic.LoadInt32(&s.open) - 1 +} + +func (s *Server) getConcurrency() int { + n := s.Concurrency + if n <= 0 { + n = DefaultConcurrency + } + return n +} + +var globalConnID uint64 + +func nextConnID() uint64 { + return atomic.AddUint64(&globalConnID, 1) +} + +// DefaultMaxRequestBodySize is the maximum request body size the server +// reads by default. +// +// See Server.MaxRequestBodySize for details. +const DefaultMaxRequestBodySize = 4 * 1024 * 1024 + +func (s *Server) idleTimeout() time.Duration { + if s.IdleTimeout != 0 { + return s.IdleTimeout + } + return s.ReadTimeout +} + +func (s *Server) serveConn(c net.Conn) (err error) { + defer atomic.AddInt32(&s.open, -1) + + var proto string + if proto, err = s.getNextProto(c); err != nil { + return + } + if handler, ok := s.nextProtos[proto]; ok { + return handler(c) + } + + var serverName []byte + if !s.NoDefaultServerHeader { + serverName = s.getServerName() + } + connRequestNum := uint64(0) + connID := nextConnID() + connTime := time.Now() + maxRequestBodySize := s.MaxRequestBodySize + if maxRequestBodySize <= 0 { + maxRequestBodySize = DefaultMaxRequestBodySize + } + writeTimeout := s.WriteTimeout + + ctx := s.acquireCtx(c) + ctx.connTime = connTime + isTLS := ctx.IsTLS() + var ( + br *bufio.Reader + bw *bufio.Writer + + timeoutResponse *Response + hijackHandler HijackHandler + hijackNoResponse bool + + connectionClose bool + isHTTP11 bool + + reqReset bool + ) + for { + connRequestNum++ + + // If this is a keep-alive connection set the idle timeout. + if connRequestNum > 1 { + if d := s.idleTimeout(); d > 0 { + if err := c.SetReadDeadline(time.Now().Add(d)); err != nil { + panic(fmt.Sprintf("BUG: error in SetReadDeadline(%s): %s", d, err)) + } + } + } + + if !s.ReduceMemoryUsage || br != nil { + if br == nil { + br = acquireReader(ctx) + } + + // If this is a keep-alive connection we want to try and read the first bytes + // within the idle time. + if connRequestNum > 1 { + var b []byte + b, err = br.Peek(4) + if len(b) == 0 { + // If reading from a keep-alive connection returns nothing it means + // the connection was closed (either timeout or from the other side). 
+ if err != io.EOF { + err = errNothingRead{err} + } + } + } + } else { + // If this is a keep-alive connection acquireByteReader will try to peek + // a couple of bytes already so the idle timeout will already be used. + br, err = acquireByteReader(&ctx) + } + + ctx.Request.isTLS = isTLS + ctx.Response.Header.noDefaultContentType = s.NoDefaultContentType + + if err == nil { + if s.ReadTimeout > 0 { + if err := c.SetReadDeadline(time.Now().Add(s.ReadTimeout)); err != nil { + panic(fmt.Sprintf("BUG: error in SetReadDeadline(%s): %s", s.ReadTimeout, err)) + } + } + if s.DisableHeaderNamesNormalizing { + ctx.Request.Header.DisableNormalizing() + ctx.Response.Header.DisableNormalizing() + } + // reading Headers + if err = ctx.Request.Header.Read(br); err == nil { + if onHdrRecv := s.HeaderReceived; onHdrRecv != nil { + reqConf := onHdrRecv(&ctx.Request.Header) + if reqConf.ReadTimeout > 0 { + deadline := time.Now().Add(reqConf.ReadTimeout) + if err := c.SetReadDeadline(deadline); err != nil { + panic(fmt.Sprintf("BUG: error in SetReadDeadline(%s): %s", deadline, err)) + } + } + if reqConf.MaxRequestBodySize > 0 { + maxRequestBodySize = reqConf.MaxRequestBodySize + } + if reqConf.WriteTimeout > 0 { + writeTimeout = reqConf.WriteTimeout + } + } + //read body + err = ctx.Request.readLimitBody(br, maxRequestBodySize, s.GetOnly) + } + if err == nil { + // If we read any bytes off the wire, we're active. + s.setState(c, StateActive) + } + + if (s.ReduceMemoryUsage && br.Buffered() == 0) || err != nil { + releaseReader(s, br) + br = nil + } + } + + if err != nil { + if err == io.EOF { + err = nil + } else if nr, ok := err.(errNothingRead); ok { + if connRequestNum > 1 { + // This is not the first request and we haven't read a single byte + // of a new request yet. This means it's just a keep-alive connection + // closing down either because the remote closed it or because + // or a read timeout on our side. Either way just close the connection + // and don't return any error response. + err = nil + } else { + err = nr.error + } + } + + if err != nil { + bw = s.writeErrorResponse(bw, ctx, serverName, err) + } + break + } + + // 'Expect: 100-continue' request handling. + // See http://www.w3.org/Protocols/rfc2616/rfc2616-sec8.html for details. + if ctx.Request.MayContinue() { + // Send 'HTTP/1.1 100 Continue' response. + if bw == nil { + bw = acquireWriter(ctx) + } + _, err = bw.Write(strResponseContinue) + if err != nil { + break + } + err = bw.Flush() + if err != nil { + break + } + if s.ReduceMemoryUsage { + releaseWriter(s, bw) + bw = nil + } + + // Read request body. + if br == nil { + br = acquireReader(ctx) + } + err = ctx.Request.ContinueReadBody(br, maxRequestBodySize) + if (s.ReduceMemoryUsage && br.Buffered() == 0) || err != nil { + releaseReader(s, br) + br = nil + } + if err != nil { + bw = s.writeErrorResponse(bw, ctx, serverName, err) + break + } + } + + connectionClose = s.DisableKeepalive || ctx.Request.Header.ConnectionClose() + isHTTP11 = ctx.Request.Header.IsHTTP11() + + if serverName != nil { + ctx.Response.Header.SetServerBytes(serverName) + } + ctx.connID = connID + ctx.connRequestNum = connRequestNum + ctx.time = time.Now() + s.Handler(ctx) + + timeoutResponse = ctx.timeoutResponse + if timeoutResponse != nil { + ctx = s.acquireCtx(c) + timeoutResponse.CopyTo(&ctx.Response) + if br != nil { + // Close connection, since br may be attached to the old ctx via ctx.fbr. 
+ ctx.SetConnectionClose() + } + } + + if !ctx.IsGet() && ctx.IsHead() { + ctx.Response.SkipBody = true + } + reqReset = true + ctx.Request.Reset() + + hijackHandler = ctx.hijackHandler + ctx.hijackHandler = nil + hijackNoResponse = ctx.hijackNoResponse + ctx.hijackNoResponse = false + + ctx.userValues.Reset() + + if s.MaxRequestsPerConn > 0 && connRequestNum >= uint64(s.MaxRequestsPerConn) { + ctx.SetConnectionClose() + } + + if writeTimeout > 0 { + if err := c.SetWriteDeadline(time.Now().Add(writeTimeout)); err != nil { + panic(fmt.Sprintf("BUG: error in SetWriteDeadline(%s): %s", s.WriteTimeout, err)) + } + } + + connectionClose = connectionClose || ctx.Response.ConnectionClose() + if connectionClose { + ctx.Response.Header.SetCanonical(strConnection, strClose) + } else if !isHTTP11 { + // Set 'Connection: keep-alive' response header for non-HTTP/1.1 request. + // There is no need in setting this header for http/1.1, since in http/1.1 + // connections are keep-alive by default. + ctx.Response.Header.SetCanonical(strConnection, strKeepAlive) + } + + if serverName != nil && len(ctx.Response.Header.Server()) == 0 { + ctx.Response.Header.SetServerBytes(serverName) + } + + if !hijackNoResponse { + if bw == nil { + bw = acquireWriter(ctx) + } + if err = writeResponse(ctx, bw); err != nil { + break + } + + // Only flush the writer if we don't have another request in the pipeline. + // This is a big of an ugly optimization for https://www.techempower.com/benchmarks/ + // This benchmark will send 16 pipelined requests. It is faster to pack as many responses + // in a TCP packet and send it back at once than waiting for a flush every request. + // In real world circumstances this behaviour could be argued as being wrong. + if br == nil || br.Buffered() == 0 || connectionClose { + err = bw.Flush() + if err != nil { + break + } + } + if connectionClose { + break + } + if s.ReduceMemoryUsage && hijackHandler == nil { + releaseWriter(s, bw) + bw = nil + } + } + + if hijackHandler != nil { + var hjr io.Reader = c + if br != nil { + hjr = br + br = nil + + // br may point to ctx.fbr, so do not return ctx into pool below. + ctx = nil + } + if bw != nil { + err = bw.Flush() + if err != nil { + break + } + releaseWriter(s, bw) + bw = nil + } + err = c.SetReadDeadline(zeroTime) + if err != nil { + break + } + err = c.SetWriteDeadline(zeroTime) + if err != nil { + break + } + go hijackConnHandler(hjr, c, s, hijackHandler) + err = errHijacked + break + } + + s.setState(c, StateIdle) + + if atomic.LoadInt32(&s.stop) == 1 { + err = nil + break + } + } + + if br != nil { + releaseReader(s, br) + } + if bw != nil { + releaseWriter(s, bw) + } + if ctx != nil { + // in unexpected cases the for loop will break + // before request reset call. 
in such cases, call it before + // release to fix #548 + if !reqReset { + ctx.Request.Reset() + } + s.releaseCtx(ctx) + } + return +} + +func (s *Server) setState(nc net.Conn, state ConnState) { + if hook := s.ConnState; hook != nil { + hook(nc, state) + } +} + +func hijackConnHandler(r io.Reader, c net.Conn, s *Server, h HijackHandler) { + hjc := s.acquireHijackConn(r, c) + h(hjc) + + if br, ok := r.(*bufio.Reader); ok { + releaseReader(s, br) + } + if !s.KeepHijackedConns { + c.Close() + s.releaseHijackConn(hjc) + } +} + +func (s *Server) acquireHijackConn(r io.Reader, c net.Conn) *hijackConn { + v := s.hijackConnPool.Get() + if v == nil { + hjc := &hijackConn{ + Conn: c, + r: r, + s: s, + } + return hjc + } + hjc := v.(*hijackConn) + hjc.Conn = c + hjc.r = r + return hjc +} + +func (s *Server) releaseHijackConn(hjc *hijackConn) { + hjc.Conn = nil + hjc.r = nil + s.hijackConnPool.Put(hjc) +} + +type hijackConn struct { + net.Conn + r io.Reader + s *Server +} + +func (c *hijackConn) UnsafeConn() net.Conn { + return c.Conn +} + +func (c *hijackConn) Read(p []byte) (int, error) { + return c.r.Read(p) +} + +func (c *hijackConn) Close() error { + if !c.s.KeepHijackedConns { + // when we do not keep hijacked connections, + // it is closed in hijackConnHandler. + return nil + } + + conn := c.Conn + c.s.releaseHijackConn(c) + return conn.Close() +} + +// LastTimeoutErrorResponse returns the last timeout response set +// via TimeoutError* call. +// +// This function is intended for custom server implementations. +func (ctx *RequestCtx) LastTimeoutErrorResponse() *Response { + return ctx.timeoutResponse +} + +func writeResponse(ctx *RequestCtx, w *bufio.Writer) error { + if ctx.timeoutResponse != nil { + panic("BUG: cannot write timed out response") + } + err := ctx.Response.Write(w) + ctx.Response.Reset() + return err +} + +const ( + defaultReadBufferSize = 4096 + defaultWriteBufferSize = 4096 +) + +func acquireByteReader(ctxP **RequestCtx) (*bufio.Reader, error) { + ctx := *ctxP + s := ctx.s + c := ctx.c + s.releaseCtx(ctx) + + // Make GC happy, so it could garbage collect ctx + // while we waiting for the next request. + ctx = nil + *ctxP = nil + + var b [1]byte + n, err := c.Read(b[:]) + + ctx = s.acquireCtx(c) + *ctxP = ctx + if err != nil { + // Treat all errors as EOF on unsuccessful read + // of the first request byte. 
+ return nil, io.EOF + } + if n != 1 { + panic("BUG: Reader must return at least one byte") + } + + ctx.fbr.c = c + ctx.fbr.ch = b[0] + ctx.fbr.byteRead = false + r := acquireReader(ctx) + r.Reset(&ctx.fbr) + return r, nil +} + +func acquireReader(ctx *RequestCtx) *bufio.Reader { + v := ctx.s.readerPool.Get() + if v == nil { + n := ctx.s.ReadBufferSize + if n <= 0 { + n = defaultReadBufferSize + } + return bufio.NewReaderSize(ctx.c, n) + } + r := v.(*bufio.Reader) + r.Reset(ctx.c) + return r +} + +func releaseReader(s *Server, r *bufio.Reader) { + s.readerPool.Put(r) +} + +func acquireWriter(ctx *RequestCtx) *bufio.Writer { + v := ctx.s.writerPool.Get() + if v == nil { + n := ctx.s.WriteBufferSize + if n <= 0 { + n = defaultWriteBufferSize + } + return bufio.NewWriterSize(ctx.c, n) + } + w := v.(*bufio.Writer) + w.Reset(ctx.c) + return w +} + +func releaseWriter(s *Server, w *bufio.Writer) { + s.writerPool.Put(w) +} + +func (s *Server) acquireCtx(c net.Conn) (ctx *RequestCtx) { + v := s.ctxPool.Get() + if v == nil { + ctx = &RequestCtx{ + s: s, + } + keepBodyBuffer := !s.ReduceMemoryUsage + ctx.Request.keepBodyBuffer = keepBodyBuffer + ctx.Response.keepBodyBuffer = keepBodyBuffer + } else { + ctx = v.(*RequestCtx) + } + ctx.c = c + return +} + +// Init2 prepares ctx for passing to RequestHandler. +// +// conn is used only for determining local and remote addresses. +// +// This function is intended for custom Server implementations. +// See https://github.com/valyala/httpteleport for details. +func (ctx *RequestCtx) Init2(conn net.Conn, logger Logger, reduceMemoryUsage bool) { + ctx.c = conn + ctx.logger.logger = logger + ctx.connID = nextConnID() + ctx.s = fakeServer + ctx.connRequestNum = 0 + ctx.connTime = time.Now() + + keepBodyBuffer := !reduceMemoryUsage + ctx.Request.keepBodyBuffer = keepBodyBuffer + ctx.Response.keepBodyBuffer = keepBodyBuffer +} + +// Init prepares ctx for passing to RequestHandler. +// +// remoteAddr and logger are optional. They are used by RequestCtx.Logger(). +// +// This function is intended for custom Server implementations. +func (ctx *RequestCtx) Init(req *Request, remoteAddr net.Addr, logger Logger) { + if remoteAddr == nil { + remoteAddr = zeroTCPAddr + } + c := &fakeAddrer{ + laddr: zeroTCPAddr, + raddr: remoteAddr, + } + if logger == nil { + logger = defaultLogger + } + ctx.Init2(c, logger, true) + req.CopyTo(&ctx.Request) +} + +// Deadline returns the time when work done on behalf of this context +// should be canceled. Deadline returns ok==false when no deadline is +// set. Successive calls to Deadline return the same results. +// +// This method always returns 0, false and is only present to make +// RequestCtx implement the context interface. +func (ctx *RequestCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +// Done returns a channel that's closed when work done on behalf of this +// context should be canceled. Done may return nil if this context can +// never be canceled. Successive calls to Done return the same value. +func (ctx *RequestCtx) Done() <-chan struct{} { + return ctx.s.done +} + +// Err returns a non-nil error value after Done is closed, +// successive calls to Err return the same error. +// If Done is not yet closed, Err returns nil. +// If Done is closed, Err returns a non-nil error explaining why: +// Canceled if the context was canceled (via server Shutdown) +// or DeadlineExceeded if the context's deadline passed. 
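+//
+// Together with Deadline, Done and Value this makes RequestCtx satisfy
+// context.Context, so it may be passed directly to context-aware code.
+// An illustrative sketch (queryDatabase and writeRows are placeholders):
+//
+//	func handler(ctx *fasthttp.RequestCtx) {
+//		rows, err := queryDatabase(ctx, "SELECT 1") // accepts a context.Context
+//		if err != nil {
+//			ctx.Error(err.Error(), fasthttp.StatusInternalServerError)
+//			return
+//		}
+//		writeRows(ctx, rows)
+//	}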
+func (ctx *RequestCtx) Err() error { + select { + case <-ctx.s.done: + return context.Canceled + default: + return nil + } +} + +// Value returns the value associated with this context for key, or nil +// if no value is associated with key. Successive calls to Value with +// the same key returns the same result. +// +// This method is present to make RequestCtx implement the context interface. +// This method is the same as calling ctx.UserValue(key) +func (ctx *RequestCtx) Value(key interface{}) interface{} { + if keyString, ok := key.(string); ok { + return ctx.UserValue(keyString) + } + return nil +} + +var fakeServer = &Server{ + // Initialize concurrencyCh for TimeoutHandler + concurrencyCh: make(chan struct{}, DefaultConcurrency), +} + +type fakeAddrer struct { + net.Conn + laddr net.Addr + raddr net.Addr +} + +func (fa *fakeAddrer) RemoteAddr() net.Addr { + return fa.raddr +} + +func (fa *fakeAddrer) LocalAddr() net.Addr { + return fa.laddr +} + +func (fa *fakeAddrer) Read(p []byte) (int, error) { + panic("BUG: unexpected Read call") +} + +func (fa *fakeAddrer) Write(p []byte) (int, error) { + panic("BUG: unexpected Write call") +} + +func (fa *fakeAddrer) Close() error { + panic("BUG: unexpected Close call") +} + +func (s *Server) releaseCtx(ctx *RequestCtx) { + if ctx.timeoutResponse != nil { + panic("BUG: cannot release timed out RequestCtx") + } + ctx.c = nil + ctx.fbr.c = nil + s.ctxPool.Put(ctx) +} + +func (s *Server) getServerName() []byte { + v := s.serverName.Load() + var serverName []byte + if v == nil { + serverName = []byte(s.Name) + if len(serverName) == 0 { + serverName = defaultServerName + } + s.serverName.Store(serverName) + } else { + serverName = v.([]byte) + } + return serverName +} + +func (s *Server) writeFastError(w io.Writer, statusCode int, msg string) { + w.Write(statusLine(statusCode)) //nolint:errcheck + + server := "" + if !s.NoDefaultServerHeader { + server = fmt.Sprintf("Server: %s\r\n", s.getServerName()) + } + + serverDateOnce.Do(updateServerDate) + + fmt.Fprintf(w, "Connection: close\r\n"+ + server+ + "Date: %s\r\n"+ + "Content-Type: text/plain\r\n"+ + "Content-Length: %d\r\n"+ + "\r\n"+ + "%s", + serverDate.Load(), len(msg), msg) +} + +func defaultErrorHandler(ctx *RequestCtx, err error) { + if _, ok := err.(*ErrSmallBuffer); ok { + ctx.Error("Too big request header", StatusRequestHeaderFieldsTooLarge) + } else if netErr, ok := err.(*net.OpError); ok && netErr.Timeout() { + ctx.Error("Request timeout", StatusRequestTimeout) + } else { + ctx.Error("Error when parsing request", StatusBadRequest) + } +} + +func (s *Server) writeErrorResponse(bw *bufio.Writer, ctx *RequestCtx, serverName []byte, err error) *bufio.Writer { + errorHandler := defaultErrorHandler + if s.ErrorHandler != nil { + errorHandler = s.ErrorHandler + } + + errorHandler(ctx, err) + + if serverName != nil { + ctx.Response.Header.SetServerBytes(serverName) + } + ctx.SetConnectionClose() + if bw == nil { + bw = acquireWriter(ctx) + } + writeResponse(ctx, bw) //nolint:errcheck + bw.Flush() + return bw +} + +// A ConnState represents the state of a client connection to a server. +// It's used by the optional Server.ConnState hook. +type ConnState int + +const ( + // StateNew represents a new connection that is expected to + // send a request immediately. Connections begin at this + // state and then transition to either StateActive or + // StateClosed. + StateNew ConnState = iota + + // StateActive represents a connection that has read 1 or more + // bytes of a request. 
The Server.ConnState hook for + // StateActive fires before the request has entered a handler + // and doesn't fire again until the request has been + // handled. After the request is handled, the state + // transitions to StateClosed, StateHijacked, or StateIdle. + // For HTTP/2, StateActive fires on the transition from zero + // to one active request, and only transitions away once all + // active requests are complete. That means that ConnState + // cannot be used to do per-request work; ConnState only notes + // the overall state of the connection. + StateActive + + // StateIdle represents a connection that has finished + // handling a request and is in the keep-alive state, waiting + // for a new request. Connections transition from StateIdle + // to either StateActive or StateClosed. + StateIdle + + // StateHijacked represents a hijacked connection. + // This is a terminal state. It does not transition to StateClosed. + StateHijacked + + // StateClosed represents a closed connection. + // This is a terminal state. Hijacked connections do not + // transition to StateClosed. + StateClosed +) + +var stateName = map[ConnState]string{ + StateNew: "new", + StateActive: "active", + StateIdle: "idle", + StateHijacked: "hijacked", + StateClosed: "closed", +} + +func (c ConnState) String() string { + return stateName[c] +} diff --git a/vendor/github.com/valyala/fasthttp/ssl-cert-snakeoil.key b/vendor/github.com/valyala/fasthttp/ssl-cert-snakeoil.key new file mode 100644 index 000000000..00a79a3b5 --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/ssl-cert-snakeoil.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQD4IQusAs8PJdnG +3mURt/AXtgC+ceqLOatJ49JJE1VPTkMAy+oE1f1XvkMrYsHqmDf6GWVzgVXryL4U +wq2/nJSm56ddhN55nI8oSN3dtywUB8/ShelEN73nlN77PeD9tl6NksPwWaKrqxq0 +FlabRPZSQCfmgZbhDV8Sa8mfCkFU0G0lit6kLGceCKMvmW+9Bz7ebsYmVdmVMxmf +IJStFD44lWFTdUc65WISKEdW2ELcUefb0zOLw+0PCbXFGJH5x5ktksW8+BBk2Hkg +GeQRL/qPCccthbScO0VgNj3zJ3ZZL0ObSDAbvNDG85joeNjDNq5DT/BAZ0bOSbEF +sh+f9BAzAgMBAAECggEBAJWv2cq7Jw6MVwSRxYca38xuD6TUNBopgBvjREixURW2 +sNUaLuMb9Omp7fuOaE2N5rcJ+xnjPGIxh/oeN5MQctz9gwn3zf6vY+15h97pUb4D +uGvYPRDaT8YVGS+X9NMZ4ZCmqW2lpWzKnCFoGHcy8yZLbcaxBsRdvKzwOYGoPiFb +K2QuhXZ/1UPmqK9i2DFKtj40X6vBszTNboFxOVpXrPu0FJwLVSDf2hSZ4fMM0DH3 +YqwKcYf5te+hxGKgrqRA3tn0NCWii0in6QIwXMC+kMw1ebg/tZKqyDLMNptAK8J+ +DVw9m5X1seUHS5ehU/g2jrQrtK5WYn7MrFK4lBzlRwECgYEA/d1TeANYECDWRRDk +B0aaRZs87Rwl/J9PsvbsKvtU/bX+OfSOUjOa9iQBqn0LmU8GqusEET/QVUfocVwV +Bggf/5qDLxz100Rj0ags/yE/kNr0Bb31kkkKHFMnCT06YasR7qKllwrAlPJvQv9x +IzBKq+T/Dx08Wep9bCRSFhzRCnsCgYEA+jdeZXTDr/Vz+D2B3nAw1frqYFfGnEVY +wqmoK3VXMDkGuxsloO2rN+SyiUo3JNiQNPDub/t7175GH5pmKtZOlftePANsUjBj +wZ1D0rI5Bxu/71ibIUYIRVmXsTEQkh/ozoh3jXCZ9+bLgYiYx7789IUZZSokFQ3D +FICUT9KJ36kCgYAGoq9Y1rWJjmIrYfqj2guUQC+CfxbbGIrrwZqAsRsSmpwvhZ3m +tiSZxG0quKQB+NfSxdvQW5ulbwC7Xc3K35F+i9pb8+TVBdeaFkw+yu6vaZmxQLrX +fQM/pEjD7A7HmMIaO7QaU5SfEAsqdCTP56Y8AftMuNXn/8IRfo2KuGwaWwKBgFpU +ILzJoVdlad9E/Rw7LjYhZfkv1uBVXIyxyKcfrkEXZSmozDXDdxsvcZCEfVHM6Ipk +K/+7LuMcqp4AFEAEq8wTOdq6daFaHLkpt/FZK6M4TlruhtpFOPkoNc3e45eM83OT +6mziKINJC1CQ6m65sQHpBtjxlKMRG8rL/D6wx9s5AoGBAMRlqNPMwglT3hvDmsAt +9Lf9pdmhERUlHhD8bj8mDaBj2Aqv7f6VRJaYZqP403pKKQexuqcn80mtjkSAPFkN +Cj7BVt/RXm5uoxDTnfi26RF9F6yNDEJ7UU9+peBr99aazF/fTgW/1GcMkQnum8uV +c257YgaWmjK9uB0Y2r2VxS0G +-----END PRIVATE KEY----- diff --git a/vendor/github.com/valyala/fasthttp/ssl-cert-snakeoil.pem b/vendor/github.com/valyala/fasthttp/ssl-cert-snakeoil.pem new file mode 100644 index 000000000..93e77cd95 --- /dev/null +++ 
b/vendor/github.com/valyala/fasthttp/ssl-cert-snakeoil.pem @@ -0,0 +1,17 @@ +-----BEGIN CERTIFICATE----- +MIICujCCAaKgAwIBAgIJAMbXnKZ/cikUMA0GCSqGSIb3DQEBCwUAMBUxEzARBgNV +BAMTCnVidW50dS5uYW4wHhcNMTUwMjA0MDgwMTM5WhcNMjUwMjAxMDgwMTM5WjAV +MRMwEQYDVQQDEwp1YnVudHUubmFuMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB +CgKCAQEA+CELrALPDyXZxt5lEbfwF7YAvnHqizmrSePSSRNVT05DAMvqBNX9V75D +K2LB6pg3+hllc4FV68i+FMKtv5yUpuenXYTeeZyPKEjd3bcsFAfP0oXpRDe955Te ++z3g/bZejZLD8Fmiq6satBZWm0T2UkAn5oGW4Q1fEmvJnwpBVNBtJYrepCxnHgij +L5lvvQc+3m7GJlXZlTMZnyCUrRQ+OJVhU3VHOuViEihHVthC3FHn29Mzi8PtDwm1 +xRiR+ceZLZLFvPgQZNh5IBnkES/6jwnHLYW0nDtFYDY98yd2WS9Dm0gwG7zQxvOY +6HjYwzauQ0/wQGdGzkmxBbIfn/QQMwIDAQABow0wCzAJBgNVHRMEAjAAMA0GCSqG +SIb3DQEBCwUAA4IBAQBQjKm/4KN/iTgXbLTL3i7zaxYXFLXsnT1tF+ay4VA8aj98 +L3JwRTciZ3A5iy/W4VSCt3eASwOaPWHKqDBB5RTtL73LoAqsWmO3APOGQAbixcQ2 +45GXi05OKeyiYRi1Nvq7Unv9jUkRDHUYVPZVSAjCpsXzPhFkmZoTRxmx5l0ZF7Li +K91lI5h+eFq0dwZwrmlPambyh1vQUi70VHv8DNToVU29kel7YLbxGbuqETfhrcy6 +X+Mha6RYITkAn5FqsZcKMsc9eYGEF4l3XV+oS7q6xfTxktYJMFTI18J0lQ2Lv/CI +whdMnYGntDQBE/iFCrJEGNsKGc38796GBOb5j+zd +-----END CERTIFICATE----- diff --git a/vendor/github.com/valyala/fasthttp/stackless/doc.go b/vendor/github.com/valyala/fasthttp/stackless/doc.go new file mode 100644 index 000000000..8c0cc497c --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/stackless/doc.go @@ -0,0 +1,3 @@ +// Package stackless provides functionality that may save stack space +// for high number of concurrently running goroutines. +package stackless diff --git a/vendor/github.com/valyala/fasthttp/stackless/func.go b/vendor/github.com/valyala/fasthttp/stackless/func.go new file mode 100644 index 000000000..9a49bcc26 --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/stackless/func.go @@ -0,0 +1,79 @@ +package stackless + +import ( + "runtime" + "sync" +) + +// NewFunc returns stackless wrapper for the function f. +// +// Unlike f, the returned stackless wrapper doesn't use stack space +// on the goroutine that calls it. +// The wrapper may save a lot of stack space if the following conditions +// are met: +// +// - f doesn't contain blocking calls on network, I/O or channels; +// - f uses a lot of stack space; +// - the wrapper is called from high number of concurrent goroutines. +// +// The stackless wrapper returns false if the call cannot be processed +// at the moment due to high load. 
+func NewFunc(f func(ctx interface{})) func(ctx interface{}) bool { + if f == nil { + panic("BUG: f cannot be nil") + } + + funcWorkCh := make(chan *funcWork, runtime.GOMAXPROCS(-1)*2048) + onceInit := func() { + n := runtime.GOMAXPROCS(-1) + for i := 0; i < n; i++ { + go funcWorker(funcWorkCh, f) + } + } + var once sync.Once + + return func(ctx interface{}) bool { + once.Do(onceInit) + fw := getFuncWork() + fw.ctx = ctx + + select { + case funcWorkCh <- fw: + default: + putFuncWork(fw) + return false + } + <-fw.done + putFuncWork(fw) + return true + } +} + +func funcWorker(funcWorkCh <-chan *funcWork, f func(ctx interface{})) { + for fw := range funcWorkCh { + f(fw.ctx) + fw.done <- struct{}{} + } +} + +func getFuncWork() *funcWork { + v := funcWorkPool.Get() + if v == nil { + v = &funcWork{ + done: make(chan struct{}, 1), + } + } + return v.(*funcWork) +} + +func putFuncWork(fw *funcWork) { + fw.ctx = nil + funcWorkPool.Put(fw) +} + +var funcWorkPool sync.Pool + +type funcWork struct { + ctx interface{} + done chan struct{} +} diff --git a/vendor/github.com/valyala/fasthttp/stackless/writer.go b/vendor/github.com/valyala/fasthttp/stackless/writer.go new file mode 100644 index 000000000..b0d3e8dd9 --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/stackless/writer.go @@ -0,0 +1,138 @@ +package stackless + +import ( + "errors" + "fmt" + "io" + + "github.com/valyala/bytebufferpool" +) + +// Writer is an interface stackless writer must conform to. +// +// The interface contains common subset for Writers from compress/* packages. +type Writer interface { + Write(p []byte) (int, error) + Flush() error + Close() error + Reset(w io.Writer) +} + +// NewWriterFunc must return new writer that will be wrapped into +// stackless writer. +type NewWriterFunc func(w io.Writer) Writer + +// NewWriter creates a stackless writer around a writer returned +// from newWriter. +// +// The returned writer writes data to dstW. +// +// Writers that use a lot of stack space may be wrapped into stackless writer, +// thus saving stack space for high number of concurrently running goroutines. 
+func NewWriter(dstW io.Writer, newWriter NewWriterFunc) Writer { + w := &writer{ + dstW: dstW, + } + w.zw = newWriter(&w.xw) + return w +} + +type writer struct { + dstW io.Writer + zw Writer + xw xWriter + + err error + n int + + p []byte + op op +} + +type op int + +const ( + opWrite op = iota + opFlush + opClose + opReset +) + +func (w *writer) Write(p []byte) (int, error) { + w.p = p + err := w.do(opWrite) + w.p = nil + return w.n, err +} + +func (w *writer) Flush() error { + return w.do(opFlush) +} + +func (w *writer) Close() error { + return w.do(opClose) +} + +func (w *writer) Reset(dstW io.Writer) { + w.xw.Reset() + w.do(opReset) //nolint:errcheck + w.dstW = dstW +} + +func (w *writer) do(op op) error { + w.op = op + if !stacklessWriterFunc(w) { + return errHighLoad + } + err := w.err + if err != nil { + return err + } + if w.xw.bb != nil && len(w.xw.bb.B) > 0 { + _, err = w.dstW.Write(w.xw.bb.B) + } + w.xw.Reset() + + return err +} + +var errHighLoad = errors.New("cannot compress data due to high load") + +var stacklessWriterFunc = NewFunc(writerFunc) + +func writerFunc(ctx interface{}) { + w := ctx.(*writer) + switch w.op { + case opWrite: + w.n, w.err = w.zw.Write(w.p) + case opFlush: + w.err = w.zw.Flush() + case opClose: + w.err = w.zw.Close() + case opReset: + w.zw.Reset(&w.xw) + w.err = nil + default: + panic(fmt.Sprintf("BUG: unexpected op: %d", w.op)) + } +} + +type xWriter struct { + bb *bytebufferpool.ByteBuffer +} + +func (w *xWriter) Write(p []byte) (int, error) { + if w.bb == nil { + w.bb = bufferPool.Get() + } + return w.bb.Write(p) +} + +func (w *xWriter) Reset() { + if w.bb != nil { + bufferPool.Put(w.bb) + w.bb = nil + } +} + +var bufferPool bytebufferpool.Pool diff --git a/vendor/github.com/valyala/fasthttp/status.go b/vendor/github.com/valyala/fasthttp/status.go new file mode 100644 index 000000000..6687efb42 --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/status.go @@ -0,0 +1,176 @@ +package fasthttp + +import ( + "fmt" + "sync/atomic" +) + +// HTTP status codes were stolen from net/http. 
+const ( + StatusContinue = 100 // RFC 7231, 6.2.1 + StatusSwitchingProtocols = 101 // RFC 7231, 6.2.2 + StatusProcessing = 102 // RFC 2518, 10.1 + + StatusOK = 200 // RFC 7231, 6.3.1 + StatusCreated = 201 // RFC 7231, 6.3.2 + StatusAccepted = 202 // RFC 7231, 6.3.3 + StatusNonAuthoritativeInfo = 203 // RFC 7231, 6.3.4 + StatusNoContent = 204 // RFC 7231, 6.3.5 + StatusResetContent = 205 // RFC 7231, 6.3.6 + StatusPartialContent = 206 // RFC 7233, 4.1 + StatusMultiStatus = 207 // RFC 4918, 11.1 + StatusAlreadyReported = 208 // RFC 5842, 7.1 + StatusIMUsed = 226 // RFC 3229, 10.4.1 + + StatusMultipleChoices = 300 // RFC 7231, 6.4.1 + StatusMovedPermanently = 301 // RFC 7231, 6.4.2 + StatusFound = 302 // RFC 7231, 6.4.3 + StatusSeeOther = 303 // RFC 7231, 6.4.4 + StatusNotModified = 304 // RFC 7232, 4.1 + StatusUseProxy = 305 // RFC 7231, 6.4.5 + _ = 306 // RFC 7231, 6.4.6 (Unused) + StatusTemporaryRedirect = 307 // RFC 7231, 6.4.7 + StatusPermanentRedirect = 308 // RFC 7538, 3 + + StatusBadRequest = 400 // RFC 7231, 6.5.1 + StatusUnauthorized = 401 // RFC 7235, 3.1 + StatusPaymentRequired = 402 // RFC 7231, 6.5.2 + StatusForbidden = 403 // RFC 7231, 6.5.3 + StatusNotFound = 404 // RFC 7231, 6.5.4 + StatusMethodNotAllowed = 405 // RFC 7231, 6.5.5 + StatusNotAcceptable = 406 // RFC 7231, 6.5.6 + StatusProxyAuthRequired = 407 // RFC 7235, 3.2 + StatusRequestTimeout = 408 // RFC 7231, 6.5.7 + StatusConflict = 409 // RFC 7231, 6.5.8 + StatusGone = 410 // RFC 7231, 6.5.9 + StatusLengthRequired = 411 // RFC 7231, 6.5.10 + StatusPreconditionFailed = 412 // RFC 7232, 4.2 + StatusRequestEntityTooLarge = 413 // RFC 7231, 6.5.11 + StatusRequestURITooLong = 414 // RFC 7231, 6.5.12 + StatusUnsupportedMediaType = 415 // RFC 7231, 6.5.13 + StatusRequestedRangeNotSatisfiable = 416 // RFC 7233, 4.4 + StatusExpectationFailed = 417 // RFC 7231, 6.5.14 + StatusTeapot = 418 // RFC 7168, 2.3.3 + StatusUnprocessableEntity = 422 // RFC 4918, 11.2 + StatusLocked = 423 // RFC 4918, 11.3 + StatusFailedDependency = 424 // RFC 4918, 11.4 + StatusUpgradeRequired = 426 // RFC 7231, 6.5.15 + StatusPreconditionRequired = 428 // RFC 6585, 3 + StatusTooManyRequests = 429 // RFC 6585, 4 + StatusRequestHeaderFieldsTooLarge = 431 // RFC 6585, 5 + StatusUnavailableForLegalReasons = 451 // RFC 7725, 3 + + StatusInternalServerError = 500 // RFC 7231, 6.6.1 + StatusNotImplemented = 501 // RFC 7231, 6.6.2 + StatusBadGateway = 502 // RFC 7231, 6.6.3 + StatusServiceUnavailable = 503 // RFC 7231, 6.6.4 + StatusGatewayTimeout = 504 // RFC 7231, 6.6.5 + StatusHTTPVersionNotSupported = 505 // RFC 7231, 6.6.6 + StatusVariantAlsoNegotiates = 506 // RFC 2295, 8.1 + StatusInsufficientStorage = 507 // RFC 4918, 11.5 + StatusLoopDetected = 508 // RFC 5842, 7.2 + StatusNotExtended = 510 // RFC 2774, 7 + StatusNetworkAuthenticationRequired = 511 // RFC 6585, 6 +) + +var ( + statusLines atomic.Value + + statusMessages = map[int]string{ + StatusContinue: "Continue", + StatusSwitchingProtocols: "Switching Protocols", + StatusProcessing: "Processing", + + StatusOK: "OK", + StatusCreated: "Created", + StatusAccepted: "Accepted", + StatusNonAuthoritativeInfo: "Non-Authoritative Information", + StatusNoContent: "No Content", + StatusResetContent: "Reset Content", + StatusPartialContent: "Partial Content", + StatusMultiStatus: "Multi-Status", + StatusAlreadyReported: "Already Reported", + StatusIMUsed: "IM Used", + + StatusMultipleChoices: "Multiple Choices", + StatusMovedPermanently: "Moved Permanently", + StatusFound: "Found", + StatusSeeOther: "See 
Other", + StatusNotModified: "Not Modified", + StatusUseProxy: "Use Proxy", + StatusTemporaryRedirect: "Temporary Redirect", + StatusPermanentRedirect: "Permanent Redirect", + + StatusBadRequest: "Bad Request", + StatusUnauthorized: "Unauthorized", + StatusPaymentRequired: "Payment Required", + StatusForbidden: "Forbidden", + StatusNotFound: "Not Found", + StatusMethodNotAllowed: "Method Not Allowed", + StatusNotAcceptable: "Not Acceptable", + StatusProxyAuthRequired: "Proxy Authentication Required", + StatusRequestTimeout: "Request Timeout", + StatusConflict: "Conflict", + StatusGone: "Gone", + StatusLengthRequired: "Length Required", + StatusPreconditionFailed: "Precondition Failed", + StatusRequestEntityTooLarge: "Request Entity Too Large", + StatusRequestURITooLong: "Request URI Too Long", + StatusUnsupportedMediaType: "Unsupported Media Type", + StatusRequestedRangeNotSatisfiable: "Requested Range Not Satisfiable", + StatusExpectationFailed: "Expectation Failed", + StatusTeapot: "I'm a teapot", + StatusUnprocessableEntity: "Unprocessable Entity", + StatusLocked: "Locked", + StatusFailedDependency: "Failed Dependency", + StatusUpgradeRequired: "Upgrade Required", + StatusPreconditionRequired: "Precondition Required", + StatusTooManyRequests: "Too Many Requests", + StatusRequestHeaderFieldsTooLarge: "Request Header Fields Too Large", + StatusUnavailableForLegalReasons: "Unavailable For Legal Reasons", + + StatusInternalServerError: "Internal Server Error", + StatusNotImplemented: "Not Implemented", + StatusBadGateway: "Bad Gateway", + StatusServiceUnavailable: "Service Unavailable", + StatusGatewayTimeout: "Gateway Timeout", + StatusHTTPVersionNotSupported: "HTTP Version Not Supported", + StatusVariantAlsoNegotiates: "Variant Also Negotiates", + StatusInsufficientStorage: "Insufficient Storage", + StatusLoopDetected: "Loop Detected", + StatusNotExtended: "Not Extended", + StatusNetworkAuthenticationRequired: "Network Authentication Required", + } +) + +// StatusMessage returns HTTP status message for the given status code. +func StatusMessage(statusCode int) string { + s := statusMessages[statusCode] + if s == "" { + s = "Unknown Status Code" + } + return s +} + +func init() { + statusLines.Store(make(map[int][]byte)) +} + +func statusLine(statusCode int) []byte { + m := statusLines.Load().(map[int][]byte) + h := m[statusCode] + if h != nil { + return h + } + + statusText := StatusMessage(statusCode) + + h = []byte(fmt.Sprintf("HTTP/1.1 %d %s\r\n", statusCode, statusText)) + newM := make(map[int][]byte, len(m)+1) + for k, v := range m { + newM[k] = v + } + newM[statusCode] = h + statusLines.Store(newM) + return h +} diff --git a/vendor/github.com/valyala/fasthttp/stream.go b/vendor/github.com/valyala/fasthttp/stream.go new file mode 100644 index 000000000..aa23b1af7 --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/stream.go @@ -0,0 +1,54 @@ +package fasthttp + +import ( + "bufio" + "io" + "sync" + + "github.com/valyala/fasthttp/fasthttputil" +) + +// StreamWriter must write data to w. +// +// Usually StreamWriter writes data to w in a loop (aka 'data streaming'). +// +// StreamWriter must return immediately if w returns error. +// +// Since the written data is buffered, do not forget calling w.Flush +// when the data must be propagated to reader. +type StreamWriter func(w *bufio.Writer) + +// NewStreamReader returns a reader, which replays all the data generated by sw. +// +// The returned reader may be passed to Response.SetBodyStream. 
+// +// Close must be called on the returned reader after all the required data +// has been read. Otherwise goroutine leak may occur. +// +// See also Response.SetBodyStreamWriter. +func NewStreamReader(sw StreamWriter) io.ReadCloser { + pc := fasthttputil.NewPipeConns() + pw := pc.Conn1() + pr := pc.Conn2() + + var bw *bufio.Writer + v := streamWriterBufPool.Get() + if v == nil { + bw = bufio.NewWriter(pw) + } else { + bw = v.(*bufio.Writer) + bw.Reset(pw) + } + + go func() { + sw(bw) + bw.Flush() + pw.Close() + + streamWriterBufPool.Put(bw) + }() + + return pr +} + +var streamWriterBufPool sync.Pool diff --git a/vendor/github.com/valyala/fasthttp/strings.go b/vendor/github.com/valyala/fasthttp/strings.go new file mode 100644 index 000000000..12f192630 --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/strings.go @@ -0,0 +1,85 @@ +package fasthttp + +var ( + defaultServerName = []byte("fasthttp") + defaultUserAgent = []byte("fasthttp") + defaultContentType = []byte("text/plain; charset=utf-8") +) + +var ( + strSlash = []byte("/") + strSlashSlash = []byte("//") + strSlashDotDot = []byte("/..") + strSlashDotSlash = []byte("/./") + strSlashDotDotSlash = []byte("/../") + strCRLF = []byte("\r\n") + strHTTP = []byte("http") + strHTTPS = []byte("https") + strHTTP11 = []byte("HTTP/1.1") + strColon = []byte(":") + strColonSlashSlash = []byte("://") + strColonSpace = []byte(": ") + strGMT = []byte("GMT") + strAt = []byte("@") + + strResponseContinue = []byte("HTTP/1.1 100 Continue\r\n\r\n") + + strGet = []byte(MethodGet) + strHead = []byte(MethodHead) + strPost = []byte(MethodPost) + strPut = []byte(MethodPut) + strDelete = []byte(MethodDelete) + strConnect = []byte(MethodConnect) + strOptions = []byte(MethodOptions) + strTrace = []byte(MethodTrace) + strPatch = []byte(MethodPatch) + + strExpect = []byte(HeaderExpect) + strConnection = []byte(HeaderConnection) + strContentLength = []byte(HeaderContentLength) + strContentType = []byte(HeaderContentType) + strDate = []byte(HeaderDate) + strHost = []byte(HeaderHost) + strReferer = []byte(HeaderReferer) + strServer = []byte(HeaderServer) + strTransferEncoding = []byte(HeaderTransferEncoding) + strContentEncoding = []byte(HeaderContentEncoding) + strAcceptEncoding = []byte(HeaderAcceptEncoding) + strUserAgent = []byte(HeaderUserAgent) + strCookie = []byte(HeaderCookie) + strSetCookie = []byte(HeaderSetCookie) + strLocation = []byte(HeaderLocation) + strIfModifiedSince = []byte(HeaderIfModifiedSince) + strLastModified = []byte(HeaderLastModified) + strAcceptRanges = []byte(HeaderAcceptRanges) + strRange = []byte(HeaderRange) + strContentRange = []byte(HeaderContentRange) + strAuthorization = []byte(HeaderAuthorization) + + strCookieExpires = []byte("expires") + strCookieDomain = []byte("domain") + strCookiePath = []byte("path") + strCookieHTTPOnly = []byte("HttpOnly") + strCookieSecure = []byte("secure") + strCookieMaxAge = []byte("max-age") + strCookieSameSite = []byte("SameSite") + strCookieSameSiteLax = []byte("Lax") + strCookieSameSiteStrict = []byte("Strict") + strCookieSameSiteNone = []byte("None") + + strClose = []byte("close") + strGzip = []byte("gzip") + strDeflate = []byte("deflate") + strKeepAlive = []byte("keep-alive") + strUpgrade = []byte("Upgrade") + strChunked = []byte("chunked") + strIdentity = []byte("identity") + str100Continue = []byte("100-continue") + strPostArgsContentType = []byte("application/x-www-form-urlencoded") + strMultipartFormData = []byte("multipart/form-data") + strBoundary = []byte("boundary") + strBytes = 
[]byte("bytes") + strTextSlash = []byte("text/") + strApplicationSlash = []byte("application/") + strBasicSpace = []byte("Basic ") +) diff --git a/vendor/github.com/valyala/fasthttp/tcpdialer.go b/vendor/github.com/valyala/fasthttp/tcpdialer.go new file mode 100644 index 000000000..8e432cf0c --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/tcpdialer.go @@ -0,0 +1,473 @@ +package fasthttp + +import ( + "context" + "errors" + "net" + "strconv" + "sync" + "sync/atomic" + "time" +) + +// Dial dials the given TCP addr using tcp4. +// +// This function has the following additional features comparing to net.Dial: +// +// * It reduces load on DNS resolver by caching resolved TCP addressed +// for DefaultDNSCacheDuration. +// * It dials all the resolved TCP addresses in round-robin manner until +// connection is established. This may be useful if certain addresses +// are temporarily unreachable. +// * It returns ErrDialTimeout if connection cannot be established during +// DefaultDialTimeout seconds. Use DialTimeout for customizing dial timeout. +// +// This dialer is intended for custom code wrapping before passing +// to Client.Dial or HostClient.Dial. +// +// For instance, per-host counters and/or limits may be implemented +// by such wrappers. +// +// The addr passed to the function must contain port. Example addr values: +// +// * foobar.baz:443 +// * foo.bar:80 +// * aaa.com:8080 +func Dial(addr string) (net.Conn, error) { + return defaultDialer.Dial(addr) +} + +// DialTimeout dials the given TCP addr using tcp4 using the given timeout. +// +// This function has the following additional features comparing to net.Dial: +// +// * It reduces load on DNS resolver by caching resolved TCP addressed +// for DefaultDNSCacheDuration. +// * It dials all the resolved TCP addresses in round-robin manner until +// connection is established. This may be useful if certain addresses +// are temporarily unreachable. +// +// This dialer is intended for custom code wrapping before passing +// to Client.Dial or HostClient.Dial. +// +// For instance, per-host counters and/or limits may be implemented +// by such wrappers. +// +// The addr passed to the function must contain port. Example addr values: +// +// * foobar.baz:443 +// * foo.bar:80 +// * aaa.com:8080 +func DialTimeout(addr string, timeout time.Duration) (net.Conn, error) { + return defaultDialer.DialTimeout(addr, timeout) +} + +// DialDualStack dials the given TCP addr using both tcp4 and tcp6. +// +// This function has the following additional features comparing to net.Dial: +// +// * It reduces load on DNS resolver by caching resolved TCP addressed +// for DefaultDNSCacheDuration. +// * It dials all the resolved TCP addresses in round-robin manner until +// connection is established. This may be useful if certain addresses +// are temporarily unreachable. +// * It returns ErrDialTimeout if connection cannot be established during +// DefaultDialTimeout seconds. Use DialDualStackTimeout for custom dial +// timeout. +// +// This dialer is intended for custom code wrapping before passing +// to Client.Dial or HostClient.Dial. +// +// For instance, per-host counters and/or limits may be implemented +// by such wrappers. +// +// The addr passed to the function must contain port. 
Example addr values: +// +// * foobar.baz:443 +// * foo.bar:80 +// * aaa.com:8080 +func DialDualStack(addr string) (net.Conn, error) { + return defaultDialer.DialDualStack(addr) +} + +// DialDualStackTimeout dials the given TCP addr using both tcp4 and tcp6 +// using the given timeout. +// +// This function has the following additional features comparing to net.Dial: +// +// * It reduces load on DNS resolver by caching resolved TCP addressed +// for DefaultDNSCacheDuration. +// * It dials all the resolved TCP addresses in round-robin manner until +// connection is established. This may be useful if certain addresses +// are temporarily unreachable. +// +// This dialer is intended for custom code wrapping before passing +// to Client.Dial or HostClient.Dial. +// +// For instance, per-host counters and/or limits may be implemented +// by such wrappers. +// +// The addr passed to the function must contain port. Example addr values: +// +// * foobar.baz:443 +// * foo.bar:80 +// * aaa.com:8080 +func DialDualStackTimeout(addr string, timeout time.Duration) (net.Conn, error) { + return defaultDialer.DialDualStackTimeout(addr, timeout) +} + +var ( + defaultDialer = &TCPDialer{Concurrency: 1000} +) + +// Resolver represents interface of the tcp resolver. +type Resolver interface { + LookupIPAddr(context.Context, string) (names []net.IPAddr, err error) +} + +// TCPDialer contains options to control a group of Dial calls. +type TCPDialer struct { + // Concurrency controls the maximum number of concurrent Dails + // that can be performed using this object. + // Setting this to 0 means unlimited. + // + // WARNING: This can only be changed before the first Dial. + // Changes made after the first Dial will not affect anything. + Concurrency int + + // This may be used to override DNS resolving policy, like this: + // var dialer = &fasthttp.TCPDialer{ + // Resolver: &net.Resolver{ + // PreferGo: true, + // StrictErrors: false, + // Dial: func (ctx context.Context, network, address string) (net.Conn, error) { + // d := net.Dialer{} + // return d.DialContext(ctx, "udp", "8.8.8.8:53") + // }, + // }, + // } + Resolver Resolver + + tcpAddrsLock sync.Mutex + tcpAddrsMap map[string]*tcpAddrEntry + + concurrencyCh chan struct{} + + once sync.Once +} + +// Dial dials the given TCP addr using tcp4. +// +// This function has the following additional features comparing to net.Dial: +// +// * It reduces load on DNS resolver by caching resolved TCP addressed +// for DefaultDNSCacheDuration. +// * It dials all the resolved TCP addresses in round-robin manner until +// connection is established. This may be useful if certain addresses +// are temporarily unreachable. +// * It returns ErrDialTimeout if connection cannot be established during +// DefaultDialTimeout seconds. Use DialTimeout for customizing dial timeout. +// +// This dialer is intended for custom code wrapping before passing +// to Client.Dial or HostClient.Dial. +// +// For instance, per-host counters and/or limits may be implemented +// by such wrappers. +// +// The addr passed to the function must contain port. Example addr values: +// +// * foobar.baz:443 +// * foo.bar:80 +// * aaa.com:8080 +func (d *TCPDialer) Dial(addr string) (net.Conn, error) { + return d.dial(addr, false, DefaultDialTimeout) +} + +// DialTimeout dials the given TCP addr using tcp4 using the given timeout. 
+// +// This function has the following additional features comparing to net.Dial: +// +// * It reduces load on DNS resolver by caching resolved TCP addressed +// for DefaultDNSCacheDuration. +// * It dials all the resolved TCP addresses in round-robin manner until +// connection is established. This may be useful if certain addresses +// are temporarily unreachable. +// +// This dialer is intended for custom code wrapping before passing +// to Client.Dial or HostClient.Dial. +// +// For instance, per-host counters and/or limits may be implemented +// by such wrappers. +// +// The addr passed to the function must contain port. Example addr values: +// +// * foobar.baz:443 +// * foo.bar:80 +// * aaa.com:8080 +func (d *TCPDialer) DialTimeout(addr string, timeout time.Duration) (net.Conn, error) { + return d.dial(addr, false, timeout) +} + +// DialDualStack dials the given TCP addr using both tcp4 and tcp6. +// +// This function has the following additional features comparing to net.Dial: +// +// * It reduces load on DNS resolver by caching resolved TCP addressed +// for DefaultDNSCacheDuration. +// * It dials all the resolved TCP addresses in round-robin manner until +// connection is established. This may be useful if certain addresses +// are temporarily unreachable. +// * It returns ErrDialTimeout if connection cannot be established during +// DefaultDialTimeout seconds. Use DialDualStackTimeout for custom dial +// timeout. +// +// This dialer is intended for custom code wrapping before passing +// to Client.Dial or HostClient.Dial. +// +// For instance, per-host counters and/or limits may be implemented +// by such wrappers. +// +// The addr passed to the function must contain port. Example addr values: +// +// * foobar.baz:443 +// * foo.bar:80 +// * aaa.com:8080 +func (d *TCPDialer) DialDualStack(addr string) (net.Conn, error) { + return d.dial(addr, true, DefaultDialTimeout) +} + +// DialDualStackTimeout dials the given TCP addr using both tcp4 and tcp6 +// using the given timeout. +// +// This function has the following additional features comparing to net.Dial: +// +// * It reduces load on DNS resolver by caching resolved TCP addressed +// for DefaultDNSCacheDuration. +// * It dials all the resolved TCP addresses in round-robin manner until +// connection is established. This may be useful if certain addresses +// are temporarily unreachable. +// +// This dialer is intended for custom code wrapping before passing +// to Client.Dial or HostClient.Dial. +// +// For instance, per-host counters and/or limits may be implemented +// by such wrappers. +// +// The addr passed to the function must contain port. 
Example addr values: +// +// * foobar.baz:443 +// * foo.bar:80 +// * aaa.com:8080 +func (d *TCPDialer) DialDualStackTimeout(addr string, timeout time.Duration) (net.Conn, error) { + return d.dial(addr, true, timeout) +} + +func (d *TCPDialer) dial(addr string, dualStack bool, timeout time.Duration) (net.Conn, error) { + d.once.Do(func() { + if d.Concurrency > 0 { + d.concurrencyCh = make(chan struct{}, d.Concurrency) + } + d.tcpAddrsMap = make(map[string]*tcpAddrEntry) + go d.tcpAddrsClean() + }) + + addrs, idx, err := d.getTCPAddrs(addr, dualStack) + if err != nil { + return nil, err + } + network := "tcp4" + if dualStack { + network = "tcp" + } + + var conn net.Conn + n := uint32(len(addrs)) + deadline := time.Now().Add(timeout) + for n > 0 { + conn, err = tryDial(network, &addrs[idx%n], deadline, d.concurrencyCh) + if err == nil { + return conn, nil + } + if err == ErrDialTimeout { + return nil, err + } + idx++ + n-- + } + return nil, err +} + +func tryDial(network string, addr *net.TCPAddr, deadline time.Time, concurrencyCh chan struct{}) (net.Conn, error) { + timeout := -time.Since(deadline) + if timeout <= 0 { + return nil, ErrDialTimeout + } + + if concurrencyCh != nil { + select { + case concurrencyCh <- struct{}{}: + default: + tc := AcquireTimer(timeout) + isTimeout := false + select { + case concurrencyCh <- struct{}{}: + case <-tc.C: + isTimeout = true + } + ReleaseTimer(tc) + if isTimeout { + return nil, ErrDialTimeout + } + } + } + + chv := dialResultChanPool.Get() + if chv == nil { + chv = make(chan dialResult, 1) + } + ch := chv.(chan dialResult) + go func() { + var dr dialResult + dr.conn, dr.err = net.DialTCP(network, nil, addr) + ch <- dr + if concurrencyCh != nil { + <-concurrencyCh + } + }() + + var ( + conn net.Conn + err error + ) + + tc := AcquireTimer(timeout) + select { + case dr := <-ch: + conn = dr.conn + err = dr.err + dialResultChanPool.Put(ch) + case <-tc.C: + err = ErrDialTimeout + } + ReleaseTimer(tc) + + return conn, err +} + +var dialResultChanPool sync.Pool + +type dialResult struct { + conn net.Conn + err error +} + +// ErrDialTimeout is returned when TCP dialing is timed out. +var ErrDialTimeout = errors.New("dialing to the given TCP address timed out") + +// DefaultDialTimeout is timeout used by Dial and DialDualStack +// for establishing TCP connections. +const DefaultDialTimeout = 3 * time.Second + +type tcpAddrEntry struct { + addrs []net.TCPAddr + addrsIdx uint32 + + resolveTime time.Time + pending bool +} + +// DefaultDNSCacheDuration is the duration for caching resolved TCP addresses +// by Dial* functions. 
+const DefaultDNSCacheDuration = time.Minute + +func (d *TCPDialer) tcpAddrsClean() { + expireDuration := 2 * DefaultDNSCacheDuration + for { + time.Sleep(time.Second) + t := time.Now() + + d.tcpAddrsLock.Lock() + for k, e := range d.tcpAddrsMap { + if t.Sub(e.resolveTime) > expireDuration { + delete(d.tcpAddrsMap, k) + } + } + d.tcpAddrsLock.Unlock() + } +} + +func (d *TCPDialer) getTCPAddrs(addr string, dualStack bool) ([]net.TCPAddr, uint32, error) { + d.tcpAddrsLock.Lock() + e := d.tcpAddrsMap[addr] + if e != nil && !e.pending && time.Since(e.resolveTime) > DefaultDNSCacheDuration { + e.pending = true + e = nil + } + d.tcpAddrsLock.Unlock() + + if e == nil { + addrs, err := resolveTCPAddrs(addr, dualStack, d.Resolver) + if err != nil { + d.tcpAddrsLock.Lock() + e = d.tcpAddrsMap[addr] + if e != nil && e.pending { + e.pending = false + } + d.tcpAddrsLock.Unlock() + return nil, 0, err + } + + e = &tcpAddrEntry{ + addrs: addrs, + resolveTime: time.Now(), + } + + d.tcpAddrsLock.Lock() + d.tcpAddrsMap[addr] = e + d.tcpAddrsLock.Unlock() + } + + idx := atomic.AddUint32(&e.addrsIdx, 1) + return e.addrs, idx, nil +} + +func resolveTCPAddrs(addr string, dualStack bool, resolver Resolver) ([]net.TCPAddr, error) { + host, portS, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + port, err := strconv.Atoi(portS) + if err != nil { + return nil, err + } + + if resolver == nil { + resolver = net.DefaultResolver + } + + ctx := context.Background() + ipaddrs, err := resolver.LookupIPAddr(ctx, host) + if err != nil { + return nil, err + } + + n := len(ipaddrs) + addrs := make([]net.TCPAddr, 0, n) + for i := 0; i < n; i++ { + ip := ipaddrs[i] + if !dualStack && ip.IP.To4() == nil { + continue + } + addrs = append(addrs, net.TCPAddr{ + IP: ip.IP, + Port: port, + Zone: ip.Zone, + }) + } + if len(addrs) == 0 { + return nil, errNoDNSEntries + } + return addrs, nil +} + +var errNoDNSEntries = errors.New("couldn't find DNS entries for the given domain. Try using DialDualStack") diff --git a/vendor/github.com/valyala/fasthttp/timer.go b/vendor/github.com/valyala/fasthttp/timer.go new file mode 100644 index 000000000..4e919384e --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/timer.go @@ -0,0 +1,54 @@ +package fasthttp + +import ( + "sync" + "time" +) + +func initTimer(t *time.Timer, timeout time.Duration) *time.Timer { + if t == nil { + return time.NewTimer(timeout) + } + if t.Reset(timeout) { + panic("BUG: active timer trapped into initTimer()") + } + return t +} + +func stopTimer(t *time.Timer) { + if !t.Stop() { + // Collect possibly added time from the channel + // if timer has been stopped and nobody collected its' value. + select { + case <-t.C: + default: + } + } +} + +// AcquireTimer returns a time.Timer from the pool and updates it to +// send the current time on its channel after at least timeout. +// +// The returned Timer may be returned to the pool with ReleaseTimer +// when no longer needed. This allows reducing GC load. +func AcquireTimer(timeout time.Duration) *time.Timer { + v := timerPool.Get() + if v == nil { + return time.NewTimer(timeout) + } + t := v.(*time.Timer) + initTimer(t, timeout) + return t +} + +// ReleaseTimer returns the time.Timer acquired via AcquireTimer to the pool +// and prevents the Timer from firing. +// +// Do not access the released time.Timer or read from it's channel otherwise +// data races may occur. 
+func ReleaseTimer(t *time.Timer) { + stopTimer(t) + timerPool.Put(t) +} + +var timerPool sync.Pool diff --git a/vendor/github.com/valyala/fasthttp/uri.go b/vendor/github.com/valyala/fasthttp/uri.go new file mode 100644 index 000000000..80e5bf3b6 --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/uri.go @@ -0,0 +1,582 @@ +package fasthttp + +import ( + "bytes" + "io" + "sync" +) + +// AcquireURI returns an empty URI instance from the pool. +// +// Release the URI with ReleaseURI after the URI is no longer needed. +// This allows reducing GC load. +func AcquireURI() *URI { + return uriPool.Get().(*URI) +} + +// ReleaseURI releases the URI acquired via AcquireURI. +// +// The released URI mustn't be used after releasing it, otherwise data races +// may occur. +func ReleaseURI(u *URI) { + u.Reset() + uriPool.Put(u) +} + +var uriPool = &sync.Pool{ + New: func() interface{} { + return &URI{} + }, +} + +// URI represents URI :) . +// +// It is forbidden copying URI instances. Create new instance and use CopyTo +// instead. +// +// URI instance MUST NOT be used from concurrently running goroutines. +type URI struct { + noCopy noCopy //nolint:unused,structcheck + + pathOriginal []byte + scheme []byte + path []byte + queryString []byte + hash []byte + host []byte + + queryArgs Args + parsedQueryArgs bool + + // Path values are sent as-is without normalization + // + // Disabled path normalization may be useful for proxying incoming requests + // to servers that are expecting paths to be forwarded as-is. + // + // By default path values are normalized, i.e. + // extra slashes are removed, special characters are encoded. + DisablePathNormalizing bool + + fullURI []byte + requestURI []byte + + username []byte + password []byte +} + +// CopyTo copies uri contents to dst. +func (u *URI) CopyTo(dst *URI) { + dst.Reset() + dst.pathOriginal = append(dst.pathOriginal[:0], u.pathOriginal...) + dst.scheme = append(dst.scheme[:0], u.scheme...) + dst.path = append(dst.path[:0], u.path...) + dst.queryString = append(dst.queryString[:0], u.queryString...) + dst.hash = append(dst.hash[:0], u.hash...) + dst.host = append(dst.host[:0], u.host...) + dst.username = append(dst.username[:0], u.username...) + dst.password = append(dst.password[:0], u.password...) + + u.queryArgs.CopyTo(&dst.queryArgs) + dst.parsedQueryArgs = u.parsedQueryArgs + dst.DisablePathNormalizing = u.DisablePathNormalizing + + // fullURI and requestURI shouldn't be copied, since they are created + // from scratch on each FullURI() and RequestURI() call. +} + +// Hash returns URI hash, i.e. qwe of http://aaa.com/foo/bar?baz=123#qwe . +// +// The returned value is valid until the next URI method call. +func (u *URI) Hash() []byte { + return u.hash +} + +// SetHash sets URI hash. +func (u *URI) SetHash(hash string) { + u.hash = append(u.hash[:0], hash...) +} + +// SetHashBytes sets URI hash. +func (u *URI) SetHashBytes(hash []byte) { + u.hash = append(u.hash[:0], hash...) +} + +// Username returns URI username +func (u *URI) Username() []byte { + return u.username +} + +// SetUsername sets URI username. +func (u *URI) SetUsername(username string) { + u.username = append(u.username[:0], username...) +} + +// SetUsernameBytes sets URI username. +func (u *URI) SetUsernameBytes(username []byte) { + u.username = append(u.username[:0], username...) +} + +// Password returns URI password +func (u *URI) Password() []byte { + return u.password +} + +// SetPassword sets URI password. 
+func (u *URI) SetPassword(password string) { + u.password = append(u.password[:0], password...) +} + +// SetPasswordBytes sets URI password. +func (u *URI) SetPasswordBytes(password []byte) { + u.password = append(u.password[:0], password...) +} + +// QueryString returns URI query string, +// i.e. baz=123 of http://aaa.com/foo/bar?baz=123#qwe . +// +// The returned value is valid until the next URI method call. +func (u *URI) QueryString() []byte { + return u.queryString +} + +// SetQueryString sets URI query string. +func (u *URI) SetQueryString(queryString string) { + u.queryString = append(u.queryString[:0], queryString...) + u.parsedQueryArgs = false +} + +// SetQueryStringBytes sets URI query string. +func (u *URI) SetQueryStringBytes(queryString []byte) { + u.queryString = append(u.queryString[:0], queryString...) + u.parsedQueryArgs = false +} + +// Path returns URI path, i.e. /foo/bar of http://aaa.com/foo/bar?baz=123#qwe . +// +// The returned path is always urldecoded and normalized, +// i.e. '//f%20obar/baz/../zzz' becomes '/f obar/zzz'. +// +// The returned value is valid until the next URI method call. +func (u *URI) Path() []byte { + path := u.path + if len(path) == 0 { + path = strSlash + } + return path +} + +// SetPath sets URI path. +func (u *URI) SetPath(path string) { + u.pathOriginal = append(u.pathOriginal[:0], path...) + u.path = normalizePath(u.path, u.pathOriginal) +} + +// SetPathBytes sets URI path. +func (u *URI) SetPathBytes(path []byte) { + u.pathOriginal = append(u.pathOriginal[:0], path...) + u.path = normalizePath(u.path, u.pathOriginal) +} + +// PathOriginal returns the original path from requestURI passed to URI.Parse(). +// +// The returned value is valid until the next URI method call. +func (u *URI) PathOriginal() []byte { + return u.pathOriginal +} + +// Scheme returns URI scheme, i.e. http of http://aaa.com/foo/bar?baz=123#qwe . +// +// Returned scheme is always lowercased. +// +// The returned value is valid until the next URI method call. +func (u *URI) Scheme() []byte { + scheme := u.scheme + if len(scheme) == 0 { + scheme = strHTTP + } + return scheme +} + +// SetScheme sets URI scheme, i.e. http, https, ftp, etc. +func (u *URI) SetScheme(scheme string) { + u.scheme = append(u.scheme[:0], scheme...) + lowercaseBytes(u.scheme) +} + +// SetSchemeBytes sets URI scheme, i.e. http, https, ftp, etc. +func (u *URI) SetSchemeBytes(scheme []byte) { + u.scheme = append(u.scheme[:0], scheme...) + lowercaseBytes(u.scheme) +} + +// Reset clears uri. +func (u *URI) Reset() { + u.pathOriginal = u.pathOriginal[:0] + u.scheme = u.scheme[:0] + u.path = u.path[:0] + u.queryString = u.queryString[:0] + u.hash = u.hash[:0] + u.username = u.username[:0] + u.password = u.password[:0] + + u.host = u.host[:0] + u.queryArgs.Reset() + u.parsedQueryArgs = false + u.DisablePathNormalizing = false + + // There is no need in u.fullURI = u.fullURI[:0], since full uri + // is calculated on each call to FullURI(). + + // There is no need in u.requestURI = u.requestURI[:0], since requestURI + // is calculated on each call to RequestURI(). +} + +// Host returns host part, i.e. aaa.com of http://aaa.com/foo/bar?baz=123#qwe . +// +// Host is always lowercased. +func (u *URI) Host() []byte { + return u.host +} + +// SetHost sets host for the uri. +func (u *URI) SetHost(host string) { + u.host = append(u.host[:0], host...) + lowercaseBytes(u.host) +} + +// SetHostBytes sets host for the uri. +func (u *URI) SetHostBytes(host []byte) { + u.host = append(u.host[:0], host...) 
+ lowercaseBytes(u.host) +} + +// Parse initializes URI from the given host and uri. +// +// host may be nil. In this case uri must contain fully qualified uri, +// i.e. with scheme and host. http is assumed if scheme is omitted. +// +// uri may contain e.g. RequestURI without scheme and host if host is non-empty. +func (u *URI) Parse(host, uri []byte) { + u.parse(host, uri, false) +} + +func (u *URI) parse(host, uri []byte, isTLS bool) { + u.Reset() + + if len(host) == 0 || bytes.Contains(uri, strColonSlashSlash) { + scheme, newHost, newURI := splitHostURI(host, uri) + u.scheme = append(u.scheme, scheme...) + lowercaseBytes(u.scheme) + host = newHost + uri = newURI + } + + if isTLS { + u.scheme = append(u.scheme[:0], strHTTPS...) + } + + if n := bytes.Index(host, strAt); n >= 0 { + auth := host[:n] + host = host[n+1:] + + if n := bytes.Index(auth, strColon); n >= 0 { + u.username = auth[:n] + u.password = auth[n+1:] + } else { + u.username = auth + u.password = auth[:0] // Make sure it's not nil + } + } + + u.host = append(u.host, host...) + lowercaseBytes(u.host) + + b := uri + queryIndex := bytes.IndexByte(b, '?') + fragmentIndex := bytes.IndexByte(b, '#') + // Ignore query in fragment part + if fragmentIndex >= 0 && queryIndex > fragmentIndex { + queryIndex = -1 + } + + if queryIndex < 0 && fragmentIndex < 0 { + u.pathOriginal = append(u.pathOriginal, b...) + u.path = normalizePath(u.path, u.pathOriginal) + return + } + + if queryIndex >= 0 { + // Path is everything up to the start of the query + u.pathOriginal = append(u.pathOriginal, b[:queryIndex]...) + u.path = normalizePath(u.path, u.pathOriginal) + + if fragmentIndex < 0 { + u.queryString = append(u.queryString, b[queryIndex+1:]...) + } else { + u.queryString = append(u.queryString, b[queryIndex+1:fragmentIndex]...) + u.hash = append(u.hash, b[fragmentIndex+1:]...) + } + return + } + + // fragmentIndex >= 0 && queryIndex < 0 + // Path is up to the start of fragment + u.pathOriginal = append(u.pathOriginal, b[:fragmentIndex]...) + u.path = normalizePath(u.path, u.pathOriginal) + u.hash = append(u.hash, b[fragmentIndex+1:]...) +} + +func normalizePath(dst, src []byte) []byte { + dst = dst[:0] + dst = addLeadingSlash(dst, src) + dst = decodeArgAppendNoPlus(dst, src) + + // remove duplicate slashes + b := dst + bSize := len(b) + for { + n := bytes.Index(b, strSlashSlash) + if n < 0 { + break + } + b = b[n:] + copy(b, b[1:]) + b = b[:len(b)-1] + bSize-- + } + dst = dst[:bSize] + + // remove /./ parts + b = dst + for { + n := bytes.Index(b, strSlashDotSlash) + if n < 0 { + break + } + nn := n + len(strSlashDotSlash) - 1 + copy(b[n:], b[nn:]) + b = b[:len(b)-nn+n] + } + + // remove /foo/../ parts + for { + n := bytes.Index(b, strSlashDotDotSlash) + if n < 0 { + break + } + nn := bytes.LastIndexByte(b[:n], '/') + if nn < 0 { + nn = 0 + } + n += len(strSlashDotDotSlash) - 1 + copy(b[nn:], b[n:]) + b = b[:len(b)-n+nn] + } + + // remove trailing /foo/.. + n := bytes.LastIndex(b, strSlashDotDot) + if n >= 0 && n+len(strSlashDotDot) == len(b) { + nn := bytes.LastIndexByte(b[:n], '/') + if nn < 0 { + return strSlash + } + b = b[:nn+1] + } + + return b +} + +// RequestURI returns RequestURI - i.e. URI without Scheme and Host. +func (u *URI) RequestURI() []byte { + var dst []byte + if u.DisablePathNormalizing { + dst = append(u.requestURI[:0], u.PathOriginal()...) 
+ } else { + dst = appendQuotedPath(u.requestURI[:0], u.Path()) + } + if u.queryArgs.Len() > 0 { + dst = append(dst, '?') + dst = u.queryArgs.AppendBytes(dst) + } else if len(u.queryString) > 0 { + dst = append(dst, '?') + dst = append(dst, u.queryString...) + } + if len(u.hash) > 0 { + dst = append(dst, '#') + dst = append(dst, u.hash...) + } + u.requestURI = dst + return u.requestURI +} + +// LastPathSegment returns the last part of uri path after '/'. +// +// Examples: +// +// * For /foo/bar/baz.html path returns baz.html. +// * For /foo/bar/ returns empty byte slice. +// * For /foobar.js returns foobar.js. +func (u *URI) LastPathSegment() []byte { + path := u.Path() + n := bytes.LastIndexByte(path, '/') + if n < 0 { + return path + } + return path[n+1:] +} + +// Update updates uri. +// +// The following newURI types are accepted: +// +// * Absolute, i.e. http://foobar.com/aaa/bb?cc . In this case the original +// uri is replaced by newURI. +// * Absolute without scheme, i.e. //foobar.com/aaa/bb?cc. In this case +// the original scheme is preserved. +// * Missing host, i.e. /aaa/bb?cc . In this case only RequestURI part +// of the original uri is replaced. +// * Relative path, i.e. xx?yy=abc . In this case the original RequestURI +// is updated according to the new relative path. +func (u *URI) Update(newURI string) { + u.UpdateBytes(s2b(newURI)) +} + +// UpdateBytes updates uri. +// +// The following newURI types are accepted: +// +// * Absolute, i.e. http://foobar.com/aaa/bb?cc . In this case the original +// uri is replaced by newURI. +// * Absolute without scheme, i.e. //foobar.com/aaa/bb?cc. In this case +// the original scheme is preserved. +// * Missing host, i.e. /aaa/bb?cc . In this case only RequestURI part +// of the original uri is replaced. +// * Relative path, i.e. xx?yy=abc . In this case the original RequestURI +// is updated according to the new relative path. +func (u *URI) UpdateBytes(newURI []byte) { + u.requestURI = u.updateBytes(newURI, u.requestURI) +} + +func (u *URI) updateBytes(newURI, buf []byte) []byte { + if len(newURI) == 0 { + return buf + } + + n := bytes.Index(newURI, strSlashSlash) + if n >= 0 { + // absolute uri + var b [32]byte + schemeOriginal := b[:0] + if len(u.scheme) > 0 { + schemeOriginal = append([]byte(nil), u.scheme...) + } + u.Parse(nil, newURI) + if len(schemeOriginal) > 0 && len(u.scheme) == 0 { + u.scheme = append(u.scheme[:0], schemeOriginal...) + } + return buf + } + + if newURI[0] == '/' { + // uri without host + buf = u.appendSchemeHost(buf[:0]) + buf = append(buf, newURI...) + u.Parse(nil, buf) + return buf + } + + // relative path + switch newURI[0] { + case '?': + // query string only update + u.SetQueryStringBytes(newURI[1:]) + return append(buf[:0], u.FullURI()...) + case '#': + // update only hash + u.SetHashBytes(newURI[1:]) + return append(buf[:0], u.FullURI()...) + default: + // update the last path part after the slash + path := u.Path() + n = bytes.LastIndexByte(path, '/') + if n < 0 { + panic("BUG: path must contain at least one slash") + } + buf = u.appendSchemeHost(buf[:0]) + buf = appendQuotedPath(buf, path[:n+1]) + buf = append(buf, newURI...) + u.Parse(nil, buf) + return buf + } +} + +// FullURI returns full uri in the form {Scheme}://{Host}{RequestURI}#{Hash}. +func (u *URI) FullURI() []byte { + u.fullURI = u.AppendBytes(u.fullURI[:0]) + return u.fullURI +} + +// AppendBytes appends full uri to dst and returns the extended dst. 
+func (u *URI) AppendBytes(dst []byte) []byte { + dst = u.appendSchemeHost(dst) + return append(dst, u.RequestURI()...) +} + +func (u *URI) appendSchemeHost(dst []byte) []byte { + dst = append(dst, u.Scheme()...) + dst = append(dst, strColonSlashSlash...) + return append(dst, u.Host()...) +} + +// WriteTo writes full uri to w. +// +// WriteTo implements io.WriterTo interface. +func (u *URI) WriteTo(w io.Writer) (int64, error) { + n, err := w.Write(u.FullURI()) + return int64(n), err +} + +// String returns full uri. +func (u *URI) String() string { + return string(u.FullURI()) +} + +func splitHostURI(host, uri []byte) ([]byte, []byte, []byte) { + n := bytes.Index(uri, strSlashSlash) + if n < 0 { + return strHTTP, host, uri + } + scheme := uri[:n] + if bytes.IndexByte(scheme, '/') >= 0 { + return strHTTP, host, uri + } + if len(scheme) > 0 && scheme[len(scheme)-1] == ':' { + scheme = scheme[:len(scheme)-1] + } + n += len(strSlashSlash) + uri = uri[n:] + n = bytes.IndexByte(uri, '/') + if n < 0 { + // A hack for bogus urls like foobar.com?a=b without + // slash after host. + if n = bytes.IndexByte(uri, '?'); n >= 0 { + return scheme, uri[:n], uri[n:] + } + return scheme, uri, strSlash + } + return scheme, uri[:n], uri[n:] +} + +// QueryArgs returns query args. +func (u *URI) QueryArgs() *Args { + u.parseQueryArgs() + return &u.queryArgs +} + +func (u *URI) parseQueryArgs() { + if u.parsedQueryArgs { + return + } + u.queryArgs.ParseBytes(u.queryString) + u.parsedQueryArgs = true +} diff --git a/vendor/github.com/valyala/fasthttp/uri_unix.go b/vendor/github.com/valyala/fasthttp/uri_unix.go new file mode 100644 index 000000000..1e3073329 --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/uri_unix.go @@ -0,0 +1,12 @@ +// +build !windows + +package fasthttp + +func addLeadingSlash(dst, src []byte) []byte { + // add leading slash for unix paths + if len(src) == 0 || src[0] != '/' { + dst = append(dst, '/') + } + + return dst +} diff --git a/vendor/github.com/valyala/fasthttp/uri_windows.go b/vendor/github.com/valyala/fasthttp/uri_windows.go new file mode 100644 index 000000000..95917a6bc --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/uri_windows.go @@ -0,0 +1,12 @@ +// +build windows + +package fasthttp + +func addLeadingSlash(dst, src []byte) []byte { + // zero length and "C:/" case + if len(src) == 0 || (len(src) > 2 && src[1] != ':') { + dst = append(dst, '/') + } + + return dst +} diff --git a/vendor/github.com/valyala/fasthttp/userdata.go b/vendor/github.com/valyala/fasthttp/userdata.go new file mode 100644 index 000000000..bd3e28aa1 --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/userdata.go @@ -0,0 +1,71 @@ +package fasthttp + +import ( + "io" +) + +type userDataKV struct { + key []byte + value interface{} +} + +type userData []userDataKV + +func (d *userData) Set(key string, value interface{}) { + args := *d + n := len(args) + for i := 0; i < n; i++ { + kv := &args[i] + if string(kv.key) == key { + kv.value = value + return + } + } + + c := cap(args) + if c > n { + args = args[:n+1] + kv := &args[n] + kv.key = append(kv.key[:0], key...) + kv.value = value + *d = args + return + } + + kv := userDataKV{} + kv.key = append(kv.key[:0], key...) 
+ kv.value = value + *d = append(args, kv) +} + +func (d *userData) SetBytes(key []byte, value interface{}) { + d.Set(b2s(key), value) +} + +func (d *userData) Get(key string) interface{} { + args := *d + n := len(args) + for i := 0; i < n; i++ { + kv := &args[i] + if string(kv.key) == key { + return kv.value + } + } + return nil +} + +func (d *userData) GetBytes(key []byte) interface{} { + return d.Get(b2s(key)) +} + +func (d *userData) Reset() { + args := *d + n := len(args) + for i := 0; i < n; i++ { + v := args[i].value + if vc, ok := v.(io.Closer); ok { + vc.Close() + } + } + *d = (*d)[:0] +} diff --git a/vendor/github.com/valyala/fasthttp/workerpool.go b/vendor/github.com/valyala/fasthttp/workerpool.go new file mode 100644 index 000000000..9b1987e8d --- /dev/null +++ b/vendor/github.com/valyala/fasthttp/workerpool.go @@ -0,0 +1,249 @@ +package fasthttp + +import ( + "net" + "runtime" + "strings" + "sync" + "time" +) + +// workerPool serves incoming connections via a pool of workers +// in FILO order, i.e. the most recently stopped worker will serve the next +// incoming connection. +// +// Such a scheme keeps CPU caches hot (in theory). +type workerPool struct { + // Function for serving server connections. + // It must leave c unclosed. + WorkerFunc ServeHandler + + MaxWorkersCount int + + LogAllErrors bool + + MaxIdleWorkerDuration time.Duration + + Logger Logger + + lock sync.Mutex + workersCount int + mustStop bool + + ready []*workerChan + + stopCh chan struct{} + + workerChanPool sync.Pool + + connState func(net.Conn, ConnState) +} + +type workerChan struct { + lastUseTime time.Time + ch chan net.Conn +} + +func (wp *workerPool) Start() { + if wp.stopCh != nil { + panic("BUG: workerPool already started") + } + wp.stopCh = make(chan struct{}) + stopCh := wp.stopCh + wp.workerChanPool.New = func() interface{} { + return &workerChan{ + ch: make(chan net.Conn, workerChanCap), + } + } + go func() { + var scratch []*workerChan + for { + wp.clean(&scratch) + select { + case <-stopCh: + return + default: + time.Sleep(wp.getMaxIdleWorkerDuration()) + } + } + }() +} + +func (wp *workerPool) Stop() { + if wp.stopCh == nil { + panic("BUG: workerPool wasn't started") + } + close(wp.stopCh) + wp.stopCh = nil + + // Stop all the workers waiting for incoming connections. + // Do not wait for busy workers - they will stop after + // serving the connection and noticing wp.mustStop = true. + wp.lock.Lock() + ready := wp.ready + for i := range ready { + ready[i].ch <- nil + ready[i] = nil + } + wp.ready = ready[:0] + wp.mustStop = true + wp.lock.Unlock() +} + +func (wp *workerPool) getMaxIdleWorkerDuration() time.Duration { + if wp.MaxIdleWorkerDuration <= 0 { + return 10 * time.Second + } + return wp.MaxIdleWorkerDuration +} + +func (wp *workerPool) clean(scratch *[]*workerChan) { + maxIdleWorkerDuration := wp.getMaxIdleWorkerDuration() + + // Clean least recently used workers if they didn't serve connections + // for more than maxIdleWorkerDuration. + criticalTime := time.Now().Add(-maxIdleWorkerDuration) + + wp.lock.Lock() + ready := wp.ready + n := len(ready) + + // Use binary-search algorithm to find out the index of the least recently worker which can be cleaned up. + l, r, mid := 0, n-1, 0 + for l <= r { + mid = (l + r) / 2 + if criticalTime.After(wp.ready[mid].lastUseTime) { + l = mid + 1 + } else { + r = mid - 1 + } + } + i := r + if i == -1 { + wp.lock.Unlock() + return + } + + *scratch = append((*scratch)[:0], ready[:i+1]...) 
+ m := copy(ready, ready[i+1:]) + for i = m; i < n; i++ { + ready[i] = nil + } + wp.ready = ready[:m] + wp.lock.Unlock() + + // Notify obsolete workers to stop. + // This notification must be outside the wp.lock, since ch.ch + // may be blocking and may consume a lot of time if many workers + // are located on non-local CPUs. + tmp := *scratch + for i := range tmp { + tmp[i].ch <- nil + tmp[i] = nil + } +} + +func (wp *workerPool) Serve(c net.Conn) bool { + ch := wp.getCh() + if ch == nil { + return false + } + ch.ch <- c + return true +} + +var workerChanCap = func() int { + // Use blocking workerChan if GOMAXPROCS=1. + // This immediately switches Serve to WorkerFunc, which results + // in higher performance (under go1.5 at least). + if runtime.GOMAXPROCS(0) == 1 { + return 0 + } + + // Use non-blocking workerChan if GOMAXPROCS>1, + // since otherwise the Serve caller (Acceptor) may lag accepting + // new connections if WorkerFunc is CPU-bound. + return 1 +}() + +func (wp *workerPool) getCh() *workerChan { + var ch *workerChan + createWorker := false + + wp.lock.Lock() + ready := wp.ready + n := len(ready) - 1 + if n < 0 { + if wp.workersCount < wp.MaxWorkersCount { + createWorker = true + wp.workersCount++ + } + } else { + ch = ready[n] + ready[n] = nil + wp.ready = ready[:n] + } + wp.lock.Unlock() + + if ch == nil { + if !createWorker { + return nil + } + vch := wp.workerChanPool.Get() + ch = vch.(*workerChan) + go func() { + wp.workerFunc(ch) + wp.workerChanPool.Put(vch) + }() + } + return ch +} + +func (wp *workerPool) release(ch *workerChan) bool { + ch.lastUseTime = time.Now() + wp.lock.Lock() + if wp.mustStop { + wp.lock.Unlock() + return false + } + wp.ready = append(wp.ready, ch) + wp.lock.Unlock() + return true +} + +func (wp *workerPool) workerFunc(ch *workerChan) { + var c net.Conn + + var err error + for c = range ch.ch { + if c == nil { + break + } + + if err = wp.WorkerFunc(c); err != nil && err != errHijacked { + errStr := err.Error() + if wp.LogAllErrors || !(strings.Contains(errStr, "broken pipe") || + strings.Contains(errStr, "reset by peer") || + strings.Contains(errStr, "request headers: small read buffer") || + strings.Contains(errStr, "unexpected EOF") || + strings.Contains(errStr, "i/o timeout")) { + wp.Logger.Printf("error when serving connection %q<->%q: %s", c.LocalAddr(), c.RemoteAddr(), err) + } + } + if err == errHijacked { + wp.connState(c, StateHijacked) + } else { + _ = c.Close() + wp.connState(c, StateClosed) + } + c = nil + + if !wp.release(ch) { + break + } + } + + wp.lock.Lock() + wp.workersCount-- + wp.lock.Unlock() +} diff --git a/vendor/gopkg.in/yaml.v2/.travis.yml b/vendor/gopkg.in/yaml.v2/.travis.yml new file mode 100644 index 000000000..055480b9e --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/.travis.yml @@ -0,0 +1,16 @@ +language: go + +go: + - "1.4.x" + - "1.5.x" + - "1.6.x" + - "1.7.x" + - "1.8.x" + - "1.9.x" + - "1.10.x" + - "1.11.x" + - "1.12.x" + - "1.13.x" + - "tip" + +go_import_path: gopkg.in/yaml.v2 diff --git a/vendor/gopkg.in/yaml.v2/LICENSE b/vendor/gopkg.in/yaml.v2/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/gopkg.in/yaml.v2/LICENSE.libyaml b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml new file mode 100644 index 000000000..8da58fbf6 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml @@ -0,0 +1,31 @@ +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original copyright and license: + + apic.go + emitterc.go + parserc.go + readerc.go + scannerc.go + writerc.go + yamlh.go + yamlprivateh.go + +Copyright (c) 2006 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/gopkg.in/yaml.v2/NOTICE b/vendor/gopkg.in/yaml.v2/NOTICE new file mode 100644 index 000000000..866d74a7a --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/NOTICE @@ -0,0 +1,13 @@ +Copyright 2011-2016 Canonical Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/gopkg.in/yaml.v2/README.md b/vendor/gopkg.in/yaml.v2/README.md new file mode 100644 index 000000000..b50c6e877 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/README.md @@ -0,0 +1,133 @@ +# YAML support for the Go language + +Introduction +------------ + +The yaml package enables Go programs to comfortably encode and decode YAML +values. It was developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a +pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) +C library to parse and generate YAML data quickly and reliably. + +Compatibility +------------- + +The yaml package supports most of YAML 1.1 and 1.2, including support for +anchors, tags, map merging, etc. Multi-document unmarshalling is not yet +implemented, and base-60 floats from YAML 1.1 are purposefully not +supported since they're a poor design and are gone in YAML 1.2. + +Installation and usage +---------------------- + +The import path for the package is *gopkg.in/yaml.v2*. 
+ +To install it, run: + + go get gopkg.in/yaml.v2 + +API documentation +----------------- + +If opened in a browser, the import path itself leads to the API documentation: + + * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2) + +API stability +------------- + +The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). + + +License +------- + +The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details. + + +Example +------- + +```Go +package main + +import ( + "fmt" + "log" + + "gopkg.in/yaml.v2" +) + +var data = ` +a: Easy! +b: + c: 2 + d: [3, 4] +` + +// Note: struct fields must be public in order for unmarshal to +// correctly populate the data. +type T struct { + A string + B struct { + RenamedC int `yaml:"c"` + D []int `yaml:",flow"` + } +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[interface{}]interface{}) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy! +b: + c: 2 + d: + - 3 + - 4 +``` + diff --git a/vendor/gopkg.in/yaml.v2/apic.go b/vendor/gopkg.in/yaml.v2/apic.go new file mode 100644 index 000000000..1f7e87e67 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/apic.go @@ -0,0 +1,739 @@ +package yaml + +import ( + "io" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. + if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. +func yaml_parser_initialize(parser *yaml_parser_t) bool { + *parser = yaml_parser_t{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. +func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. +func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// Reader read handler. +func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_reader.Read(buffer) +} + +// Set a string input. 
+func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_string_read_handler + parser.input = input + parser.input_pos = 0 +} + +// Set a file input. +func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_reader_read_handler + parser.input_reader = r +} + +// Set the source encoding. +func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { + if parser.encoding != yaml_ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +// Create a new emitter object. +func yaml_emitter_initialize(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]yaml_emitter_state_t, 0, initial_stack_size), + events: make([]yaml_event_t, 0, initial_queue_size), + } +} + +// Destroy an emitter object. +func yaml_emitter_delete(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{} +} + +// String write handler. +func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// yaml_writer_write_handler uses emitter.output_writer to write the +// emitted text. +func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_writer.Write(buffer) + return err +} + +// Set a string output. +func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_string_write_handler + emitter.output_buffer = output_buffer +} + +// Set a file output. +func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_writer_write_handler + emitter.output_writer = w +} + +// Set the output encoding. +func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { + if emitter.encoding != yaml_ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// Set the canonical output style. +func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { + emitter.canonical = canonical +} + +//// Set the indentation increment. +func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.best_indent = indent +} + +// Set the preferred line width. +func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// Set if unescaped non-ASCII characters are allowed. +func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { + emitter.unicode = unicode +} + +// Set the preferred line break character. +func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. +// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. 
+// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. +// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } +} + +// Create STREAM-END. +func yaml_stream_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } +} + +// Create DOCUMENT-START. +func yaml_document_start_event_initialize( + event *yaml_event_t, + version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, + implicit bool, +) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } +} + +// Create DOCUMENT-END. +func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } +} + +///* +// * Create ALIAS. +// */ +// +//YAML_DECLARE(int) +//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) +//{ +// mark yaml_mark_t = { 0, 0, 0 } +// anchor_copy *yaml_char_t = NULL +// +// assert(event) // Non-NULL event object is expected. +// assert(anchor) // Non-NULL anchor is expected. +// +// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 +// +// anchor_copy = yaml_strdup(anchor) +// if (!anchor_copy) +// return 0 +// +// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) +// +// return 1 +//} + +// Create SCALAR. 
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. +func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } +} + +// Create MAPPING-END. +func yaml_mapping_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } +} + +// Destroy an event object. +func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. +// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. +// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. 
+// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if (!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. +// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compiler warning. +// +// assert(document) // Non-NULL document object is expected. +// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. +// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. 
+// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. +// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. +// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/vendor/gopkg.in/yaml.v2/decode.go b/vendor/gopkg.in/yaml.v2/decode.go new file mode 100644 index 000000000..129bc2a97 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/decode.go @@ -0,0 +1,815 @@ +package yaml + +import ( + "encoding" + "encoding/base64" + "fmt" + "io" + "math" + "reflect" + "strconv" + "time" +) + +const ( + documentNode = 1 << iota + mappingNode + sequenceNode + scalarNode + aliasNode +) + +type node struct { + kind int + line, column int + tag string + // For an alias node, alias holds the resolved alias. + alias *node + value string + implicit bool + children []*node + anchors map[string]*node +} + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. 
+ +type parser struct { + parser yaml_parser_t + event yaml_event_t + doc *node + doneInit bool +} + +func newParser(b []byte) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + if len(b) == 0 { + b = []byte{'\n'} + } + yaml_parser_set_input_string(&p.parser, b) + return &p +} + +func newParserFromReader(r io.Reader) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + yaml_parser_set_input_reader(&p.parser, r) + return &p +} + +func (p *parser) init() { + if p.doneInit { + return + } + p.expect(yaml_STREAM_START_EVENT) + p.doneInit = true +} + +func (p *parser) destroy() { + if p.event.typ != yaml_NO_EVENT { + yaml_event_delete(&p.event) + } + yaml_parser_delete(&p.parser) +} + +// expect consumes an event from the event stream and +// checks that it's of the expected type. +func (p *parser) expect(e yaml_event_type_t) { + if p.event.typ == yaml_NO_EVENT { + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } + } + if p.event.typ == yaml_STREAM_END_EVENT { + failf("attempted to go past the end of stream; corrupted value?") + } + if p.event.typ != e { + p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ) + p.fail() + } + yaml_event_delete(&p.event) + p.event.typ = yaml_NO_EVENT +} + +// peek peeks at the next event in the event stream, +// puts the results into p.event and returns the event type. +func (p *parser) peek() yaml_event_type_t { + if p.event.typ != yaml_NO_EVENT { + return p.event.typ + } + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } + return p.event.typ +} + +func (p *parser) fail() { + var where string + var line int + if p.parser.problem_mark.line != 0 { + line = p.parser.problem_mark.line + // Scanner errors don't iterate line before returning error + if p.parser.error == yaml_SCANNER_ERROR { + line++ + } + } else if p.parser.context_mark.line != 0 { + line = p.parser.context_mark.line + } + if line != 0 { + where = "line " + strconv.Itoa(line) + ": " + } + var msg string + if len(p.parser.problem) > 0 { + msg = p.parser.problem + } else { + msg = "unknown problem parsing YAML content" + } + failf("%s%s", where, msg) +} + +func (p *parser) anchor(n *node, anchor []byte) { + if anchor != nil { + p.doc.anchors[string(anchor)] = n + } +} + +func (p *parser) parse() *node { + p.init() + switch p.peek() { + case yaml_SCALAR_EVENT: + return p.scalar() + case yaml_ALIAS_EVENT: + return p.alias() + case yaml_MAPPING_START_EVENT: + return p.mapping() + case yaml_SEQUENCE_START_EVENT: + return p.sequence() + case yaml_DOCUMENT_START_EVENT: + return p.document() + case yaml_STREAM_END_EVENT: + // Happens when attempting to decode an empty buffer. 
+ return nil + default: + panic("attempted to parse unknown event: " + p.event.typ.String()) + } +} + +func (p *parser) node(kind int) *node { + return &node{ + kind: kind, + line: p.event.start_mark.line, + column: p.event.start_mark.column, + } +} + +func (p *parser) document() *node { + n := p.node(documentNode) + n.anchors = make(map[string]*node) + p.doc = n + p.expect(yaml_DOCUMENT_START_EVENT) + n.children = append(n.children, p.parse()) + p.expect(yaml_DOCUMENT_END_EVENT) + return n +} + +func (p *parser) alias() *node { + n := p.node(aliasNode) + n.value = string(p.event.anchor) + n.alias = p.doc.anchors[n.value] + if n.alias == nil { + failf("unknown anchor '%s' referenced", n.value) + } + p.expect(yaml_ALIAS_EVENT) + return n +} + +func (p *parser) scalar() *node { + n := p.node(scalarNode) + n.value = string(p.event.value) + n.tag = string(p.event.tag) + n.implicit = p.event.implicit + p.anchor(n, p.event.anchor) + p.expect(yaml_SCALAR_EVENT) + return n +} + +func (p *parser) sequence() *node { + n := p.node(sequenceNode) + p.anchor(n, p.event.anchor) + p.expect(yaml_SEQUENCE_START_EVENT) + for p.peek() != yaml_SEQUENCE_END_EVENT { + n.children = append(n.children, p.parse()) + } + p.expect(yaml_SEQUENCE_END_EVENT) + return n +} + +func (p *parser) mapping() *node { + n := p.node(mappingNode) + p.anchor(n, p.event.anchor) + p.expect(yaml_MAPPING_START_EVENT) + for p.peek() != yaml_MAPPING_END_EVENT { + n.children = append(n.children, p.parse(), p.parse()) + } + p.expect(yaml_MAPPING_END_EVENT) + return n +} + +// ---------------------------------------------------------------------------- +// Decoder, unmarshals a node into a provided value. + +type decoder struct { + doc *node + aliases map[*node]bool + mapType reflect.Type + terrors []string + strict bool + + decodeCount int + aliasCount int + aliasDepth int +} + +var ( + mapItemType = reflect.TypeOf(MapItem{}) + durationType = reflect.TypeOf(time.Duration(0)) + defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) + ifaceType = defaultMapType.Elem() + timeType = reflect.TypeOf(time.Time{}) + ptrTimeType = reflect.TypeOf(&time.Time{}) +) + +func newDecoder(strict bool) *decoder { + d := &decoder{mapType: defaultMapType, strict: strict} + d.aliases = make(map[*node]bool) + return d +} + +func (d *decoder) terror(n *node, tag string, out reflect.Value) { + if n.tag != "" { + tag = n.tag + } + value := n.value + if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG { + if len(value) > 10 { + value = " `" + value[:7] + "...`" + } else { + value = " `" + value + "`" + } + } + d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type())) +} + +func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { + terrlen := len(d.terrors) + err := u.UnmarshalYAML(func(v interface{}) (err error) { + defer handleErr(&err) + d.unmarshal(n, reflect.ValueOf(v)) + if len(d.terrors) > terrlen { + issues := d.terrors[terrlen:] + d.terrors = d.terrors[:terrlen] + return &TypeError{issues} + } + return nil + }) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +// d.prepare initializes and dereferences pointers and calls UnmarshalYAML +// if a value is found to implement it. +// It returns the initialized and dereferenced out value, whether +// unmarshalling was already done by UnmarshalYAML, and if so whether +// its types unmarshalled appropriately. 
+// +// If n holds a null value, prepare returns before doing anything. +func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) { + return out, false, false + } + again := true + for again { + again = false + if out.Kind() == reflect.Ptr { + if out.IsNil() { + out.Set(reflect.New(out.Type().Elem())) + } + out = out.Elem() + again = true + } + if out.CanAddr() { + if u, ok := out.Addr().Interface().(Unmarshaler); ok { + good = d.callUnmarshaler(n, u) + return out, true, good + } + } + } + return out, false, false +} + +const ( + // 400,000 decode operations is ~500kb of dense object declarations, or + // ~5kb of dense object declarations with 10000% alias expansion + alias_ratio_range_low = 400000 + + // 4,000,000 decode operations is ~5MB of dense object declarations, or + // ~4.5MB of dense object declarations with 10% alias expansion + alias_ratio_range_high = 4000000 + + // alias_ratio_range is the range over which we scale allowed alias ratios + alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low) +) + +func allowedAliasRatio(decodeCount int) float64 { + switch { + case decodeCount <= alias_ratio_range_low: + // allow 99% to come from alias expansion for small-to-medium documents + return 0.99 + case decodeCount >= alias_ratio_range_high: + // allow 10% to come from alias expansion for very large documents + return 0.10 + default: + // scale smoothly from 99% down to 10% over the range. + // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range. + // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps). + return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range) + } +} + +func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { + d.decodeCount++ + if d.aliasDepth > 0 { + d.aliasCount++ + } + if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) { + failf("document contains excessive aliasing") + } + switch n.kind { + case documentNode: + return d.document(n, out) + case aliasNode: + return d.alias(n, out) + } + out, unmarshaled, good := d.prepare(n, out) + if unmarshaled { + return good + } + switch n.kind { + case scalarNode: + good = d.scalar(n, out) + case mappingNode: + good = d.mapping(n, out) + case sequenceNode: + good = d.sequence(n, out) + default: + panic("internal error: unknown node kind: " + strconv.Itoa(n.kind)) + } + return good +} + +func (d *decoder) document(n *node, out reflect.Value) (good bool) { + if len(n.children) == 1 { + d.doc = n + d.unmarshal(n.children[0], out) + return true + } + return false +} + +func (d *decoder) alias(n *node, out reflect.Value) (good bool) { + if d.aliases[n] { + // TODO this could actually be allowed in some circumstances. 
+ failf("anchor '%s' value contains itself", n.value) + } + d.aliases[n] = true + d.aliasDepth++ + good = d.unmarshal(n.alias, out) + d.aliasDepth-- + delete(d.aliases, n) + return good +} + +var zeroValue reflect.Value + +func resetMap(out reflect.Value) { + for _, k := range out.MapKeys() { + out.SetMapIndex(k, zeroValue) + } +} + +func (d *decoder) scalar(n *node, out reflect.Value) bool { + var tag string + var resolved interface{} + if n.tag == "" && !n.implicit { + tag = yaml_STR_TAG + resolved = n.value + } else { + tag, resolved = resolve(n.tag, n.value) + if tag == yaml_BINARY_TAG { + data, err := base64.StdEncoding.DecodeString(resolved.(string)) + if err != nil { + failf("!!binary value contains invalid base64 data") + } + resolved = string(data) + } + } + if resolved == nil { + if out.Kind() == reflect.Map && !out.CanAddr() { + resetMap(out) + } else { + out.Set(reflect.Zero(out.Type())) + } + return true + } + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + // We've resolved to exactly the type we want, so use that. + out.Set(resolvedv) + return true + } + // Perhaps we can use the value as a TextUnmarshaler to + // set its value. + if out.CanAddr() { + u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) + if ok { + var text []byte + if tag == yaml_BINARY_TAG { + text = []byte(resolved.(string)) + } else { + // We let any value be unmarshaled into TextUnmarshaler. + // That might be more lax than we'd like, but the + // TextUnmarshaler itself should bowl out any dubious values. + text = []byte(n.value) + } + err := u.UnmarshalText(text) + if err != nil { + fail(err) + } + return true + } + } + switch out.Kind() { + case reflect.String: + if tag == yaml_BINARY_TAG { + out.SetString(resolved.(string)) + return true + } + if resolved != nil { + out.SetString(n.value) + return true + } + case reflect.Interface: + if resolved == nil { + out.Set(reflect.Zero(out.Type())) + } else if tag == yaml_TIMESTAMP_TAG { + // It looks like a timestamp but for backward compatibility + // reasons we set it as a string, so that code that unmarshals + // timestamp-like values into interface{} will continue to + // see a string and not a time.Time. + // TODO(v3) Drop this. 
+ out.Set(reflect.ValueOf(n.value)) + } else { + out.Set(reflect.ValueOf(resolved)) + } + return true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch resolved := resolved.(type) { + case int: + if !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case int64: + if !out.OverflowInt(resolved) { + out.SetInt(resolved) + return true + } + case uint64: + if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case float64: + if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case string: + if out.Type() == durationType { + d, err := time.ParseDuration(resolved) + if err == nil { + out.SetInt(int64(d)) + return true + } + } + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch resolved := resolved.(type) { + case int: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case int64: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case uint64: + if !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case float64: + if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + } + case reflect.Bool: + switch resolved := resolved.(type) { + case bool: + out.SetBool(resolved) + return true + } + case reflect.Float32, reflect.Float64: + switch resolved := resolved.(type) { + case int: + out.SetFloat(float64(resolved)) + return true + case int64: + out.SetFloat(float64(resolved)) + return true + case uint64: + out.SetFloat(float64(resolved)) + return true + case float64: + out.SetFloat(resolved) + return true + } + case reflect.Struct: + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + out.Set(resolvedv) + return true + } + case reflect.Ptr: + if out.Type().Elem() == reflect.TypeOf(resolved) { + // TODO DOes this make sense? When is out a Ptr except when decoding a nil value? + elem := reflect.New(out.Type().Elem()) + elem.Elem().Set(reflect.ValueOf(resolved)) + out.Set(elem) + return true + } + } + d.terror(n, tag, out) + return false +} + +func settableValueOf(i interface{}) reflect.Value { + v := reflect.ValueOf(i) + sv := reflect.New(v.Type()).Elem() + sv.Set(v) + return sv +} + +func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { + l := len(n.children) + + var iface reflect.Value + switch out.Kind() { + case reflect.Slice: + out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Array: + if l != out.Len() { + failf("invalid array: want %d elements but got %d", out.Len(), l) + } + case reflect.Interface: + // No type hints. Will have to use a generic sequence. 
+ iface = out + out = settableValueOf(make([]interface{}, l)) + default: + d.terror(n, yaml_SEQ_TAG, out) + return false + } + et := out.Type().Elem() + + j := 0 + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := d.unmarshal(n.children[i], e); ok { + out.Index(j).Set(e) + j++ + } + } + if out.Kind() != reflect.Array { + out.Set(out.Slice(0, j)) + } + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { + switch out.Kind() { + case reflect.Struct: + return d.mappingStruct(n, out) + case reflect.Slice: + return d.mappingSlice(n, out) + case reflect.Map: + // okay + case reflect.Interface: + if d.mapType.Kind() == reflect.Map { + iface := out + out = reflect.MakeMap(d.mapType) + iface.Set(out) + } else { + slicev := reflect.New(d.mapType).Elem() + if !d.mappingSlice(n, slicev) { + return false + } + out.Set(slicev) + return true + } + default: + d.terror(n, yaml_MAP_TAG, out) + return false + } + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + mapType := d.mapType + if outt.Key() == ifaceType && outt.Elem() == ifaceType { + d.mapType = outt + } + + if out.IsNil() { + out.Set(reflect.MakeMap(outt)) + } + l := len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + k := reflect.New(kt).Elem() + if d.unmarshal(n.children[i], k) { + kkind := k.Kind() + if kkind == reflect.Interface { + kkind = k.Elem().Kind() + } + if kkind == reflect.Map || kkind == reflect.Slice { + failf("invalid map key: %#v", k.Interface()) + } + e := reflect.New(et).Elem() + if d.unmarshal(n.children[i+1], e) { + d.setMapIndex(n.children[i+1], out, k, e) + } + } + } + d.mapType = mapType + return true +} + +func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) { + if d.strict && out.MapIndex(k) != zeroValue { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface())) + return + } + out.SetMapIndex(k, v) +} + +func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) { + outt := out.Type() + if outt.Elem() != mapItemType { + d.terror(n, yaml_MAP_TAG, out) + return false + } + + mapType := d.mapType + d.mapType = outt + + var slice []MapItem + var l = len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + item := MapItem{} + k := reflect.ValueOf(&item.Key).Elem() + if d.unmarshal(n.children[i], k) { + v := reflect.ValueOf(&item.Value).Elem() + if d.unmarshal(n.children[i+1], v) { + slice = append(slice, item) + } + } + } + out.Set(reflect.ValueOf(slice)) + d.mapType = mapType + return true +} + +func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + name := settableValueOf("") + l := len(n.children) + + var inlineMap reflect.Value + var elemType reflect.Type + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) + elemType = inlineMap.Type().Elem() + } + + var doneFields []bool + if d.strict { + doneFields = make([]bool, len(sinfo.FieldsList)) + } + for i := 0; i < l; i += 2 { + ni := n.children[i] + if isMerge(ni) { + d.merge(n.children[i+1], out) + continue + } + if !d.unmarshal(ni, name) { + continue + } + if info, ok := sinfo.FieldsMap[name.String()]; ok { + if d.strict { + if doneFields[info.Id] { + d.terrors = append(d.terrors, 
fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type())) + continue + } + doneFields[info.Id] = true + } + var field reflect.Value + if info.Inline == nil { + field = out.Field(info.Num) + } else { + field = out.FieldByIndex(info.Inline) + } + d.unmarshal(n.children[i+1], field) + } else if sinfo.InlineMap != -1 { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + value := reflect.New(elemType).Elem() + d.unmarshal(n.children[i+1], value) + d.setMapIndex(n.children[i+1], inlineMap, name, value) + } else if d.strict { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type())) + } + } + return true +} + +func failWantMap() { + failf("map merge requires map or sequence of maps as the value") +} + +func (d *decoder) merge(n *node, out reflect.Value) { + switch n.kind { + case mappingNode: + d.unmarshal(n, out) + case aliasNode: + if n.alias != nil && n.alias.kind != mappingNode { + failWantMap() + } + d.unmarshal(n, out) + case sequenceNode: + // Step backwards as earlier nodes take precedence. + for i := len(n.children) - 1; i >= 0; i-- { + ni := n.children[i] + if ni.kind == aliasNode { + if ni.alias != nil && ni.alias.kind != mappingNode { + failWantMap() + } + } else if ni.kind != mappingNode { + failWantMap() + } + d.unmarshal(ni, out) + } + default: + failWantMap() + } +} + +func isMerge(n *node) bool { + return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG) +} diff --git a/vendor/gopkg.in/yaml.v2/emitterc.go b/vendor/gopkg.in/yaml.v2/emitterc.go new file mode 100644 index 000000000..a1c2cc526 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/emitterc.go @@ -0,0 +1,1685 @@ +package yaml + +import ( + "bytes" + "fmt" +) + +// Flush the buffer if needed. +func flush(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return yaml_emitter_flush(emitter) + } + return true +} + +// Put a character to the output buffer. +func put(emitter *yaml_emitter_t, value byte) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return true +} + +// Put a line break to the output buffer. +func put_break(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + switch emitter.line_break { + case yaml_CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case yaml_LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case yaml_CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + emitter.column = 0 + emitter.line++ + return true +} + +// Copy a character from a string into buffer. 
+func write(emitter *yaml_emitter_t, s []byte, i *int) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return true +} + +// Write a whole string into buffer. +func write_all(emitter *yaml_emitter_t, s []byte) bool { + for i := 0; i < len(s); { + if !write(emitter, s, &i) { + return false + } + } + return true +} + +// Copy a line break character from a string into buffer. +func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { + if s[*i] == '\n' { + if !put_break(emitter) { + return false + } + *i++ + } else { + if !write(emitter, s, i) { + return false + } + emitter.column = 0 + emitter.line++ + } + return true +} + +// Set an emitter error and return false. +func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_EMITTER_ERROR + emitter.problem = problem + return false +} + +// Emit an event. +func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.events = append(emitter.events, *event) + for !yaml_emitter_need_more_events(emitter) { + event := &emitter.events[emitter.events_head] + if !yaml_emitter_analyze_event(emitter, event) { + return false + } + if !yaml_emitter_state_machine(emitter, event) { + return false + } + yaml_event_delete(event) + emitter.events_head++ + } + return true +} + +// Check if we need to accumulate more events before emitting. +// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +// +func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].typ { + case yaml_DOCUMENT_START_EVENT: + accumulate = 1 + break + case yaml_SEQUENCE_START_EVENT: + accumulate = 2 + break + case yaml_MAPPING_START_EVENT: + accumulate = 3 + break + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].typ { + case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: + level++ + case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: + level-- + } + if level == 0 { + return false + } + } + return true +} + +// Append a directive to the directives stack. +func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? 
+ tag_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return true +} + +// Increase the indentation level. +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + emitter.indents = append(emitter.indents, emitter.indent) + if emitter.indent < 0 { + if flow { + emitter.indent = emitter.best_indent + } else { + emitter.indent = 0 + } + } else if !indentless { + emitter.indent += emitter.best_indent + } + return true +} + +// State dispatcher. +func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { + switch emitter.state { + default: + case yaml_EMIT_STREAM_START_STATE: + return yaml_emitter_emit_stream_start(emitter, event) + + case yaml_EMIT_FIRST_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, true) + + case yaml_EMIT_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, false) + + case yaml_EMIT_DOCUMENT_CONTENT_STATE: + return yaml_emitter_emit_document_content(emitter, event) + + case yaml_EMIT_DOCUMENT_END_STATE: + return yaml_emitter_emit_document_end(emitter, event) + + case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, true) + + case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, false) + + case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, true) + + case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, false) + + case yaml_EMIT_END_STATE: + return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") + } + panic("invalid emitter state") +} + +// Expect STREAM-START. 
+func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_STREAM_START_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") + } + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = event.encoding + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = yaml_UTF8_ENCODING + } + } + if emitter.best_indent < 2 || emitter.best_indent > 9 { + emitter.best_indent = 2 + } + if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { + emitter.best_width = 80 + } + if emitter.best_width < 0 { + emitter.best_width = 1<<31 - 1 + } + if emitter.line_break == yaml_ANY_BREAK { + emitter.line_break = yaml_LN_BREAK + } + + emitter.indent = -1 + emitter.line = 0 + emitter.column = 0 + emitter.whitespace = true + emitter.indention = true + + if emitter.encoding != yaml_UTF8_ENCODING { + if !yaml_emitter_write_bom(emitter) { + return false + } + } + emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE + return true +} + +// Expect DOCUMENT-START or STREAM-END. +func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + + if event.typ == yaml_DOCUMENT_START_EVENT { + + if event.version_directive != nil { + if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { + return false + } + } + + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { + return false + } + if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { + return false + } + } + + for i := 0; i < len(default_tag_directives); i++ { + tag_directive := &default_tag_directives[i] + if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { + return false + } + } + + implicit := event.implicit + if !first || emitter.canonical { + implicit = false + } + + if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if event.version_directive != nil { + implicit = false + if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if len(event.tag_directives) > 0 { + implicit = false + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { + return false + } + if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if yaml_emitter_check_empty_document(emitter) { + implicit = false + } + if !implicit { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { + return false + } + if emitter.canonical { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE + return true + } + + if event.typ == yaml_STREAM_END_EVENT { + if emitter.open_ended { + if 
!yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_END_STATE + return true + } + + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") +} + +// Expect the root node. +func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) + return yaml_emitter_emit_node(emitter, event, true, false, false, false) +} + +// Expect DOCUMENT-END. +func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_DOCUMENT_END_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !event.implicit { + // [Go] Allocate the slice elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_DOCUMENT_START_STATE + emitter.tag_directives = emitter.tag_directives[:0] + return true +} + +// Expect a flow item node. +func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a flow key node. 
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_MAPPING_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a flow value node. +func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block item node. +func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) { + return false + } + } + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a block key node. 
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, false) { + return false + } + } + if event.typ == yaml_MAPPING_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block value node. +func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a node. +func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, + root bool, sequence bool, mapping bool, simple_key bool) bool { + + emitter.root_context = root + emitter.sequence_context = sequence + emitter.mapping_context = mapping + emitter.simple_key_context = simple_key + + switch event.typ { + case yaml_ALIAS_EVENT: + return yaml_emitter_emit_alias(emitter, event) + case yaml_SCALAR_EVENT: + return yaml_emitter_emit_scalar(emitter, event) + case yaml_SEQUENCE_START_EVENT: + return yaml_emitter_emit_sequence_start(emitter, event) + case yaml_MAPPING_START_EVENT: + return yaml_emitter_emit_mapping_start(emitter, event) + default: + return yaml_emitter_set_emitter_error(emitter, + fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) + } +} + +// Expect ALIAS. +func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SCALAR. +func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_select_scalar_style(emitter, event) { + return false + } + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + if !yaml_emitter_process_scalar(emitter) { + return false + } + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SEQUENCE-START. 
+func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || + yaml_emitter_check_empty_sequence(emitter) { + emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE + } + return true +} + +// Expect MAPPING-START. +func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || + yaml_emitter_check_empty_mapping(emitter) { + emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE + } + return true +} + +// Check if the document content is an empty scalar. +func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { + return false // [Go] Huh? +} + +// Check if the next events represent an empty sequence. +func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT +} + +// Check if the next events represent an empty mapping. +func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT +} + +// Check if the next node can be expressed as a simple key. +func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { + length := 0 + switch emitter.events[emitter.events_head].typ { + case yaml_ALIAS_EVENT: + length += len(emitter.anchor_data.anchor) + case yaml_SCALAR_EVENT: + if emitter.scalar_data.multiline { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + + len(emitter.scalar_data.value) + case yaml_SEQUENCE_START_EVENT: + if !yaml_emitter_check_empty_sequence(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + case yaml_MAPPING_START_EVENT: + if !yaml_emitter_check_empty_mapping(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + default: + return false + } + return length <= 128 +} + +// Determine an acceptable scalar style. 
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 + if no_tag && !event.implicit && !event.quoted_implicit { + return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") + } + + style := event.scalar_style() + if style == yaml_ANY_SCALAR_STYLE { + style = yaml_PLAIN_SCALAR_STYLE + } + if emitter.canonical { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + if emitter.simple_key_context && emitter.scalar_data.multiline { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + if style == yaml_PLAIN_SCALAR_STYLE { + if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || + emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if no_tag && !event.implicit { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { + if !emitter.scalar_data.single_quoted_allowed { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { + if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + + if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { + emitter.tag_data.handle = []byte{'!'} + } + emitter.scalar_data.style = style + return true +} + +// Write an anchor. +func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { + if emitter.anchor_data.anchor == nil { + return true + } + c := []byte{'&'} + if emitter.anchor_data.alias { + c[0] = '*' + } + if !yaml_emitter_write_indicator(emitter, c, true, false, false) { + return false + } + return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) +} + +// Write a tag. +func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { + if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { + return true + } + if len(emitter.tag_data.handle) > 0 { + if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { + return false + } + if len(emitter.tag_data.suffix) > 0 { + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + } + } else { + // [Go] Allocate these slices elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { + return false + } + } + return true +} + +// Write a scalar. 
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { + switch emitter.scalar_data.style { + case yaml_PLAIN_SCALAR_STYLE: + return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_SINGLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_DOUBLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_LITERAL_SCALAR_STYLE: + return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) + + case yaml_FOLDED_SCALAR_STYLE: + return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) + } + panic("unknown scalar style") +} + +// Check if a %YAML directive is valid. +func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { + if version_directive.major != 1 || version_directive.minor != 1 { + return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") + } + return true +} + +// Check if a %TAG directive is valid. +func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { + handle := tag_directive.handle + prefix := tag_directive.prefix + if len(handle) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") + } + if handle[0] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") + } + if handle[len(handle)-1] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") + } + for i := 1; i < len(handle)-1; i += width(handle[i]) { + if !is_alpha(handle, i) { + return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") + } + } + if len(prefix) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") + } + return true +} + +// Check if an anchor is valid. +func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { + if len(anchor) == 0 { + problem := "anchor value must not be empty" + if alias { + problem = "alias value must not be empty" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + for i := 0; i < len(anchor); i += width(anchor[i]) { + if !is_alpha(anchor, i) { + problem := "anchor value must contain alphanumerical characters only" + if alias { + problem = "alias value must contain alphanumerical characters only" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + } + emitter.anchor_data.anchor = anchor + emitter.anchor_data.alias = alias + return true +} + +// Check if a tag is valid. +func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { + if len(tag) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") + } + for i := 0; i < len(emitter.tag_directives); i++ { + tag_directive := &emitter.tag_directives[i] + if bytes.HasPrefix(tag, tag_directive.prefix) { + emitter.tag_data.handle = tag_directive.handle + emitter.tag_data.suffix = tag[len(tag_directive.prefix):] + return true + } + } + emitter.tag_data.suffix = tag + return true +} + +// Check if a scalar is valid. 
+func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { + var ( + block_indicators = false + flow_indicators = false + line_breaks = false + special_characters = false + + leading_space = false + leading_break = false + trailing_space = false + trailing_break = false + break_space = false + space_break = false + + preceded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false + ) + + emitter.scalar_data.value = value + + if len(value) == 0 { + emitter.scalar_data.multiline = false + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = false + return true + } + + if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) { + block_indicators = true + flow_indicators = true + } + + preceded_by_whitespace = true + for i, w := 0, 0; i < len(value); i += w { + w = width(value[i]) + followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) + + if i == 0 { + switch value[i] { + case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': + flow_indicators = true + block_indicators = true + case '?', ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '-': + if followed_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } else { + switch value[i] { + case ',', '?', '[', ']', '{', '}': + flow_indicators = true + case ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '#': + if preceded_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } + + if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { + special_characters = true + } + if is_space(value, i) { + if i == 0 { + leading_space = true + } + if i+width(value[i]) == len(value) { + trailing_space = true + } + if previous_break { + break_space = true + } + previous_space = true + previous_break = false + } else if is_break(value, i) { + line_breaks = true + if i == 0 { + leading_break = true + } + if i+width(value[i]) == len(value) { + trailing_break = true + } + if previous_space { + space_break = true + } + previous_space = false + previous_break = true + } else { + previous_space = false + previous_break = false + } + + // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. 
+ preceded_by_whitespace = is_blankz(value, i) + } + + emitter.scalar_data.multiline = line_breaks + emitter.scalar_data.flow_plain_allowed = true + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = true + + if leading_space || leading_break || trailing_space || trailing_break { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if trailing_space { + emitter.scalar_data.block_allowed = false + } + if break_space { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || special_characters { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + emitter.scalar_data.block_allowed = false + } + if line_breaks { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if flow_indicators { + emitter.scalar_data.flow_plain_allowed = false + } + if block_indicators { + emitter.scalar_data.block_plain_allowed = false + } + return true +} + +// Check if the event data is valid. +func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + emitter.anchor_data.anchor = nil + emitter.tag_data.handle = nil + emitter.tag_data.suffix = nil + emitter.scalar_data.value = nil + + switch event.typ { + case yaml_ALIAS_EVENT: + if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { + return false + } + + case yaml_SCALAR_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + if !yaml_emitter_analyze_scalar(emitter, event.value) { + return false + } + + case yaml_SEQUENCE_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + + case yaml_MAPPING_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + } + return true +} + +// Write the BOM character. 
+func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { + if !flush(emitter) { + return false + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return true +} + +func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if !put_break(emitter) { + return false + } + } + for emitter.column < indent { + if !put(emitter, ' ') { + return false + } + } + emitter.whitespace = true + emitter.indention = true + return true +} + +func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, indicator) { + return false + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.open_ended = false + return true +} + +func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + for i := 0; i < len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': + must_write = true + default: + must_write = is_alpha(value, i) + } + if must_write { + if !write(emitter, value, &i) { + return false + } + } else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + if !put(emitter, '%') { + return false + } + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = 
false + breaks = false + } + } + + emitter.whitespace = false + emitter.indention = false + if emitter.root_context { + emitter.open_ended = true + } + + return true +} + +func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { + return false + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if value[i] == '\'' { + if !put(emitter, '\'') { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + spaces := false + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { + return false + } + + for i := 0; i < len(value); { + if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || + is_bom(value, i) || is_break(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 0x3F) + } + i += w + + if !put(emitter, '\\') { + return false + } + + var ok bool + switch v { + case 0x00: + ok = put(emitter, '0') + case 0x07: + ok = put(emitter, 'a') + case 0x08: + ok = put(emitter, 'b') + case 0x09: + ok = put(emitter, 't') + case 0x0A: + ok = put(emitter, 'n') + case 0x0b: + ok = put(emitter, 'v') + case 0x0c: + ok = put(emitter, 'f') + case 0x0d: + ok = put(emitter, 'r') + case 0x1b: + ok = put(emitter, 'e') + case 0x22: + ok = put(emitter, '"') + case 0x5c: + ok = put(emitter, '\\') + case 0x85: + ok = put(emitter, 'N') + case 0xA0: + ok = put(emitter, '_') + case 0x2028: + ok = put(emitter, 'L') + case 0x2029: + ok = put(emitter, 'P') + default: + if v <= 0xFF { + ok = put(emitter, 'x') + w = 2 + } else if v <= 0xFFFF { + ok = put(emitter, 'u') + w = 4 + } else { + ok = put(emitter, 'U') + w = 8 + } + for k := (w - 1) * 4; ok && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + ok = put(emitter, digit+'0') + } else { + ok = put(emitter, digit+'A'-10) + } + } + } + if !ok { + return false + } + spaces = false + } else if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if is_space(value, i+1) { + if 
!put(emitter, '\\') { + return false + } + } + i += width(value[i]) + } else if !write(emitter, value, &i) { + return false + } + spaces = true + } else { + if !write(emitter, value, &i) { + return false + } + spaces = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { + if is_space(value, 0) || is_break(value, 0) { + indent_hint := []byte{'0' + byte(emitter.best_indent)} + if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { + return false + } + } + + emitter.open_ended = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if !is_break(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.open_ended = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if is_break(value, i) { + chomp_hint[0] = '+' + emitter.open_ended = true + } + } + } + if chomp_hint[0] != 0 { + if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { + return false + } + } + return true +} + +func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + + return true +} + +func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for is_break(value, k) { + k += width(value[k]) + } + if !is_blankz(value, k) { + if !put_break(emitter) { + return false + } + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + leading_spaces = is_blank(value, i) + } + if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + emitter.indention = false + breaks = false + } + } + return true +} diff --git a/vendor/gopkg.in/yaml.v2/encode.go b/vendor/gopkg.in/yaml.v2/encode.go new file mode 100644 index 000000000..0ee738e11 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/encode.go @@ -0,0 +1,390 @@ +package yaml + +import ( + "encoding" + "fmt" + "io" + "reflect" + 
"regexp" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +// jsonNumber is the interface of the encoding/json.Number datatype. +// Repeating the interface here avoids a dependency on encoding/json, and also +// supports other libraries like jsoniter, which use a similar datatype with +// the same interface. Detecting this interface is useful when dealing with +// structures containing json.Number, which is a string under the hood. The +// encoder should prefer the use of Int64(), Float64() and string(), in that +// order, when encoding this type. +type jsonNumber interface { + Float64() (float64, error) + Int64() (int64, error) + String() string +} + +type encoder struct { + emitter yaml_emitter_t + event yaml_event_t + out []byte + flow bool + // doneInit holds whether the initial stream_start_event has been + // emitted. + doneInit bool +} + +func newEncoder() *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_string(&e.emitter, &e.out) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func newEncoderWithWriter(w io.Writer) *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_writer(&e.emitter, w) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func (e *encoder) init() { + if e.doneInit { + return + } + yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) + e.emit() + e.doneInit = true +} + +func (e *encoder) finish() { + e.emitter.open_ended = false + yaml_stream_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) destroy() { + yaml_emitter_delete(&e.emitter) +} + +func (e *encoder) emit() { + // This will internally delete the e.event value. + e.must(yaml_emitter_emit(&e.emitter, &e.event)) +} + +func (e *encoder) must(ok bool) { + if !ok { + msg := e.emitter.problem + if msg == "" { + msg = "unknown problem generating YAML content" + } + failf("%s", msg) + } +} + +func (e *encoder) marshalDoc(tag string, in reflect.Value) { + e.init() + yaml_document_start_event_initialize(&e.event, nil, nil, true) + e.emit() + e.marshal(tag, in) + yaml_document_end_event_initialize(&e.event, true) + e.emit() +} + +func (e *encoder) marshal(tag string, in reflect.Value) { + if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { + e.nilv() + return + } + iface := in.Interface() + switch m := iface.(type) { + case jsonNumber: + integer, err := m.Int64() + if err == nil { + // In this case the json.Number is a valid int64 + in = reflect.ValueOf(integer) + break + } + float, err := m.Float64() + if err == nil { + // In this case the json.Number is a valid float64 + in = reflect.ValueOf(float) + break + } + // fallback case - no number could be obtained + in = reflect.ValueOf(m.String()) + case time.Time, *time.Time: + // Although time.Time implements TextMarshaler, + // we don't want to treat it as a string for YAML + // purposes because YAML has special support for + // timestamps. 
+ case Marshaler: + v, err := m.MarshalYAML() + if err != nil { + fail(err) + } + if v == nil { + e.nilv() + return + } + in = reflect.ValueOf(v) + case encoding.TextMarshaler: + text, err := m.MarshalText() + if err != nil { + fail(err) + } + in = reflect.ValueOf(string(text)) + case nil: + e.nilv() + return + } + switch in.Kind() { + case reflect.Interface: + e.marshal(tag, in.Elem()) + case reflect.Map: + e.mapv(tag, in) + case reflect.Ptr: + if in.Type() == ptrTimeType { + e.timev(tag, in.Elem()) + } else { + e.marshal(tag, in.Elem()) + } + case reflect.Struct: + if in.Type() == timeType { + e.timev(tag, in) + } else { + e.structv(tag, in) + } + case reflect.Slice, reflect.Array: + if in.Type().Elem() == mapItemType { + e.itemsv(tag, in) + } else { + e.slicev(tag, in) + } + case reflect.String: + e.stringv(tag, in) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if in.Type() == durationType { + e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String())) + } else { + e.intv(tag, in) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + e.uintv(tag, in) + case reflect.Float32, reflect.Float64: + e.floatv(tag, in) + case reflect.Bool: + e.boolv(tag, in) + default: + panic("cannot marshal type: " + in.Type().String()) + } +} + +func (e *encoder) mapv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + keys := keyList(in.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + e.marshal("", k) + e.marshal("", in.MapIndex(k)) + } + }) +} + +func (e *encoder) itemsv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem) + for _, item := range slice { + e.marshal("", reflect.ValueOf(item.Key)) + e.marshal("", reflect.ValueOf(item.Value)) + } + }) +} + +func (e *encoder) structv(tag string, in reflect.Value) { + sinfo, err := getStructInfo(in.Type()) + if err != nil { + panic(err) + } + e.mappingv(tag, func() { + for _, info := range sinfo.FieldsList { + var value reflect.Value + if info.Inline == nil { + value = in.Field(info.Num) + } else { + value = in.FieldByIndex(info.Inline) + } + if info.OmitEmpty && isZero(value) { + continue + } + e.marshal("", reflect.ValueOf(info.Key)) + e.flow = info.Flow + e.marshal("", value) + } + if sinfo.InlineMap >= 0 { + m := in.Field(sinfo.InlineMap) + if m.Len() > 0 { + e.flow = false + keys := keyList(m.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + if _, found := sinfo.FieldsMap[k.String()]; found { + panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String())) + } + e.marshal("", k) + e.flow = false + e.marshal("", m.MapIndex(k)) + } + } + } + }) +} + +func (e *encoder) mappingv(tag string, f func()) { + implicit := tag == "" + style := yaml_BLOCK_MAPPING_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_MAPPING_STYLE + } + yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style) + e.emit() + f() + yaml_mapping_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) slicev(tag string, in reflect.Value) { + implicit := tag == "" + style := yaml_BLOCK_SEQUENCE_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_SEQUENCE_STYLE + } + e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + n := in.Len() + for i := 0; i < n; i++ { + e.marshal("", in.Index(i)) + } + e.must(yaml_sequence_end_event_initialize(&e.event)) + e.emit() +} + +// isBase60 
returns whether s is in base 60 notation as defined in YAML 1.1. +// +// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported +// in YAML 1.2 and by this package, but these should be marshalled quoted for +// the time being for compatibility with other parsers. +func isBase60Float(s string) (result bool) { + // Fast path. + if s == "" { + return false + } + c := s[0] + if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { + return false + } + // Do the full match. + return base60float.MatchString(s) +} + +// From http://yaml.org/type/float.html, except the regular expression there +// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. +var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) + +func (e *encoder) stringv(tag string, in reflect.Value) { + var style yaml_scalar_style_t + s := in.String() + canUsePlain := true + switch { + case !utf8.ValidString(s): + if tag == yaml_BINARY_TAG { + failf("explicitly tagged !!binary data must be base64-encoded") + } + if tag != "" { + failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) + } + // It can't be encoded directly as YAML so use a binary tag + // and encode it as base64. + tag = yaml_BINARY_TAG + s = encodeBase64(s) + case tag == "": + // Check to see if it would resolve to a specific + // tag when encoded unquoted. If it doesn't, + // there's no need to quote it. + rtag, _ := resolve("", s) + canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s) + } + // Note: it's possible for user code to emit invalid YAML + // if they explicitly specify a tag and a string containing + // text that's incompatible with that tag. + switch { + case strings.Contains(s, "\n"): + style = yaml_LITERAL_SCALAR_STYLE + case canUsePlain: + style = yaml_PLAIN_SCALAR_STYLE + default: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + e.emitScalar(s, "", tag, style) +} + +func (e *encoder) boolv(tag string, in reflect.Value) { + var s string + if in.Bool() { + s = "true" + } else { + s = "false" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) intv(tag string, in reflect.Value) { + s := strconv.FormatInt(in.Int(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) uintv(tag string, in reflect.Value) { + s := strconv.FormatUint(in.Uint(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) timev(tag string, in reflect.Value) { + t := in.Interface().(time.Time) + s := t.Format(time.RFC3339Nano) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) floatv(tag string, in reflect.Value) { + // Issue #352: When formatting, use the precision of the underlying value + precision := 64 + if in.Kind() == reflect.Float32 { + precision = 32 + } + + s := strconv.FormatFloat(in.Float(), 'g', -1, precision) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) nilv() { + e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) { + implicit := tag == "" + e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) + e.emit() +} diff --git a/vendor/gopkg.in/yaml.v2/go.mod b/vendor/gopkg.in/yaml.v2/go.mod new file mode 100644 index 000000000..1934e8769 --- /dev/null +++ 
b/vendor/gopkg.in/yaml.v2/go.mod @@ -0,0 +1,5 @@ +module "gopkg.in/yaml.v2" + +require ( + "gopkg.in/check.v1" v0.0.0-20161208181325-20d25e280405 +) diff --git a/vendor/gopkg.in/yaml.v2/parserc.go b/vendor/gopkg.in/yaml.v2/parserc.go new file mode 100644 index 000000000..81d05dfe5 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/parserc.go @@ -0,0 +1,1095 @@ +package yaml + +import ( + "bytes" +) + +// The parser implements the following grammar: +// +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// implicit_document ::= block_node DOCUMENT-END* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// block_node_or_indentless_sequence ::= +// ALIAS +// | properties (block_content | indentless_block_sequence)? +// | block_content +// | indentless_block_sequence +// block_node ::= ALIAS +// | properties block_content? +// | block_content +// flow_node ::= ALIAS +// | properties flow_content? +// | flow_content +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// block_content ::= block_collection | flow_collection | SCALAR +// flow_content ::= flow_collection | SCALAR +// block_collection ::= block_sequence | block_mapping +// flow_collection ::= flow_sequence | flow_mapping +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// block_mapping ::= BLOCK-MAPPING_START +// ((KEY block_node_or_indentless_sequence?)? +// (VALUE block_node_or_indentless_sequence?)?)* +// BLOCK-END +// flow_sequence ::= FLOW-SEQUENCE-START +// (flow_sequence_entry FLOW-ENTRY)* +// flow_sequence_entry? +// FLOW-SEQUENCE-END +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// flow_mapping ::= FLOW-MAPPING-START +// (flow_mapping_entry FLOW-ENTRY)* +// flow_mapping_entry? +// FLOW-MAPPING-END +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + +// Peek the next token in the token queue. +func peek_token(parser *yaml_parser_t) *yaml_token_t { + if parser.token_available || yaml_parser_fetch_more_tokens(parser) { + return &parser.tokens[parser.tokens_head] + } + return nil +} + +// Remove the next token from the queue (must be called after peek_token). +func skip_token(parser *yaml_parser_t) { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN + parser.tokens_head++ +} + +// Get the next event. +func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { + // Erase the event object. + *event = yaml_event_t{} + + // No events after the end of the stream or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { + return true + } + + // Generate the next event. + return yaml_parser_state_machine(parser, event) +} + +// Set parser error. +func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +// State dispatcher. 
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { + //trace("yaml_parser_state_machine", "state:", parser.state.String()) + + switch parser.state { + case yaml_PARSE_STREAM_START_STATE: + return yaml_parser_parse_stream_start(parser, event) + + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, true) + + case yaml_PARSE_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, false) + + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return yaml_parser_parse_document_content(parser, event) + + case yaml_PARSE_DOCUMENT_END_STATE: + return yaml_parser_parse_document_end(parser, event) + + case yaml_PARSE_BLOCK_NODE_STATE: + return yaml_parser_parse_node(parser, event, true, false) + + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return yaml_parser_parse_node(parser, event, true, true) + + case yaml_PARSE_FLOW_NODE_STATE: + return yaml_parser_parse_node(parser, event, false, false) + + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, true) + + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, false) + + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_indentless_sequence_entry(parser, event) + + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, true) + + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, false) + + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return yaml_parser_parse_block_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, true) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, false) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) + + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, true) + + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, true) + + default: + panic("invalid parser state") + } +} + +// Parse the production: +// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END +// ************ +func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_STREAM_START_TOKEN { + return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark) + } + parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + encoding: token.encoding, + } + skip_token(parser) + return true +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// * +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// ************************* +func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { + + token := peek_token(parser) + if token == nil { + return false + } + + // Parse extra document end indicators. + if !implicit { + for token.typ == yaml_DOCUMENT_END_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN && + token.typ != yaml_TAG_DIRECTIVE_TOKEN && + token.typ != yaml_DOCUMENT_START_TOKEN && + token.typ != yaml_STREAM_END_TOKEN { + // Parse an implicit document. + if !yaml_parser_process_directives(parser, nil, nil) { + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_BLOCK_NODE_STATE + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + } else if token.typ != yaml_STREAM_END_TOKEN { + // Parse an explicit document. + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + start_mark := token.start_mark + if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) { + return false + } + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_DOCUMENT_START_TOKEN { + yaml_parser_set_parser_error(parser, + "did not find expected <document start>", token.start_mark) + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE + end_mark := token.end_mark + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: false, + } + skip_token(parser) + + } else { + // Parse the stream end. + parser.state = yaml_PARSE_END_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + } + + return true +} + +// Parse the productions: +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node?
DOCUMENT-END* +// *********** +// +func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || + token.typ == yaml_TAG_DIRECTIVE_TOKEN || + token.typ == yaml_DOCUMENT_START_TOKEN || + token.typ == yaml_DOCUMENT_END_TOKEN || + token.typ == yaml_STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return yaml_parser_process_empty_scalar(parser, event, + token.start_mark) + } + return yaml_parser_parse_node(parser, event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// ************* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + start_mark := token.start_mark + end_mark := token.start_mark + + implicit := true + if token.typ == yaml_DOCUMENT_END_TOKEN { + end_mark = token.end_mark + skip_token(parser) + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = yaml_PARSE_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + start_mark: start_mark, + end_mark: end_mark, + implicit: implicit, + } + return true +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// block_node ::= ALIAS +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// flow_node ::= ALIAS +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// properties ::= TAG ANCHOR? | ANCHOR TAG? 
+// ************************* +// block_content ::= block_collection | flow_collection | SCALAR +// ****** +// flow_content ::= flow_collection | SCALAR +// ****** +func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { + //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + anchor: token.value, + } + skip_token(parser) + return true + } + + start_mark := token.start_mark + end_mark := token.start_mark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark yaml_mark_t + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + start_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } else if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + start_mark = token.start_mark + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { + tag = tag_suffix + tag_suffix = nil + } else { + for i := range parser.tag_directives { + if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) 
+ break + } + } + if len(tag) == 0 { + yaml_parser_set_parser_error_context(parser, + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.end_mark + if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + value: token.value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(token.style), + } + skip_token(parser) + return true + } + if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_FLOW_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + quoted_implicit: false, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow node" + if block { + context = "while parsing a block node" + } + yaml_parser_set_parser_error_context(parser, context, start_mark, + "did not find expected node content", token.start_mark) + return false +} + +// Parse the productions: +// 
block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// ******************** *********** * ********* +// +func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } else { + parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } + if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.start_mark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// *********** * +func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && + token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? + } + return true +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// ******************* +// ((KEY block_node_or_indentless_sequence?)? 
+// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +// +func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_KEY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } else { + parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } else if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block mapping", context_mark, + "did not find expected key", token.start_mark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? +// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +// +// +func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + implicit: true, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + skip_token(parser) + return true + } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true +} + +// +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// *** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// ***** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? + } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * *** * +// +func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } else { + parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * ***** * +// +func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { + token := peek_token(parser) + if token == nil { + return false + } + if empty { + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Generate an empty scalar event. 
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: mark, + end_mark: mark, + value: nil, // Empty + implicit: true, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true +} + +var default_tag_directives = []yaml_tag_directive_t{ + {[]byte("!"), []byte("!")}, + {[]byte("!!"), []byte("tag:yaml.org,2002:")}, +} + +// Parse directives. +func yaml_parser_process_directives(parser *yaml_parser_t, + version_directive_ref **yaml_version_directive_t, + tag_directives_ref *[]yaml_tag_directive_t) bool { + + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + + token := peek_token(parser) + if token == nil { + return false + } + + for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { + if version_directive != nil { + yaml_parser_set_parser_error(parser, + "found duplicate %YAML directive", token.start_mark) + return false + } + if token.major != 1 || token.minor != 1 { + yaml_parser_set_parser_error(parser, + "found incompatible YAML document", token.start_mark) + return false + } + version_directive = &yaml_version_directive_t{ + major: token.major, + minor: token.minor, + } + } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { + value := yaml_tag_directive_t{ + handle: token.value, + prefix: token.prefix, + } + if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { + return false + } + tag_directives = append(tag_directives, value) + } + + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + + for i := range default_tag_directives { + if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { + return false + } + } + + if version_directive_ref != nil { + *version_directive_ref = version_directive + } + if tag_directives_ref != nil { + *tag_directives_ref = tag_directives + } + return true +} + +// Append a tag directive to the directives stack. +func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { + for i := range parser.tag_directives { + if bytes.Equal(value.handle, parser.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) + } + } + + // [Go] I suspect the copy is unnecessary. This was likely done + // because there was no way to track ownership of the data. + value_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(value_copy.handle, value.handle) + copy(value_copy.prefix, value.prefix) + parser.tag_directives = append(parser.tag_directives, value_copy) + return true +} diff --git a/vendor/gopkg.in/yaml.v2/readerc.go b/vendor/gopkg.in/yaml.v2/readerc.go new file mode 100644 index 000000000..7c1f5fac3 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/readerc.go @@ -0,0 +1,412 @@ +package yaml + +import ( + "io" +) + +// Set the reader error and return 0. +func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { + parser.error = yaml_READER_ERROR + parser.problem = problem + parser.problem_offset = offset + parser.problem_value = value + return false +} + +// Byte order marks. 
+const ( + bom_UTF8 = "\xef\xbb\xbf" + bom_UTF16LE = "\xff\xfe" + bom_UTF16BE = "\xfe\xff" +) + +// Determine the input stream encoding by checking the BOM symbol. If no BOM is +// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. +func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { + // Ensure that we had enough bytes in the raw buffer. + for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { + if !yaml_parser_update_raw_buffer(parser) { + return false + } + } + + // Determine the encoding. + buf := parser.raw_buffer + pos := parser.raw_buffer_pos + avail := len(buf) - pos + if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { + parser.encoding = yaml_UTF16LE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { + parser.encoding = yaml_UTF16BE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { + parser.encoding = yaml_UTF8_ENCODING + parser.raw_buffer_pos += 3 + parser.offset += 3 + } else { + parser.encoding = yaml_UTF8_ENCODING + } + return true +} + +// Update the raw buffer. +func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { + size_read := 0 + + // Return if the raw buffer is full. + if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { + return true + } + + // Return on EOF. + if parser.eof { + return true + } + + // Move the remaining bytes in the raw buffer to the beginning. + if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { + copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) + } + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] + parser.raw_buffer_pos = 0 + + // Call the read handler to fill the buffer. + size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] + if err == io.EOF { + parser.eof = true + } else if err != nil { + return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) + } + return true +} + +// Ensure that the buffer contains at least `length` characters. +// Return true on success, false on failure. +// +// The length is supposed to be significantly less that the buffer size. +func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { + if parser.read_handler == nil { + panic("read handler must be set") + } + + // [Go] This function was changed to guarantee the requested length size at EOF. + // The fact we need to do this is pretty awful, but the description above implies + // for that to be the case, and there are tests + + // If the EOF flag is set and the raw buffer is empty, do nothing. + if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { + // [Go] ACTUALLY! Read the documentation of this function above. + // This is just broken. To return true, we need to have the + // given length in the buffer. Not doing that means every single + // check that calls this function to make sure the buffer has a + // given length is Go) panicking; or C) accessing invalid memory. + //return true + } + + // Return if the buffer contains enough characters. + if parser.unread >= length { + return true + } + + // Determine the input encoding if it is not known yet. 
+ if parser.encoding == yaml_ANY_ENCODING { + if !yaml_parser_determine_encoding(parser) { + return false + } + } + + // Move the unread characters to the beginning of the buffer. + buffer_len := len(parser.buffer) + if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { + copy(parser.buffer, parser.buffer[parser.buffer_pos:]) + buffer_len -= parser.buffer_pos + parser.buffer_pos = 0 + } else if parser.buffer_pos == buffer_len { + buffer_len = 0 + parser.buffer_pos = 0 + } + + // Open the whole buffer for writing, and cut it before returning. + parser.buffer = parser.buffer[:cap(parser.buffer)] + + // Fill the buffer until it has enough characters. + first := true + for parser.unread < length { + + // Fill the raw buffer if necessary. + if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { + if !yaml_parser_update_raw_buffer(parser) { + parser.buffer = parser.buffer[:buffer_len] + return false + } + } + first = false + + // Decode the raw buffer. + inner: + for parser.raw_buffer_pos != len(parser.raw_buffer) { + var value rune + var width int + + raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos + + // Decode the next character. + switch parser.encoding { + case yaml_UTF8_ENCODING: + // Decode a UTF-8 character. Check RFC 3629 + // (http://www.ietf.org/rfc/rfc3629.txt) for more details. + // + // The following table (taken from the RFC) is used for + // decoding. + // + // Char. number range | UTF-8 octet sequence + // (hexadecimal) | (binary) + // --------------------+------------------------------------ + // 0000 0000-0000 007F | 0xxxxxxx + // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx + // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx + // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + // + // Additionally, the characters in the range 0xD800-0xDFFF + // are prohibited as they are reserved for use with UTF-16 + // surrogate pairs. + + // Determine the length of the UTF-8 sequence. + octet := parser.raw_buffer[parser.raw_buffer_pos] + switch { + case octet&0x80 == 0x00: + width = 1 + case octet&0xE0 == 0xC0: + width = 2 + case octet&0xF0 == 0xE0: + width = 3 + case octet&0xF8 == 0xF0: + width = 4 + default: + // The leading octet is invalid. + return yaml_parser_set_reader_error(parser, + "invalid leading UTF-8 octet", + parser.offset, int(octet)) + } + + // Check if the raw buffer contains an incomplete character. + if width > raw_unread { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. + switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. + for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return yaml_parser_set_reader_error(parser, + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. + value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. 
+ switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return yaml_parser_set_reader_error(parser, + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. + if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return yaml_parser_set_reader_error(parser, + "invalid Unicode character", + parser.offset, int(value)) + } + + case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: + var low, high int + if parser.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + low, high = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). + // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. + // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. + if raw_unread < 2 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. + if value&0xFC00 == 0xDC00 { + return yaml_parser_set_reader_error(parser, + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. + if value2&0xFC00 != 0xDC00 { + return yaml_parser_set_reader_error(parser, + "expected low surrogate area", + parser.offset+2, int(value2)) + } + + // Generate the value of the surrogate pair. + value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) + } else { + width = 2 + } + + default: + panic("impossible") + } + + // Check if the character is in the allowed range: + // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) + // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) + // | [#x10000-#x10FFFF] (32 bit) + switch { + case value == 0x09: + case value == 0x0A: + case value == 0x0D: + case value >= 0x20 && value <= 0x7E: + case value == 0x85: + case value >= 0xA0 && value <= 0xD7FF: + case value >= 0xE000 && value <= 0xFFFD: + case value >= 0x10000 && value <= 0x10FFFF: + default: + return yaml_parser_set_reader_error(parser, + "control characters are not allowed", + parser.offset, int(value)) + } + + // Move the raw pointers. 
+ parser.raw_buffer_pos += width + parser.offset += width + + // Finally put the character into the buffer. + if value <= 0x7F { + // 0000 0000-0000 007F . 0xxxxxxx + parser.buffer[buffer_len+0] = byte(value) + buffer_len += 1 + } else if value <= 0x7FF { + // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) + parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) + buffer_len += 2 + } else if value <= 0xFFFF { + // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) + buffer_len += 3 + } else { + // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) + buffer_len += 4 + } + + parser.unread++ + } + + // On EOF, put NUL into the buffer and return. + if parser.eof { + parser.buffer[buffer_len] = 0 + buffer_len++ + parser.unread++ + break + } + } + // [Go] Read the documentation of this function above. To return true, + // we need to have the given length in the buffer. Not doing that means + // every single check that calls this function to make sure the buffer + // has a given length is Go) panicking; or C) accessing invalid memory. + // This happens here due to the EOF above breaking early. + for buffer_len < length { + parser.buffer[buffer_len] = 0 + buffer_len++ + } + parser.buffer = parser.buffer[:buffer_len] + return true +} diff --git a/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/gopkg.in/yaml.v2/resolve.go new file mode 100644 index 000000000..4120e0c91 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/resolve.go @@ -0,0 +1,258 @@ +package yaml + +import ( + "encoding/base64" + "math" + "regexp" + "strconv" + "strings" + "time" +) + +type resolveMapItem struct { + value interface{} + tag string +} + +var resolveTable = make([]byte, 256) +var resolveMap = make(map[string]resolveMapItem) + +func init() { + t := resolveTable + t[int('+')] = 'S' // Sign + t[int('-')] = 'S' + for _, c := range "0123456789" { + t[int(c)] = 'D' // Digit + } + for _, c := range "yYnNtTfFoO~" { + t[int(c)] = 'M' // In map + } + t[int('.')] = '.' 
// Float (potentially in map) + + var resolveMapList = []struct { + v interface{} + tag string + l []string + }{ + {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}}, + {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}}, + {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}}, + {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}}, + {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}}, + {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}}, + {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}}, + {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}}, + {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}}, + {"<<", yaml_MERGE_TAG, []string{"<<"}}, + } + + m := resolveMap + for _, item := range resolveMapList { + for _, s := range item.l { + m[s] = resolveMapItem{item.v, item.tag} + } + } +} + +const longTagPrefix = "tag:yaml.org,2002:" + +func shortTag(tag string) string { + // TODO This can easily be made faster and produce less garbage. + if strings.HasPrefix(tag, longTagPrefix) { + return "!!" + tag[len(longTagPrefix):] + } + return tag +} + +func longTag(tag string) string { + if strings.HasPrefix(tag, "!!") { + return longTagPrefix + tag[2:] + } + return tag +} + +func resolvableTag(tag string) bool { + switch tag { + case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG: + return true + } + return false +} + +var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) + +func resolve(tag string, in string) (rtag string, out interface{}) { + if !resolvableTag(tag) { + return tag, in + } + + defer func() { + switch tag { + case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG: + return + case yaml_FLOAT_TAG: + if rtag == yaml_INT_TAG { + switch v := out.(type) { + case int64: + rtag = yaml_FLOAT_TAG + out = float64(v) + return + case int: + rtag = yaml_FLOAT_TAG + out = float64(v) + return + } + } + } + failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) + }() + + // Any data is accepted as a !!str or !!binary. + // Otherwise, the prefix is enough of a hint about what it might be. + hint := byte('N') + if in != "" { + hint = resolveTable[in[0]] + } + if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG { + // Handle things we can lookup in a map. + if item, ok := resolveMap[in]; ok { + return item.tag, item.value + } + + // Base 60 floats are a bad idea, were dropped in YAML 1.2, and + // are purposefully unsupported here. They're still quoted on + // the way out for compatibility with other parser, though. + + switch hint { + case 'M': + // We've already checked the map above. + + case '.': + // Not in the map, so maybe a normal float. + floatv, err := strconv.ParseFloat(in, 64) + if err == nil { + return yaml_FLOAT_TAG, floatv + } + + case 'D', 'S': + // Int, float, or timestamp. + // Only try values as a timestamp if the value is unquoted or there's an explicit + // !!timestamp tag. 
+ if tag == "" || tag == yaml_TIMESTAMP_TAG { + t, ok := parseTimestamp(in) + if ok { + return yaml_TIMESTAMP_TAG, t + } + } + + plain := strings.Replace(in, "_", "", -1) + intv, err := strconv.ParseInt(plain, 0, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + uintv, err := strconv.ParseUint(plain, 0, 64) + if err == nil { + return yaml_INT_TAG, uintv + } + if yamlStyleFloat.MatchString(plain) { + floatv, err := strconv.ParseFloat(plain, 64) + if err == nil { + return yaml_FLOAT_TAG, floatv + } + } + if strings.HasPrefix(plain, "0b") { + intv, err := strconv.ParseInt(plain[2:], 2, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + uintv, err := strconv.ParseUint(plain[2:], 2, 64) + if err == nil { + return yaml_INT_TAG, uintv + } + } else if strings.HasPrefix(plain, "-0b") { + intv, err := strconv.ParseInt("-" + plain[3:], 2, 64) + if err == nil { + if true || intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + } + default: + panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")") + } + } + return yaml_STR_TAG, in +} + +// encodeBase64 encodes s as base64 that is broken up into multiple lines +// as appropriate for the resulting length. +func encodeBase64(s string) string { + const lineLen = 70 + encLen := base64.StdEncoding.EncodedLen(len(s)) + lines := encLen/lineLen + 1 + buf := make([]byte, encLen*2+lines) + in := buf[0:encLen] + out := buf[encLen:] + base64.StdEncoding.Encode(in, []byte(s)) + k := 0 + for i := 0; i < len(in); i += lineLen { + j := i + lineLen + if j > len(in) { + j = len(in) + } + k += copy(out[k:], in[i:j]) + if lines > 1 { + out[k] = '\n' + k++ + } + } + return string(out[:k]) +} + +// This is a subset of the formats allowed by the regular expression +// defined at http://yaml.org/type/timestamp.html. +var allowedTimestampFormats = []string{ + "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields. + "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t". + "2006-1-2 15:4:5.999999999", // space separated with no time zone + "2006-1-2", // date only + // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5" + // from the set of examples. +} + +// parseTimestamp parses s as a timestamp string and +// returns the timestamp and reports whether it succeeded. +// Timestamp formats are defined at http://yaml.org/type/timestamp.html +func parseTimestamp(s string) (time.Time, bool) { + // TODO write code to check all the formats supported by + // http://yaml.org/type/timestamp.html instead of using time.Parse. + + // Quick check: all date formats start with YYYY-. 
+ i := 0 + for ; i < len(s); i++ { + if c := s[i]; c < '0' || c > '9' { + break + } + } + if i != 4 || i == len(s) || s[i] != '-' { + return time.Time{}, false + } + for _, format := range allowedTimestampFormats { + if t, err := time.Parse(format, s); err == nil { + return t, true + } + } + return time.Time{}, false +} diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/gopkg.in/yaml.v2/scannerc.go new file mode 100644 index 000000000..0b9bb6030 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/scannerc.go @@ -0,0 +1,2711 @@ +package yaml + +import ( + "bytes" + "fmt" +) + +// Introduction +// ************ +// +// The following notes assume that you are familiar with the YAML specification +// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in +// some cases we are less restrictive that it requires. +// +// The process of transforming a YAML stream into a sequence of events is +// divided on two steps: Scanning and Parsing. +// +// The Scanner transforms the input stream into a sequence of tokens, while the +// parser transform the sequence of tokens produced by the Scanner into a +// sequence of parsing events. +// +// The Scanner is rather clever and complicated. The Parser, on the contrary, +// is a straightforward implementation of a recursive-descendant parser (or, +// LL(1) parser, as it is usually called). +// +// Actually there are two issues of Scanning that might be called "clever", the +// rest is quite straightforward. The issues are "block collection start" and +// "simple keys". Both issues are explained below in details. +// +// Here the Scanning step is explained and implemented. We start with the list +// of all the tokens produced by the Scanner together with short descriptions. +// +// Now, tokens: +// +// STREAM-START(encoding) # The stream start. +// STREAM-END # The stream end. +// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive. +// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive. +// DOCUMENT-START # '---' +// DOCUMENT-END # '...' +// BLOCK-SEQUENCE-START # Indentation increase denoting a block +// BLOCK-MAPPING-START # sequence or a block mapping. +// BLOCK-END # Indentation decrease. +// FLOW-SEQUENCE-START # '[' +// FLOW-SEQUENCE-END # ']' +// BLOCK-SEQUENCE-START # '{' +// BLOCK-SEQUENCE-END # '}' +// BLOCK-ENTRY # '-' +// FLOW-ENTRY # ',' +// KEY # '?' or nothing (simple keys). +// VALUE # ':' +// ALIAS(anchor) # '*anchor' +// ANCHOR(anchor) # '&anchor' +// TAG(handle,suffix) # '!handle!suffix' +// SCALAR(value,style) # A scalar. +// +// The following two tokens are "virtual" tokens denoting the beginning and the +// end of the stream: +// +// STREAM-START(encoding) +// STREAM-END +// +// We pass the information about the input stream encoding with the +// STREAM-START token. +// +// The next two tokens are responsible for tags: +// +// VERSION-DIRECTIVE(major,minor) +// TAG-DIRECTIVE(handle,prefix) +// +// Example: +// +// %YAML 1.1 +// %TAG ! !foo +// %TAG !yaml! tag:yaml.org,2002: +// --- +// +// The correspoding sequence of tokens: +// +// STREAM-START(utf-8) +// VERSION-DIRECTIVE(1,1) +// TAG-DIRECTIVE("!","!foo") +// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:") +// DOCUMENT-START +// STREAM-END +// +// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole +// line. +// +// The document start and end indicators are represented by: +// +// DOCUMENT-START +// DOCUMENT-END +// +// Note that if a YAML stream contains an implicit document (without '---' +// and '...' 
indicators), no DOCUMENT-START and DOCUMENT-END tokens will be +// produced. +// +// In the following examples, we present whole documents together with the +// produced tokens. +// +// 1. An implicit document: +// +// 'a scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// STREAM-END +// +// 2. An explicit document: +// +// --- +// 'a scalar' +// ... +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// SCALAR("a scalar",single-quoted) +// DOCUMENT-END +// STREAM-END +// +// 3. Several documents in a stream: +// +// 'a scalar' +// --- +// 'another scalar' +// --- +// 'yet another scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// DOCUMENT-START +// SCALAR("another scalar",single-quoted) +// DOCUMENT-START +// SCALAR("yet another scalar",single-quoted) +// STREAM-END +// +// We have already introduced the SCALAR token above. The following tokens are +// used to describe aliases, anchors, tag, and scalars: +// +// ALIAS(anchor) +// ANCHOR(anchor) +// TAG(handle,suffix) +// SCALAR(value,style) +// +// The following series of examples illustrate the usage of these tokens: +// +// 1. A recursive sequence: +// +// &A [ *A ] +// +// Tokens: +// +// STREAM-START(utf-8) +// ANCHOR("A") +// FLOW-SEQUENCE-START +// ALIAS("A") +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A tagged scalar: +// +// !!float "3.14" # A good approximation. +// +// Tokens: +// +// STREAM-START(utf-8) +// TAG("!!","float") +// SCALAR("3.14",double-quoted) +// STREAM-END +// +// 3. Various scalar styles: +// +// --- # Implicit empty plain scalars do not produce tokens. +// --- a plain scalar +// --- 'a single-quoted scalar' +// --- "a double-quoted scalar" +// --- |- +// a literal scalar +// --- >- +// a folded +// scalar +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// DOCUMENT-START +// SCALAR("a plain scalar",plain) +// DOCUMENT-START +// SCALAR("a single-quoted scalar",single-quoted) +// DOCUMENT-START +// SCALAR("a double-quoted scalar",double-quoted) +// DOCUMENT-START +// SCALAR("a literal scalar",literal) +// DOCUMENT-START +// SCALAR("a folded scalar",folded) +// STREAM-END +// +// Now it's time to review collection-related tokens. We will start with +// flow collections: +// +// FLOW-SEQUENCE-START +// FLOW-SEQUENCE-END +// FLOW-MAPPING-START +// FLOW-MAPPING-END +// FLOW-ENTRY +// KEY +// VALUE +// +// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and +// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' +// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the +// indicators '?' and ':', which are used for denoting mapping keys and values, +// are represented by the KEY and VALUE tokens. +// +// The following examples show flow collections: +// +// 1. A flow sequence: +// +// [item 1, item 2, item 3] +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-SEQUENCE-START +// SCALAR("item 1",plain) +// FLOW-ENTRY +// SCALAR("item 2",plain) +// FLOW-ENTRY +// SCALAR("item 3",plain) +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A flow mapping: +// +// { +// a simple key: a value, # Note that the KEY token is produced. +// ? 
a complex key: another value, +// } +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// FLOW-ENTRY +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// FLOW-ENTRY +// FLOW-MAPPING-END +// STREAM-END +// +// A simple key is a key which is not denoted by the '?' indicator. Note that +// the Scanner still produce the KEY token whenever it encounters a simple key. +// +// For scanning block collections, the following tokens are used (note that we +// repeat KEY and VALUE here): +// +// BLOCK-SEQUENCE-START +// BLOCK-MAPPING-START +// BLOCK-END +// BLOCK-ENTRY +// KEY +// VALUE +// +// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation +// increase that precedes a block collection (cf. the INDENT token in Python). +// The token BLOCK-END denote indentation decrease that ends a block collection +// (cf. the DEDENT token in Python). However YAML has some syntax pecularities +// that makes detections of these tokens more complex. +// +// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators +// '-', '?', and ':' correspondingly. +// +// The following examples show how the tokens BLOCK-SEQUENCE-START, +// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: +// +// 1. Block sequences: +// +// - item 1 +// - item 2 +// - +// - item 3.1 +// - item 3.2 +// - +// key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 3.1",plain) +// BLOCK-ENTRY +// SCALAR("item 3.2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Block mappings: +// +// a simple key: a value # The KEY token is produced here. +// ? a complex key +// : another value +// a mapping: +// key 1: value 1 +// key 2: value 2 +// a sequence: +// - item 1 +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// KEY +// SCALAR("a mapping",plain) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML does not always require to start a new block collection from a new +// line. If the current line contains only '-', '?', and ':' indicators, a new +// block collection may start at the current line. The following examples +// illustrate this case: +// +// 1. Collections in a sequence: +// +// - - item 1 +// - item 2 +// - key 1: value 1 +// key 2: value 2 +// - ? 
complex key +// : complex value +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("complex key") +// VALUE +// SCALAR("complex value") +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Collections in a mapping: +// +// ? a sequence +// : - item 1 +// - item 2 +// ? a mapping +// : key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// KEY +// SCALAR("a mapping",plain) +// VALUE +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML also permits non-indented sequences if they are included into a block +// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: +// +// key: +// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Ensure that the buffer contains the required number of characters. +// Return true on success, false on failure (reader error or memory error). +func cache(parser *yaml_parser_t, length int) bool { + // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) + return parser.unread >= length || yaml_parser_update_buffer(parser, length) +} + +// Advance the buffer pointer. +func skip(parser *yaml_parser_t) { + parser.mark.index++ + parser.mark.column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func skip_line(parser *yaml_parser_t) { + if is_crlf(parser.buffer, parser.buffer_pos) { + parser.mark.index += 2 + parser.mark.column = 0 + parser.mark.line++ + parser.unread -= 2 + parser.buffer_pos += 2 + } else if is_break(parser.buffer, parser.buffer_pos) { + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + } +} + +// Copy a character to a string buffer and advance pointers. +func read(parser *yaml_parser_t, s []byte) []byte { + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) + parser.buffer_pos += w + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. +func read_line(parser *yaml_parser_t, s []byte) []byte { + buf := parser.buffer + pos := parser.buffer_pos + switch { + case buf[pos] == '\r' && buf[pos+1] == '\n': + // CR LF . 
LF + s = append(s, '\n') + parser.buffer_pos += 2 + parser.mark.index++ + parser.unread-- + case buf[pos] == '\r' || buf[pos] == '\n': + // CR|LF . LF + s = append(s, '\n') + parser.buffer_pos += 1 + case buf[pos] == '\xC2' && buf[pos+1] == '\x85': + // NEL . LF + s = append(s, '\n') + parser.buffer_pos += 2 + case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): + // LS|PS . LS|PS + s = append(s, buf[parser.buffer_pos:pos+3]...) + parser.buffer_pos += 3 + default: + return s + } + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + return s +} + +// Get the next token. +func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { + // Erase the token object. + *token = yaml_token_t{} // [Go] Is this necessary? + + // No tokens after STREAM-END or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR { + return true + } + + // Ensure that the tokens queue contains enough tokens. + if !parser.token_available { + if !yaml_parser_fetch_more_tokens(parser) { + return false + } + } + + // Fetch the next token from the queue. + *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.typ == yaml_STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return true +} + +// Set the scanner error and return false. +func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { + parser.error = yaml_SCANNER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = parser.mark + return false +} + +func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { + context := "while parsing a tag" + if directive { + context = "while parsing a %TAG directive" + } + return yaml_parser_set_scanner_error(parser, context, context_mark, problem) +} + +func trace(args ...interface{}) func() { + pargs := append([]interface{}{"+++"}, args...) + fmt.Println(pargs...) + pargs = append([]interface{}{"---"}, args...) + return func() { fmt.Println(pargs...) } +} + +// Ensure that the tokens queue contains at least one token which can be +// returned to the Parser. +func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { + // While we need more tokens to fetch, do it. + for { + if parser.tokens_head != len(parser.tokens) { + // If queue is non-empty, check if any potential simple key may + // occupy the head position. + head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed] + if !ok { + break + } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok { + return false + } else if !valid { + break + } + } + // Fetch the next token. + if !yaml_parser_fetch_next_token(parser) { + return false + } + } + + parser.token_available = true + return true +} + +// The dispatcher for token fetchers. +func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { + // Ensure that the buffer is initialized. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we just started scanning. Fetch STREAM-START then. + if !parser.stream_start_produced { + return yaml_parser_fetch_stream_start(parser) + } + + // Eat whitespaces and comments until we reach the next token. 
+ if !yaml_parser_scan_to_next_token(parser) { + return false + } + + // Check the indentation level against the current column. + if !yaml_parser_unroll_indent(parser, parser.mark.column) { + return false + } + + // Ensure that the buffer contains at least 4 characters. 4 is the length + // of the longest indicators ('--- ' and '... '). + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + // Is it the end of the stream? + if is_z(parser.buffer, parser.buffer_pos) { + return yaml_parser_fetch_stream_end(parser) + } + + // Is it a directive? + if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { + return yaml_parser_fetch_directive(parser) + } + + buf := parser.buffer + pos := parser.buffer_pos + + // Is it the document start indicator? + if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) + } + + // Is it the document end indicator? + if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) + } + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return yaml_parser_fetch_flow_entry(parser) + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { + return yaml_parser_fetch_block_entry(parser) + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_key(parser) + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_value(parser) + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) + } + + // Is it an anchor? + if parser.buffer[parser.buffer_pos] == '&' { + return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' { + return yaml_parser_fetch_tag(parser) + } + + // Is it a literal scalar? + if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, true) + } + + // Is it a folded scalar? + if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, false) + } + + // Is it a single-quoted scalar? 
+ if parser.buffer[parser.buffer_pos] == '\'' { + return yaml_parser_fetch_flow_scalar(parser, true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return yaml_parser_fetch_flow_scalar(parser, false) + } + + // Is it a plain scalar? + // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. + // + // The last rule is more restrictive than the specification requires. + // [Go] Make this logic more reasonable. + //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level == 0 && + (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_plain_scalar(parser) + } + + // If we don't determine the token type so far, it is an error. + return yaml_parser_set_scanner_error(parser, + "while scanning for the next token", parser.mark, + "found character that cannot start any token") +} + +func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) { + if !simple_key.possible { + return false, true + } + + // The 1.2 specification says: + // + // "If the ? indicator is omitted, parsing needs to see past the + // implicit key to recognize it as such. To limit the amount of + // lookahead required, the “:” indicator must appear at most 1024 + // Unicode characters beyond the start of the key. In addition, the key + // is restricted to a single line." + // + if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index { + // Check if the potential simple key to be removed is required. + if simple_key.required { + return false, yaml_parser_set_scanner_error(parser, + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") + } + simple_key.possible = false + return false, true + } + return true, true +} + +// Check if a simple key may start at the current position and add it if +// needed. 
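+// A "simple key" is a mapping key that is not introduced by the '?' indicator,
+// e.g. the "foo" in "foo: bar". Its position must be remembered so that a KEY
+// token can be inserted retroactively once the following ':' indicator is seen.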
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { + // A simple key is required at the current position if the scanner is in + // the block context and the current column coincides with the indentation + // level. + + required := parser.flow_level == 0 && parser.indent == parser.mark.column + + // + // If the current position may start a simple key, save it. + // + if parser.simple_key_allowed { + simple_key := yaml_simple_key_t{ + possible: true, + required: required, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + mark: parser.mark, + } + + if !yaml_parser_remove_simple_key(parser) { + return false + } + parser.simple_keys[len(parser.simple_keys)-1] = simple_key + parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1 + } + return true +} + +// Remove a potential simple key at the current flow level. +func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { + i := len(parser.simple_keys) - 1 + if parser.simple_keys[i].possible { + // If the key is required, it is an error. + if parser.simple_keys[i].required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", parser.simple_keys[i].mark, + "could not find expected ':'") + } + // Remove the key from the stack. + parser.simple_keys[i].possible = false + delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number) + } + return true +} + +// max_flow_level limits the flow_level +const max_flow_level = 10000 + +// Increase the flow level and resize the simple key list if needed. +func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { + // Reset the simple key on the next level. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{ + possible: false, + required: false, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + mark: parser.mark, + }) + + // Increase the flow level. + parser.flow_level++ + if parser.flow_level > max_flow_level { + return yaml_parser_set_scanner_error(parser, + "while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark, + fmt.Sprintf("exceeded max depth of %d", max_flow_level)) + } + return true +} + +// Decrease the flow level. +func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { + if parser.flow_level > 0 { + parser.flow_level-- + last := len(parser.simple_keys) - 1 + delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number) + parser.simple_keys = parser.simple_keys[:last] + } + return true +} + +// max_indents limits the indents stack size +const max_indents = 10000 + +// Push the current indentation level to the stack and set the new level +// the current column is greater than the indentation level. In this case, +// append or insert the specified token into the token queue. +func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + if parser.indent < column { + // Push the current indentation level to the stack and set the new + // indentation level. + parser.indents = append(parser.indents, parser.indent) + parser.indent = column + if len(parser.indents) > max_indents { + return yaml_parser_set_scanner_error(parser, + "while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark, + fmt.Sprintf("exceeded max depth of %d", max_indents)) + } + + // Create a token and insert it into the queue. 
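+		// When number is -1 the token is simply appended to the queue; otherwise
+		// it is inserted at the queue position of the saved simple key, so that
+		// BLOCK-MAPPING-START ends up before the KEY token already queued for it.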
+ token := yaml_token_t{ + typ: typ, + start_mark: mark, + end_mark: mark, + } + if number > -1 { + number -= parser.tokens_parsed + } + yaml_insert_token(parser, number, &token) + } + return true +} + +// Pop indentation levels from the indents stack until the current level +// becomes less or equal to the column. For each indentation level, append +// the BLOCK-END token. +func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + // Loop through the indentation levels in the stack. + for parser.indent > column { + // Create a token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + + // Pop the indentation level. + parser.indent = parser.indents[len(parser.indents)-1] + parser.indents = parser.indents[:len(parser.indents)-1] + } + return true +} + +// Initialize the scanner and produce the STREAM-START token. +func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { + + // Set the initial indentation. + parser.indent = -1 + + // Initialize the simple key stack. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + parser.simple_keys_by_tok = make(map[int]int) + + // A simple key is allowed at the beginning of the stream. + parser.simple_key_allowed = true + + // We have started. + parser.stream_start_produced = true + + // Create the STREAM-START token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_START_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + encoding: parser.encoding, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the STREAM-END token and shut down the scanner. +func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { + + // Force new line. + if parser.mark.column != 0 { + parser.mark.column = 0 + parser.mark.line++ + } + + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the STREAM-END token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. +func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. + token := yaml_token_t{} + if !yaml_parser_scan_directive(parser, &token) { + return false + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the DOCUMENT-START or DOCUMENT-END token. +func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Consume the token. 
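+	// The document indicator is exactly three characters long ('---' or '...'),
+	// hence the three skip() calls below.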
+ start_mark := parser.mark + + skip(parser) + skip(parser) + skip(parser) + + end_mark := parser.mark + + // Create the DOCUMENT-START or DOCUMENT-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. +func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // The indicators '[' and '{' may start a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // Increase the flow level. + if !yaml_parser_increase_flow_level(parser) { + return false + } + + // A simple key may follow the indicators '[' and '{'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. +func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset any potential simple key on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Decrease the flow level. + if !yaml_parser_decrease_flow_level(parser) { + return false + } + + // No simple keys after the indicators ']' and '}'. + parser.simple_key_allowed = false + + // Consume the token. + + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-ENTRY token. +func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after ','. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_FLOW_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the BLOCK-ENTRY token. +func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { + // Check if the scanner is in the block context. + if parser.flow_level == 0 { + // Check if we are allowed to start a new entry. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "block sequence entries are not allowed in this context") + } + // Add the BLOCK-SEQUENCE-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { + return false + } + } else { + // It is an error for the '-' indicator to occur in the flow context, + // but we let the Parser detect and report about it because the Parser + // is able to point to the context. + } + + // Reset any potential simple keys on the current flow level. 
+ if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '-'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the BLOCK-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the KEY token. +func yaml_parser_fetch_key(parser *yaml_parser_t) bool { + + // In the block context, additional checks are required. + if parser.flow_level == 0 { + // Check if we are allowed to start a new key (not nessesary simple). + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping keys are not allowed in this context") + } + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '?' in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the KEY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the VALUE token. +func yaml_parser_fetch_value(parser *yaml_parser_t) bool { + + simple_key := &parser.simple_keys[len(parser.simple_keys)-1] + + // Have we found a simple key? + if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok { + return false + + } else if valid { + + // Create the KEY token and insert it into the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: simple_key.mark, + end_mark: simple_key.mark, + } + yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) + + // In the block context, we may need to add the BLOCK-MAPPING-START token. + if !yaml_parser_roll_indent(parser, simple_key.mark.column, + simple_key.token_number, + yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { + return false + } + + // Remove the simple key. + simple_key.possible = false + delete(parser.simple_keys_by_tok, simple_key.token_number) + + // A simple key cannot follow another simple key. + parser.simple_key_allowed = false + + } else { + // The ':' indicator follows a complex key. + + // In the block context, extra checks are required. + if parser.flow_level == 0 { + + // Check if we are allowed to start a complex value. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping values are not allowed in this context") + } + + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Simple keys after ':' are allowed in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + } + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the VALUE token and append it to the queue. 
+ token := yaml_token_t{ + typ: yaml_VALUE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the ALIAS or ANCHOR token. +func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // An anchor or an alias could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_anchor(parser, &token, typ) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the TAG token. +func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { + // A tag could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_tag(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. +func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { + // Remove any potential simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // A simple key may follow a block scalar. + parser.simple_key_allowed = true + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_block_scalar(parser, &token, literal) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. +func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_flow_scalar(parser, &token, single) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,plain) token. +func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_plain_scalar(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Eat whitespaces and comments until the next token is found. +func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { + + // Until the next token is not found. + for { + // Allow the BOM mark to start a line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { + skip(parser) + } + + // Eat whitespaces. + // Tabs are allowed: + // - in the flow context + // - in the block context, but not at the beginning of the line or + // after '-', '?', or ':' (complex value). 
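+		// In the code below this means a tab is eaten only when flow_level > 0
+		// or when a simple key is not allowed at the current position.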
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Eat a comment until a line break. + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // If it is a line break, eat it. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + + // In the block context, a new line may start a simple key. + if parser.flow_level == 0 { + parser.simple_key_allowed = true + } + } else { + break // We have found a token. + } + } + + return true +} + +// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { + // Eat '%'. + start_mark := parser.mark + skip(parser) + + // Scan the directive name. + var name []byte + if !yaml_parser_scan_directive_name(parser, start_mark, &name) { + return false + } + + // Is it a YAML directive? + if bytes.Equal(name, []byte("YAML")) { + // Scan the VERSION directive value. + var major, minor int8 + if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { + return false + } + end_mark := parser.mark + + // Create a VERSION-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_VERSION_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + major: major, + minor: minor, + } + + // Is it a TAG directive? + } else if bytes.Equal(name, []byte("TAG")) { + // Scan the TAG directive value. + var handle, prefix []byte + if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { + return false + } + end_mark := parser.mark + + // Create a TAG-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_TAG_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + prefix: prefix, + } + + // Unknown directive. + } else { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unknown directive name") + return false + } + + // Eat the rest of the line including any comments. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + return true +} + +// Scan the directive name. 
+// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^ +// +func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { + // Consume the directive name. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + var s []byte + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the name is empty. + if len(s) == 0 { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "could not find expected directive name") + return false + } + + // Check for an blank character after the name. + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unexpected non-alphabetical character") + return false + } + *name = s + return true +} + +// Scan the value of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^ +func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the major version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { + return false + } + + // Eat '.'. + if parser.buffer[parser.buffer_pos] != '.' { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected digit or '.' character") + } + + skip(parser) + + // Consume the minor version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { + return false + } + return true +} + +const max_number_length = 2 + +// Scan the version number of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ +func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { + + // Repeat while the next character is digit. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var value, length int8 + for is_digit(parser.buffer, parser.buffer_pos) { + // Check if the number is too long. + length++ + if length > max_number_length { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "found extremely long version number") + } + value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the number was present. + if length == 0 { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected version number") + } + *number = value + return true +} + +// Scan the value of a TAG-DIRECTIVE token. +// +// Scope: +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { + var handle_value, prefix_value []byte + + // Eat whitespaces. 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a handle. + if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. + if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == yaml_ANCHOR_TOKEN { + context = "while scanning an anchor" + } + yaml_parser_set_scanner_error(parser, context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. + *token = yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + value: s, + } + + return true +} + +/* + * Scan a TAG token. + */ + +func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + skip(parser) + skip(parser) + + // Consume the tag value. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + + // Check for '>' and eat it. 
+ if parser.buffer[parser.buffer_pos] != '>' { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find the expected '>'") + return false + } + + skip(parser) + } else { + // The tag has either the '!suffix' or the '!handle!suffix' form. + + // First, try to scan a handle. + if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { + return false + } + + // Check if it is, indeed, handle. + if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { + // Scan the suffix now. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + } else { + // It wasn't a handle after all. Scan the rest of the tag. + if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { + return false + } + + // Set the handle to '!'. + handle = []byte{'!'} + + // A special case: the '!' tag. Set the handle to '' and the + // suffix to '!'. + if len(suffix) == 0 { + handle, suffix = suffix, handle + } + } + } + + // Check the character which ends the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find expected whitespace or line break") + return false + } + + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_TAG_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + suffix: suffix, + } + return true +} + +// Scan a tag handle. +func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { + // Check the initial '!' character. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] != '!' { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + + var s []byte + + // Copy the '!' character. + s = read(parser, s) + + // Copy all subsequent alphabetical and numerical characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the trailing character is '!' and copy it. + if parser.buffer[parser.buffer_pos] == '!' { + s = read(parser, s) + } else { + // It's either the '!' tag or not really a tag handle. If it's a %TAG + // directive, it's an error. If it's a tag token, it must be a part of URI. + if directive && string(s) != "!" { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + } + + *handle = s + return true +} + +// Scan a tag. +func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { + //size_t length = head ? strlen((char *)head) : 0 + var s []byte + hasTag := len(head) > 0 + + // Copy the head if needed. + // + // Note that we don't copy the leading '!' character. + if len(head) > 1 { + s = append(s, head[1:]...) + } + + // Scan the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // The set of characters that may appear in URI is as follows: + // + // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', + // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', + // '%'. 
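+	// A '%' introduces a URI-escaped octet (e.g. %20); such sequences are
+	// decoded by yaml_parser_scan_uri_escapes in the loop below.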
+ // [Go] Convert this into more reasonable logic. + for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || + parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || + parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || + parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || + parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || + parser.buffer[parser.buffer_pos] == '%' { + // Check if it is a URI-escape sequence. + if parser.buffer[parser.buffer_pos] == '%' { + if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { + return false + } + } else { + s = read(parser, s) + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + hasTag = true + } + + if !hasTag { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected tag URI") + return false + } + *uri = s + return true +} + +// Decode an URI-escape sequence corresponding to a single UTF-8 character. +func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { + + // Decode the required number of characters. + w := 1024 + for w > 0 { + // Check for a URI-escaped octet. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + + if !(parser.buffer[parser.buffer_pos] == '%' && + is_hex(parser.buffer, parser.buffer_pos+1) && + is_hex(parser.buffer, parser.buffer_pos+2)) { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find URI escaped octet") + } + + // Get the octet. + octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) + + // If it is the leading octet, determine the length of the UTF-8 sequence. + if w == 1024 { + w = width(octet) + if w == 0 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect leading UTF-8 octet") + } + } else { + // Check if the trailing octet is correct. + if octet&0xC0 != 0x80 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect trailing UTF-8 octet") + } + } + + // Copy the octet and move the pointers. + *s = append(*s, octet) + skip(parser) + skip(parser) + skip(parser) + w-- + } + return true +} + +// Scan a block scalar. +func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { + // Eat the indicator '|' or '>'. + start_mark := parser.mark + skip(parser) + + // Scan the additional block scalar indicators. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check for a chomping indicator. + var chomping, increment int + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + // Set the chomping method and eat the indicator. 
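+		// '+' (keep) preserves all trailing line breaks and '-' (strip) removes
+		// them all; without an indicator a single trailing line break is kept (clip).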
+ if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + + // Check for an indentation indicator. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_digit(parser.buffer, parser.buffer_pos) { + // Check that the indentation is greater than 0. + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + + // Get the indentation level and eat the indicator. + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + } + + } else if is_digit(parser.buffer, parser.buffer_pos) { + // Do the same as above, but in the opposite order. + + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + } + } + + // Eat whitespaces and comments to the end of the line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + end_mark := parser.mark + + // Set the indentation level if it was specified. + var indent int + if increment > 0 { + if parser.indent >= 0 { + indent = parser.indent + increment + } else { + indent = increment + } + } + + // Scan the leading line breaks and determine the indentation level if needed. + var s, leading_break, trailing_breaks []byte + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + + // Scan the block scalar content. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var leading_blank, trailing_blank bool + for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { + // We are at the beginning of a non-empty line. + + // Is it a trailing whitespace? + trailing_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Check if we need to fold the leading line break. + if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { + // Do we need to join the lines by space? + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } + } else { + s = append(s, leading_break...) + } + leading_break = leading_break[:0] + + // Append the remaining line breaks. 
+ s = append(s, trailing_breaks...) + trailing_breaks = trailing_breaks[:0] + + // Is it a leading whitespace? + leading_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Consume the current line. + for !is_breakz(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + leading_break = read_line(parser, leading_break) + + // Eat the following indentation spaces and line breaks. + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + } + + // Chomp the tail. + if chomping != -1 { + s = append(s, leading_break...) + } + if chomping == 1 { + s = append(s, trailing_breaks...) + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_LITERAL_SCALAR_STYLE, + } + if !literal { + token.style = yaml_FOLDED_SCALAR_STYLE + } + return true +} + +// Scan indentation spaces and line breaks for a block scalar. Determine the +// indentation level if needed. +func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { + *end_mark = parser.mark + + // Eat the indentation spaces and line breaks. + max_indent := 0 + for { + // Eat the indentation spaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.mark.column > max_indent { + max_indent = parser.mark.column + } + + // Check for a tab character messing the indentation. + if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { + return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found a tab character where an indentation space is expected") + } + + // Have we found a non-empty line? + if !is_break(parser.buffer, parser.buffer_pos) { + break + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + // [Go] Should really be returning breaks instead. + *breaks = read_line(parser, *breaks) + *end_mark = parser.mark + } + + // Determine the indentation level if needed. + if *indent == 0 { + *indent = max_indent + if *indent < parser.indent+1 { + *indent = parser.indent + 1 + } + if *indent < 1 { + *indent = 1 + } + } + return true +} + +// Scan a quoted scalar. +func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { + // Eat the left quote. + start_mark := parser.mark + skip(parser) + + // Consume the content of the quoted scalar. + var s, leading_break, trailing_breaks, whitespaces []byte + for { + // Check that there are no document indicators at the beginning of the line. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' 
&& + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected document indicator") + return false + } + + // Check for EOF. + if is_z(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected end of stream") + return false + } + + // Consume non-blank characters. + leading_blanks := false + for !is_blankz(parser.buffer, parser.buffer_pos) { + if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' { + // Is is an escaped single quote. + s = append(s, '\'') + skip(parser) + skip(parser) + + } else if single && parser.buffer[parser.buffer_pos] == '\'' { + // It is a right single quote. + break + } else if !single && parser.buffer[parser.buffer_pos] == '"' { + // It is a right double quote. + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) { + // It is an escaped line break. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + skip(parser) + skip_line(parser) + leading_blanks = true + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' { + // It is an escape sequence. + code_length := 0 + + // Check the escape character. + switch parser.buffer[parser.buffer_pos+1] { + case '0': + s = append(s, 0) + case 'a': + s = append(s, '\x07') + case 'b': + s = append(s, '\x08') + case 't', '\t': + s = append(s, '\x09') + case 'n': + s = append(s, '\x0A') + case 'v': + s = append(s, '\x0B') + case 'f': + s = append(s, '\x0C') + case 'r': + s = append(s, '\x0D') + case 'e': + s = append(s, '\x1B') + case ' ': + s = append(s, '\x20') + case '"': + s = append(s, '"') + case '\'': + s = append(s, '\'') + case '\\': + s = append(s, '\\') + case 'N': // NEL (#x85) + s = append(s, '\xC2') + s = append(s, '\x85') + case '_': // #xA0 + s = append(s, '\xC2') + s = append(s, '\xA0') + case 'L': // LS (#x2028) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA8') + case 'P': // PS (#x2029) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA9') + case 'x': + code_length = 2 + case 'u': + code_length = 4 + case 'U': + code_length = 8 + default: + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found unknown escape character") + return false + } + + skip(parser) + skip(parser) + + // Consume an arbitrary escape code. + if code_length > 0 { + var value int + + // Scan the character value. + if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) { + return false + } + for k := 0; k < code_length; k++ { + if !is_hex(parser.buffer, parser.buffer_pos+k) { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "did not find expected hexdecimal number") + return false + } + value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k) + } + + // Check the value and write the character. 
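+					// Surrogate code points (U+D800..U+DFFF) and values above U+10FFFF
+					// are rejected; any other code point is encoded as 1-4 UTF-8 bytes.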
+ if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found invalid Unicode character escape code") + return false + } + if value <= 0x7F { + s = append(s, byte(value)) + } else if value <= 0x7FF { + s = append(s, byte(0xC0+(value>>6))) + s = append(s, byte(0x80+(value&0x3F))) + } else if value <= 0xFFFF { + s = append(s, byte(0xE0+(value>>12))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } else { + s = append(s, byte(0xF0+(value>>18))) + s = append(s, byte(0x80+((value>>12)&0x3F))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } + + // Advance the pointer. + for k := 0; k < code_length; k++ { + skip(parser) + } + } + } else { + // It is a non-escaped non-blank character. + s = read(parser, s) + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we are at the end of the scalar. + if single { + if parser.buffer[parser.buffer_pos] == '\'' { + break + } + } else { + if parser.buffer[parser.buffer_pos] == '"' { + break + } + } + + // Consume blank characters. + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Join the whitespaces or fold line breaks. + if leading_blanks { + // Do we need to fold line breaks? + if len(leading_break) > 0 && leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Eat the right quote. + skip(parser) + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_SINGLE_QUOTED_SCALAR_STYLE, + } + if !single { + token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + return true +} + +// Scan a plain scalar. +func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { + + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + var indent = parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' 
&& + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + break + } + + // Check for a comment. + if parser.buffer[parser.buffer_pos] == '#' { + break + } + + // Consume non-blank characters. + for !is_blankz(parser.buffer, parser.buffer_pos) { + + // Check for indicators that may end a plain scalar. + if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level > 0 && + (parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}')) { + break + } + + // Check if we need to join whitespaces and breaks. + if leading_blanks || len(whitespaces) > 0 { + if leading_blanks { + // Do we need to fold line breaks? + if leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + leading_blanks = false + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Copy the character. + s = read(parser, s) + + end_mark = parser.mark + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Is it the end? + if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { + break + } + + // Consume blank characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + + // Check for tab characters that abuse indentation. + if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found a tab character that violates indentation") + return false + } + + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check indentation level. + if parser.flow_level == 0 && parser.mark.column < indent { + break + } + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_PLAIN_SCALAR_STYLE, + } + + // Note that we change the 'simple_key_allowed' flag. 
+ if leading_blanks { + parser.simple_key_allowed = true + } + return true +} diff --git a/vendor/gopkg.in/yaml.v2/sorter.go b/vendor/gopkg.in/yaml.v2/sorter.go new file mode 100644 index 000000000..4c45e660a --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/sorter.go @@ -0,0 +1,113 @@ +package yaml + +import ( + "reflect" + "unicode" +) + +type keyList []reflect.Value + +func (l keyList) Len() int { return len(l) } +func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l keyList) Less(i, j int) bool { + a := l[i] + b := l[j] + ak := a.Kind() + bk := b.Kind() + for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { + a = a.Elem() + ak = a.Kind() + } + for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { + b = b.Elem() + bk = b.Kind() + } + af, aok := keyFloat(a) + bf, bok := keyFloat(b) + if aok && bok { + if af != bf { + return af < bf + } + if ak != bk { + return ak < bk + } + return numLess(a, b) + } + if ak != reflect.String || bk != reflect.String { + return ak < bk + } + ar, br := []rune(a.String()), []rune(b.String()) + for i := 0; i < len(ar) && i < len(br); i++ { + if ar[i] == br[i] { + continue + } + al := unicode.IsLetter(ar[i]) + bl := unicode.IsLetter(br[i]) + if al && bl { + return ar[i] < br[i] + } + if al || bl { + return bl + } + var ai, bi int + var an, bn int64 + if ar[i] == '0' || br[i] == '0' { + for j := i-1; j >= 0 && unicode.IsDigit(ar[j]); j-- { + if ar[j] != '0' { + an = 1 + bn = 1 + break + } + } + } + for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { + an = an*10 + int64(ar[ai]-'0') + } + for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { + bn = bn*10 + int64(br[bi]-'0') + } + if an != bn { + return an < bn + } + if ai != bi { + return ai < bi + } + return ar[i] < br[i] + } + return len(ar) < len(br) +} + +// keyFloat returns a float value for v if it is a number/bool +// and whether it is a number/bool or not. +func keyFloat(v reflect.Value) (f float64, ok bool) { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()), true + case reflect.Float32, reflect.Float64: + return v.Float(), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return float64(v.Uint()), true + case reflect.Bool: + if v.Bool() { + return 1, true + } + return 0, true + } + return 0, false +} + +// numLess returns whether a < b. +// a and b must necessarily have the same kind. +func numLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return a.Int() < b.Int() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Bool: + return !a.Bool() && b.Bool() + } + panic("not a number") +} diff --git a/vendor/gopkg.in/yaml.v2/writerc.go b/vendor/gopkg.in/yaml.v2/writerc.go new file mode 100644 index 000000000..a2dde608c --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/writerc.go @@ -0,0 +1,26 @@ +package yaml + +// Set the writer error and return false. +func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. 
+func yaml_emitter_flush(emitter *yaml_emitter_t) bool { + if emitter.write_handler == nil { + panic("write handler not set") + } + + // Check if the buffer is empty. + if emitter.buffer_pos == 0 { + return true + } + + if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + return true +} diff --git a/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/gopkg.in/yaml.v2/yaml.go new file mode 100644 index 000000000..89650e293 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/yaml.go @@ -0,0 +1,466 @@ +// Package yaml implements YAML support for the Go language. +// +// Source code and other details for the project are available at GitHub: +// +// https://github.com/go-yaml/yaml +// +package yaml + +import ( + "errors" + "fmt" + "io" + "reflect" + "strings" + "sync" +) + +// MapSlice encodes and decodes as a YAML map. +// The order of keys is preserved when encoding and decoding. +type MapSlice []MapItem + +// MapItem is an item in a MapSlice. +type MapItem struct { + Key, Value interface{} +} + +// The Unmarshaler interface may be implemented by types to customize their +// behavior when being unmarshaled from a YAML document. The UnmarshalYAML +// method receives a function that may be called to unmarshal the original +// YAML value into a field or variable. It is safe to call the unmarshal +// function parameter more than once if necessary. +type Unmarshaler interface { + UnmarshalYAML(unmarshal func(interface{}) error) error +} + +// The Marshaler interface may be implemented by types to customize their +// behavior when being marshaled into a YAML document. The returned value +// is marshaled in place of the original value implementing Marshaler. +// +// If an error is returned by MarshalYAML, the marshaling procedure stops +// and returns with the provided error. +type Marshaler interface { + MarshalYAML() (interface{}, error) +} + +// Unmarshal decodes the first document found within the in byte slice +// and assigns decoded values into the out value. +// +// Maps and pointers (to a struct, string, int, etc) are accepted as out +// values. If an internal pointer within a struct is not initialized, +// the yaml package will initialize it if necessary for unmarshalling +// the provided data. The out parameter must not be nil. +// +// The type of the decoded values should be compatible with the respective +// values in out. If one or more values cannot be decoded due to a type +// mismatches, decoding continues partially until the end of the YAML +// content, and a *yaml.TypeError is returned with details for all +// missed values. +// +// Struct fields are only unmarshalled if they are exported (have an +// upper case first letter), and are unmarshalled using the field name +// lowercased as the default key. Custom keys may be defined via the +// "yaml" name in the field tag: the content preceding the first comma +// is used as the key, and the following comma-separated options are +// used to tweak the marshalling process (see Marshal). +// Conflicting names result in a runtime error. +// +// For example: +// +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// var t T +// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t) +// +// See the documentation of Marshal for the format of tags and a list of +// supported tag options. 
+// +func Unmarshal(in []byte, out interface{}) (err error) { + return unmarshal(in, out, false) +} + +// UnmarshalStrict is like Unmarshal except that any fields that are found +// in the data that do not have corresponding struct members, or mapping +// keys that are duplicates, will result in +// an error. +func UnmarshalStrict(in []byte, out interface{}) (err error) { + return unmarshal(in, out, true) +} + +// A Decoder reads and decodes YAML values from an input stream. +type Decoder struct { + strict bool + parser *parser +} + +// NewDecoder returns a new decoder that reads from r. +// +// The decoder introduces its own buffering and may read +// data from r beyond the YAML values requested. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{ + parser: newParserFromReader(r), + } +} + +// SetStrict sets whether strict decoding behaviour is enabled when +// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict. +func (dec *Decoder) SetStrict(strict bool) { + dec.strict = strict +} + +// Decode reads the next YAML-encoded value from its input +// and stores it in the value pointed to by v. +// +// See the documentation for Unmarshal for details about the +// conversion of YAML into a Go value. +func (dec *Decoder) Decode(v interface{}) (err error) { + d := newDecoder(dec.strict) + defer handleErr(&err) + node := dec.parser.parse() + if node == nil { + return io.EOF + } + out := reflect.ValueOf(v) + if out.Kind() == reflect.Ptr && !out.IsNil() { + out = out.Elem() + } + d.unmarshal(node, out) + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +func unmarshal(in []byte, out interface{}, strict bool) (err error) { + defer handleErr(&err) + d := newDecoder(strict) + p := newParser(in) + defer p.destroy() + node := p.parse() + if node != nil { + v := reflect.ValueOf(out) + if v.Kind() == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + d.unmarshal(node, v) + } + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +// Marshal serializes the value provided into a YAML document. The structure +// of the generated document will reflect the structure of the value itself. +// Maps and pointers (to struct, string, int, etc) are accepted as the in value. +// +// Struct fields are only marshalled if they are exported (have an upper case +// first letter), and are marshalled using the field name lowercased as the +// default key. Custom keys may be defined via the "yaml" name in the field +// tag: the content preceding the first comma is used as the key, and the +// following comma-separated options are used to tweak the marshalling process. +// Conflicting names result in a runtime error. +// +// The field tag format accepted is: +// +// `(...) yaml:"[][,[,]]" (...)` +// +// The following flags are currently supported: +// +// omitempty Only include the field if it's not set to the zero +// value for the type or to empty slices or maps. +// Zero valued structs will be omitted if all their public +// fields are zero, unless they implement an IsZero +// method (see the IsZeroer interface type), in which +// case the field will be included if that method returns true. +// +// flow Marshal using a flow style (useful for structs, +// sequences and maps). +// +// inline Inline the field, which must be a struct or a map, +// causing all of its fields or keys to be processed as if +// they were part of the outer struct. For maps, keys must +// not conflict with the yaml keys of other struct fields. 
+// +// In addition, if the key is "-", the field is ignored. +// +// For example: +// +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" +// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n" +// +func Marshal(in interface{}) (out []byte, err error) { + defer handleErr(&err) + e := newEncoder() + defer e.destroy() + e.marshalDoc("", reflect.ValueOf(in)) + e.finish() + out = e.out + return +} + +// An Encoder writes YAML values to an output stream. +type Encoder struct { + encoder *encoder +} + +// NewEncoder returns a new encoder that writes to w. +// The Encoder should be closed after use to flush all data +// to w. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + encoder: newEncoderWithWriter(w), + } +} + +// Encode writes the YAML encoding of v to the stream. +// If multiple items are encoded to the stream, the +// second and subsequent document will be preceded +// with a "---" document separator, but the first will not. +// +// See the documentation for Marshal for details about the conversion of Go +// values to YAML. +func (e *Encoder) Encode(v interface{}) (err error) { + defer handleErr(&err) + e.encoder.marshalDoc("", reflect.ValueOf(v)) + return nil +} + +// Close closes the encoder by writing any remaining data. +// It does not write a stream terminating string "...". +func (e *Encoder) Close() (err error) { + defer handleErr(&err) + e.encoder.finish() + return nil +} + +func handleErr(err *error) { + if v := recover(); v != nil { + if e, ok := v.(yamlError); ok { + *err = e.err + } else { + panic(v) + } + } +} + +type yamlError struct { + err error +} + +func fail(err error) { + panic(yamlError{err}) +} + +func failf(format string, args ...interface{}) { + panic(yamlError{fmt.Errorf("yaml: "+format, args...)}) +} + +// A TypeError is returned by Unmarshal when one or more fields in +// the YAML document cannot be properly decoded into the requested +// types. When this error is returned, the value is still +// unmarshaled partially. +type TypeError struct { + Errors []string +} + +func (e *TypeError) Error() string { + return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n ")) +} + +// -------------------------------------------------------------------------- +// Maintain a mapping of keys to structure field indexes + +// The code in this section was copied from mgo/bson. + +// structInfo holds details for the serialization of fields of +// a given struct. +type structInfo struct { + FieldsMap map[string]fieldInfo + FieldsList []fieldInfo + + // InlineMap is the number of the field in the struct that + // contains an ,inline map, or -1 if there's none. + InlineMap int +} + +type fieldInfo struct { + Key string + Num int + OmitEmpty bool + Flow bool + // Id holds the unique field identifier, so we can cheaply + // check for field duplicates without maintaining an extra map. + Id int + + // Inline holds the field index if the field is part of an inlined struct. 
+ Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" && !field.Anonymous { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("Multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct: + sinfo, err := getStructInfo(field.Type) + if err != nil { + return nil, err + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) + } + finfo.Id = len(fieldsList) + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + default: + //return nil, errors.New("Option ,inline needs a struct value or map field") + return nil, errors.New("Option ,inline needs a struct value field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "Duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + info.Id = len(fieldsList) + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{ + FieldsMap: fieldsMap, + FieldsList: fieldsList, + InlineMap: inlineMap, + } + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +// IsZeroer is used to check whether an object is zero to +// determine whether it should be omitted when marshaling +// with the omitempty flag. One notable implementation +// is time.Time. 
+type IsZeroer interface { + IsZero() bool +} + +func isZero(v reflect.Value) bool { + kind := v.Kind() + if z, ok := v.Interface().(IsZeroer); ok { + if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() { + return true + } + return z.IsZero() + } + switch kind { + case reflect.String: + return len(v.String()) == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Slice: + return v.Len() == 0 + case reflect.Map: + return v.Len() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Struct: + vt := v.Type() + for i := v.NumField() - 1; i >= 0; i-- { + if vt.Field(i).PkgPath != "" { + continue // Private field + } + if !isZero(v.Field(i)) { + return false + } + } + return true + } + return false +} diff --git a/vendor/gopkg.in/yaml.v2/yamlh.go b/vendor/gopkg.in/yaml.v2/yamlh.go new file mode 100644 index 000000000..f6a9c8e34 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/yamlh.go @@ -0,0 +1,739 @@ +package yaml + +import ( + "fmt" + "io" +) + +// The version directive data. +type yaml_version_directive_t struct { + major int8 // The major version number. + minor int8 // The minor version number. +} + +// The tag directive data. +type yaml_tag_directive_t struct { + handle []byte // The tag handle. + prefix []byte // The tag prefix. +} + +type yaml_encoding_t int + +// The stream encoding. +const ( + // Let the parser choose the encoding. + yaml_ANY_ENCODING yaml_encoding_t = iota + + yaml_UTF8_ENCODING // The default UTF-8 encoding. + yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. + yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. +) + +type yaml_break_t int + +// Line break types. +const ( + // Let the parser choose the break type. + yaml_ANY_BREAK yaml_break_t = iota + + yaml_CR_BREAK // Use CR for line breaks (Mac style). + yaml_LN_BREAK // Use LN for line breaks (Unix style). + yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). +) + +type yaml_error_type_t int + +// Many bad things could happen with the parser and emitter. +const ( + // No error is produced. + yaml_NO_ERROR yaml_error_type_t = iota + + yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. + yaml_READER_ERROR // Cannot read or decode the input stream. + yaml_SCANNER_ERROR // Cannot scan the input stream. + yaml_PARSER_ERROR // Cannot parse the input stream. + yaml_COMPOSER_ERROR // Cannot compose a YAML document. + yaml_WRITER_ERROR // Cannot write to the output stream. + yaml_EMITTER_ERROR // Cannot emit a YAML stream. +) + +// The pointer position. +type yaml_mark_t struct { + index int // The position index. + line int // The position line. + column int // The position column. +} + +// Node Styles + +type yaml_style_t int8 + +type yaml_scalar_style_t yaml_style_t + +// Scalar styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota + + yaml_PLAIN_SCALAR_STYLE // The plain scalar style. + yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. + yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. + yaml_LITERAL_SCALAR_STYLE // The literal scalar style. + yaml_FOLDED_SCALAR_STYLE // The folded scalar style. +) + +type yaml_sequence_style_t yaml_style_t + +// Sequence styles. 
+const ( + // Let the emitter choose the style. + yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota + + yaml_BLOCK_SEQUENCE_STYLE // The block sequence style. + yaml_FLOW_SEQUENCE_STYLE // The flow sequence style. +) + +type yaml_mapping_style_t yaml_style_t + +// Mapping styles. +const ( + // Let the emitter choose the style. + yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota + + yaml_BLOCK_MAPPING_STYLE // The block mapping style. + yaml_FLOW_MAPPING_STYLE // The flow mapping style. +) + +// Tokens + +type yaml_token_type_t int + +// Token types. +const ( + // An empty token. + yaml_NO_TOKEN yaml_token_type_t = iota + + yaml_STREAM_START_TOKEN // A STREAM-START token. + yaml_STREAM_END_TOKEN // A STREAM-END token. + + yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token. + yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token. + yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token. + yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token. + + yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token. + yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-SEQUENCE-END token. + yaml_BLOCK_END_TOKEN // A BLOCK-END token. + + yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token. + yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token. + yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token. + yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token. + + yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token. + yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token. + yaml_KEY_TOKEN // A KEY token. + yaml_VALUE_TOKEN // A VALUE token. + + yaml_ALIAS_TOKEN // An ALIAS token. + yaml_ANCHOR_TOKEN // An ANCHOR token. + yaml_TAG_TOKEN // A TAG token. + yaml_SCALAR_TOKEN // A SCALAR token. +) + +func (tt yaml_token_type_t) String() string { + switch tt { + case yaml_NO_TOKEN: + return "yaml_NO_TOKEN" + case yaml_STREAM_START_TOKEN: + return "yaml_STREAM_START_TOKEN" + case yaml_STREAM_END_TOKEN: + return "yaml_STREAM_END_TOKEN" + case yaml_VERSION_DIRECTIVE_TOKEN: + return "yaml_VERSION_DIRECTIVE_TOKEN" + case yaml_TAG_DIRECTIVE_TOKEN: + return "yaml_TAG_DIRECTIVE_TOKEN" + case yaml_DOCUMENT_START_TOKEN: + return "yaml_DOCUMENT_START_TOKEN" + case yaml_DOCUMENT_END_TOKEN: + return "yaml_DOCUMENT_END_TOKEN" + case yaml_BLOCK_SEQUENCE_START_TOKEN: + return "yaml_BLOCK_SEQUENCE_START_TOKEN" + case yaml_BLOCK_MAPPING_START_TOKEN: + return "yaml_BLOCK_MAPPING_START_TOKEN" + case yaml_BLOCK_END_TOKEN: + return "yaml_BLOCK_END_TOKEN" + case yaml_FLOW_SEQUENCE_START_TOKEN: + return "yaml_FLOW_SEQUENCE_START_TOKEN" + case yaml_FLOW_SEQUENCE_END_TOKEN: + return "yaml_FLOW_SEQUENCE_END_TOKEN" + case yaml_FLOW_MAPPING_START_TOKEN: + return "yaml_FLOW_MAPPING_START_TOKEN" + case yaml_FLOW_MAPPING_END_TOKEN: + return "yaml_FLOW_MAPPING_END_TOKEN" + case yaml_BLOCK_ENTRY_TOKEN: + return "yaml_BLOCK_ENTRY_TOKEN" + case yaml_FLOW_ENTRY_TOKEN: + return "yaml_FLOW_ENTRY_TOKEN" + case yaml_KEY_TOKEN: + return "yaml_KEY_TOKEN" + case yaml_VALUE_TOKEN: + return "yaml_VALUE_TOKEN" + case yaml_ALIAS_TOKEN: + return "yaml_ALIAS_TOKEN" + case yaml_ANCHOR_TOKEN: + return "yaml_ANCHOR_TOKEN" + case yaml_TAG_TOKEN: + return "yaml_TAG_TOKEN" + case yaml_SCALAR_TOKEN: + return "yaml_SCALAR_TOKEN" + } + return "" +} + +// The token structure. +type yaml_token_t struct { + // The token type. + typ yaml_token_type_t + + // The start/end of the token. + start_mark, end_mark yaml_mark_t + + // The stream encoding (for yaml_STREAM_START_TOKEN). 
+ encoding yaml_encoding_t + + // The alias/anchor/scalar value or tag/tag directive handle + // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). + value []byte + + // The tag suffix (for yaml_TAG_TOKEN). + suffix []byte + + // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). + prefix []byte + + // The scalar style (for yaml_SCALAR_TOKEN). + style yaml_scalar_style_t + + // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). + major, minor int8 +} + +// Events + +type yaml_event_type_t int8 + +// Event types. +const ( + // An empty event. + yaml_NO_EVENT yaml_event_type_t = iota + + yaml_STREAM_START_EVENT // A STREAM-START event. + yaml_STREAM_END_EVENT // A STREAM-END event. + yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. + yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. + yaml_ALIAS_EVENT // An ALIAS event. + yaml_SCALAR_EVENT // A SCALAR event. + yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. + yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. + yaml_MAPPING_START_EVENT // A MAPPING-START event. + yaml_MAPPING_END_EVENT // A MAPPING-END event. +) + +var eventStrings = []string{ + yaml_NO_EVENT: "none", + yaml_STREAM_START_EVENT: "stream start", + yaml_STREAM_END_EVENT: "stream end", + yaml_DOCUMENT_START_EVENT: "document start", + yaml_DOCUMENT_END_EVENT: "document end", + yaml_ALIAS_EVENT: "alias", + yaml_SCALAR_EVENT: "scalar", + yaml_SEQUENCE_START_EVENT: "sequence start", + yaml_SEQUENCE_END_EVENT: "sequence end", + yaml_MAPPING_START_EVENT: "mapping start", + yaml_MAPPING_END_EVENT: "mapping end", +} + +func (e yaml_event_type_t) String() string { + if e < 0 || int(e) >= len(eventStrings) { + return fmt.Sprintf("unknown event %d", e) + } + return eventStrings[e] +} + +// The event structure. +type yaml_event_t struct { + + // The event type. + typ yaml_event_type_t + + // The start and end of the event. + start_mark, end_mark yaml_mark_t + + // The document encoding (for yaml_STREAM_START_EVENT). + encoding yaml_encoding_t + + // The version directive (for yaml_DOCUMENT_START_EVENT). + version_directive *yaml_version_directive_t + + // The list of tag directives (for yaml_DOCUMENT_START_EVENT). + tag_directives []yaml_tag_directive_t + + // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). + anchor []byte + + // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + tag []byte + + // The scalar value (for yaml_SCALAR_EVENT). + value []byte + + // Is the document start/end indicator implicit, or the tag optional? + // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). + implicit bool + + // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). + quoted_implicit bool + + // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + style yaml_style_t +} + +func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } +func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } +func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } + +// Nodes + +const ( + yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. + yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. 
+ yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. + yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. + yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. + yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. + + yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. + yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. + + // Not in original libyaml. + yaml_BINARY_TAG = "tag:yaml.org,2002:binary" + yaml_MERGE_TAG = "tag:yaml.org,2002:merge" + + yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. + yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. + yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. +) + +type yaml_node_type_t int + +// Node types. +const ( + // An empty node. + yaml_NO_NODE yaml_node_type_t = iota + + yaml_SCALAR_NODE // A scalar node. + yaml_SEQUENCE_NODE // A sequence node. + yaml_MAPPING_NODE // A mapping node. +) + +// An element of a sequence node. +type yaml_node_item_t int + +// An element of a mapping node. +type yaml_node_pair_t struct { + key int // The key of the element. + value int // The value of the element. +} + +// The node structure. +type yaml_node_t struct { + typ yaml_node_type_t // The node type. + tag []byte // The node tag. + + // The node data. + + // The scalar parameters (for yaml_SCALAR_NODE). + scalar struct { + value []byte // The scalar value. + length int // The length of the scalar value. + style yaml_scalar_style_t // The scalar style. + } + + // The sequence parameters (for YAML_SEQUENCE_NODE). + sequence struct { + items_data []yaml_node_item_t // The stack of sequence items. + style yaml_sequence_style_t // The sequence style. + } + + // The mapping parameters (for yaml_MAPPING_NODE). + mapping struct { + pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). + pairs_start *yaml_node_pair_t // The beginning of the stack. + pairs_end *yaml_node_pair_t // The end of the stack. + pairs_top *yaml_node_pair_t // The top of the stack. + style yaml_mapping_style_t // The mapping style. + } + + start_mark yaml_mark_t // The beginning of the node. + end_mark yaml_mark_t // The end of the node. + +} + +// The document structure. +type yaml_document_t struct { + + // The document nodes. + nodes []yaml_node_t + + // The version directive. + version_directive *yaml_version_directive_t + + // The list of tag directives. + tag_directives_data []yaml_tag_directive_t + tag_directives_start int // The beginning of the tag directives list. + tag_directives_end int // The end of the tag directives list. + + start_implicit int // Is the document start indicator implicit? + end_implicit int // Is the document end indicator implicit? + + // The start/end of the document. + start_mark, end_mark yaml_mark_t +} + +// The prototype of a read handler. +// +// The read handler is called when the parser needs to read more bytes from the +// source. The handler should write not more than size bytes to the buffer. +// The number of written bytes should be set to the size_read variable. +// +// [in,out] data A pointer to an application data specified by +// yaml_parser_set_input(). +// [out] buffer The buffer to write the data from the source. +// [in] size The size of the buffer. +// [out] size_read The actual number of bytes read from the source. 
+// +// On success, the handler should return 1. If the handler failed, +// the returned value should be 0. On EOF, the handler should set the +// size_read to 0 and return 1. +type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) + +// This structure holds information about a potential simple key. +type yaml_simple_key_t struct { + possible bool // Is a simple key possible? + required bool // Is a simple key required? + token_number int // The number of the token. + mark yaml_mark_t // The position mark. +} + +// The states of the parser. +type yaml_parser_state_t int + +const ( + yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota + + yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document. + yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START. + yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_PARSE_BLOCK_NODE_STATE // Expect a block node. + yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence. + yaml_PARSE_FLOW_NODE_STATE // Expect a flow node. + yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence. + yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence. + yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence. + yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key. + yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value. + yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the and of an ordered mapping entry. + yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping. + yaml_PARSE_END_STATE // Expect nothing. 
+) + +func (ps yaml_parser_state_t) String() string { + switch ps { + case yaml_PARSE_STREAM_START_STATE: + return "yaml_PARSE_STREAM_START_STATE" + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_START_STATE: + return "yaml_PARSE_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return "yaml_PARSE_DOCUMENT_CONTENT_STATE" + case yaml_PARSE_DOCUMENT_END_STATE: + return "yaml_PARSE_DOCUMENT_END_STATE" + case yaml_PARSE_BLOCK_NODE_STATE: + return "yaml_PARSE_BLOCK_NODE_STATE" + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE" + case yaml_PARSE_FLOW_NODE_STATE: + return "yaml_PARSE_FLOW_NODE_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" + case yaml_PARSE_END_STATE: + return "yaml_PARSE_END_STATE" + } + return "" +} + +// This structure holds aliases data. +type yaml_alias_data_t struct { + anchor []byte // The anchor. + index int // The node id. + mark yaml_mark_t // The anchor mark. +} + +// The parser structure. +// +// All members are internal. Manage the structure using the +// yaml_parser_ family of functions. +type yaml_parser_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + + problem string // Error description. + + // The byte about which the problem occurred. + problem_offset int + problem_value int + problem_mark yaml_mark_t + + // The error context. + context string + context_mark yaml_mark_t + + // Reader stuff + + read_handler yaml_read_handler_t // Read handler. + + input_reader io.Reader // File input data. + input []byte // String input data. + input_pos int + + eof bool // EOF flag + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + unread int // The number of unread characters in the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The input encoding. 
+ + offset int // The offset of the current position (in bytes). + mark yaml_mark_t // The mark of the current position. + + // Scanner stuff + + stream_start_produced bool // Have we started to scan the input stream? + stream_end_produced bool // Have we reached the end of the input stream? + + flow_level int // The number of unclosed '[' and '{' indicators. + + tokens []yaml_token_t // The tokens queue. + tokens_head int // The head of the tokens queue. + tokens_parsed int // The number of tokens fetched from the queue. + token_available bool // Does the tokens queue contain a token ready for dequeueing. + + indent int // The current indentation level. + indents []int // The indentation levels stack. + + simple_key_allowed bool // May a simple key occur at the current position? + simple_keys []yaml_simple_key_t // The stack of simple keys. + simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number + + // Parser stuff + + state yaml_parser_state_t // The current parser state. + states []yaml_parser_state_t // The parser states stack. + marks []yaml_mark_t // The stack of marks. + tag_directives []yaml_tag_directive_t // The list of TAG directives. + + // Dumper stuff + + aliases []yaml_alias_data_t // The alias data. + + document *yaml_document_t // The currently parsed document. +} + +// Emitter Definitions + +// The prototype of a write handler. +// +// The write handler is called when the emitter needs to flush the accumulated +// characters to the output. The handler should write @a size bytes of the +// @a buffer to the output. +// +// @param[in,out] data A pointer to an application data specified by +// yaml_emitter_set_output(). +// @param[in] buffer The buffer with bytes to be written. +// @param[in] size The size of the buffer. +// +// @returns On success, the handler should return @c 1. If the handler failed, +// the returned value should be @c 0. +// +type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error + +type yaml_emitter_state_t int + +// The emitter states. +const ( + // Expect STREAM-START. + yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota + + yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. + yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. + yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. + yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. + yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. + yaml_EMIT_END_STATE // Expect nothing. +) + +// The emitter structure. +// +// All members are internal. 
Manage the structure using the @c yaml_emitter_ +// family of functions. +type yaml_emitter_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + problem string // Error description. + + // Writer stuff + + write_handler yaml_write_handler_t // Write handler. + + output_buffer *[]byte // String output data. + output_writer io.Writer // File output data. + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The stream encoding. + + // Emitter stuff + + canonical bool // If the output is in the canonical style? + best_indent int // The number of indentation spaces. + best_width int // The preferred width of the output lines. + unicode bool // Allow unescaped non-ASCII characters? + line_break yaml_break_t // The preferred line break. + + state yaml_emitter_state_t // The current emitter state. + states []yaml_emitter_state_t // The stack of states. + + events []yaml_event_t // The event queue. + events_head int // The head of the event queue. + + indents []int // The stack of indentation levels. + + tag_directives []yaml_tag_directive_t // The list of tag directives. + + indent int // The current indentation level. + + flow_level int // The current flow level. + + root_context bool // Is it the document root context? + sequence_context bool // Is it a sequence context? + mapping_context bool // Is it a mapping context? + simple_key_context bool // Is it a simple mapping key context? + + line int // The current line. + column int // The current column. + whitespace bool // If the last character was a whitespace? + indention bool // If the last character was an indentation character (' ', '-', '?', ':')? + open_ended bool // If an explicit document end is required? + + // Anchor analysis. + anchor_data struct { + anchor []byte // The anchor value. + alias bool // Is it an alias? + } + + // Tag analysis. + tag_data struct { + handle []byte // The tag handle. + suffix []byte // The tag suffix. + } + + // Scalar analysis. + scalar_data struct { + value []byte // The scalar value. + multiline bool // Does the scalar contain line breaks? + flow_plain_allowed bool // Can the scalar be expessed in the flow plain style? + block_plain_allowed bool // Can the scalar be expressed in the block plain style? + single_quoted_allowed bool // Can the scalar be expressed in the single quoted style? + block_allowed bool // Can the scalar be expressed in the literal or folded styles? + style yaml_scalar_style_t // The output style. + } + + // Dumper stuff + + opened bool // If the stream was already opened? + closed bool // If the stream was already closed? + + // The information associated with the document nodes. + anchors *struct { + references int // The number of references. + anchor int // The anchor id. + serialized bool // If the node has been emitted? + } + + last_anchor_id int // The last assigned anchor id. + + document *yaml_document_t // The currently emitted document. +} diff --git a/vendor/gopkg.in/yaml.v2/yamlprivateh.go b/vendor/gopkg.in/yaml.v2/yamlprivateh.go new file mode 100644 index 000000000..8110ce3c3 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/yamlprivateh.go @@ -0,0 +1,173 @@ +package yaml + +const ( + // The size of the input raw buffer. + input_raw_buffer_size = 512 + + // The size of the input buffer. + // It should be possible to decode the whole raw buffer. 
+ input_buffer_size = input_raw_buffer_size * 3 + + // The size of the output buffer. + output_buffer_size = 128 + + // The size of the output raw buffer. + // It should be possible to encode the whole output buffer. + output_raw_buffer_size = (output_buffer_size*2 + 2) + + // The size of other stacks and queues. + initial_stack_size = 16 + initial_queue_size = 16 + initial_string_size = 16 +) + +// Check if the character at the specified position is an alphabetical +// character, a digit, '_', or '-'. +func is_alpha(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' +} + +// Check if the character at the specified position is a digit. +func is_digit(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' +} + +// Get the value of a digit. +func as_digit(b []byte, i int) int { + return int(b[i]) - '0' +} + +// Check if the character at the specified position is a hex-digit. +func is_hex(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' +} + +// Get the value of a hex-digit. +func as_hex(b []byte, i int) int { + bi := b[i] + if bi >= 'A' && bi <= 'F' { + return int(bi) - 'A' + 10 + } + if bi >= 'a' && bi <= 'f' { + return int(bi) - 'a' + 10 + } + return int(bi) - '0' +} + +// Check if the character is ASCII. +func is_ascii(b []byte, i int) bool { + return b[i] <= 0x7F +} + +// Check if the character at the start of the buffer can be printed unescaped. +func is_printable(b []byte, i int) bool { + return ((b[i] == 0x0A) || // . == #x0A + (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E + (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF + (b[i] > 0xC2 && b[i] < 0xED) || + (b[i] == 0xED && b[i+1] < 0xA0) || + (b[i] == 0xEE) || + (b[i] == 0xEF && // #xE000 <= . <= #xFFFD + !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF + !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) +} + +// Check if the character at the specified position is NUL. +func is_z(b []byte, i int) bool { + return b[i] == 0x00 +} + +// Check if the beginning of the buffer is a BOM. +func is_bom(b []byte, i int) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +// Check if the character at the specified position is space. +func is_space(b []byte, i int) bool { + return b[i] == ' ' +} + +// Check if the character at the specified position is tab. +func is_tab(b []byte, i int) bool { + return b[i] == '\t' +} + +// Check if the character at the specified position is blank (space or tab). +func is_blank(b []byte, i int) bool { + //return is_space(b, i) || is_tab(b, i) + return b[i] == ' ' || b[i] == '\t' +} + +// Check if the character at the specified position is a line break. +func is_break(b []byte, i int) bool { + return (b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) +} + +func is_crlf(b []byte, i int) bool { + return b[i] == '\r' && b[i+1] == '\n' +} + +// Check if the character is a line break or NUL. 
+func is_breakz(b []byte, i int) bool { + //return is_break(b, i) || is_z(b, i) + return ( // is_break: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + // is_z: + b[i] == 0) +} + +// Check if the character is a line break, space, or NUL. +func is_spacez(b []byte, i int) bool { + //return is_space(b, i) || is_breakz(b, i) + return ( // is_space: + b[i] == ' ' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Check if the character is a line break, space, tab, or NUL. +func is_blankz(b []byte, i int) bool { + //return is_blank(b, i) || is_breakz(b, i) + return ( // is_blank: + b[i] == ' ' || b[i] == '\t' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Determine the width of the character. +func width(b byte) int { + // Don't replace these by a switch without first + // confirming that it is being inlined. + if b&0x80 == 0x00 { + return 1 + } + if b&0xE0 == 0xC0 { + return 2 + } + if b&0xF0 == 0xE0 { + return 3 + } + if b&0xF8 == 0xF0 { + return 4 + } + return 0 + +} diff --git a/vendor/modules.txt b/vendor/modules.txt index b880af80e..f371a97aa 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -91,10 +91,15 @@ github.com/klauspost/compress/fse github.com/klauspost/compress/gzip github.com/klauspost/compress/huff0 github.com/klauspost/compress/snappy +github.com/klauspost/compress/zlib github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd/internal/xxhash # github.com/valyala/bytebufferpool v1.0.0 github.com/valyala/bytebufferpool +# github.com/valyala/fasthttp v1.9.0 +github.com/valyala/fasthttp +github.com/valyala/fasthttp/fasthttputil +github.com/valyala/fasthttp/stackless # github.com/valyala/fastjson v1.5.0 github.com/valyala/fastjson github.com/valyala/fastjson/fastfloat @@ -239,6 +244,8 @@ google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap +# gopkg.in/yaml.v2 v2.2.8 +gopkg.in/yaml.v2 # honnef.co/go/tools v0.0.1-2019.2.3 honnef.co/go/tools/arg honnef.co/go/tools/cmd/staticcheck