vendor: make vendor-update

parent b6ed9afd6d
commit e5ebdb9b1a

go.mod (14 lines changed)
@@ -3,7 +3,7 @@ module github.com/VictoriaMetrics/VictoriaMetrics
go 1.17

require (
	cloud.google.com/go/storage v1.20.0
	cloud.google.com/go/storage v1.21.0
	github.com/VictoriaMetrics/fastcache v1.9.0

	// Do not use the original github.com/valyala/fasthttp because of issues
@@ -11,12 +11,12 @@
	github.com/VictoriaMetrics/fasthttp v1.1.0
	github.com/VictoriaMetrics/metrics v1.18.1
	github.com/VictoriaMetrics/metricsql v0.40.0
	github.com/aws/aws-sdk-go v1.42.52
	github.com/aws/aws-sdk-go v1.43.2
	github.com/cespare/xxhash/v2 v2.1.2
	github.com/cheggaaa/pb/v3 v3.0.8
	github.com/golang/snappy v0.0.4
	github.com/influxdata/influxdb v1.9.6
	github.com/klauspost/compress v1.14.2
	github.com/klauspost/compress v1.14.3
	github.com/prometheus/prometheus v1.8.2-0.20201119142752-3ad25a6dc3d9
	github.com/urfave/cli/v2 v2.3.0
	github.com/valyala/fastjson v1.6.3
@@ -27,14 +27,14 @@
	golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd
	golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8
	golang.org/x/sys v0.0.0-20220209214540-3681064d5158
	google.golang.org/api v0.68.0
	google.golang.org/api v0.69.0
	gopkg.in/yaml.v2 v2.4.0
)

require (
	cloud.google.com/go v0.100.2 // indirect
	cloud.google.com/go/compute v1.2.0 // indirect
	cloud.google.com/go/iam v0.1.1 // indirect
	cloud.google.com/go/compute v1.3.0 // indirect
	cloud.google.com/go/iam v0.2.0 // indirect
	github.com/VividCortex/ewma v1.2.0 // indirect
	github.com/beorn7/perks v1.0.1 // indirect
	github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect
@@ -68,7 +68,7 @@
	golang.org/x/text v0.3.7 // indirect
	golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
	google.golang.org/appengine v1.6.7 // indirect
	google.golang.org/genproto v0.0.0-20220211171837-173942840c17 // indirect
	google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c // indirect
	google.golang.org/grpc v1.44.0 // indirect
	google.golang.org/protobuf v1.27.1 // indirect
	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
go.sum (31 lines changed)
@@ -41,12 +41,14 @@ cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM7
cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o=
cloud.google.com/go/bigtable v1.10.1/go.mod h1:cyHeKlx6dcZCO0oSQucYdauseD8kIENGuDOJPKMCVg8=
cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow=
cloud.google.com/go/compute v1.2.0 h1:EKki8sSdvDU0OO9mAXGwPXOTOgPz2l08R0/IutDH11I=
cloud.google.com/go/compute v1.2.0/go.mod h1:xlogom/6gr8RJGBe7nT2eGsQYAFUbbv8dbC29qE3Xmw=
cloud.google.com/go/compute v1.3.0 h1:mPL/MzDDYHsh5tHRS9mhmhWlcgClCrCa6ApQCU6wnHI=
cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/iam v0.1.1 h1:4CapQyNFjiksks1/x7jsvsygFPhihslYk5GptIrlX68=
cloud.google.com/go/iam v0.1.1/go.mod h1:CKqrcnI/suGpybEHxZ7BMehL0oA4LpdyJdUlTl9jVMw=
cloud.google.com/go/iam v0.2.0 h1:Ouq6qif4mZdXkb3SiFMpxvu0JQJB1Yid9TsZ23N6hg8=
cloud.google.com/go/iam v0.2.0/go.mod h1:BCK88+tmjAwnZYfOSizmKCTSFjJHCa18t3DpdGEY13Y=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
@@ -56,8 +58,8 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
cloud.google.com/go/storage v1.20.0 h1:kv3rQ3clEQdxqokkCCgQo+bxPqcuXiROjxvnKb8Oqdk=
cloud.google.com/go/storage v1.20.0/go.mod h1:TiC1o6FxNCG8y5gB7rqCsFZCIYPMPZCO81ppOoEPLGI=
cloud.google.com/go/storage v1.21.0 h1:HwnT2u2D309SFDHQII6m18HlrCi3jAXhUMTLOWXYH14=
cloud.google.com/go/storage v1.21.0/go.mod h1:XmRlxkgPjlBONznT2dDUU/5XlpU2OjMnKuqnZI01LAA=
collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/azure-sdk-for-go v41.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
@@ -162,8 +164,8 @@ github.com/aws/aws-sdk-go v1.30.12/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZve
github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
github.com/aws/aws-sdk-go v1.35.31/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/aws/aws-sdk-go v1.40.45/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
github.com/aws/aws-sdk-go v1.42.52 h1:/+TZ46+0qu9Ph/UwjVrU3SG8OBi87uJLrLiYRNZKbHQ=
github.com/aws/aws-sdk-go v1.42.52/go.mod h1:OGr6lGMAKGlG9CVrYnWYDKIyb829c6EVBRjxqjmPepc=
github.com/aws/aws-sdk-go v1.43.2 h1:T6LuKCNu8CYXXDn3xJoldh8FbdvuVH7C9aSuLNrlht0=
github.com/aws/aws-sdk-go v1.43.2/go.mod h1:OGr6lGMAKGlG9CVrYnWYDKIyb829c6EVBRjxqjmPepc=
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
github.com/aws/aws-sdk-go-v2 v1.9.1/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4=
github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.8.1/go.mod h1:CM+19rL1+4dFWnOQKwDc7H1KwXTz+h61oUSHyhV0b3o=
@@ -658,8 +660,8 @@ github.com/klauspost/compress v1.13.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8
github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.14.2 h1:S0OHlFk/Gbon/yauFJ4FfJJF5V0fc5HbBTJazi28pRw=
github.com/klauspost/compress v1.14.2/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.14.3 h1:DQv1WP+iS4srNjibdnHtqu8JNWCDMluj5NzPnFJsnvk=
github.com/klauspost/compress v1.14.3/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg=
github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
@@ -1305,6 +1307,7 @@ golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220204135822-1c1b9b1eba6a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220209214540-3681064d5158 h1:rm+CHSpPEEW2IsXUib1ThaHIjuBVZjxNgSKmBLFfD4c=
golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -1459,8 +1462,9 @@ google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3h
google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo=
google.golang.org/api v0.64.0/go.mod h1:931CdxA8Rm4t6zqTFGSsgwbAEZ2+GMYurbndwSimebM=
google.golang.org/api v0.66.0/go.mod h1:I1dmXYpX7HGwz/ejRxwQp2qj5bFAz93HiCU1C1oYd9M=
google.golang.org/api v0.68.0 h1:9eJiHhwJKIYX6sX2fUZxQLi7pDRA/MYu8c12q6WbJik=
google.golang.org/api v0.68.0/go.mod h1:sOM8pTpwgflXRhz+oC8H2Dr+UcbMqkPPWNJo88Q7TH8=
google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g=
google.golang.org/api v0.69.0 h1:yHW5s2SFyDapr/43kYtIQmoaaFVW4baLMLwqV4auj2A=
google.golang.org/api v0.69.0/go.mod h1:boanBiw+h5c3s+tBPgEzLDRHfFLWV0qXxRHz3ws7C80=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -1543,10 +1547,13 @@ google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ6
google.golang.org/genproto v0.0.0-20211223182754-3ac035c7e7cb/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20220111164026-67b88f271998/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20220114231437-d2e6a121cae0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20220201184016-50beb8ab5c44/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20220204002441-d6cc3cc0770e/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20220211171837-173942840c17 h1:2X+CNIheCutWRyKRte8szGxrE5ggtV4U+NKAbh/oLhg=
google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20220211171837-173942840c17/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
google.golang.org/genproto v0.0.0-20220216160803-4663080d8bc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c h1:TU4rFa5APdKTq0s6B7WTsH6Xmx0Knj86s6Biz56mErE=
google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
vendor/cloud.google.com/go/iam/CHANGES.md (7 lines changed, generated, vendored)
@@ -1,5 +1,12 @@
# Changes

## [0.2.0](https://github.com/googleapis/google-cloud-go/compare/iam/v0.1.1...iam/v0.2.0) (2022-02-14)


### Features

* **iam:** add file for tracking version ([17b36ea](https://github.com/googleapis/google-cloud-go/commit/17b36ead42a96b1a01105122074e65164357519e))

### [0.1.1](https://www.github.com/googleapis/google-cloud-go/compare/iam/v0.1.0...iam/v0.1.1) (2022-01-14)
vendor/cloud.google.com/go/storage/CHANGES.md (8 lines changed, generated, vendored)
@@ -1,6 +1,14 @@
# Changes

## [1.21.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.20.0...storage/v1.21.0) (2022-02-17)


### Features

* **storage:** add better version metadata to calls ([#5507](https://github.com/googleapis/google-cloud-go/issues/5507)) ([13fe0bc](https://github.com/googleapis/google-cloud-go/commit/13fe0bc0d8acbffd46b59ab69b25449f1cbd6a88)), refs [#2749](https://github.com/googleapis/google-cloud-go/issues/2749)
* **storage:** add Writer.ChunkRetryDeadline ([#5482](https://github.com/googleapis/google-cloud-go/issues/5482)) ([498a746](https://github.com/googleapis/google-cloud-go/commit/498a746769fa43958b92af8875b927879947128e))

## [1.20.0](https://www.github.com/googleapis/google-cloud-go/compare/storage/v1.19.0...storage/v1.20.0) (2022-02-04)
vendor/cloud.google.com/go/storage/internal/apiv2/doc.go (2 lines changed, generated, vendored)
@@ -84,7 +84,7 @@ import (
type clientHookParams struct{}
type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error)

const versionClient = "20220202"
const versionClient = "20220216"

func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
	out, _ := metadata.FromOutgoingContext(ctx)
vendor/cloud.google.com/go/storage/internal/version.go (18 lines, new file, generated, vendored)
@@ -0,0 +1,18 @@
// Copyright 2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package internal

// Version is the current tagged release of the library.
const Version = "1.21.0"
vendor/cloud.google.com/go/storage/storage.go (5 lines changed, generated, vendored)
@@ -40,6 +40,7 @@ import (
	"cloud.google.com/go/internal/optional"
	"cloud.google.com/go/internal/trace"
	"cloud.google.com/go/internal/version"
	"cloud.google.com/go/storage/internal"
	gapic "cloud.google.com/go/storage/internal/apiv2"
	"github.com/googleapis/gax-go/v2"
	"golang.org/x/oauth2/google"
@@ -69,7 +70,7 @@ var (
	errMethodNotValid = fmt.Errorf("storage: HTTP method should be one of %v", reflect.ValueOf(signedURLMethods).MapKeys())
)

var userAgent = fmt.Sprintf("gcloud-golang-storage/%s", version.Repo)
var userAgent = fmt.Sprintf("gcloud-golang-storage/%s", internal.Version)

const (
	// ScopeFullControl grants permissions to manage your
@@ -91,7 +92,7 @@ const (
	defaultConnPoolSize = 4
)

var xGoogHeader = fmt.Sprintf("gl-go/%s gccl/%s", version.Go(), version.Repo)
var xGoogHeader = fmt.Sprintf("gl-go/%s gccl/%s", version.Go(), internal.Version)

func setClientHeader(headers http.Header) {
	headers.Set("x-goog-api-client", xGoogHeader)
vendor/cloud.google.com/go/storage/writer.go (21 lines changed, generated, vendored)
@@ -21,6 +21,7 @@ import (
	"fmt"
	"io"
	"sync"
	"time"
	"unicode/utf8"

	"github.com/golang/protobuf/proto"
@@ -76,6 +77,23 @@ type Writer struct {
	// ChunkSize must be set before the first Write call.
	ChunkSize int

	// ChunkRetryDeadline sets a per-chunk retry deadline for multi-chunk
	// resumable uploads.
	//
	// For uploads of larger files, the Writer will attempt to retry if the
	// request to upload a particular chunk fails with a transient error.
	// If a single chunk has been attempting to upload for longer than this
	// deadline and the request fails, it will no longer be retried, and the error
	// will be returned to the caller. This is only applicable for files which are
	// large enough to require a multi-chunk resumable upload. The default value
	// is 32s. Users may want to pick a longer deadline if they are using larger
	// values for ChunkSize or if they expect to have a slow or unreliable
	// internet connection.
	//
	// To set a deadline on the entire upload, use context timeout or
	// cancellation.
	ChunkRetryDeadline time.Duration

	// ProgressFunc can be used to monitor the progress of a large write.
	// operation. If ProgressFunc is not nil and writing requires multiple
	// calls to the underlying service (see
@@ -127,6 +145,9 @@ func (w *Writer) open() error {
	if c := attrs.ContentType; c != "" {
		mediaOpts = append(mediaOpts, googleapi.ContentType(c))
	}
	if w.ChunkRetryDeadline != 0 {
		mediaOpts = append(mediaOpts, googleapi.ChunkRetryDeadline(w.ChunkRetryDeadline))
	}

	go func() {
		defer close(w.donec)
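The writer.go hunks above add the ChunkRetryDeadline field and thread it into the upload's media options. A minimal caller-side sketch of how the new knob might be used with this storage version (bucket and object names are placeholders, error handling trimmed):

package main

import (
	"context"
	"io"
	"time"

	"cloud.google.com/go/storage"
)

// upload streams src into a GCS object using large chunks, giving each chunk
// extra time to retry transient failures before the error is surfaced.
func upload(ctx context.Context, client *storage.Client, src io.Reader) error {
	w := client.Bucket("example-bucket").Object("backups/data.bin").NewWriter(ctx)
	w.ChunkSize = 32 << 20                 // 32 MiB chunks -> multi-chunk resumable upload
	w.ChunkRetryDeadline = 2 * time.Minute // more generous than the 32s default for slow links
	if _, err := io.Copy(w, src); err != nil {
		w.Close()
		return err
	}
	return w.Close() // Close flushes the final chunk and finalizes the object
}

To bound the whole upload rather than a single chunk, a context timeout around the call is still the right tool, as the doc comment notes.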
vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go (3 lines changed, generated, vendored)
@@ -9817,6 +9817,9 @@ var awsPartition = partition{
			endpointKey{
				Region: "ap-southeast-2",
			}: endpoint{},
			endpointKey{
				Region: "ap-southeast-3",
			}: endpoint{},
			endpointKey{
				Region: "ca-central-1",
			}: endpoint{},
vendor/github.com/aws/aws-sdk-go/aws/version.go (2 lines changed, generated, vendored)
@@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"

// SDKVersion is the version of this SDK
const SDKVersion = "1.42.52"
const SDKVersion = "1.43.2"
vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go (4 lines changed, generated, vendored)
@@ -272,6 +272,9 @@ func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error)

	switch value := v.Interface().(type) {
	case string:
		if tag.Get("suppressedJSONValue") == "true" && tag.Get("location") == "header" {
			value = base64.StdEncoding.EncodeToString([]byte(value))
		}
		str = value
	case []byte:
		str = base64.StdEncoding.EncodeToString(value)
@@ -306,5 +309,6 @@ func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error)
		err := fmt.Errorf("unsupported value for param %v (%s)", v.Interface(), v.Type())
		return "", err
	}

	return str, nil
}
vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go (7 lines changed, generated, vendored)
@@ -204,6 +204,13 @@ func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) erro

	switch v.Interface().(type) {
	case *string:
		if tag.Get("suppressedJSONValue") == "true" && tag.Get("location") == "header" {
			b, err := base64.StdEncoding.DecodeString(header)
			if err != nil {
				return fmt.Errorf("failed to decode JSONValue, %v", err)
			}
			header = string(b)
		}
		v.Set(reflect.ValueOf(&header))
	case []byte:
		b, err := base64.StdEncoding.DecodeString(header)
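The build.go and unmarshal.go hunks are symmetric: a string field tagged suppressedJSONValue with location header is base64-encoded when the request is built and decoded back when the response headers are unmarshaled. A self-contained sketch of that round trip (helper names are ours, not part of the SDK):

package main

import (
	"encoding/base64"
	"fmt"
)

// encodeHeaderJSONValue mirrors the build-side behavior: raw JSON text is
// base64-encoded so it can travel safely inside an HTTP header value.
func encodeHeaderJSONValue(raw string) string {
	return base64.StdEncoding.EncodeToString([]byte(raw))
}

// decodeHeaderJSONValue mirrors the unmarshal-side behavior.
func decodeHeaderJSONValue(h string) (string, error) {
	b, err := base64.StdEncoding.DecodeString(h)
	if err != nil {
		return "", fmt.Errorf("failed to decode JSONValue, %v", err)
	}
	return string(b), nil
}

func main() {
	enc := encodeHeaderJSONValue(`{"k":"v"}`)
	dec, _ := decodeHeaderJSONValue(enc)
	fmt.Println(enc, dec) // eyJrIjoidiJ9 {"k":"v"}
}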
vendor/github.com/aws/aws-sdk-go/service/s3/api.go (2 lines changed, generated, vendored)
@@ -10535,7 +10535,7 @@ type SelectObjectContentEventStream struct {
// (e.g. http.Response.Body), that will be closed when the stream Close method
// is called.
//
//   es := NewSelectObjectContentEventStream(func(o *SelectObjectContentEventStream{
//   es := NewSelectObjectContentEventStream(func(o *SelectObjectContentEventStream){
//       es.Reader = myMockStreamReader
//       es.StreamCloser = myMockStreamCloser
//   })
vendor/github.com/klauspost/compress/README.md (8 lines changed, generated, vendored)
@@ -17,6 +17,14 @@ This package provides various compression algorithms.

# changelog

* Jan 25, 2022 (v1.14.2)
  * zstd: improve header decoder by @dsnet [#476](https://github.com/klauspost/compress/pull/476)
  * zstd: Add bigger default blocks [#469](https://github.com/klauspost/compress/pull/469)
  * zstd: Remove unused decompression buffer [#470](https://github.com/klauspost/compress/pull/470)
  * zstd: Fix logically dead code by @ningmingxiao [#472](https://github.com/klauspost/compress/pull/472)
  * flate: Improve level 7-9 [#471](https://github.com/klauspost/compress/pull/471) [#473](https://github.com/klauspost/compress/pull/473)
  * zstd: Add noasm tag for xxhash [#475](https://github.com/klauspost/compress/pull/475)

* Jan 11, 2022 (v1.14.1)
  * s2: Add stream index in [#462](https://github.com/klauspost/compress/pull/462)
  * flate: Speed and efficiency improvements in [#439](https://github.com/klauspost/compress/pull/439) [#461](https://github.com/klauspost/compress/pull/461) [#455](https://github.com/klauspost/compress/pull/455) [#452](https://github.com/klauspost/compress/pull/452) [#458](https://github.com/klauspost/compress/pull/458)
vendor/github.com/klauspost/compress/flate/fast_encoder.go (2 lines changed, generated, vendored)
@@ -179,7 +179,7 @@ func (e *fastGen) matchlen(s, t int32, src []byte) int32 {
// matchlenLong will return the match length between offsets and t in src.
// It is assumed that s > t, that t >=0 and s < len(src).
func (e *fastGen) matchlenLong(s, t int32, src []byte) int32 {
	if debugDecode {
	if debugDeflate {
		if t >= s {
			panic(fmt.Sprint("t >=s:", t, s))
		}
vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go (125 lines changed, generated, vendored)
@ -8,6 +8,7 @@ import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -24,6 +25,10 @@ const (
|
||||
codegenCodeCount = 19
|
||||
badCode = 255
|
||||
|
||||
// maxPredefinedTokens is the maximum number of tokens
|
||||
// where we check if fixed size is smaller.
|
||||
maxPredefinedTokens = 250
|
||||
|
||||
// bufferFlushSize indicates the buffer size
|
||||
// after which bytes are flushed to the writer.
|
||||
// Should preferably be a multiple of 6, since
|
||||
@ -36,8 +41,11 @@ const (
|
||||
bufferSize = bufferFlushSize + 8
|
||||
)
|
||||
|
||||
// Minimum length code that emits bits.
|
||||
const lengthExtraBitsMinCode = 8
|
||||
|
||||
// The number of extra bits needed by length code X - LENGTH_CODES_START.
|
||||
var lengthExtraBits = [32]int8{
|
||||
var lengthExtraBits = [32]uint8{
|
||||
/* 257 */ 0, 0, 0,
|
||||
/* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2,
|
||||
/* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4,
|
||||
@ -51,6 +59,9 @@ var lengthBase = [32]uint8{
|
||||
64, 80, 96, 112, 128, 160, 192, 224, 255,
|
||||
}
|
||||
|
||||
// Minimum offset code that emits bits.
|
||||
const offsetExtraBitsMinCode = 4
|
||||
|
||||
// offset code word extra bits.
|
||||
var offsetExtraBits = [32]int8{
|
||||
0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
|
||||
@ -78,10 +89,10 @@ func init() {
|
||||
|
||||
for i := range offsetCombined[:] {
|
||||
// Don't use extended window values...
|
||||
if offsetBase[i] > 0x006000 {
|
||||
if offsetExtraBits[i] == 0 || offsetBase[i] > 0x006000 {
|
||||
continue
|
||||
}
|
||||
offsetCombined[i] = uint32(offsetExtraBits[i])<<16 | (offsetBase[i])
|
||||
offsetCombined[i] = uint32(offsetExtraBits[i]) | (offsetBase[i] << 8)
|
||||
}
|
||||
}
|
||||
|
||||
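The init() change above repacks offsetCombined: instead of keeping the extra-bit count in the upper 16 bits, the new layout stores the extra-bit count in the low 8 bits and shifts offsetBase up by 8, which is how writeTokens later reads it back (offsetComb>>8 for the base, uint8(offsetComb) for the bit count). A hedged sketch of that packing and unpacking, with illustrative numbers rather than the real table values:

package main

import "fmt"

// packOffset mirrors the new offsetCombined layout: the low 8 bits hold the
// number of extra bits for the offset code, the remaining bits hold the base
// offset for that code.
func packOffset(extraBits uint8, base uint32) uint32 {
	return uint32(extraBits) | base<<8
}

func main() {
	// Example: an offset code with 6 extra bits and base offset 129
	// (illustrative values, not copied from the real tables).
	comb := packOffset(6, 129)
	extra := uint8(comb) // how many extra bits to emit
	base := comb >> 8    // base offset to subtract from the match offset
	fmt.Println(extra, base) // 6 129
}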
@ -97,7 +108,7 @@ type huffmanBitWriter struct {
|
||||
// Data waiting to be written is bytes[0:nbytes]
|
||||
// and then the low nbits of bits.
|
||||
bits uint64
|
||||
nbits uint16
|
||||
nbits uint8
|
||||
nbytes uint8
|
||||
lastHuffMan bool
|
||||
literalEncoding *huffmanEncoder
|
||||
@ -215,7 +226,7 @@ func (w *huffmanBitWriter) write(b []byte) {
|
||||
_, w.err = w.writer.Write(b)
|
||||
}
|
||||
|
||||
func (w *huffmanBitWriter) writeBits(b int32, nb uint16) {
|
||||
func (w *huffmanBitWriter) writeBits(b int32, nb uint8) {
|
||||
w.bits |= uint64(b) << (w.nbits & 63)
|
||||
w.nbits += nb
|
||||
if w.nbits >= 48 {
|
||||
@ -571,7 +582,10 @@ func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) {
|
||||
// Fixed Huffman baseline.
|
||||
var literalEncoding = fixedLiteralEncoding
|
||||
var offsetEncoding = fixedOffsetEncoding
|
||||
var size = w.fixedSize(extraBits)
|
||||
var size = math.MaxInt32
|
||||
if tokens.n < maxPredefinedTokens {
|
||||
size = w.fixedSize(extraBits)
|
||||
}
|
||||
|
||||
// Dynamic Huffman?
|
||||
var numCodegens int
|
||||
@ -672,19 +686,21 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b
|
||||
size = reuseSize
|
||||
}
|
||||
|
||||
if preSize := w.fixedSize(extraBits) + 7; usePrefs && preSize < size {
|
||||
// Check if we get a reasonable size decrease.
|
||||
if storable && ssize <= size {
|
||||
w.writeStoredHeader(len(input), eof)
|
||||
w.writeBytes(input)
|
||||
if tokens.n < maxPredefinedTokens {
|
||||
if preSize := w.fixedSize(extraBits) + 7; usePrefs && preSize < size {
|
||||
// Check if we get a reasonable size decrease.
|
||||
if storable && ssize <= size {
|
||||
w.writeStoredHeader(len(input), eof)
|
||||
w.writeBytes(input)
|
||||
return
|
||||
}
|
||||
w.writeFixedHeader(eof)
|
||||
if !sync {
|
||||
tokens.AddEOB()
|
||||
}
|
||||
w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes)
|
||||
return
|
||||
}
|
||||
w.writeFixedHeader(eof)
|
||||
if !sync {
|
||||
tokens.AddEOB()
|
||||
}
|
||||
w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes)
|
||||
return
|
||||
}
|
||||
// Check if we get a reasonable size decrease.
|
||||
if storable && ssize <= size {
|
||||
@ -717,19 +733,21 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b
|
||||
size, numCodegens = w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits)
|
||||
|
||||
// Store predefined, if we don't get a reasonable improvement.
|
||||
if preSize := w.fixedSize(extraBits); usePrefs && preSize <= size {
|
||||
// Store bytes, if we don't get an improvement.
|
||||
if storable && ssize <= preSize {
|
||||
w.writeStoredHeader(len(input), eof)
|
||||
w.writeBytes(input)
|
||||
if tokens.n < maxPredefinedTokens {
|
||||
if preSize := w.fixedSize(extraBits); usePrefs && preSize <= size {
|
||||
// Store bytes, if we don't get an improvement.
|
||||
if storable && ssize <= preSize {
|
||||
w.writeStoredHeader(len(input), eof)
|
||||
w.writeBytes(input)
|
||||
return
|
||||
}
|
||||
w.writeFixedHeader(eof)
|
||||
if !sync {
|
||||
tokens.AddEOB()
|
||||
}
|
||||
w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes)
|
||||
return
|
||||
}
|
||||
w.writeFixedHeader(eof)
|
||||
if !sync {
|
||||
tokens.AddEOB()
|
||||
}
|
||||
w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes)
|
||||
return
|
||||
}
|
||||
|
||||
if storable && ssize <= size {
|
||||
@ -833,9 +851,9 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
|
||||
bits, nbits, nbytes := w.bits, w.nbits, w.nbytes
|
||||
|
||||
for _, t := range tokens {
|
||||
if t < matchType {
|
||||
if t < 256 {
|
||||
//w.writeCode(lits[t.literal()])
|
||||
c := lits[t.literal()]
|
||||
c := lits[t]
|
||||
bits |= uint64(c.code) << (nbits & 63)
|
||||
nbits += c.len
|
||||
if nbits >= 48 {
|
||||
@ -858,12 +876,12 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
|
||||
|
||||
// Write the length
|
||||
length := t.length()
|
||||
lengthCode := lengthCode(length)
|
||||
lengthCode := lengthCode(length) & 31
|
||||
if false {
|
||||
w.writeCode(lengths[lengthCode&31])
|
||||
w.writeCode(lengths[lengthCode])
|
||||
} else {
|
||||
// inlined
|
||||
c := lengths[lengthCode&31]
|
||||
c := lengths[lengthCode]
|
||||
bits |= uint64(c.code) << (nbits & 63)
|
||||
nbits += c.len
|
||||
if nbits >= 48 {
|
||||
@ -883,10 +901,10 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
|
||||
}
|
||||
}
|
||||
|
||||
extraLengthBits := uint16(lengthExtraBits[lengthCode&31])
|
||||
if extraLengthBits > 0 {
|
||||
if lengthCode >= lengthExtraBitsMinCode {
|
||||
extraLengthBits := lengthExtraBits[lengthCode]
|
||||
//w.writeBits(extraLength, extraLengthBits)
|
||||
extraLength := int32(length - lengthBase[lengthCode&31])
|
||||
extraLength := int32(length - lengthBase[lengthCode])
|
||||
bits |= uint64(extraLength) << (nbits & 63)
|
||||
nbits += extraLengthBits
|
||||
if nbits >= 48 {
|
||||
@ -907,10 +925,9 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
|
||||
}
|
||||
// Write the offset
|
||||
offset := t.offset()
|
||||
offsetCode := offset >> 16
|
||||
offset &= matchOffsetOnlyMask
|
||||
offsetCode := (offset >> 16) & 31
|
||||
if false {
|
||||
w.writeCode(offs[offsetCode&31])
|
||||
w.writeCode(offs[offsetCode])
|
||||
} else {
|
||||
// inlined
|
||||
c := offs[offsetCode]
|
||||
@ -932,11 +949,12 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
|
||||
}
|
||||
}
|
||||
}
|
||||
offsetComb := offsetCombined[offsetCode]
|
||||
if offsetComb > 1<<16 {
|
||||
|
||||
if offsetCode >= offsetExtraBitsMinCode {
|
||||
offsetComb := offsetCombined[offsetCode]
|
||||
//w.writeBits(extraOffset, extraOffsetBits)
|
||||
bits |= uint64(offset-(offsetComb&0xffff)) << (nbits & 63)
|
||||
nbits += uint16(offsetComb >> 16)
|
||||
bits |= uint64((offset-(offsetComb>>8))&matchOffsetOnlyMask) << (nbits & 63)
|
||||
nbits += uint8(offsetComb)
|
||||
if nbits >= 48 {
|
||||
binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
|
||||
//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
|
||||
@ -1002,6 +1020,26 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
|
||||
// https://stackoverflow.com/a/25454430
|
||||
const guessHeaderSizeBits = 70 * 8
|
||||
histogram(input, w.literalFreq[:numLiterals], fill)
|
||||
ssize, storable := w.storedSize(input)
|
||||
if storable && len(input) > 1024 {
|
||||
// Quick check for incompressible content.
|
||||
abs := float64(0)
|
||||
avg := float64(len(input)) / 256
|
||||
max := float64(len(input) * 2)
|
||||
for _, v := range w.literalFreq[:256] {
|
||||
diff := float64(v) - avg
|
||||
abs += diff * diff
|
||||
if abs > max {
|
||||
break
|
||||
}
|
||||
}
|
||||
if abs < max {
|
||||
// No chance we can compress this...
|
||||
w.writeStoredHeader(len(input), eof)
|
||||
w.writeBytes(input)
|
||||
return
|
||||
}
|
||||
}
|
||||
w.literalFreq[endBlockMarker] = 1
|
||||
w.tmpLitEncoding.generate(w.literalFreq[:numLiterals], 15)
|
||||
if fill {
|
||||
@ -1019,7 +1057,6 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
|
||||
estBits += estBits >> w.logNewTablePenalty
|
||||
|
||||
// Store bytes, if we don't get a reasonable improvement.
|
||||
ssize, storable := w.storedSize(input)
|
||||
if storable && ssize <= estBits {
|
||||
w.writeStoredHeader(len(input), eof)
|
||||
w.writeBytes(input)
|
||||
|
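One writeBlockHuff hunk above adds a quick incompressibility probe: it sums the squared deviations of the byte histogram from a flat distribution, and if the sum stays below 2*len(input) it emits a stored (uncompressed) block instead of building a Huffman table. A standalone sketch of the same test (threshold shape taken from the diff; the function name is ours):

package main

import "fmt"

// looksIncompressible reports whether a byte histogram is so close to uniform
// that Huffman coding is unlikely to help, using the same comparison as the
// writeBlockHuff hunk: sum of squared deviations vs. 2*len(input).
func looksIncompressible(hist [256]int, n int) bool {
	avg := float64(n) / 256
	max := float64(n * 2)
	abs := float64(0)
	for _, v := range hist {
		d := float64(v) - avg
		abs += d * d
		if abs > max {
			return false // clearly skewed: worth trying to compress
		}
	}
	return abs < max
}

func main() {
	var flat [256]int
	for i := range flat {
		flat[i] = 16 // 4096 bytes spread evenly over all byte values
	}
	fmt.Println(looksIncompressible(flat, 4096)) // true: store, don't compress
}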
vendor/github.com/klauspost/compress/flate/huffman_code.go (40 lines changed, generated, vendored)
@ -17,7 +17,8 @@ const (
|
||||
|
||||
// hcode is a huffman code with a bit code and bit length.
|
||||
type hcode struct {
|
||||
code, len uint16
|
||||
code uint16
|
||||
len uint8
|
||||
}
|
||||
|
||||
type huffmanEncoder struct {
|
||||
@ -56,7 +57,7 @@ type levelInfo struct {
|
||||
}
|
||||
|
||||
// set sets the code and length of an hcode.
|
||||
func (h *hcode) set(code uint16, length uint16) {
|
||||
func (h *hcode) set(code uint16, length uint8) {
|
||||
h.len = length
|
||||
h.code = code
|
||||
}
|
||||
@ -80,7 +81,7 @@ func generateFixedLiteralEncoding() *huffmanEncoder {
|
||||
var ch uint16
|
||||
for ch = 0; ch < literalCount; ch++ {
|
||||
var bits uint16
|
||||
var size uint16
|
||||
var size uint8
|
||||
switch {
|
||||
case ch < 144:
|
||||
// size 8, 000110000 .. 10111111
|
||||
@ -99,7 +100,7 @@ func generateFixedLiteralEncoding() *huffmanEncoder {
|
||||
bits = ch + 192 - 280
|
||||
size = 8
|
||||
}
|
||||
codes[ch] = hcode{code: reverseBits(bits, byte(size)), len: size}
|
||||
codes[ch] = hcode{code: reverseBits(bits, size), len: size}
|
||||
}
|
||||
return h
|
||||
}
|
||||
@ -187,14 +188,19 @@ func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
|
||||
// of the level j ancestor.
|
||||
var leafCounts [maxBitsLimit][maxBitsLimit]int32
|
||||
|
||||
// Descending to only have 1 bounds check.
|
||||
l2f := int32(list[2].freq)
|
||||
l1f := int32(list[1].freq)
|
||||
l0f := int32(list[0].freq) + int32(list[1].freq)
|
||||
|
||||
for level := int32(1); level <= maxBits; level++ {
|
||||
// For every level, the first two items are the first two characters.
|
||||
// We initialize the levels as if we had already figured this out.
|
||||
levels[level] = levelInfo{
|
||||
level: level,
|
||||
lastFreq: int32(list[1].freq),
|
||||
nextCharFreq: int32(list[2].freq),
|
||||
nextPairFreq: int32(list[0].freq) + int32(list[1].freq),
|
||||
lastFreq: l1f,
|
||||
nextCharFreq: l2f,
|
||||
nextPairFreq: l0f,
|
||||
}
|
||||
leafCounts[level][level] = 2
|
||||
if level == 1 {
|
||||
@ -205,8 +211,8 @@ func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
|
||||
// We need a total of 2*n - 2 items at top level and have already generated 2.
|
||||
levels[maxBits].needed = 2*n - 4
|
||||
|
||||
level := maxBits
|
||||
for {
|
||||
level := uint32(maxBits)
|
||||
for level < 16 {
|
||||
l := &levels[level]
|
||||
if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 {
|
||||
// We've run out of both leafs and pairs.
|
||||
@ -238,7 +244,13 @@ func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
|
||||
// more values in the level below
|
||||
l.lastFreq = l.nextPairFreq
|
||||
// Take leaf counts from the lower level, except counts[level] remains the same.
|
||||
copy(leafCounts[level][:level], leafCounts[level-1][:level])
|
||||
if true {
|
||||
save := leafCounts[level][level]
|
||||
leafCounts[level] = leafCounts[level-1]
|
||||
leafCounts[level][level] = save
|
||||
} else {
|
||||
copy(leafCounts[level][:level], leafCounts[level-1][:level])
|
||||
}
|
||||
levels[l.level-1].needed = 2
|
||||
}
|
||||
|
||||
@ -296,7 +308,7 @@ func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalN
|
||||
|
||||
sortByLiteral(chunk)
|
||||
for _, node := range chunk {
|
||||
h.codes[node.literal] = hcode{code: reverseBits(code, uint8(n)), len: uint16(n)}
|
||||
h.codes[node.literal] = hcode{code: reverseBits(code, uint8(n)), len: uint8(n)}
|
||||
code++
|
||||
}
|
||||
list = list[0 : len(list)-int(bits)]
|
||||
@ -309,6 +321,7 @@ func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalN
|
||||
// maxBits The maximum number of bits to use for any literal.
|
||||
func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) {
|
||||
list := h.freqcache[:len(freq)+1]
|
||||
codes := h.codes[:len(freq)]
|
||||
// Number of non-zero literals
|
||||
count := 0
|
||||
// Set list to be the set of all non-zero literals and their frequencies
|
||||
@ -317,11 +330,10 @@ func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) {
|
||||
list[count] = literalNode{uint16(i), f}
|
||||
count++
|
||||
} else {
|
||||
list[count] = literalNode{}
|
||||
h.codes[i].len = 0
|
||||
codes[i].len = 0
|
||||
}
|
||||
}
|
||||
list[len(freq)] = literalNode{}
|
||||
list[count] = literalNode{}
|
||||
|
||||
list = list[:count]
|
||||
if count <= 2 {
|
||||
|
vendor/github.com/klauspost/compress/flate/inflate.go (222 lines changed, generated, vendored)
@ -36,6 +36,13 @@ type lengthExtra struct {
|
||||
|
||||
var decCodeToLen = [32]lengthExtra{{length: 0x0, extra: 0x0}, {length: 0x1, extra: 0x0}, {length: 0x2, extra: 0x0}, {length: 0x3, extra: 0x0}, {length: 0x4, extra: 0x0}, {length: 0x5, extra: 0x0}, {length: 0x6, extra: 0x0}, {length: 0x7, extra: 0x0}, {length: 0x8, extra: 0x1}, {length: 0xa, extra: 0x1}, {length: 0xc, extra: 0x1}, {length: 0xe, extra: 0x1}, {length: 0x10, extra: 0x2}, {length: 0x14, extra: 0x2}, {length: 0x18, extra: 0x2}, {length: 0x1c, extra: 0x2}, {length: 0x20, extra: 0x3}, {length: 0x28, extra: 0x3}, {length: 0x30, extra: 0x3}, {length: 0x38, extra: 0x3}, {length: 0x40, extra: 0x4}, {length: 0x50, extra: 0x4}, {length: 0x60, extra: 0x4}, {length: 0x70, extra: 0x4}, {length: 0x80, extra: 0x5}, {length: 0xa0, extra: 0x5}, {length: 0xc0, extra: 0x5}, {length: 0xe0, extra: 0x5}, {length: 0xff, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}}
|
||||
|
||||
var bitMask32 = [32]uint32{
|
||||
0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF,
|
||||
0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF,
|
||||
0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF,
|
||||
0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF,
|
||||
} // up to 32 bits
|
||||
|
||||
// Initialize the fixedHuffmanDecoder only once upon first use.
|
||||
var fixedOnce sync.Once
|
||||
var fixedHuffmanDecoder huffmanDecoder
|
||||
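The new bitMask32 table above simply precomputes the low-n-bits masks (1<<n - 1 for n up to 31), which the inflate paths can index instead of recomputing the mask when pulling extra length or distance bits; a tiny illustration of what such a mask extracts:

package main

import "fmt"

// lowBits returns the low n bits of b, which is exactly what bitMask32[n]
// selects when ANDed with the bit buffer.
func lowBits(b uint32, n uint) uint32 { return b & (1<<n - 1) }

func main() {
	b := uint32(0b1011_0110)
	fmt.Println(lowBits(b, 3), lowBits(b, 5)) // 6 22
}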
@ -559,221 +566,6 @@ func (f *decompressor) readHuffman() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Decode a single Huffman block from f.
|
||||
// hl and hd are the Huffman states for the lit/length values
|
||||
// and the distance values, respectively. If hd == nil, using the
|
||||
// fixed distance encoding associated with fixed Huffman blocks.
|
||||
func (f *decompressor) huffmanBlockGeneric() {
|
||||
const (
|
||||
stateInit = iota // Zero value must be stateInit
|
||||
stateDict
|
||||
)
|
||||
|
||||
switch f.stepState {
|
||||
case stateInit:
|
||||
goto readLiteral
|
||||
case stateDict:
|
||||
goto copyHistory
|
||||
}
|
||||
|
||||
readLiteral:
|
||||
// Read literal and/or (length, distance) according to RFC section 3.2.3.
|
||||
{
|
||||
var v int
|
||||
{
|
||||
// Inlined v, err := f.huffSym(f.hl)
|
||||
// Since a huffmanDecoder can be empty or be composed of a degenerate tree
|
||||
// with single element, huffSym must error on these two edge cases. In both
|
||||
// cases, the chunks slice will be 0 for the invalid sequence, leading it
|
||||
// satisfy the n == 0 check below.
|
||||
n := uint(f.hl.maxRead)
|
||||
// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
|
||||
// but is smart enough to keep local variables in registers, so use nb and b,
|
||||
// inline call to moreBits and reassign b,nb back to f on return.
|
||||
nb, b := f.nb, f.b
|
||||
for {
|
||||
for nb < n {
|
||||
c, err := f.r.ReadByte()
|
||||
if err != nil {
|
||||
f.b = b
|
||||
f.nb = nb
|
||||
f.err = noEOF(err)
|
||||
return
|
||||
}
|
||||
f.roffset++
|
||||
b |= uint32(c) << (nb & regSizeMaskUint32)
|
||||
nb += 8
|
||||
}
|
||||
chunk := f.hl.chunks[b&(huffmanNumChunks-1)]
|
||||
n = uint(chunk & huffmanCountMask)
|
||||
if n > huffmanChunkBits {
|
||||
chunk = f.hl.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&f.hl.linkMask]
|
||||
n = uint(chunk & huffmanCountMask)
|
||||
}
|
||||
if n <= nb {
|
||||
if n == 0 {
|
||||
f.b = b
|
||||
f.nb = nb
|
||||
if debugDecode {
|
||||
fmt.Println("huffsym: n==0")
|
||||
}
|
||||
f.err = CorruptInputError(f.roffset)
|
||||
return
|
||||
}
|
||||
f.b = b >> (n & regSizeMaskUint32)
|
||||
f.nb = nb - n
|
||||
v = int(chunk >> huffmanValueShift)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var n uint // number of bits extra
|
||||
var length int
|
||||
var err error
|
||||
switch {
|
||||
case v < 256:
|
||||
f.dict.writeByte(byte(v))
|
||||
if f.dict.availWrite() == 0 {
|
||||
f.toRead = f.dict.readFlush()
|
||||
f.step = (*decompressor).huffmanBlockGeneric
|
||||
f.stepState = stateInit
|
||||
return
|
||||
}
|
||||
goto readLiteral
|
||||
case v == 256:
|
||||
f.finishBlock()
|
||||
return
|
||||
// otherwise, reference to older data
|
||||
case v < 265:
|
||||
length = v - (257 - 3)
|
||||
n = 0
|
||||
case v < 269:
|
||||
length = v*2 - (265*2 - 11)
|
||||
n = 1
|
||||
case v < 273:
|
||||
length = v*4 - (269*4 - 19)
|
||||
n = 2
|
||||
case v < 277:
|
||||
length = v*8 - (273*8 - 35)
|
||||
n = 3
|
||||
case v < 281:
|
||||
length = v*16 - (277*16 - 67)
|
||||
n = 4
|
||||
case v < 285:
|
||||
length = v*32 - (281*32 - 131)
|
||||
n = 5
|
||||
case v < maxNumLit:
|
||||
length = 258
|
||||
n = 0
|
||||
default:
|
||||
if debugDecode {
|
||||
fmt.Println(v, ">= maxNumLit")
|
||||
}
|
||||
f.err = CorruptInputError(f.roffset)
|
||||
return
|
||||
}
|
||||
if n > 0 {
|
||||
for f.nb < n {
|
||||
if err = f.moreBits(); err != nil {
|
||||
if debugDecode {
|
||||
fmt.Println("morebits n>0:", err)
|
||||
}
|
||||
f.err = err
|
||||
return
|
||||
}
|
||||
}
|
||||
length += int(f.b & uint32(1<<(n&regSizeMaskUint32)-1))
|
||||
f.b >>= n & regSizeMaskUint32
|
||||
f.nb -= n
|
||||
}
|
||||
|
||||
var dist uint32
|
||||
if f.hd == nil {
|
||||
for f.nb < 5 {
|
||||
if err = f.moreBits(); err != nil {
|
||||
if debugDecode {
|
||||
fmt.Println("morebits f.nb<5:", err)
|
||||
}
|
||||
f.err = err
|
||||
return
|
||||
}
|
||||
}
|
||||
dist = uint32(bits.Reverse8(uint8(f.b & 0x1F << 3)))
|
||||
f.b >>= 5
|
||||
f.nb -= 5
|
||||
} else {
|
||||
sym, err := f.huffSym(f.hd)
|
||||
if err != nil {
|
||||
if debugDecode {
|
||||
fmt.Println("huffsym:", err)
|
||||
}
|
||||
f.err = err
|
||||
return
|
||||
}
|
||||
dist = uint32(sym)
|
||||
}
|
||||
|
||||
switch {
|
||||
case dist < 4:
|
||||
dist++
|
||||
case dist < maxNumDist:
|
||||
nb := uint(dist-2) >> 1
|
||||
// have 1 bit in bottom of dist, need nb more.
|
||||
extra := (dist & 1) << (nb & regSizeMaskUint32)
|
||||
for f.nb < nb {
|
||||
if err = f.moreBits(); err != nil {
|
||||
if debugDecode {
|
||||
fmt.Println("morebits f.nb<nb:", err)
|
||||
}
|
||||
f.err = err
|
||||
return
|
||||
}
|
||||
}
|
||||
extra |= f.b & uint32(1<<(nb&regSizeMaskUint32)-1)
|
||||
f.b >>= nb & regSizeMaskUint32
|
||||
f.nb -= nb
|
||||
dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
|
||||
default:
|
||||
if debugDecode {
|
||||
fmt.Println("dist too big:", dist, maxNumDist)
|
||||
}
|
||||
f.err = CorruptInputError(f.roffset)
|
||||
return
|
||||
}
|
||||
|
||||
// No check on length; encoding can be prescient.
|
||||
if dist > uint32(f.dict.histSize()) {
|
||||
if debugDecode {
|
||||
fmt.Println("dist > f.dict.histSize():", dist, f.dict.histSize())
|
||||
}
|
||||
f.err = CorruptInputError(f.roffset)
|
||||
return
|
||||
}
|
||||
|
||||
f.copyLen, f.copyDist = length, int(dist)
|
||||
goto copyHistory
|
||||
}
|
||||
|
||||
copyHistory:
|
||||
// Perform a backwards copy according to RFC section 3.2.3.
|
||||
{
|
||||
cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen)
|
||||
if cnt == 0 {
|
||||
cnt = f.dict.writeCopy(f.copyDist, f.copyLen)
|
||||
}
|
||||
f.copyLen -= cnt
|
||||
|
||||
if f.dict.availWrite() == 0 || f.copyLen > 0 {
|
||||
f.toRead = f.dict.readFlush()
|
||||
f.step = (*decompressor).huffmanBlockGeneric // We need to continue this work
|
||||
f.stepState = stateDict
|
||||
return
|
||||
}
|
||||
goto readLiteral
|
||||
}
|
||||
}
|
||||
|
||||
// Copy a single uncompressed data block from input to output.
|
||||
func (f *decompressor) dataBlock() {
|
||||
// Uncompressed.
|
||||
|
vendor/github.com/klauspost/compress/flate/inflate_gen.go (659 lines changed, generated, vendored)
(file diff suppressed because it is too large)
vendor/github.com/klauspost/compress/flate/level1.go (56 lines changed, generated, vendored)
@ -1,6 +1,10 @@
|
||||
package flate
|
||||
|
||||
import "fmt"
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"math/bits"
|
||||
)
|
||||
|
||||
// fastGen maintains the table for matches,
|
||||
// and the previous byte block for level 2.
|
||||
@ -116,7 +120,32 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) {
|
||||
|
||||
// Extend the 4-byte match as long as possible.
|
||||
t := candidate.offset - e.cur
|
||||
l := e.matchlenLong(s+4, t+4, src) + 4
|
||||
var l = int32(4)
|
||||
if false {
|
||||
l = e.matchlenLong(s+4, t+4, src) + 4
|
||||
} else {
|
||||
// inlined:
|
||||
a := src[s+4:]
|
||||
b := src[t+4:]
|
||||
for len(a) >= 8 {
|
||||
if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
|
||||
l += int32(bits.TrailingZeros64(diff) >> 3)
|
||||
break
|
||||
}
|
||||
l += 8
|
||||
a = a[8:]
|
||||
b = b[8:]
|
||||
}
|
||||
if len(a) < 8 {
|
||||
b = b[:len(a)]
|
||||
for i := range a {
|
||||
if a[i] != b[i] {
|
||||
break
|
||||
}
|
||||
l++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Extend backwards
|
||||
for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
|
||||
@ -129,7 +158,28 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) {
|
||||
}
|
||||
|
||||
// Save the match found
|
||||
dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
|
||||
if false {
|
||||
dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
|
||||
} else {
|
||||
// Inlined...
|
||||
xoffset := uint32(s - t - baseMatchOffset)
|
||||
xlength := l
|
||||
oc := offsetCode(xoffset)
|
||||
xoffset |= oc << 16
|
||||
for xlength > 0 {
|
||||
xl := xlength
|
||||
if xl > 258 {
|
||||
// We need to have at least baseMatchLength left over for next loop.
|
||||
xl = 258 - baseMatchLength
|
||||
}
|
||||
xlength -= xl
|
||||
xl -= baseMatchLength
|
||||
dst.extraHist[lengthCodes1[uint8(xl)]]++
|
||||
dst.offHist[oc]++
|
||||
dst.tokens[dst.n] = token(matchType | uint32(xl)<<lengthShift | xoffset)
|
||||
dst.n++
|
||||
}
|
||||
}
|
||||
s += l
|
||||
nextEmit = s
|
||||
if nextS >= s {
|
||||
|
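The first level1.go hunk above inlines the match-length loop: it XORs eight bytes at a time and uses the trailing-zero count of the difference to locate the first mismatching byte, then finishes byte by byte. A standalone sketch of that technique (function name is ours; the real code works on offsets into a shared src slice):

package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// matchLen returns the length of the common prefix of a and b, comparing
// eight bytes per iteration the way the inlined level1.go loop does.
func matchLen(a, b []byte) int {
	n := 0
	for len(a) >= 8 && len(b) >= 8 {
		if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
			// The first differing byte is given by the trailing zero bits / 8.
			return n + bits.TrailingZeros64(diff)>>3
		}
		n += 8
		a, b = a[8:], b[8:]
	}
	for i := 0; i < len(a) && i < len(b); i++ {
		if a[i] != b[i] {
			break
		}
		n++
	}
	return n
}

func main() {
	fmt.Println(matchLen([]byte("hello, gopher!"), []byte("hello, gardener"))) // 8
}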
vendor/github.com/klauspost/compress/flate/level3.go (45 lines changed, generated, vendored)
@ -5,7 +5,7 @@ import "fmt"
|
||||
// fastEncL3
|
||||
type fastEncL3 struct {
|
||||
fastGen
|
||||
table [tableSize]tableEntryPrev
|
||||
table [1 << 16]tableEntryPrev
|
||||
}
|
||||
|
||||
// Encode uses a similar algorithm to level 2, will check up to two candidates.
|
||||
@ -13,6 +13,8 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) {
|
||||
const (
|
||||
inputMargin = 8 - 1
|
||||
minNonLiteralBlockSize = 1 + 1 + inputMargin
|
||||
tableBits = 16
|
||||
tableSize = 1 << tableBits
|
||||
)
|
||||
|
||||
if debugDeflate && e.cur < 0 {
|
||||
@ -73,7 +75,7 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) {
|
||||
nextS := s
|
||||
var candidate tableEntry
|
||||
for {
|
||||
nextHash := hash(cv)
|
||||
nextHash := hash4u(cv, tableBits)
|
||||
s = nextS
|
||||
nextS = s + 1 + (s-nextEmit)>>skipLog
|
||||
if nextS > sLimit {
|
||||
@ -156,7 +158,7 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) {
|
||||
// Index first pair after match end.
|
||||
if int(t+4) < len(src) && t > 0 {
|
||||
cv := load3232(src, t)
|
||||
nextHash := hash(cv)
|
||||
nextHash := hash4u(cv, tableBits)
|
||||
e.table[nextHash] = tableEntryPrev{
|
||||
Prev: e.table[nextHash].Cur,
|
||||
Cur: tableEntry{offset: e.cur + t},
|
||||
@ -165,30 +167,31 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) {
|
||||
goto emitRemainder
|
||||
}
|
||||
|
||||
// We could immediately start working at s now, but to improve
|
||||
// compression we first update the hash table at s-3 to s.
|
||||
x := load6432(src, s-3)
|
||||
prevHash := hash(uint32(x))
|
||||
e.table[prevHash] = tableEntryPrev{
|
||||
Prev: e.table[prevHash].Cur,
|
||||
Cur: tableEntry{offset: e.cur + s - 3},
|
||||
// Store every 5th hash in-between.
|
||||
for i := s - l + 2; i < s-5; i += 5 {
|
||||
nextHash := hash4u(load3232(src, i), tableBits)
|
||||
e.table[nextHash] = tableEntryPrev{
|
||||
Prev: e.table[nextHash].Cur,
|
||||
Cur: tableEntry{offset: e.cur + i}}
|
||||
}
|
||||
x >>= 8
|
||||
prevHash = hash(uint32(x))
|
||||
// We could immediately start working at s now, but to improve
|
||||
// compression we first update the hash table at s-2 to s.
|
||||
x := load6432(src, s-2)
|
||||
prevHash := hash4u(uint32(x), tableBits)
|
||||
|
||||
e.table[prevHash] = tableEntryPrev{
|
||||
Prev: e.table[prevHash].Cur,
|
||||
Cur: tableEntry{offset: e.cur + s - 2},
|
||||
}
|
||||
x >>= 8
|
||||
prevHash = hash(uint32(x))
|
||||
prevHash = hash4u(uint32(x), tableBits)
|
||||
|
||||
e.table[prevHash] = tableEntryPrev{
|
||||
Prev: e.table[prevHash].Cur,
|
||||
Cur: tableEntry{offset: e.cur + s - 1},
|
||||
}
|
||||
x >>= 8
|
||||
currHash := hash(uint32(x))
|
||||
currHash := hash4u(uint32(x), tableBits)
|
||||
candidates := e.table[currHash]
|
||||
cv = uint32(x)
|
||||
e.table[currHash] = tableEntryPrev{
|
||||
@ -200,15 +203,15 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) {
|
||||
candidate = candidates.Cur
|
||||
minOffset := e.cur + s - (maxMatchOffset - 4)
|
||||
|
||||
if candidate.offset > minOffset && cv != load3232(src, candidate.offset-e.cur) {
|
||||
// We only check if value mismatches.
|
||||
// Offset will always be invalid in other cases.
|
||||
if candidate.offset > minOffset {
|
||||
if cv == load3232(src, candidate.offset-e.cur) {
|
||||
// Found a match...
|
||||
continue
|
||||
}
|
||||
candidate = candidates.Prev
|
||||
if candidate.offset > minOffset && cv == load3232(src, candidate.offset-e.cur) {
|
||||
offset := s - (candidate.offset - e.cur)
|
||||
if offset <= maxMatchOffset {
|
||||
continue
|
||||
}
|
||||
// Match at prev...
|
||||
continue
|
||||
}
|
||||
}
|
||||
cv = uint32(x >> 8)
|
||||
|
vendor/github.com/klauspost/compress/flate/token.go (17 lines changed, generated, vendored)
@@ -13,11 +13,10 @@ import (
)

const (
	// From top
	// 2 bits: type 0 = literal 1=EOF 2=Match 3=Unused
	// 8 bits: xlength = length - MIN_MATCH_LENGTH
	// 5 bits offsetcode
	// 16 bits xoffset = offset - MIN_OFFSET_SIZE, or literal
	// bits 0-16 xoffset = offset - MIN_OFFSET_SIZE, or literal - 16 bits
	// bits 16-22 offsetcode - 5 bits
	// bits 22-30 xlength = length - MIN_MATCH_LENGTH - 8 bits
	// bits 30-32 type 0 = literal 1=EOF 2=Match 3=Unused - 2 bits
	lengthShift = 22
	offsetMask  = 1<<lengthShift - 1
	typeMask    = 3 << 30
@@ -276,7 +275,7 @@ func (t *tokens) AddMatch(xlength uint32, xoffset uint32) {
	xoffset |= oCode << 16

	t.extraHist[lengthCodes1[uint8(xlength)]]++
	t.offHist[oCode]++
	t.offHist[oCode&31]++
	t.tokens[t.n] = token(matchType | xlength<<lengthShift | xoffset)
	t.n++
}
@@ -300,7 +299,7 @@ func (t *tokens) AddMatchLong(xlength int32, xoffset uint32) {
	xlength -= xl
	xl -= baseMatchLength
	t.extraHist[lengthCodes1[uint8(xl)]]++
	t.offHist[oc]++
	t.offHist[oc&31]++
	t.tokens[t.n] = token(matchType | uint32(xl)<<lengthShift | xoffset)
	t.n++
}
@@ -356,8 +355,8 @@ func (t token) offset() uint32 { return uint32(t) & offsetMask }

func (t token) length() uint8 { return uint8(t >> lengthShift) }

// The code is never more than 8 bits, but is returned as uint32 for convenience.
func lengthCode(len uint8) uint32 { return uint32(lengthCodes[len]) }
// Convert length to code.
func lengthCode(len uint8) uint8 { return lengthCodes[len] }

// Returns the offset code corresponding to a specific offset
func offsetCode(off uint32) uint32 {
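The reworked comment in the first token.go hunk spells out the 32-bit token layout: xoffset in bits 0-16, offset code in bits 16-22, xlength in bits 22-30, type in bits 30-32. A small sketch that packs a hypothetical match under that layout and pulls the fields back out (constants copied from the comment; the type bits are ignored here for brevity):

package main

import "fmt"

// Constants copied from the layout comment in token.go.
const (
	lengthShift = 22
	offsetMask  = 1<<lengthShift - 1
)

func main() {
	// Pack a hypothetical match: xlength 10, offset code 7, xoffset 300.
	var xlength uint32 = 10
	var oCode uint32 = 7
	var xoffset uint32 = 300
	tok := xlength<<lengthShift | oCode<<16 | xoffset

	// Unpack the fields again: raw offset, offset code, length.
	fmt.Println(tok&offsetMask&0xffff, tok>>16&31, tok>>lengthShift&0xff) // 300 7 10
}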
vendor/github.com/klauspost/compress/zstd/enc_fast.go (4 lines changed, generated, vendored)
@@ -85,7 +85,7 @@ func (e *fastEncoder) Encode(blk *blockEnc, src []byte) {
	// TEMPLATE
	const hashLog = tableBits
	// seems global, but would be nice to tweak.
	const kSearchStrength = 7
	const kSearchStrength = 6

	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := s
@@ -334,7 +334,7 @@ func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
	// TEMPLATE
	const hashLog = tableBits
	// seems global, but would be nice to tweak.
	const kSearchStrength = 8
	const kSearchStrength = 6

	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := s
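kSearchStrength feeds the encoder's skip heuristic: after a stretch of positions without a match, the search position advances by roughly (distance since last emit) >> kSearchStrength, so lowering the constant from 7/8 to 6 makes the step grow faster on hard-to-compress input. A toy model of that effect only (the exact increment in enc_fast.go differs slightly; this is not the library's code):

package main

import "fmt"

// step models the skip heuristic: the further past the last emitted literal
// without a match, the larger the jump to the next candidate position.
func step(sinceEmit, searchStrength int) int {
	return 1 + sinceEmit>>searchStrength
}

func main() {
	for _, gap := range []int{64, 512, 4096} {
		fmt.Println(gap, step(gap, 7), step(gap, 6)) // strength 6 skips ahead faster
	}
}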
vendor/google.golang.org/api/internal/gensupport/resumable.go (6 lines changed, generated, vendored)
@@ -155,6 +155,12 @@ func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err
			}
			return nil, err
		}
		// This case is very unlikely but possible only if rx.ChunkRetryDeadline is
		// set to a very small value, in which case no requests will be sent before
		// the deadline. Return an error to avoid causing a panic.
		if resp == nil {
			return nil, fmt.Errorf("upload request to %v not sent, choose larger value for ChunkRetryDealine", rx.URI)
		}
		return resp, nil
	}
	// Configure retryable error criteria.
vendor/google.golang.org/api/internal/version.go (2 lines changed, generated, vendored)
@@ -5,4 +5,4 @@
package internal

// Version is the current tagged release of the library.
const Version = "0.68.0"
const Version = "0.69.0"
vendor/modules.txt (23 lines changed, vendored)
@@ -5,15 +5,16 @@ cloud.google.com/go/internal
cloud.google.com/go/internal/optional
cloud.google.com/go/internal/trace
cloud.google.com/go/internal/version
# cloud.google.com/go/compute v1.2.0
## explicit; go 1.11
# cloud.google.com/go/compute v1.3.0
## explicit; go 1.15
cloud.google.com/go/compute/metadata
# cloud.google.com/go/iam v0.1.1
## explicit; go 1.11
# cloud.google.com/go/iam v0.2.0
## explicit; go 1.15
cloud.google.com/go/iam
# cloud.google.com/go/storage v1.20.0
## explicit; go 1.11
# cloud.google.com/go/storage v1.21.0
## explicit; go 1.15
cloud.google.com/go/storage
cloud.google.com/go/storage/internal
cloud.google.com/go/storage/internal/apiv2
# github.com/VictoriaMetrics/fastcache v1.9.0
## explicit; go 1.13
@@ -33,7 +34,7 @@ github.com/VictoriaMetrics/metricsql/binaryop
# github.com/VividCortex/ewma v1.2.0
## explicit; go 1.12
github.com/VividCortex/ewma
# github.com/aws/aws-sdk-go v1.42.52
# github.com/aws/aws-sdk-go v1.43.2
## explicit; go 1.11
github.com/aws/aws-sdk-go/aws
github.com/aws/aws-sdk-go/aws/arn
@@ -146,7 +147,7 @@ github.com/influxdata/influxdb/pkg/escape
# github.com/jmespath/go-jmespath v0.4.0
## explicit; go 1.14
github.com/jmespath/go-jmespath
# github.com/klauspost/compress v1.14.2
# github.com/klauspost/compress v1.14.3
## explicit; go 1.15
github.com/klauspost/compress
github.com/klauspost/compress/flate
@@ -304,8 +305,8 @@ golang.org/x/text/unicode/norm
## explicit; go 1.11
golang.org/x/xerrors
golang.org/x/xerrors/internal
# google.golang.org/api v0.68.0
## explicit; go 1.11
# google.golang.org/api v0.69.0
## explicit; go 1.15
google.golang.org/api/googleapi
google.golang.org/api/googleapi/transport
google.golang.org/api/iamcredentials/v1
@@ -337,7 +338,7 @@ google.golang.org/appengine/internal/socket
google.golang.org/appengine/internal/urlfetch
google.golang.org/appengine/socket
google.golang.org/appengine/urlfetch
# google.golang.org/genproto v0.0.0-20220211171837-173942840c17
# google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c
## explicit; go 1.15
google.golang.org/genproto/googleapis/api/annotations
google.golang.org/genproto/googleapis/iam/v1